diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..879b96eeb1435f804289662d11a16cf425464f6d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+models/
+sdxl_models/
+preprocess/ckpts/
diff --git a/.ipynb_checkpoints/README-checkpoint.md b/.ipynb_checkpoints/README-checkpoint.md
new file mode 100644
index 0000000000000000000000000000000000000000..f6ad979770c94501ebe522c1c13e65512c67adec
--- /dev/null
+++ b/.ipynb_checkpoints/README-checkpoint.md
@@ -0,0 +1,14 @@
+---
+title: VISTA
+emoji: 🖼
+colorFrom: purple
+colorTo: red
+sdk: gradio
+sdk_version: 5.44.0
+app_file: app.py
+pinned: false
+license: cc-by-nc-4.0
+short_description: VISTA Demo Page
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/.ipynb_checkpoints/app-checkpoint.py b/.ipynb_checkpoints/app-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..67159da8447a92d8ee3113bcf9313cfe7babd914
--- /dev/null
+++ b/.ipynb_checkpoints/app-checkpoint.py
@@ -0,0 +1,422 @@
+import os
+import tempfile
+from dataclasses import dataclass
+from functools import lru_cache
+from typing import Optional, Tuple
+
+import gradio as gr
+import torch
+
+# === imports kept exactly as used in the original code ===
+from diffusers import UniPCMultistepScheduler
+from diffusers3.models.controlnet import ControlNetModel
+from diffusers3.pipelines.controlnet.pipeline_controlnet_sd_xl_img2img_img import (
+    StableDiffusionXLControlNetImg2ImgPipeline,
+)
+from ip_adapter import IPAdapterXL
+
+import cv2
+import numpy as np
+import imageio
+from PIL import Image, ImageOps
+
+from preprocess.simple_extractor import run_simple_extractor
+
+
+# =========================
+# User environment/path settings (option A: bundled in the repo)
+# =========================
+# base/controlnet are downloaded from the HF Hub (kept as-is)
+base_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
+controlnet_path = "diffusers/controlnet-depth-sdxl-1.0"
+
+# The two paths below must be included as-is in the Space repo
+image_encoder_path = "models/image_encoder"
+ip_ckpt = "sdxl_models/ip-adapter_sdxl_vit-h.bin"
+
+DEFAULT_STEPS = 30
+DEBUG_SAVE = False
+
+# Keep the original code structure: the person image is scaled to height=1024, then H, W are used globally
+H: Optional[int] = None
+W: Optional[int] = None
+
+
+# =========================
+# Utility: check local assets
+# =========================
+def _ensure_exists(path: str, name: str):
+    if not os.path.exists(path):
+        raise FileNotFoundError(f"{name} not found: {path}")
+
+
+def check_local_assets():
+    _ensure_exists(image_encoder_path, "image_encoder_path")
+    _ensure_exists(ip_ckpt, "ip_ckpt")
+
+
+# =========================
+# Lazy loading: pipe/controlnet are loaded only once
+# =========================
+@lru_cache(maxsize=1)
+def get_pipe_and_device() -> Tuple[StableDiffusionXLControlNetImg2ImgPipeline, str, torch.dtype]:
+    check_local_assets()
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    dtype = torch.float16 if device == "cuda" else torch.float32
+
+    cn_kwargs = dict(
+        torch_dtype=dtype,
+        use_safetensors=True,
+    )
+    if dtype == torch.float16:
+        cn_kwargs["variant"] = "fp16"
+
+    controlnet = ControlNetModel.from_pretrained(controlnet_path, **cn_kwargs)
+    pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+        base_model_path,
+        controlnet=controlnet,
+        use_safetensors=True,
+        torch_dtype=dtype,
+        add_watermarker=False,
+    ).to(device)
+
+    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+    pipe.enable_attention_slicing()
+
+    # Use xformers if available; otherwise silently fall back
+    try:
+        pipe.enable_xformers_memory_efficient_attention()
+    except Exception:
+        pass
+
+    return pipe, device, dtype
+
+
+# =========================
+# Original code (functions) - only the required minimum, included as-is
+# =========================
+@dataclass
+class Paths:
+    person_path: str
+    depth_path: str  # actually a sketch/guide image (kept from the original code)
+    style_path: str
+    output_path: str
+
+
+def _imread_or_raise(path: str, flag=cv2.IMREAD_COLOR):
+    img = cv2.imread(path, flag)
+    if img is None:
+        raise FileNotFoundError(f"cv2.imread failed: {path} (exists={os.path.exists(path)})")
+    return img
+
+
+def compute_hw_from_person(person_path: str):
+    """
+    Based on the original person image:
+      - scale so that the height is exactly 1024
+      - keep the aspect ratio
+      => H=1024, W=round(orig_w * (1024/orig_h))
+
+    Safety net: if W exceeds 1024, cap it at 1024 (prevents negative padding)
+    """
+    img = cv2.imread(person_path)
+    if img is None:
+        raise FileNotFoundError(f"cv2.imread failed: {person_path} (exists={os.path.exists(person_path)})")
+
+    orig_h, orig_w = img.shape[:2]
+    target_h = 1024
+    scale = target_h / float(orig_h)
+    target_w = int(round(orig_w * scale))
+
+    if target_w > 1024:
+        target_w = 1024  # demo stability first (prevents negative padding)
+
+    return target_h, target_w
+
+
+def invert_sketch_area(sketch_pil: Image.Image) -> Image.Image:
+    return ImageOps.invert(sketch_pil.convert("L")).convert("RGB")
+
+
+def fill_sketch_from_image_path_to_pil(image_path: str) -> Image.Image:
+    global H, W
+    if H is None or W is None:
+        raise RuntimeError("Global H/W not set. Call run_one() first.")
+
+    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
+    if img is None:
+        raise ValueError(f"Could not load image: {image_path}")
+
+    img = cv2.resize(img, (W, H), interpolation=cv2.INTER_NEAREST)
+
+    # Assumes white = background / black = lines; adjust the threshold if needed
+    threshold = 127
+    _, binary = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY_INV)
+
+    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    filled = np.zeros_like(binary)
+    cv2.drawContours(filled, contours, -1, 255, thickness=cv2.FILLED)
+
+    filled_rgb = cv2.cvtColor(filled, cv2.COLOR_GRAY2RGB)
+    return Image.fromarray(filled_rgb)
+
+
+def merge_white_regions_or(img1: Image.Image, img2: Image.Image) -> Image.Image:
+    """
+    Merge the white (255) regions of the two images with an OR (keeps the intent of the original code)
+    """
+    a = np.array(img1.convert("RGB"), dtype=np.uint8)
+    b = np.array(img2.convert("RGB"), dtype=np.uint8)
+
+    white_a = np.all(a == 255, axis=-1)
+    white_b = np.all(b == 255, axis=-1)
+    out = a.copy()
+    out[white_b] = 255
+    out[white_a] = 255
+    return Image.fromarray(out)
+
+
+def preprocess_mask(mask_img: Image.Image) -> Image.Image:
+    """
+    Mask preprocessing: convert to L and apply a threshold (bare minimum)
+    """
+    m = np.array(mask_img.convert("L"), dtype=np.uint8)
+    # Handles both white and black mask conventions: simple threshold
+    _, m = cv2.threshold(m, 127, 255, cv2.THRESH_BINARY)
+    return Image.fromarray(m).convert("RGB")
+
+
+def make_depth(depth_path: str) -> Image.Image:
+    """
+    The 'depth' in the original code is actually the logic that builds a sketch-based guide image.
+    """
+    global H, W
+    if H is None or W is None:
+        raise RuntimeError("Global H/W not set. Call run_one() first.")
+
+    depth_img = _imread_or_raise(depth_path, 0)
+    inverted_depth = cv2.bitwise_not(depth_img)
+    contours, _ = cv2.findContours(inverted_depth, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+    filled_depth = inverted_depth.copy()
+    cv2.drawContours(filled_depth, contours, -1, (255), thickness=cv2.FILLED)
+
+    filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA)
+    filled_depth_rgb = cv2.cvtColor(filled_depth, cv2.COLOR_GRAY2RGB)
+
+    # Fit to width=1024 by padding (or cropping)
+    target_width = 1024
+    cur_w = filled_depth_rgb.shape[1]
+    if cur_w < target_width:
+        padding = (target_width - cur_w) // 2
+        filled_depth_rgb = cv2.copyMakeBorder(
+            filled_depth_rgb, 0, 0, padding, padding,
+            borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]
+        )
+    elif cur_w > target_width:
+        left = (cur_w - target_width) // 2
+        filled_depth_rgb = filled_depth_rgb[:, left:left + target_width]
+
+    return Image.fromarray(filled_depth_rgb)
+
+
+def center_crop_lr_to_768x1024(arr: np.ndarray) -> np.ndarray:
+    """
+    Center-crop left/right to width=768, height=1024 (keeps the intent of the original code)
+    """
+    target_h, target_w = 1024, 768
+    h, w = arr.shape[:2]
+    if h != target_h:
+        # Safety: the height is expected to be 1024, but resize if it happens to differ
+        arr = cv2.resize(arr, (w, target_h), interpolation=cv2.INTER_AREA)
+        h, w = arr.shape[:2]
+    if w < target_w:
+        # Pad if the image is too narrow
+        pad = (target_w - w) // 2
+        arr = cv2.copyMakeBorder(arr, 0, 0, pad, pad, cv2.BORDER_CONSTANT, value=[255, 255, 255])
+        w = arr.shape[1]
+    left = (w - target_w) // 2
+    return arr[:, left:left + target_w]
+
+
+def save_cropped(imgs, out_path: str):
+    np_imgs = [np.asarray(im) for im in imgs]
+    cropped = [center_crop_lr_to_768x1024(x) for x in np_imgs]
+    out = np.concatenate(cropped, axis=1)
+    os.makedirs(os.path.dirname(out_path), exist_ok=True)
+    imageio.imsave(out_path, out)
+
+
+def run_one(paths: Paths, prompt: str, steps: int = DEFAULT_STEPS):
+    """
+    Keeps the structure of the original inference function as-is; only the pipe is now
+    fetched from the lazy loader, so that it runs reliably on Spaces.
+ """ + global H, W + + pipe, device, _dtype = get_pipe_and_device() + + # ์ „์—ญ H/W ์„ธํŒ… + H, W = compute_hw_from_person(paths.person_path) + + # ===== parsing/segmentation(๋„ค ์ฝ”๋“œ ํ๋ฆ„ ์œ ์ง€) ===== + res = run_simple_extractor( + person_path=paths.person_path, + category="Upper-clothes", + ) + parsing_img = res["images"][0] if res.get("images") else None + if parsing_img is None: + raise RuntimeError("run_simple_extractor returned no parsing images.") + + # sketch(=depth_path) ๋ฐ˜์˜ํ•ด์„œ mask ๋งŒ๋“ค๊ธฐ + sketch_area = fill_sketch_from_image_path_to_pil(paths.depth_path) + sketch_area_inv = invert_sketch_area(sketch_area) + merged_img = merge_white_regions_or(parsing_img, sketch_area_inv) + mask_pil = preprocess_mask(merged_img) + + # control image(=depth_map) ์ƒ์„ฑ + depth_map = make_depth(paths.depth_path) + + # ===== person/garment ์ด๋ฏธ์ง€ ์ „์ฒ˜๋ฆฌ(๋„ค ์ฝ”๋“œ ํ๋ฆ„ ์œ ์ง€: width=1024 ๊ธฐ์ค€ padding/crop) ===== + person_bgr = _imread_or_raise(paths.person_path) + person_bgr = cv2.resize(person_bgr, (W, H), interpolation=cv2.INTER_AREA) + + target_width = 1024 + cur_w = person_bgr.shape[1] + if cur_w < target_width: + padding = (target_width - cur_w) // 2 + padded_person = cv2.copyMakeBorder( + person_bgr, 0, 0, padding, padding, + borderType=cv2.BORDER_CONSTANT, value=[255, 255, 255] + ) + elif cur_w > target_width: + left = (cur_w - target_width) // 2 + padded_person = person_bgr[:, left:left + target_width] + else: + padded_person = person_bgr + + person_pil = Image.fromarray(cv2.cvtColor(padded_person, cv2.COLOR_BGR2RGB)) + + # garment ์ด๋ฏธ์ง€/๋งˆ์Šคํฌ๋Š” parsing_img ๊ธฐ๋ฐ˜(๋„ค ์ฝ”๋“œ ํ๋ฆ„ ๋‹จ์ˆœํ™”: ๋™์ผ ์‚ฌ์ด์ฆˆ๋กœ ๋งž์ถค) + garment_rgb = np.array(person_pil.convert("RGB"), dtype=np.uint8) + garment_pil = Image.fromarray(garment_rgb) + + garment_mask_bgr = np.array(parsing_img.convert("L"), dtype=np.uint8) + garment_mask_bgr = cv2.resize(garment_mask_bgr, (W, H), interpolation=cv2.INTER_AREA) + garment_mask_rgb = cv2.cvtColor(garment_mask_bgr, cv2.COLOR_GRAY2RGB) + + # padding/crop ๋™์ผ ์ ์šฉ + cur_w2 = garment_mask_rgb.shape[1] + if cur_w2 < target_width: + padding2 = (target_width - cur_w2) // 2 + garment_mask_rgb = cv2.copyMakeBorder( + garment_mask_rgb, 0, 0, padding2, padding2, + borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0] + ) + elif cur_w2 > target_width: + left2 = (cur_w2 - target_width) // 2 + garment_mask_rgb = garment_mask_rgb[:, left2:left2 + target_width] + + garment_mask_pil = Image.fromarray(garment_mask_rgb) + + # ===== IPAdapterXL ํ˜ธ์ถœ(๋„ค ์ฝ”๋“œ ๊ตฌ์กฐ ์œ ์ง€) ===== + ip_model = IPAdapterXL( + pipe, + image_encoder_path, + ip_ckpt, + device, + mask_pil, + person_pil, + content_scale=0.3, + style_scale=0.5, + garment_images=garment_pil, + garment_mask=garment_mask_pil, + ) + + style_img = Image.open(paths.style_path).convert("RGB") + + with torch.inference_mode(): + images = ip_model.generate( + pil_image=style_img, + image=person_pil, + control_image=depth_map, + strength=1.0, + num_samples=1, + num_inference_steps=int(steps), + shape_prompt="", + prompt=prompt or "", + num=0, + scale=None, + controlnet_conditioning_scale=0.7, + guidance_scale=7.5, + ) + + save_cropped(images, paths.output_path) + + +# ========================= +# Gradio UI +# ========================= +def set_seed(seed: int): + if seed is None or seed < 0: + return + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + + +def infer_web(person_fp, sketch_fp, style_fp, prompt, steps, seed, debug_save): + 
+    global DEBUG_SAVE
+    DEBUG_SAVE = bool(debug_save)
+
+    if person_fp is None or sketch_fp is None or style_fp is None:
+        raise gr.Error("Please upload all three images: person / sketch(guide) / style.")
+
+    set_seed(int(seed) if seed is not None else -1)
+
+    tmp_dir = tempfile.mkdtemp(prefix="feat_demo_")
+    out_path = os.path.join(tmp_dir, "result.png")
+
+    paths = Paths(
+        person_path=person_fp,
+        depth_path=sketch_fp,
+        style_path=style_fp,
+        output_path=out_path,
+    )
+
+    # The lazy load is triggered here
+    run_one(paths, prompt=prompt, steps=int(steps))
+
+    out_img = Image.open(out_path).convert("RGB")
+    return out_img, out_path
+
+
+with gr.Blocks(title="FEAT Demo (HF Spaces)") as demo:
+    gr.Markdown("## FEAT Demo\nGenerates a result from person / sketch(guide) / style inputs.")
+
+    with gr.Row():
+        person_in = gr.Image(label="Person Image", type="filepath")
+        sketch_in = gr.Image(label="Sketch / Guide Image (depth_path)", type="filepath")
+        style_in = gr.Image(label="Style Image", type="filepath")
+
+    with gr.Row():
+        prompt_in = gr.Textbox(label="Prompt", value="upper garment", lines=2)
+        steps_in = gr.Slider(1, 80, value=DEFAULT_STEPS, step=1, label="Steps")
+
+    with gr.Row():
+        seed_in = gr.Number(label="Seed (-1 = random)", value=-1, precision=0)
+        debug_in = gr.Checkbox(label="Debug Save (slow)", value=False)
+
+    run_btn = gr.Button("Run")
+    out_img = gr.Image(label="Output (stitched/cropped)", type="pil")
+    out_file = gr.File(label="Download result.png")
+
+    run_btn.click(
+        fn=infer_web,
+        inputs=[person_in, sketch_in, style_in, prompt_in, steps_in, seed_in, debug_in],
+        outputs=[out_img, out_file],
+    )
+
+# Recommended on Spaces: queue() for stability/concurrency
+demo.queue()
diff --git a/.ipynb_checkpoints/requirements-checkpoint.txt b/.ipynb_checkpoints/requirements-checkpoint.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5db52c46c8048db2a9e6561e44104dd98f521995
--- /dev/null
+++ b/.ipynb_checkpoints/requirements-checkpoint.txt
@@ -0,0 +1,20 @@
+gradio>=4.0,<5.0
+
+# core
+numpy
+Pillow
+imageio
+
+# your pinned libs (safe)
+diffusers==0.32.2
+transformers==4.46.3
+
+# opencv (spaces friendly)
+opencv-python-headless==4.10.0.84
+
+# torch: pinning +cu121 often breaks in the Spaces environment, so a version range is recommended
+torch>=2.3,<2.4
+
+# commonly needed (by diffusers/transformers)
+accelerate
+safetensors
\ No newline at end of file
diff --git a/diffusers3/Untitled.ipynb b/diffusers3/Untitled.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..71f1f883ccd8f287c4f406b12c042475be22a07a
--- /dev/null
+++ b/diffusers3/Untitled.ipynb
@@ -0,0 +1,58 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "19479786-a4e0-4ec4-a1a6-2d9b1259c6d1",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "ename": "ImportError",
+     "evalue": "attempted relative import beyond top-level package",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mpipelines\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcontrolnet\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpipeline_controlnet_sd_xl\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m StableDiffusionXLControlNetPipeline\n",
+      "File 
\u001b[0;32m~/data/diffusers/src/diffusers/pipelines/__init__.py:3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m TYPE_CHECKING\n\u001b[0;32m----> 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m (\n\u001b[1;32m 4\u001b[0m DIFFUSERS_SLOW_IMPORT,\n\u001b[1;32m 5\u001b[0m OptionalDependencyNotAvailable,\n\u001b[1;32m 6\u001b[0m _LazyModule,\n\u001b[1;32m 7\u001b[0m get_objects_from_module,\n\u001b[1;32m 8\u001b[0m is_flax_available,\n\u001b[1;32m 9\u001b[0m is_k_diffusion_available,\n\u001b[1;32m 10\u001b[0m is_librosa_available,\n\u001b[1;32m 11\u001b[0m is_note_seq_available,\n\u001b[1;32m 12\u001b[0m is_onnx_available,\n\u001b[1;32m 13\u001b[0m is_sentencepiece_available,\n\u001b[1;32m 14\u001b[0m is_torch_available,\n\u001b[1;32m 15\u001b[0m is_torch_npu_available,\n\u001b[1;32m 16\u001b[0m is_transformers_available,\n\u001b[1;32m 17\u001b[0m )\n\u001b[1;32m 20\u001b[0m \u001b[38;5;66;03m# These modules contain pipelines from multiple libraries/frameworks\u001b[39;00m\n\u001b[1;32m 21\u001b[0m _dummy_objects \u001b[38;5;241m=\u001b[39m {}\n", + "\u001b[0;31mImportError\u001b[0m: attempted relative import beyond top-level package" + ] + } + ], + "source": [ + "from pipelines.controlnet.pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c6ff172-beff-470f-aabd-12440d1333b0", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/__init__.py b/diffusers3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b505b6a1f3adc69dc0aba30a0cb8e187896bcea --- /dev/null +++ b/diffusers3/__init__.py @@ -0,0 +1,934 @@ +__version__ = "0.31.0.dev0" + +from typing import TYPE_CHECKING + +from .utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_k_diffusion_available, + is_librosa_available, + is_note_seq_available, + is_onnx_available, + is_scipy_available, + is_sentencepiece_available, + is_torch_available, + is_torchsde_available, + is_transformers_available, +) + + +# Lazy Import based on +# https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py + +# When adding a new object to this init, please add it to `_import_structure`. The `_import_structure` is a dictionary submodule to list of object names, +# and is used to defer the actual importing for when the objects are requested. +# This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends). 
+ +_import_structure = { + "configuration_utils": ["ConfigMixin"], + "loaders": ["FromOriginalModelMixin"], + "models": [], + "pipelines": [], + "schedulers": [], + "utils": [ + "OptionalDependencyNotAvailable", + "is_flax_available", + "is_inflect_available", + "is_invisible_watermark_available", + "is_k_diffusion_available", + "is_k_diffusion_version", + "is_librosa_available", + "is_note_seq_available", + "is_onnx_available", + "is_scipy_available", + "is_torch_available", + "is_torchsde_available", + "is_transformers_available", + "is_transformers_version", + "is_unidecode_available", + "logging", + ], +} + +try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_onnx_objects # noqa F403 + + _import_structure["utils.dummy_onnx_objects"] = [ + name for name in dir(dummy_onnx_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["OnnxRuntimeModel"]) + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_pt_objects # noqa F403 + + _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")] + +else: + _import_structure["models"].extend( + [ + "AsymmetricAutoencoderKL", + "AuraFlowTransformer2DModel", + "AutoencoderKL", + "AutoencoderKLCogVideoX", + "AutoencoderKLTemporalDecoder", + "AutoencoderOobleck", + "AutoencoderTiny", + "CogVideoXTransformer3DModel", + "ConsistencyDecoderVAE", + "ControlNetModel", + "ControlNetXSAdapter", + "DiTTransformer2DModel", + "FluxControlNetModel", + "FluxMultiControlNetModel", + "FluxTransformer2DModel", + "HunyuanDiT2DControlNetModel", + "HunyuanDiT2DModel", + "HunyuanDiT2DMultiControlNetModel", + "I2VGenXLUNet", + "Kandinsky3UNet", + "LatteTransformer3DModel", + "LuminaNextDiT2DModel", + "ModelMixin", + "MotionAdapter", + "MultiAdapter", + "PixArtTransformer2DModel", + "PriorTransformer", + "SD3ControlNetModel", + "SD3MultiControlNetModel", + "SD3Transformer2DModel", + "SparseControlNetModel", + "StableAudioDiTModel", + "StableCascadeUNet", + "T2IAdapter", + "T5FilmDecoder", + "Transformer2DModel", + "UNet1DModel", + "UNet2DConditionModel", + "UNet2DModel", + "UNet3DConditionModel", + "UNetControlNetXSModel", + "UNetMotionModel", + "UNetSpatioTemporalConditionModel", + "UVit2DModel", + "VQModel", + ] + ) + + _import_structure["optimization"] = [ + "get_constant_schedule", + "get_constant_schedule_with_warmup", + "get_cosine_schedule_with_warmup", + "get_cosine_with_hard_restarts_schedule_with_warmup", + "get_linear_schedule_with_warmup", + "get_polynomial_decay_schedule_with_warmup", + "get_scheduler", + ] + _import_structure["pipelines"].extend( + [ + "AudioPipelineOutput", + "AutoPipelineForImage2Image", + "AutoPipelineForInpainting", + "AutoPipelineForText2Image", + "ConsistencyModelPipeline", + "DanceDiffusionPipeline", + "DDIMPipeline", + "DDPMPipeline", + "DiffusionPipeline", + "DiTPipeline", + "ImagePipelineOutput", + "KarrasVePipeline", + "LDMPipeline", + "LDMSuperResolutionPipeline", + "PNDMPipeline", + "RePaintPipeline", + "ScoreSdeVePipeline", + "StableDiffusionMixin", + ] + ) + _import_structure["schedulers"].extend( + [ + "AmusedScheduler", + "CMStochasticIterativeScheduler", + "CogVideoXDDIMScheduler", + "CogVideoXDPMScheduler", + "DDIMInverseScheduler", + "DDIMParallelScheduler", + "DDIMScheduler", + "DDPMParallelScheduler", + "DDPMScheduler", + "DDPMWuerstchenScheduler", + 
"DEISMultistepScheduler", + "DPMSolverMultistepInverseScheduler", + "DPMSolverMultistepScheduler", + "DPMSolverSinglestepScheduler", + "EDMDPMSolverMultistepScheduler", + "EDMEulerScheduler", + "EulerAncestralDiscreteScheduler", + "EulerDiscreteScheduler", + "FlowMatchEulerDiscreteScheduler", + "FlowMatchHeunDiscreteScheduler", + "HeunDiscreteScheduler", + "IPNDMScheduler", + "KarrasVeScheduler", + "KDPM2AncestralDiscreteScheduler", + "KDPM2DiscreteScheduler", + "LCMScheduler", + "PNDMScheduler", + "RePaintScheduler", + "SASolverScheduler", + "SchedulerMixin", + "ScoreSdeVeScheduler", + "TCDScheduler", + "UnCLIPScheduler", + "UniPCMultistepScheduler", + "VQDiffusionScheduler", + ] + ) + _import_structure["training_utils"] = ["EMAModel"] + +try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_scipy_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_scipy_objects"] = [ + name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_") + ] + +else: + _import_structure["schedulers"].extend(["LMSDiscreteScheduler"]) + +try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_torchsde_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_torchsde_objects"] = [ + name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_") + ] + +else: + _import_structure["schedulers"].extend(["CosineDPMSolverMultistepScheduler", "DPMSolverSDEScheduler"]) + +try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_objects"] = [ + name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend( + [ + "AltDiffusionImg2ImgPipeline", + "AltDiffusionPipeline", + "AmusedImg2ImgPipeline", + "AmusedInpaintPipeline", + "AmusedPipeline", + "AnimateDiffControlNetPipeline", + "AnimateDiffPAGPipeline", + "AnimateDiffPipeline", + "AnimateDiffSDXLPipeline", + "AnimateDiffSparseControlNetPipeline", + "AnimateDiffVideoToVideoControlNetPipeline", + "AnimateDiffVideoToVideoPipeline", + "AudioLDM2Pipeline", + "AudioLDM2ProjectionModel", + "AudioLDM2UNet2DConditionModel", + "AudioLDMPipeline", + "AuraFlowPipeline", + "BlipDiffusionControlNetPipeline", + "BlipDiffusionPipeline", + "CLIPImageProjection", + "CogVideoXPipeline", + "CogVideoXVideoToVideoPipeline", + "CycleDiffusionPipeline", + "FluxControlNetPipeline", + "FluxImg2ImgPipeline", + "FluxInpaintPipeline", + "FluxPipeline", + "HunyuanDiTControlNetPipeline", + "HunyuanDiTPAGPipeline", + "HunyuanDiTPipeline", + "I2VGenXLPipeline", + "IFImg2ImgPipeline", + "IFImg2ImgSuperResolutionPipeline", + "IFInpaintingPipeline", + "IFInpaintingSuperResolutionPipeline", + "IFPipeline", + "IFSuperResolutionPipeline", + "ImageTextPipelineOutput", + "Kandinsky3Img2ImgPipeline", + "Kandinsky3Pipeline", + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyImg2ImgPipeline", + "KandinskyInpaintCombinedPipeline", + "KandinskyInpaintPipeline", + "KandinskyPipeline", + "KandinskyPriorPipeline", + "KandinskyV22CombinedPipeline", + "KandinskyV22ControlnetImg2ImgPipeline", + 
"KandinskyV22ControlnetPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22Img2ImgPipeline", + "KandinskyV22InpaintCombinedPipeline", + "KandinskyV22InpaintPipeline", + "KandinskyV22Pipeline", + "KandinskyV22PriorEmb2EmbPipeline", + "KandinskyV22PriorPipeline", + "LatentConsistencyModelImg2ImgPipeline", + "LatentConsistencyModelPipeline", + "LattePipeline", + "LDMTextToImagePipeline", + "LEditsPPPipelineStableDiffusion", + "LEditsPPPipelineStableDiffusionXL", + "LuminaText2ImgPipeline", + "MarigoldDepthPipeline", + "MarigoldNormalsPipeline", + "MusicLDMPipeline", + "PaintByExamplePipeline", + "PIAPipeline", + "PixArtAlphaPipeline", + "PixArtSigmaPAGPipeline", + "PixArtSigmaPipeline", + "SemanticStableDiffusionPipeline", + "ShapEImg2ImgPipeline", + "ShapEPipeline", + "StableAudioPipeline", + "StableAudioProjectionModel", + "StableCascadeCombinedPipeline", + "StableCascadeDecoderPipeline", + "StableCascadePriorPipeline", + "StableDiffusion3ControlNetInpaintingPipeline", + "StableDiffusion3ControlNetPipeline", + "StableDiffusion3Img2ImgPipeline", + "StableDiffusion3InpaintPipeline", + "StableDiffusion3PAGPipeline", + "StableDiffusion3Pipeline", + "StableDiffusionAdapterPipeline", + "StableDiffusionAttendAndExcitePipeline", + "StableDiffusionControlNetImg2ImgPipeline", + "StableDiffusionControlNetInpaintPipeline", + "StableDiffusionControlNetPAGPipeline", + "StableDiffusionControlNetPipeline", + "StableDiffusionControlNetXSPipeline", + "StableDiffusionDepth2ImgPipeline", + "StableDiffusionDiffEditPipeline", + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + "StableDiffusionImageVariationPipeline", + "StableDiffusionImg2ImgPipeline", + "StableDiffusionInpaintPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionInstructPix2PixPipeline", + "StableDiffusionLatentUpscalePipeline", + "StableDiffusionLDM3DPipeline", + "StableDiffusionModelEditingPipeline", + "StableDiffusionPAGPipeline", + "StableDiffusionPanoramaPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionPipeline", + "StableDiffusionPipelineSafe", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionSAGPipeline", + "StableDiffusionUpscalePipeline", + "StableDiffusionXLAdapterPipeline", + "StableDiffusionXLControlNetImg2ImgPipeline", + "StableDiffusionXLControlNetInpaintPipeline", + "StableDiffusionXLControlNetPAGImg2ImgPipeline", + "StableDiffusionXLControlNetPAGPipeline", + "StableDiffusionXLControlNetPipeline", + "StableDiffusionXLControlNetXSPipeline", + "StableDiffusionXLImg2ImgPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLInstructPix2PixPipeline", + "StableDiffusionXLPAGImg2ImgPipeline", + "StableDiffusionXLPAGInpaintPipeline", + "StableDiffusionXLPAGPipeline", + "StableDiffusionXLPipeline", + "StableUnCLIPImg2ImgPipeline", + "StableUnCLIPPipeline", + "StableVideoDiffusionPipeline", + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "UnCLIPImageVariationPipeline", + "UnCLIPPipeline", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + "VideoToVideoSDPipeline", + "VQDiffusionPipeline", + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise 
OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"]) + +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_and_sentencepiece_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_sentencepiece_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["KolorsImg2ImgPipeline", "KolorsPAGPipeline", "KolorsPipeline"]) + +try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [ + name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend( + [ + "OnnxStableDiffusionImg2ImgPipeline", + "OnnxStableDiffusionInpaintPipeline", + "OnnxStableDiffusionInpaintPipelineLegacy", + "OnnxStableDiffusionPipeline", + "OnnxStableDiffusionUpscalePipeline", + "StableDiffusionOnnxPipeline", + ] + ) + +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_torch_and_librosa_objects # noqa F403 + + _import_structure["utils.dummy_torch_and_librosa_objects"] = [ + name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_") + ] + +else: + _import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"]) + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [ + name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_") + ] + + +else: + _import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"]) + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_flax_objects # noqa F403 + + _import_structure["utils.dummy_flax_objects"] = [ + name for name in dir(dummy_flax_objects) if not name.startswith("_") + ] + + +else: + _import_structure["models.controlnet_flax"] = ["FlaxControlNetModel"] + _import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"] + _import_structure["models.unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] + _import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"] + _import_structure["pipelines"].extend(["FlaxDiffusionPipeline"]) + _import_structure["schedulers"].extend( + [ + "FlaxDDIMScheduler", + 
"FlaxDDPMScheduler", + "FlaxDPMSolverMultistepScheduler", + "FlaxEulerDiscreteScheduler", + "FlaxKarrasVeScheduler", + "FlaxLMSDiscreteScheduler", + "FlaxPNDMScheduler", + "FlaxSchedulerMixin", + "FlaxScoreSdeVeScheduler", + ] + ) + + +try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_flax_and_transformers_objects # noqa F403 + + _import_structure["utils.dummy_flax_and_transformers_objects"] = [ + name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_") + ] + + +else: + _import_structure["pipelines"].extend( + [ + "FlaxStableDiffusionControlNetPipeline", + "FlaxStableDiffusionImg2ImgPipeline", + "FlaxStableDiffusionInpaintPipeline", + "FlaxStableDiffusionPipeline", + "FlaxStableDiffusionXLPipeline", + ] + ) + +try: + if not (is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from .utils import dummy_note_seq_objects # noqa F403 + + _import_structure["utils.dummy_note_seq_objects"] = [ + name for name in dir(dummy_note_seq_objects) if not name.startswith("_") + ] + + +else: + _import_structure["pipelines"].extend(["MidiProcessor"]) + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .configuration_utils import ConfigMixin + + try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_onnx_objects import * # noqa F403 + else: + from .pipelines import OnnxRuntimeModel + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_pt_objects import * # noqa F403 + else: + from .models import ( + AsymmetricAutoencoderKL, + AuraFlowTransformer2DModel, + AutoencoderKL, + AutoencoderKLCogVideoX, + AutoencoderKLTemporalDecoder, + AutoencoderOobleck, + AutoencoderTiny, + CogVideoXTransformer3DModel, + ConsistencyDecoderVAE, + ControlNetModel, + ControlNetXSAdapter, + DiTTransformer2DModel, + FluxControlNetModel, + FluxMultiControlNetModel, + FluxTransformer2DModel, + HunyuanDiT2DControlNetModel, + HunyuanDiT2DModel, + HunyuanDiT2DMultiControlNetModel, + I2VGenXLUNet, + Kandinsky3UNet, + LatteTransformer3DModel, + LuminaNextDiT2DModel, + ModelMixin, + MotionAdapter, + MultiAdapter, + PixArtTransformer2DModel, + PriorTransformer, + SD3ControlNetModel, + SD3MultiControlNetModel, + SD3Transformer2DModel, + SparseControlNetModel, + StableAudioDiTModel, + T2IAdapter, + T5FilmDecoder, + Transformer2DModel, + UNet1DModel, + UNet2DConditionModel, + UNet2DModel, + UNet3DConditionModel, + UNetControlNetXSModel, + UNetMotionModel, + UNetSpatioTemporalConditionModel, + UVit2DModel, + VQModel, + ) + from .optimization import ( + get_constant_schedule, + get_constant_schedule_with_warmup, + get_cosine_schedule_with_warmup, + get_cosine_with_hard_restarts_schedule_with_warmup, + get_linear_schedule_with_warmup, + get_polynomial_decay_schedule_with_warmup, + get_scheduler, + ) + from .pipelines import ( + AudioPipelineOutput, + AutoPipelineForImage2Image, + AutoPipelineForInpainting, + AutoPipelineForText2Image, + BlipDiffusionControlNetPipeline, + BlipDiffusionPipeline, + CLIPImageProjection, + ConsistencyModelPipeline, + DanceDiffusionPipeline, + DDIMPipeline, + DDPMPipeline, + DiffusionPipeline, + DiTPipeline, + ImagePipelineOutput, + KarrasVePipeline, + LDMPipeline, + LDMSuperResolutionPipeline, + PNDMPipeline, + RePaintPipeline, + 
ScoreSdeVePipeline, + StableDiffusionMixin, + ) + from .schedulers import ( + AmusedScheduler, + CMStochasticIterativeScheduler, + CogVideoXDDIMScheduler, + CogVideoXDPMScheduler, + DDIMInverseScheduler, + DDIMParallelScheduler, + DDIMScheduler, + DDPMParallelScheduler, + DDPMScheduler, + DDPMWuerstchenScheduler, + DEISMultistepScheduler, + DPMSolverMultistepInverseScheduler, + DPMSolverMultistepScheduler, + DPMSolverSinglestepScheduler, + EDMDPMSolverMultistepScheduler, + EDMEulerScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + FlowMatchEulerDiscreteScheduler, + FlowMatchHeunDiscreteScheduler, + HeunDiscreteScheduler, + IPNDMScheduler, + KarrasVeScheduler, + KDPM2AncestralDiscreteScheduler, + KDPM2DiscreteScheduler, + LCMScheduler, + PNDMScheduler, + RePaintScheduler, + SASolverScheduler, + SchedulerMixin, + ScoreSdeVeScheduler, + TCDScheduler, + UnCLIPScheduler, + UniPCMultistepScheduler, + VQDiffusionScheduler, + ) + from .training_utils import EMAModel + + try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_scipy_objects import * # noqa F403 + else: + from .schedulers import LMSDiscreteScheduler + + try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 + else: + from .schedulers import CosineDPMSolverMultistepScheduler, DPMSolverSDEScheduler + + try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipelines import ( + AltDiffusionImg2ImgPipeline, + AltDiffusionPipeline, + AmusedImg2ImgPipeline, + AmusedInpaintPipeline, + AmusedPipeline, + AnimateDiffControlNetPipeline, + AnimateDiffPAGPipeline, + AnimateDiffPipeline, + AnimateDiffSDXLPipeline, + AnimateDiffSparseControlNetPipeline, + AnimateDiffVideoToVideoControlNetPipeline, + AnimateDiffVideoToVideoPipeline, + AudioLDM2Pipeline, + AudioLDM2ProjectionModel, + AudioLDM2UNet2DConditionModel, + AudioLDMPipeline, + AuraFlowPipeline, + CLIPImageProjection, + CogVideoXPipeline, + CogVideoXVideoToVideoPipeline, + CycleDiffusionPipeline, + FluxControlNetPipeline, + FluxImg2ImgPipeline, + FluxInpaintPipeline, + FluxPipeline, + HunyuanDiTControlNetPipeline, + HunyuanDiTPAGPipeline, + HunyuanDiTPipeline, + I2VGenXLPipeline, + IFImg2ImgPipeline, + IFImg2ImgSuperResolutionPipeline, + IFInpaintingPipeline, + IFInpaintingSuperResolutionPipeline, + IFPipeline, + IFSuperResolutionPipeline, + ImageTextPipelineOutput, + Kandinsky3Img2ImgPipeline, + Kandinsky3Pipeline, + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, + KandinskyPriorPipeline, + KandinskyV22CombinedPipeline, + KandinskyV22ControlnetImg2ImgPipeline, + KandinskyV22ControlnetPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, + KandinskyV22PriorEmb2EmbPipeline, + KandinskyV22PriorPipeline, + LatentConsistencyModelImg2ImgPipeline, + LatentConsistencyModelPipeline, + LattePipeline, + LDMTextToImagePipeline, + LEditsPPPipelineStableDiffusion, + 
LEditsPPPipelineStableDiffusionXL, + LuminaText2ImgPipeline, + MarigoldDepthPipeline, + MarigoldNormalsPipeline, + MusicLDMPipeline, + PaintByExamplePipeline, + PIAPipeline, + PixArtAlphaPipeline, + PixArtSigmaPAGPipeline, + PixArtSigmaPipeline, + SemanticStableDiffusionPipeline, + ShapEImg2ImgPipeline, + ShapEPipeline, + StableAudioPipeline, + StableAudioProjectionModel, + StableCascadeCombinedPipeline, + StableCascadeDecoderPipeline, + StableCascadePriorPipeline, + StableDiffusion3ControlNetPipeline, + StableDiffusion3Img2ImgPipeline, + StableDiffusion3InpaintPipeline, + StableDiffusion3PAGPipeline, + StableDiffusion3Pipeline, + StableDiffusionAdapterPipeline, + StableDiffusionAttendAndExcitePipeline, + StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPAGPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionControlNetXSPipeline, + StableDiffusionDepth2ImgPipeline, + StableDiffusionDiffEditPipeline, + StableDiffusionGLIGENPipeline, + StableDiffusionGLIGENTextImagePipeline, + StableDiffusionImageVariationPipeline, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionInstructPix2PixPipeline, + StableDiffusionLatentUpscalePipeline, + StableDiffusionLDM3DPipeline, + StableDiffusionModelEditingPipeline, + StableDiffusionPAGPipeline, + StableDiffusionPanoramaPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPipeline, + StableDiffusionPipelineSafe, + StableDiffusionPix2PixZeroPipeline, + StableDiffusionSAGPipeline, + StableDiffusionUpscalePipeline, + StableDiffusionXLAdapterPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPAGImg2ImgPipeline, + StableDiffusionXLControlNetPAGPipeline, + StableDiffusionXLControlNetPipeline, + StableDiffusionXLControlNetXSPipeline, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLInstructPix2PixPipeline, + StableDiffusionXLPAGImg2ImgPipeline, + StableDiffusionXLPAGInpaintPipeline, + StableDiffusionXLPAGPipeline, + StableDiffusionXLPipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + StableVideoDiffusionPipeline, + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + UnCLIPImageVariationPipeline, + UnCLIPPipeline, + UniDiffuserModel, + UniDiffuserPipeline, + UniDiffuserTextDecoder, + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + VideoToVideoSDPipeline, + VQDiffusionPipeline, + WuerstchenCombinedPipeline, + WuerstchenDecoderPipeline, + WuerstchenPriorPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 + else: + from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline + + try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import * # noqa F403 + else: + from .pipelines import KolorsImg2ImgPipeline, KolorsPAGPipeline, KolorsPipeline + try: + if not (is_torch_available() and 
is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 + else: + from .pipelines import ( + OnnxStableDiffusionImg2ImgPipeline, + OnnxStableDiffusionInpaintPipeline, + OnnxStableDiffusionInpaintPipelineLegacy, + OnnxStableDiffusionPipeline, + OnnxStableDiffusionUpscalePipeline, + StableDiffusionOnnxPipeline, + ) + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_torch_and_librosa_objects import * # noqa F403 + else: + from .pipelines import AudioDiffusionPipeline, Mel + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + else: + from .pipelines import SpectrogramDiffusionPipeline + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_flax_objects import * # noqa F403 + else: + from .models.controlnet_flax import FlaxControlNetModel + from .models.modeling_flax_utils import FlaxModelMixin + from .models.unets.unet_2d_condition_flax import FlaxUNet2DConditionModel + from .models.vae_flax import FlaxAutoencoderKL + from .pipelines import FlaxDiffusionPipeline + from .schedulers import ( + FlaxDDIMScheduler, + FlaxDDPMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxEulerDiscreteScheduler, + FlaxKarrasVeScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, + FlaxSchedulerMixin, + FlaxScoreSdeVeScheduler, + ) + + try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_flax_and_transformers_objects import * # noqa F403 + else: + from .pipelines import ( + FlaxStableDiffusionControlNetPipeline, + FlaxStableDiffusionImg2ImgPipeline, + FlaxStableDiffusionInpaintPipeline, + FlaxStableDiffusionPipeline, + FlaxStableDiffusionXLPipeline, + ) + + try: + if not (is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_note_seq_objects import * # noqa F403 + else: + from .pipelines import MidiProcessor + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + extra_objects={"__version__": __version__}, + ) diff --git a/diffusers3/__pycache__/__init__.cpython-310.pyc b/diffusers3/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9323dc88f3d357f6a42991bd555730c4a6d4e686 Binary files /dev/null and b/diffusers3/__pycache__/__init__.cpython-310.pyc differ diff --git a/diffusers3/__pycache__/__init__.cpython-38.pyc b/diffusers3/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15f6db672b3262c59efb9dc323972067c09b3e1b Binary files /dev/null and b/diffusers3/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/__pycache__/callbacks.cpython-310.pyc b/diffusers3/__pycache__/callbacks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56104212f8e87c7a80aff14d5df9bfad1119fdce Binary files /dev/null and 
b/diffusers3/__pycache__/callbacks.cpython-310.pyc differ diff --git a/diffusers3/__pycache__/callbacks.cpython-38.pyc b/diffusers3/__pycache__/callbacks.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02254633aea1551beb0f3adc817cce3423465486 Binary files /dev/null and b/diffusers3/__pycache__/callbacks.cpython-38.pyc differ diff --git a/diffusers3/__pycache__/configuration_utils.cpython-310.pyc b/diffusers3/__pycache__/configuration_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..253125ed829e660b05d4664e34cc8bdb1af20ab5 Binary files /dev/null and b/diffusers3/__pycache__/configuration_utils.cpython-310.pyc differ diff --git a/diffusers3/__pycache__/configuration_utils.cpython-38.pyc b/diffusers3/__pycache__/configuration_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d793123a54256718ae1a6bc2b84736d0ea3e6eb3 Binary files /dev/null and b/diffusers3/__pycache__/configuration_utils.cpython-38.pyc differ diff --git a/diffusers3/__pycache__/dependency_versions_check.cpython-310.pyc b/diffusers3/__pycache__/dependency_versions_check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14a12650dc0fb0f0d5dba816a1ca07c93d71bae2 Binary files /dev/null and b/diffusers3/__pycache__/dependency_versions_check.cpython-310.pyc differ diff --git a/diffusers3/__pycache__/dependency_versions_check.cpython-38.pyc b/diffusers3/__pycache__/dependency_versions_check.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6ed11882429fbe29d985930887a730b1f4ec228 Binary files /dev/null and b/diffusers3/__pycache__/dependency_versions_check.cpython-38.pyc differ diff --git a/diffusers3/__pycache__/dependency_versions_table.cpython-310.pyc b/diffusers3/__pycache__/dependency_versions_table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e56ad786354c443b9adfb0b608cad82de2cb05f6 Binary files /dev/null and b/diffusers3/__pycache__/dependency_versions_table.cpython-310.pyc differ diff --git a/diffusers3/__pycache__/dependency_versions_table.cpython-38.pyc b/diffusers3/__pycache__/dependency_versions_table.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ecf627ccdfc0d6ce0e9907bb71dbd74b40db52 Binary files /dev/null and b/diffusers3/__pycache__/dependency_versions_table.cpython-38.pyc differ diff --git a/diffusers3/__pycache__/image_processor.cpython-310.pyc b/diffusers3/__pycache__/image_processor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..050f1803b137987495752e866d818b79fb6a51b3 Binary files /dev/null and b/diffusers3/__pycache__/image_processor.cpython-310.pyc differ diff --git a/diffusers3/__pycache__/image_processor.cpython-38.pyc b/diffusers3/__pycache__/image_processor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72ee555d373a8693ff0d149a981092629ae7abc5 Binary files /dev/null and b/diffusers3/__pycache__/image_processor.cpython-38.pyc differ diff --git a/diffusers3/callbacks.py b/diffusers3/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..38542407e31fa1255eb26b563632c7a9f3d2fded --- /dev/null +++ b/diffusers3/callbacks.py @@ -0,0 +1,156 @@ +from typing import Any, Dict, List + +from .configuration_utils import ConfigMixin, register_to_config +from .utils import CONFIG_NAME + + +class PipelineCallback(ConfigMixin): + """ + Base class for all the 
official callbacks used in a pipeline. This class provides a structure for implementing + custom callbacks and ensures that all callbacks have a consistent interface. + + Please implement the following: + `tensor_inputs`: This should return a list of tensor inputs specific to your callback. You will only be able to + include + variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. + `callback_fn`: This method defines the core functionality of your callback. + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): + super().__init__() + + if (cutoff_step_ratio is None and cutoff_step_index is None) or ( + cutoff_step_ratio is not None and cutoff_step_index is not None + ): + raise ValueError("Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.") + + if cutoff_step_ratio is not None and ( + not isinstance(cutoff_step_ratio, float) or not (0.0 <= cutoff_step_ratio <= 1.0) + ): + raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.") + + @property + def tensor_inputs(self) -> List[str]: + raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}") + + def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: + raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}") + + def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + return self.callback_fn(pipeline, step_index, timestep, callback_kwargs) + + +class MultiPipelineCallbacks: + """ + This class is designed to handle multiple pipeline callbacks. It accepts a list of PipelineCallback objects and + provides a unified interface for calling all of them. + """ + + def __init__(self, callbacks: List[PipelineCallback]): + self.callbacks = callbacks + + @property + def tensor_inputs(self) -> List[str]: + return [input for callback in self.callbacks for input in callback.tensor_inputs] + + def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + """ + Calls all the callbacks in order with the given arguments and returns the final callback_kwargs. + """ + for callback in self.callbacks: + callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) + + return callback_kwargs + + +class SDCFGCutoffCallback(PipelineCallback): + """ + Callback function for Stable Diffusion Pipelines. After certain number of steps (set by `cutoff_step_ratio` or + `cutoff_step_index`), this callback will disable the CFG. + + Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. + """ + + tensor_inputs = ["prompt_embeds"] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + prompt_embeds = callback_kwargs[self.tensor_inputs[0]] + prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. 
+ + pipeline._guidance_scale = 0.0 + + callback_kwargs[self.tensor_inputs[0]] = prompt_embeds + return callback_kwargs + + +class SDXLCFGCutoffCallback(PipelineCallback): + """ + Callback function for Stable Diffusion XL Pipelines. After certain number of steps (set by `cutoff_step_ratio` or + `cutoff_step_index`), this callback will disable the CFG. + + Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. + """ + + tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + prompt_embeds = callback_kwargs[self.tensor_inputs[0]] + prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. + + add_text_embeds = callback_kwargs[self.tensor_inputs[1]] + add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens + + add_time_ids = callback_kwargs[self.tensor_inputs[2]] + add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector + + pipeline._guidance_scale = 0.0 + + callback_kwargs[self.tensor_inputs[0]] = prompt_embeds + callback_kwargs[self.tensor_inputs[1]] = add_text_embeds + callback_kwargs[self.tensor_inputs[2]] = add_time_ids + return callback_kwargs + + +class IPAdapterScaleCutoffCallback(PipelineCallback): + """ + Callback function for any pipeline that inherits `IPAdapterMixin`. After certain number of steps (set by + `cutoff_step_ratio` or `cutoff_step_index`), this callback will set the IP Adapter scale to `0.0`. + + Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step. + """ + + tensor_inputs = [] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + pipeline.set_ip_adapter_scale(0.0) + return callback_kwargs diff --git a/diffusers3/commands/__init__.py b/diffusers3/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8208283f6e40a8e46175b1672d6bf44f9d83a02b --- /dev/null +++ b/diffusers3/commands/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import ABC, abstractmethod +from argparse import ArgumentParser + + +class BaseDiffusersCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/diffusers3/commands/diffusers_cli.py b/diffusers3/commands/diffusers_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..f582c3bcd0df0c5167a8de18123b4474e64bb344 --- /dev/null +++ b/diffusers3/commands/diffusers_cli.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from argparse import ArgumentParser + +from .env import EnvironmentCommand +from .fp16_safetensors import FP16SafetensorsCommand + + +def main(): + parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli []") + commands_parser = parser.add_subparsers(help="diffusers-cli command helpers") + + # Register commands + EnvironmentCommand.register_subcommand(commands_parser) + FP16SafetensorsCommand.register_subcommand(commands_parser) + + # Let's go + args = parser.parse_args() + + if not hasattr(args, "func"): + parser.print_help() + exit(1) + + # Run + service = args.func(args) + service.run() + + +if __name__ == "__main__": + main() diff --git a/diffusers3/commands/env.py b/diffusers3/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..d0af30bf1c65984c25a484c7f77b35f80f1a9fa9 --- /dev/null +++ b/diffusers3/commands/env.py @@ -0,0 +1,180 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import platform +import subprocess +from argparse import ArgumentParser + +import huggingface_hub + +from .. import __version__ as version +from ..utils import ( + is_accelerate_available, + is_bitsandbytes_available, + is_flax_available, + is_google_colab, + is_peft_available, + is_safetensors_available, + is_torch_available, + is_transformers_available, + is_xformers_available, +) +from . 
import BaseDiffusersCLICommand + + +def info_command_factory(_): + return EnvironmentCommand() + + +class EnvironmentCommand(BaseDiffusersCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser) -> None: + download_parser = parser.add_parser("env") + download_parser.set_defaults(func=info_command_factory) + + def run(self) -> dict: + hub_version = huggingface_hub.__version__ + + safetensors_version = "not installed" + if is_safetensors_available(): + import safetensors + + safetensors_version = safetensors.__version__ + + pt_version = "not installed" + pt_cuda_available = "NA" + if is_torch_available(): + import torch + + pt_version = torch.__version__ + pt_cuda_available = torch.cuda.is_available() + + flax_version = "not installed" + jax_version = "not installed" + jaxlib_version = "not installed" + jax_backend = "NA" + if is_flax_available(): + import flax + import jax + import jaxlib + + flax_version = flax.__version__ + jax_version = jax.__version__ + jaxlib_version = jaxlib.__version__ + jax_backend = jax.lib.xla_bridge.get_backend().platform + + transformers_version = "not installed" + if is_transformers_available(): + import transformers + + transformers_version = transformers.__version__ + + accelerate_version = "not installed" + if is_accelerate_available(): + import accelerate + + accelerate_version = accelerate.__version__ + + peft_version = "not installed" + if is_peft_available(): + import peft + + peft_version = peft.__version__ + + bitsandbytes_version = "not installed" + if is_bitsandbytes_available(): + import bitsandbytes + + bitsandbytes_version = bitsandbytes.__version__ + + xformers_version = "not installed" + if is_xformers_available(): + import xformers + + xformers_version = xformers.__version__ + + platform_info = platform.platform() + + is_google_colab_str = "Yes" if is_google_colab() else "No" + + accelerator = "NA" + if platform.system() in {"Linux", "Windows"}: + try: + sp = subprocess.Popen( + ["nvidia-smi", "--query-gpu=gpu_name,memory.total", "--format=csv,noheader"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + out_str, _ = sp.communicate() + out_str = out_str.decode("utf-8") + + if len(out_str) > 0: + accelerator = out_str.strip() + except FileNotFoundError: + pass + elif platform.system() == "Darwin": # Mac OS + try: + sp = subprocess.Popen( + ["system_profiler", "SPDisplaysDataType"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + out_str, _ = sp.communicate() + out_str = out_str.decode("utf-8") + + start = out_str.find("Chipset Model:") + if start != -1: + start += len("Chipset Model:") + end = out_str.find("\n", start) + accelerator = out_str[start:end].strip() + + start = out_str.find("VRAM (Total):") + if start != -1: + start += len("VRAM (Total):") + end = out_str.find("\n", start) + accelerator += " VRAM: " + out_str[start:end].strip() + except FileNotFoundError: + pass + else: + print("It seems you are running an unusual OS. 
Could you fill in the accelerator manually?") + + info = { + "๐Ÿค— Diffusers version": version, + "Platform": platform_info, + "Running on Google Colab?": is_google_colab_str, + "Python version": platform.python_version(), + "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", + "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})", + "Jax version": jax_version, + "JaxLib version": jaxlib_version, + "Huggingface_hub version": hub_version, + "Transformers version": transformers_version, + "Accelerate version": accelerate_version, + "PEFT version": peft_version, + "Bitsandbytes version": bitsandbytes_version, + "Safetensors version": safetensors_version, + "xFormers version": xformers_version, + "Accelerator": accelerator, + "Using GPU in script?": "", + "Using distributed or parallel set-up in script?": "", + } + + print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n") + print(self.format_dict(info)) + + return info + + @staticmethod + def format_dict(d: dict) -> str: + return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" diff --git a/diffusers3/commands/fp16_safetensors.py b/diffusers3/commands/fp16_safetensors.py new file mode 100644 index 0000000000000000000000000000000000000000..b26b8816bc4cf1a6272d3eebf5ab2be8a5dd865b --- /dev/null +++ b/diffusers3/commands/fp16_safetensors.py @@ -0,0 +1,132 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage example: + diffusers-cli fp16_safetensors --ckpt_id=openai/shap-e --fp16 --use_safetensors +""" + +import glob +import json +import warnings +from argparse import ArgumentParser, Namespace +from importlib import import_module + +import huggingface_hub +import torch +from huggingface_hub import hf_hub_download +from packaging import version + +from ..utils import logging +from . import BaseDiffusersCLICommand + + +def conversion_command_factory(args: Namespace): + if args.use_auth_token: + warnings.warn( + "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now" + " handled automatically if user is logged in." + ) + return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors) + + +class FP16SafetensorsCommand(BaseDiffusersCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + conversion_parser = parser.add_parser("fp16_safetensors") + conversion_parser.add_argument( + "--ckpt_id", + type=str, + help="Repo id of the checkpoints on which to run the conversion. Example: 'openai/shap-e'.", + ) + conversion_parser.add_argument( + "--fp16", action="store_true", help="If serializing the variables in FP16 precision." + ) + conversion_parser.add_argument( + "--use_safetensors", action="store_true", help="If serializing in the safetensors format." + ) + conversion_parser.add_argument( + "--use_auth_token", + action="store_true", + help="When working with checkpoints having private visibility. 
When used `huggingface-cli login` needs to be run beforehand.", + ) + conversion_parser.set_defaults(func=conversion_command_factory) + + def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool): + self.logger = logging.get_logger("diffusers-cli/fp16_safetensors") + self.ckpt_id = ckpt_id + self.local_ckpt_dir = f"/tmp/{ckpt_id}" + self.fp16 = fp16 + + self.use_safetensors = use_safetensors + + if not self.use_safetensors and not self.fp16: + raise NotImplementedError( + "When `use_safetensors` and `fp16` both are False, then this command is of no use." + ) + + def run(self): + if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"): + raise ImportError( + "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub" + " installation." + ) + else: + from huggingface_hub import create_commit + from huggingface_hub._commit_api import CommitOperationAdd + + model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json") + with open(model_index, "r") as f: + pipeline_class_name = json.load(f)["_class_name"] + pipeline_class = getattr(import_module("diffusers"), pipeline_class_name) + self.logger.info(f"Pipeline class imported: {pipeline_class_name}.") + + # Load the appropriate pipeline. We could have use `DiffusionPipeline` + # here, but just to avoid any rough edge cases. + pipeline = pipeline_class.from_pretrained( + self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32 + ) + pipeline.save_pretrained( + self.local_ckpt_dir, + safe_serialization=True if self.use_safetensors else False, + variant="fp16" if self.fp16 else None, + ) + self.logger.info(f"Pipeline locally saved to {self.local_ckpt_dir}.") + + # Fetch all the paths. + if self.fp16: + modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.fp16.*") + elif self.use_safetensors: + modified_paths = glob.glob(f"{self.local_ckpt_dir}/*/*.safetensors") + + # Prepare for the PR. + commit_message = f"Serialize variables with FP16: {self.fp16} and safetensors: {self.use_safetensors}." + operations = [] + for path in modified_paths: + operations.append(CommitOperationAdd(path_in_repo="/".join(path.split("/")[4:]), path_or_fileobj=path)) + + # Open the PR. + commit_description = ( + "Variables converted by the [`diffusers`' `fp16_safetensors`" + " CLI](https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/fp16_safetensors.py)." + ) + hub_pr_url = create_commit( + repo_id=self.ckpt_id, + operations=operations, + commit_message=commit_message, + commit_description=commit_description, + repo_type="model", + create_pr=True, + ).pr_url + self.logger.info(f"PR created here: {hub_pr_url}.") diff --git a/diffusers3/configuration_utils.py b/diffusers3/configuration_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3dccd785cae4211ec8e8ee8a723c781430e99abe --- /dev/null +++ b/diffusers3/configuration_utils.py @@ -0,0 +1,720 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""ConfigMixin base class and utilities.""" + +import dataclasses +import functools +import importlib +import inspect +import json +import os +import re +from collections import OrderedDict +from pathlib import Path +from typing import Any, Dict, Tuple, Union + +import numpy as np +from huggingface_hub import create_repo, hf_hub_download +from huggingface_hub.utils import ( + EntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + validate_hf_hub_args, +) +from requests import HTTPError + +from . import __version__ +from .utils import ( + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + DummyObject, + deprecate, + extract_commit_hash, + http_user_agent, + logging, +) + + +logger = logging.get_logger(__name__) + +_re_configuration_file = re.compile(r"config\.(.*)\.json") + + +class FrozenDict(OrderedDict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + for key, value in self.items(): + setattr(self, key, value) + + self.__frozen = True + + def __delitem__(self, *args, **kwargs): + raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") + + def setdefault(self, *args, **kwargs): + raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") + + def pop(self, *args, **kwargs): + raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") + + def update(self, *args, **kwargs): + raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") + + def __setattr__(self, name, value): + if hasattr(self, "__frozen") and self.__frozen: + raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.") + super().__setattr__(name, value) + + def __setitem__(self, name, value): + if hasattr(self, "__frozen") and self.__frozen: + raise Exception(f"You cannot use ``__setattr__`` on a {self.__class__.__name__} instance.") + super().__setitem__(name, value) + + +class ConfigMixin: + r""" + Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also + provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and + saving classes that inherit from [`ConfigMixin`]. + + Class attributes: + - **config_name** (`str`) -- A filename under which the config should stored when calling + [`~ConfigMixin.save_config`] (should be overridden by parent class). + - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be + overridden by subclass). + - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass). + - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function + should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by + subclass). + """ + + config_name = None + ignore_for_config = [] + has_compatibles = False + + _deprecated_kwargs = [] + + def register_to_config(self, **kwargs): + if self.config_name is None: + raise NotImplementedError(f"Make sure that {self.__class__} has defined a class name `config_name`") + # Special case for `kwargs` used in deprecation warning added to schedulers + # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument, + # or solve in a more general way. 
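+        # Everything registered here is accumulated into `self._internal_dict` (exposed read-only via
+        # `self.config` as a `FrozenDict`); repeated calls merge the new kwargs over previously
+        # registered values.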
+ kwargs.pop("kwargs", None) + + if not hasattr(self, "_internal_dict"): + internal_dict = kwargs + else: + previous_dict = dict(self._internal_dict) + internal_dict = {**self._internal_dict, **kwargs} + logger.debug(f"Updating config from {previous_dict} to {internal_dict}") + + self._internal_dict = FrozenDict(internal_dict) + + def __getattr__(self, name: str) -> Any: + """The only reason we overwrite `getattr` here is to gracefully deprecate accessing + config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 + + This function is mostly copied from PyTorch's __getattr__ overwrite: + https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module + """ + + is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) + is_attribute = name in self.__dict__ + + if is_in_config and not is_attribute: + deprecation_message = f"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'." + deprecate("direct config name access", "1.0.0", deprecation_message, standard_warn=False) + return self._internal_dict[name] + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") + + def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the + [`~ConfigMixin.from_config`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file is saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + + # If we save using the predefined names, we can load using `from_config` + output_config_file = os.path.join(save_directory, self.config_name) + + self.to_json_file(output_config_file) + logger.info(f"Configuration saved in {output_config_file}") + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + @classmethod + def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs): + r""" + Instantiate a Python class from a config dictionary. + + Parameters: + config (`Dict[str, Any]`): + A config dictionary from which the Python class is instantiated. Make sure to only load configuration + files of compatible classes. 
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to update the configuration object (after it is loaded) and initiate the Python class. + `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually + overwrite the same named arguments in `config`. + + Returns: + [`ModelMixin`] or [`SchedulerMixin`]: + A model or scheduler object instantiated from a config dictionary. + + Examples: + + ```python + >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler + + >>> # Download scheduler from huggingface.co and cache. + >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32") + + >>> # Instantiate DDIM scheduler class with same config as DDPM + >>> scheduler = DDIMScheduler.from_config(scheduler.config) + + >>> # Instantiate PNDM scheduler class with same config as DDPM + >>> scheduler = PNDMScheduler.from_config(scheduler.config) + ``` + """ + # <===== TO BE REMOVED WITH DEPRECATION + # TODO(Patrick) - make sure to remove the following lines when config=="model_path" is deprecated + if "pretrained_model_name_or_path" in kwargs: + config = kwargs.pop("pretrained_model_name_or_path") + + if config is None: + raise ValueError("Please make sure to provide a config as the first positional argument.") + # ======> + + if not isinstance(config, dict): + deprecation_message = "It is deprecated to pass a pretrained model name or path to `from_config`." + if "Scheduler" in cls.__name__: + deprecation_message += ( + f"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead." + " Otherwise, please make sure to pass a configuration dictionary instead. This functionality will" + " be removed in v1.0.0." + ) + elif "Model" in cls.__name__: + deprecation_message += ( + f"If you were trying to load a model, please use {cls}.load_config(...) followed by" + f" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary" + " instead. This functionality will be removed in v1.0.0." + ) + deprecate("config-passed-as-path", "1.0.0", deprecation_message, standard_warn=False) + config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs) + + init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs) + + # Allow dtype to be specified on initialization + if "dtype" in unused_kwargs: + init_dict["dtype"] = unused_kwargs.pop("dtype") + + # add possible deprecated kwargs + for deprecated_kwarg in cls._deprecated_kwargs: + if deprecated_kwarg in unused_kwargs: + init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg) + + # Return model and optionally state and/or unused_kwargs + model = cls(**init_dict) + + # make sure to also save config parameters that might be used for compatible classes + # update _class_name + if "_class_name" in hidden_dict: + hidden_dict["_class_name"] = cls.__name__ + + model.register_to_config(**hidden_dict) + + # add hidden kwargs of compatible classes to unused_kwargs + unused_kwargs = {**unused_kwargs, **hidden_dict} + + if return_unused_kwargs: + return (model, unused_kwargs) + else: + return model + + @classmethod + def get_config_dict(cls, *args, **kwargs): + deprecation_message = ( + f" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be" + " removed in version v1.0.0" + ) + deprecate("get_config_dict", "1.0.0", deprecation_message, standard_warn=False) + return cls.load_config(*args, **kwargs) + + @classmethod + @validate_hf_hub_args + def load_config( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + return_unused_kwargs=False, + return_commit_hash=False, + **kwargs, + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + r""" + Load a model or scheduler configuration. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with + [`~ConfigMixin.save_config`]. + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + return_unused_kwargs (`bool`, *optional*, defaults to `False): + Whether unused keyword arguments of the config are returned. + return_commit_hash (`bool`, *optional*, defaults to `False): + Whether the `commit_hash` of the loaded configuration are returned. + + Returns: + `dict`: + A dictionary of all the parameters stored in a JSON configuration file. + + """ + cache_dir = kwargs.pop("cache_dir", None) + local_dir = kwargs.pop("local_dir", None) + local_dir_use_symlinks = kwargs.pop("local_dir_use_symlinks", "auto") + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + _ = kwargs.pop("mirror", None) + subfolder = kwargs.pop("subfolder", None) + user_agent = kwargs.pop("user_agent", {}) + + user_agent = {**user_agent, "file_type": "config"} + user_agent = http_user_agent(user_agent) + + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + + if cls.config_name is None: + raise ValueError( + "`self.config_name` is not defined. Note that one should not load a config from " + "`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`" + ) + + if os.path.isfile(pretrained_model_name_or_path): + config_file = pretrained_model_name_or_path + elif os.path.isdir(pretrained_model_name_or_path): + if subfolder is not None and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) + ): + config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name) + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)): + # Load from a PyTorch checkpoint + config_file = os.path.join(pretrained_model_name_or_path, cls.config_name) + else: + raise EnvironmentError( + f"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}." + ) + else: + try: + # Load from URL or cache if already cached + config_file = hf_hub_download( + pretrained_model_name_or_path, + filename=cls.config_name, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision, + local_dir=local_dir, + local_dir_use_symlinks=local_dir_use_symlinks, + ) + except RepositoryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier" + " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a" + " token having permission to this repo with `token` or log in with `huggingface-cli login`." + ) + except RevisionNotFoundError: + raise EnvironmentError( + f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for" + " this model name. Check the model page at" + f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." + ) + except EntryNotFoundError: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}." + ) + except HTTPError as err: + raise EnvironmentError( + "There was a specific connection error when trying to load" + f" {pretrained_model_name_or_path}:\n{err}" + ) + except ValueError: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" + f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" + f" directory containing a {cls.config_name} file.\nCheckout your internet connection or see how to" + " run the library in offline mode at" + " 'https://huggingface.co/docs/diffusers/installation#offline-mode'." + ) + except EnvironmentError: + raise EnvironmentError( + f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
" + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing a {cls.config_name} file" + ) + + try: + # Load config dict + config_dict = cls._dict_from_json_file(config_file) + + commit_hash = extract_commit_hash(config_file) + except (json.JSONDecodeError, UnicodeDecodeError): + raise EnvironmentError(f"It looks like the config file at '{config_file}' is not a valid JSON file.") + + if not (return_unused_kwargs or return_commit_hash): + return config_dict + + outputs = (config_dict,) + + if return_unused_kwargs: + outputs += (kwargs,) + + if return_commit_hash: + outputs += (commit_hash,) + + return outputs + + @staticmethod + def _get_init_keys(input_class): + return set(dict(inspect.signature(input_class.__init__).parameters).keys()) + + @classmethod + def extract_init_dict(cls, config_dict, **kwargs): + # Skip keys that were not present in the original config, so default __init__ values were used + used_defaults = config_dict.get("_use_default_values", []) + config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != "_use_default_values"} + + # 0. Copy origin config dict + original_dict = dict(config_dict.items()) + + # 1. Retrieve expected config attributes from __init__ signature + expected_keys = cls._get_init_keys(cls) + expected_keys.remove("self") + # remove general kwargs if present in dict + if "kwargs" in expected_keys: + expected_keys.remove("kwargs") + # remove flax internal keys + if hasattr(cls, "_flax_internal_args"): + for arg in cls._flax_internal_args: + expected_keys.remove(arg) + + # 2. Remove attributes that cannot be expected from expected config attributes + # remove keys to be ignored + if len(cls.ignore_for_config) > 0: + expected_keys = expected_keys - set(cls.ignore_for_config) + + # load diffusers library to import compatible and original scheduler + diffusers_library = importlib.import_module(__name__.split(".")[0]) + + if cls.has_compatibles: + compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)] + else: + compatible_classes = [] + + expected_keys_comp_cls = set() + for c in compatible_classes: + expected_keys_c = cls._get_init_keys(c) + expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c) + expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls) + config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls} + + # remove attributes from orig class that cannot be expected + orig_cls_name = config_dict.pop("_class_name", cls.__name__) + if ( + isinstance(orig_cls_name, str) + and orig_cls_name != cls.__name__ + and hasattr(diffusers_library, orig_cls_name) + ): + orig_cls = getattr(diffusers_library, orig_cls_name) + unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys + config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig} + elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)): + raise ValueError( + "Make sure that the `_class_name` is of type string or list of string (for custom pipelines)." + ) + + # remove private attributes + config_dict = {k: v for k, v in config_dict.items() if not k.startswith("_")} + + # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments + init_dict = {} + for key in expected_keys: + # if config param is passed to kwarg and is present in config dict + # it should overwrite existing config dict key + if key in kwargs and key in config_dict: + config_dict[key] = kwargs.pop(key) + + if key in kwargs: + # overwrite key + init_dict[key] = kwargs.pop(key) + elif key in config_dict: + # use value from config dict + init_dict[key] = config_dict.pop(key) + + # 4. Give nice warning if unexpected values have been passed + if len(config_dict) > 0: + logger.warning( + f"The config attributes {config_dict} were passed to {cls.__name__}, " + "but are not expected and will be ignored. Please verify your " + f"{cls.config_name} configuration file." + ) + + # 5. Give nice info if config attributes are initialized to default because they have not been passed + passed_keys = set(init_dict.keys()) + if len(expected_keys - passed_keys) > 0: + logger.info( + f"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values." + ) + + # 6. Define unused keyword arguments + unused_kwargs = {**config_dict, **kwargs} + + # 7. Define "hidden" config parameters that were saved for compatible classes + hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict} + + return init_dict, unused_kwargs, hidden_config_dict + + @classmethod + def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string()}" + + @property + def config(self) -> Dict[str, Any]: + """ + Returns the config of the class as a frozen dictionary + + Returns: + `Dict[str, Any]`: Config of the class. + """ + return self._internal_dict + + def to_json_string(self) -> str: + """ + Serializes the configuration instance to a JSON string. + + Returns: + `str`: + String containing all the attributes that make up the configuration instance in JSON format. + """ + config_dict = self._internal_dict if hasattr(self, "_internal_dict") else {} + config_dict["_class_name"] = self.__class__.__name__ + config_dict["_diffusers_version"] = __version__ + + def to_json_saveable(value): + if isinstance(value, np.ndarray): + value = value.tolist() + elif isinstance(value, Path): + value = value.as_posix() + return value + + config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()} + # Don't save "_ignore_files" or "_use_default_values" + config_dict.pop("_ignore_files", None) + config_dict.pop("_use_default_values", None) + + return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike]): + """ + Save the configuration instance's parameters to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file to save a configuration instance's parameters. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string()) + + +def register_to_config(init): + r""" + Decorator to apply on the init of classes inheriting from [`ConfigMixin`] so that all the arguments are + automatically sent to `self.register_for_config`. 
To ignore a specific argument accepted by the init but that + shouldn't be registered in the config, use the `ignore_for_config` class variable + + Warning: Once decorated, all private arguments (beginning with an underscore) are trashed and not sent to the init! + """ + + @functools.wraps(init) + def inner_init(self, *args, **kwargs): + # Ignore private kwargs in the init. + init_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")} + config_init_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")} + if not isinstance(self, ConfigMixin): + raise RuntimeError( + f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " + "not inherit from `ConfigMixin`." + ) + + ignore = getattr(self, "ignore_for_config", []) + # Get positional arguments aligned with kwargs + new_kwargs = {} + signature = inspect.signature(init) + parameters = { + name: p.default for i, (name, p) in enumerate(signature.parameters.items()) if i > 0 and name not in ignore + } + for arg, name in zip(args, parameters.keys()): + new_kwargs[name] = arg + + # Then add all kwargs + new_kwargs.update( + { + k: init_kwargs.get(k, default) + for k, default in parameters.items() + if k not in ignore and k not in new_kwargs + } + ) + + # Take note of the parameters that were not present in the loaded config + if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: + new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs)) + + new_kwargs = {**config_init_kwargs, **new_kwargs} + getattr(self, "register_to_config")(**new_kwargs) + init(self, *args, **init_kwargs) + + return inner_init + + +def flax_register_to_config(cls): + original_init = cls.__init__ + + @functools.wraps(original_init) + def init(self, *args, **kwargs): + if not isinstance(self, ConfigMixin): + raise RuntimeError( + f"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does " + "not inherit from `ConfigMixin`." + ) + + # Ignore private kwargs in the init. Retrieve all passed attributes + init_kwargs = dict(kwargs.items()) + + # Retrieve default values + fields = dataclasses.fields(self) + default_kwargs = {} + for field in fields: + # ignore flax specific attributes + if field.name in self._flax_internal_args: + continue + if type(field.default) == dataclasses._MISSING_TYPE: + default_kwargs[field.name] = None + else: + default_kwargs[field.name] = getattr(self, field.name) + + # Make sure init_kwargs override default kwargs + new_kwargs = {**default_kwargs, **init_kwargs} + # dtype should be part of `init_kwargs`, but not `new_kwargs` + if "dtype" in new_kwargs: + new_kwargs.pop("dtype") + + # Get positional arguments aligned with kwargs + for i, arg in enumerate(args): + name = fields[i].name + new_kwargs[name] = arg + + # Take note of the parameters that were not present in the loaded config + if len(set(new_kwargs.keys()) - set(init_kwargs)) > 0: + new_kwargs["_use_default_values"] = list(set(new_kwargs.keys()) - set(init_kwargs)) + + getattr(self, "register_to_config")(**new_kwargs) + original_init(self, *args, **kwargs) + + cls.__init__ = init + return cls + + +class LegacyConfigMixin(ConfigMixin): + r""" + A subclass of `ConfigMixin` to resolve class mapping from legacy classes (like `Transformer2DModel`) to more + pipeline-specific classes (like `DiTTransformer2DModel`). 
+ """ + + @classmethod + def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs): + # To prevent dependency import problem. + from .models.model_loading_utils import _fetch_remapped_cls_from_config + + # resolve remapping + remapped_class = _fetch_remapped_cls_from_config(config, cls) + + return remapped_class.from_config(config, return_unused_kwargs, **kwargs) diff --git a/diffusers3/dependency_versions_check.py b/diffusers3/dependency_versions_check.py new file mode 100644 index 0000000000000000000000000000000000000000..0728b3a7c0932cd06c920947e1ea57f3864f239a --- /dev/null +++ b/diffusers3/dependency_versions_check.py @@ -0,0 +1,34 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .dependency_versions_table import deps +from .utils.versions import require_version, require_version_core + + +# define which module versions we always want to check at run time +# (usually the ones defined in `install_requires` in setup.py) +# +# order specific notes: +# - tqdm must be checked before tokenizers + +pkgs_to_check_at_runtime = "python requests filelock numpy".split() +for pkg in pkgs_to_check_at_runtime: + if pkg in deps: + require_version_core(deps[pkg]) + else: + raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") + + +def dep_version_check(pkg, hint=None): + require_version(deps[pkg], hint) diff --git a/diffusers3/dependency_versions_table.py b/diffusers3/dependency_versions_table.py new file mode 100644 index 0000000000000000000000000000000000000000..9e7bf242eca78aca87da4c01b0e3a9830c781299 --- /dev/null +++ b/diffusers3/dependency_versions_table.py @@ -0,0 +1,46 @@ +# THIS FILE HAS BEEN AUTOGENERATED. To update: +# 1. modify the `_deps` dict in setup.py +# 2. 
run `make deps_table_update` +deps = { + "Pillow": "Pillow", + "accelerate": "accelerate>=0.31.0", + "compel": "compel==0.1.8", + "datasets": "datasets", + "filelock": "filelock", + "flax": "flax>=0.4.1", + "hf-doc-builder": "hf-doc-builder>=0.3.0", + "huggingface-hub": "huggingface-hub>=0.23.2", + "requests-mock": "requests-mock==1.10.0", + "importlib_metadata": "importlib_metadata", + "invisible-watermark": "invisible-watermark>=0.2.0", + "isort": "isort>=5.5.4", + "jax": "jax>=0.4.1", + "jaxlib": "jaxlib>=0.4.1", + "Jinja2": "Jinja2", + "k-diffusion": "k-diffusion>=0.0.12", + "torchsde": "torchsde", + "note_seq": "note_seq", + "librosa": "librosa", + "numpy": "numpy", + "parameterized": "parameterized", + "peft": "peft>=0.6.0", + "protobuf": "protobuf>=3.20.3,<4", + "pytest": "pytest", + "pytest-timeout": "pytest-timeout", + "pytest-xdist": "pytest-xdist", + "python": "python>=3.8.0", + "ruff": "ruff==0.1.5", + "safetensors": "safetensors>=0.3.1", + "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", + "GitPython": "GitPython<3.1.19", + "scipy": "scipy", + "onnx": "onnx", + "regex": "regex!=2019.12.17", + "requests": "requests", + "tensorboard": "tensorboard", + "torch": "torch>=1.4", + "torchvision": "torchvision", + "transformers": "transformers>=4.41.2", + "urllib3": "urllib3<=2.0.0", + "black": "black", +} diff --git a/diffusers3/experimental/README.md b/diffusers3/experimental/README.md new file mode 100644 index 0000000000000000000000000000000000000000..81a9de81c73728ea41eb6e8617a5429c3c9645ff --- /dev/null +++ b/diffusers3/experimental/README.md @@ -0,0 +1,5 @@ +# ๐Ÿงจ Diffusers Experimental + +We are adding experimental code to support novel applications and usages of the Diffusers library. +Currently, the following experiments are supported: +* Reinforcement learning via an implementation of the [Diffuser](https://arxiv.org/abs/2205.09991) model. \ No newline at end of file diff --git a/diffusers3/experimental/__init__.py b/diffusers3/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc8155403016dfd8ad7fb78d246f9da9098ac50 --- /dev/null +++ b/diffusers3/experimental/__init__.py @@ -0,0 +1 @@ +from .rl import ValueGuidedRLPipeline diff --git a/diffusers3/experimental/rl/__init__.py b/diffusers3/experimental/rl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b338d3173e12d478b6b6d6fd0e50650a0ab5a4c --- /dev/null +++ b/diffusers3/experimental/rl/__init__.py @@ -0,0 +1 @@ +from .value_guided_sampling import ValueGuidedRLPipeline diff --git a/diffusers3/experimental/rl/value_guided_sampling.py b/diffusers3/experimental/rl/value_guided_sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..2f9de857480ec590ae1f795bb2d29bbeccec1331 --- /dev/null +++ b/diffusers3/experimental/rl/value_guided_sampling.py @@ -0,0 +1,153 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
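+
+# Rough usage sketch for the planner defined below (illustrative only; the checkpoint id and the
+# D4RL-style `env` object are assumptions, not part of this file):
+#
+#   import d4rl  # noqa: F401
+#   import gym
+#
+#   env = gym.make("hopper-medium-v2")
+#   pipeline = ValueGuidedRLPipeline.from_pretrained(
+#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
+#   )
+#   obs = env.reset()
+#   action = pipeline(obs, planning_horizon=32)  # denormalized action for the current observation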
+ +import numpy as np +import torch +import tqdm + +from ...models.unets.unet_1d import UNet1DModel +from ...pipelines import DiffusionPipeline +from ...utils.dummy_pt_objects import DDPMScheduler +from ...utils.torch_utils import randn_tensor + + +class ValueGuidedRLPipeline(DiffusionPipeline): + r""" + Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + value_function ([`UNet1DModel`]): + A specialized UNet for fine-tuning trajectories base on reward. + unet ([`UNet1DModel`]): + UNet architecture to denoise the encoded trajectories. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this + application is [`DDPMScheduler`]. + env (): + An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. + """ + + def __init__( + self, + value_function: UNet1DModel, + unet: UNet1DModel, + scheduler: DDPMScheduler, + env, + ): + super().__init__() + + self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env) + + self.data = env.get_dataset() + self.means = {} + for key in self.data.keys(): + try: + self.means[key] = self.data[key].mean() + except: # noqa: E722 + pass + self.stds = {} + for key in self.data.keys(): + try: + self.stds[key] = self.data[key].std() + except: # noqa: E722 + pass + self.state_dim = env.observation_space.shape[0] + self.action_dim = env.action_space.shape[0] + + def normalize(self, x_in, key): + return (x_in - self.means[key]) / self.stds[key] + + def de_normalize(self, x_in, key): + return x_in * self.stds[key] + self.means[key] + + def to_torch(self, x_in): + if isinstance(x_in, dict): + return {k: self.to_torch(v) for k, v in x_in.items()} + elif torch.is_tensor(x_in): + return x_in.to(self.unet.device) + return torch.tensor(x_in, device=self.unet.device) + + def reset_x0(self, x_in, cond, act_dim): + for key, val in cond.items(): + x_in[:, key, act_dim:] = val.clone() + return x_in + + def run_diffusion(self, x, conditions, n_guide_steps, scale): + batch_size = x.shape[0] + y = None + for i in tqdm.tqdm(self.scheduler.timesteps): + # create batch of timesteps to pass into model + timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) + for _ in range(n_guide_steps): + with torch.enable_grad(): + x.requires_grad_() + + # permute to match dimension for pre-trained models + y = self.value_function(x.permute(0, 2, 1), timesteps).sample + grad = torch.autograd.grad([y.sum()], [x])[0] + + posterior_variance = self.scheduler._get_variance(i) + model_std = torch.exp(0.5 * posterior_variance) + grad = model_std * grad + + grad[timesteps < 2] = 0 + x = x.detach() + x = x + scale * grad + x = self.reset_x0(x, conditions, self.action_dim) + + prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) + + # TODO: verify deprecation of this kwarg + x = self.scheduler.step(prev_x, i, x)["prev_sample"] + + # apply conditions to the trajectory (set the initial state) + x = self.reset_x0(x, conditions, self.action_dim) + x = self.to_torch(x) + return x, y + + def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): + # normalize the observations and create batch dimension + obs = 
self.normalize(obs, "observations") + obs = obs[None].repeat(batch_size, axis=0) + + conditions = {0: self.to_torch(obs)} + shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) + + # generate initial noise and apply our conditions (to make the trajectories start at current state) + x1 = randn_tensor(shape, device=self.unet.device) + x = self.reset_x0(x1, conditions, self.action_dim) + x = self.to_torch(x) + + # run the diffusion process + x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) + + # sort output trajectories by value + sorted_idx = y.argsort(0, descending=True).squeeze() + sorted_values = x[sorted_idx] + actions = sorted_values[:, :, : self.action_dim] + actions = actions.detach().cpu().numpy() + denorm_actions = self.de_normalize(actions, key="actions") + + # select the action with the highest value + if y is not None: + selected_index = 0 + else: + # if we didn't run value guiding, select a random action + selected_index = np.random.randint(0, batch_size) + + denorm_actions = denorm_actions[selected_index, 0] + return denorm_actions diff --git a/diffusers3/image_processor.py b/diffusers3/image_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..d58bd9e3e3758906d6a2648a5bec31bfc422a1fe --- /dev/null +++ b/diffusers3/image_processor.py @@ -0,0 +1,1103 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import warnings +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from PIL import Image, ImageFilter, ImageOps + +from .configuration_utils import ConfigMixin, register_to_config +from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate + + +PipelineImageInput = Union[ + PIL.Image.Image, + np.ndarray, + torch.Tensor, + List[PIL.Image.Image], + List[np.ndarray], + List[torch.Tensor], +] + +PipelineDepthInput = PipelineImageInput + + +def is_valid_image(image): + return isinstance(image, PIL.Image.Image) or isinstance(image, (np.ndarray, torch.Tensor)) and image.ndim in (2, 3) + + +def is_valid_image_imagelist(images): + # check if the image input is one of the supported formats for image and image list: + # it can be either one of below 3 + # (1) a 4d pytorch tensor or numpy array, + # (2) a valid image: PIL.Image.Image, 2-d np.ndarray or torch.Tensor (grayscale image), 3-d np.ndarray or torch.Tensor + # (3) a list of valid image + if isinstance(images, (np.ndarray, torch.Tensor)) and images.ndim == 4: + return True + elif is_valid_image(images): + return True + elif isinstance(images, list): + return all(is_valid_image(image) for image in images) + return False + + +class VaeImageProcessor(ConfigMixin): + """ + Image processor for VAE. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. 
Can accept + `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method. + vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image to [-1,1]. + do_binarize (`bool`, *optional*, defaults to `False`): + Whether to binarize the image to 0/1. + do_convert_rgb (`bool`, *optional*, defaults to be `False`): + Whether to convert the images to RGB format. + do_convert_grayscale (`bool`, *optional*, defaults to be `False`): + Whether to convert the images to grayscale format. + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + vae_latent_channels: int = 4, + resample: str = "lanczos", + do_normalize: bool = True, + do_binarize: bool = False, + do_convert_rgb: bool = False, + do_convert_grayscale: bool = False, + ): + super().__init__() + if do_convert_rgb and do_convert_grayscale: + raise ValueError( + "`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`," + " if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.", + " if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`", + ) + + @staticmethod + def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + @staticmethod + def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray: + """ + Convert a PIL image or a list of PIL images to NumPy arrays. + """ + if not isinstance(images, list): + images = [images] + images = [np.array(image).astype(np.float32) / 255.0 for image in images] + images = np.stack(images, axis=0) + + return images + + @staticmethod + def numpy_to_pt(images: np.ndarray) -> torch.Tensor: + """ + Convert a NumPy image to a PyTorch tensor. + """ + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + @staticmethod + def pt_to_numpy(images: torch.Tensor) -> np.ndarray: + """ + Convert a PyTorch tensor to a NumPy image. + """ + images = images.cpu().permute(0, 2, 3, 1).float().numpy() + return images + + @staticmethod + def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: + """ + Normalize an image array to [-1,1]. + """ + return 2.0 * images - 1.0 + + @staticmethod + def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: + """ + Denormalize an image array to [0,1]. + """ + return (images / 2 + 0.5).clamp(0, 1) + + @staticmethod + def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image: + """ + Converts a PIL image to RGB format. 
+ """ + image = image.convert("RGB") + + return image + + @staticmethod + def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image: + """ + Converts a PIL image to grayscale format. + """ + image = image.convert("L") + + return image + + @staticmethod + def blur(image: PIL.Image.Image, blur_factor: int = 4) -> PIL.Image.Image: + """ + Applies Gaussian blur to an image. + """ + image = image.filter(ImageFilter.GaussianBlur(blur_factor)) + + return image + + @staticmethod + def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0): + """ + Finds a rectangular region that contains all masked ares in an image, and expands region to match the aspect + ratio of the original image; for example, if user drew mask in a 128x32 region, and the dimensions for + processing are 512x512, the region will be expanded to 128x128. + + Args: + mask_image (PIL.Image.Image): Mask image. + width (int): Width of the image to be processed. + height (int): Height of the image to be processed. + pad (int, optional): Padding to be added to the crop region. Defaults to 0. + + Returns: + tuple: (x1, y1, x2, y2) represent a rectangular region that contains all masked ares in an image and + matches the original aspect ratio. + """ + + mask_image = mask_image.convert("L") + mask = np.array(mask_image) + + # 1. find a rectangular region that contains all masked ares in an image + h, w = mask.shape + crop_left = 0 + for i in range(w): + if not (mask[:, i] == 0).all(): + break + crop_left += 1 + + crop_right = 0 + for i in reversed(range(w)): + if not (mask[:, i] == 0).all(): + break + crop_right += 1 + + crop_top = 0 + for i in range(h): + if not (mask[i] == 0).all(): + break + crop_top += 1 + + crop_bottom = 0 + for i in reversed(range(h)): + if not (mask[i] == 0).all(): + break + crop_bottom += 1 + + # 2. add padding to the crop region + x1, y1, x2, y2 = ( + int(max(crop_left - pad, 0)), + int(max(crop_top - pad, 0)), + int(min(w - crop_right + pad, w)), + int(min(h - crop_bottom + pad, h)), + ) + + # 3. expands crop region to match the aspect ratio of the image to be processed + ratio_crop_region = (x2 - x1) / (y2 - y1) + ratio_processing = width / height + + if ratio_crop_region > ratio_processing: + desired_height = (x2 - x1) / ratio_processing + desired_height_diff = int(desired_height - (y2 - y1)) + y1 -= desired_height_diff // 2 + y2 += desired_height_diff - desired_height_diff // 2 + if y2 >= mask_image.height: + diff = y2 - mask_image.height + y2 -= diff + y1 -= diff + if y1 < 0: + y2 -= y1 + y1 -= y1 + if y2 >= mask_image.height: + y2 = mask_image.height + else: + desired_width = (y2 - y1) * ratio_processing + desired_width_diff = int(desired_width - (x2 - x1)) + x1 -= desired_width_diff // 2 + x2 += desired_width_diff - desired_width_diff // 2 + if x2 >= mask_image.width: + diff = x2 - mask_image.width + x2 -= diff + x1 -= diff + if x1 < 0: + x2 -= x1 + x1 -= x1 + if x2 >= mask_image.width: + x2 = mask_image.width + + return x1, y1, x2, y2 + + def _resize_and_fill( + self, + image: PIL.Image.Image, + width: int, + height: int, + ) -> PIL.Image.Image: + """ + Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center + the image within the dimensions, filling empty with data from image. + + Args: + image: The image to resize. + width: The width to resize the image to. + height: The height to resize the image to. 
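+
+        The empty bands left after centering are filled by stretching the outermost rows/columns of the
+        resized image rather than with a solid color.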
+ """ + + ratio = width / height + src_ratio = image.width / image.height + + src_w = width if ratio < src_ratio else image.width * height // image.height + src_h = height if ratio >= src_ratio else image.height * width // image.width + + resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"]) + res = Image.new("RGB", (width, height)) + res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) + + if ratio < src_ratio: + fill_height = height // 2 - src_h // 2 + if fill_height > 0: + res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0)) + res.paste( + resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), + box=(0, fill_height + src_h), + ) + elif ratio > src_ratio: + fill_width = width // 2 - src_w // 2 + if fill_width > 0: + res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0)) + res.paste( + resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), + box=(fill_width + src_w, 0), + ) + + return res + + def _resize_and_crop( + self, + image: PIL.Image.Image, + width: int, + height: int, + ) -> PIL.Image.Image: + """ + Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center + the image within the dimensions, cropping the excess. + + Args: + image: The image to resize. + width: The width to resize the image to. + height: The height to resize the image to. + """ + ratio = width / height + src_ratio = image.width / image.height + + src_w = width if ratio > src_ratio else image.width * height // image.height + src_h = height if ratio <= src_ratio else image.height * width // image.width + + resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"]) + res = Image.new("RGB", (width, height)) + res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) + return res + + def resize( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], + height: int, + width: int, + resize_mode: str = "default", # "default", "fill", "crop" + ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: + """ + Resize image. + + Args: + image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): + The image input, can be a PIL image, numpy array or pytorch tensor. + height (`int`): + The height to resize to. + width (`int`): + The width to resize to. + resize_mode (`str`, *optional*, defaults to `default`): + The resize mode to use, can be one of `default` or `fill`. If `default`, will resize the image to fit + within the specified width and height, and it may not maintaining the original aspect ratio. If `fill`, + will resize the image to fit within the specified width and height, maintaining the aspect ratio, and + then center the image within the dimensions, filling empty with data from image. If `crop`, will resize + the image to fit within the specified width and height, maintaining the aspect ratio, and then center + the image within the dimensions, cropping the excess. Note that resize_mode `fill` and `crop` are only + supported for PIL image input. + + Returns: + `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`: + The resized image. 
+ """ + if resize_mode != "default" and not isinstance(image, PIL.Image.Image): + raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}") + if isinstance(image, PIL.Image.Image): + if resize_mode == "default": + image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample]) + elif resize_mode == "fill": + image = self._resize_and_fill(image, width, height) + elif resize_mode == "crop": + image = self._resize_and_crop(image, width, height) + else: + raise ValueError(f"resize_mode {resize_mode} is not supported") + + elif isinstance(image, torch.Tensor): + image = torch.nn.functional.interpolate( + image, + size=(height, width), + ) + elif isinstance(image, np.ndarray): + image = self.numpy_to_pt(image) + image = torch.nn.functional.interpolate( + image, + size=(height, width), + ) + image = self.pt_to_numpy(image) + return image + + def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image: + """ + Create a mask. + + Args: + image (`PIL.Image.Image`): + The image input, should be a PIL image. + + Returns: + `PIL.Image.Image`: + The binarized image. Values less than 0.5 are set to 0, values greater than 0.5 are set to 1. + """ + image[image < 0.5] = 0 + image[image >= 0.5] = 1 + + return image + + def get_default_height_width( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], + height: Optional[int] = None, + width: Optional[int] = None, + ) -> Tuple[int, int]: + """ + This function return the height and width that are downscaled to the next integer multiple of + `vae_scale_factor`. + + Args: + image(`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): + The image input, can be a PIL image, numpy array or pytorch tensor. if it is a numpy array, should have + shape `[batch, height, width]` or `[batch, height, width, channel]` if it is a pytorch tensor, should + have shape `[batch, channel, height, width]`. + height (`int`, *optional*, defaults to `None`): + The height in preprocessed image. If `None`, will use the height of `image` input. + width (`int`, *optional*`, defaults to `None`): + The width in preprocessed. If `None`, will use the width of the `image` input. + """ + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[2] + else: + height = image.shape[1] + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[3] + else: + width = image.shape[2] + + width, height = ( + x - x % self.config.vae_scale_factor for x in (width, height) + ) # resize to integer multiple of vae_scale_factor + + return height, width + + def preprocess( + self, + image: PipelineImageInput, + height: Optional[int] = None, + width: Optional[int] = None, + resize_mode: str = "default", # "default", "fill", "crop" + crops_coords: Optional[Tuple[int, int, int, int]] = None, + ) -> torch.Tensor: + """ + Preprocess the image input. + + Args: + image (`pipeline_image_input`): + The image input, accepted formats are PIL images, NumPy arrays, PyTorch tensors; Also accept list of + supported formats. + height (`int`, *optional*, defaults to `None`): + The height in preprocessed image. If `None`, will use the `get_default_height_width()` to get default + height. + width (`int`, *optional*`, defaults to `None`): + The width in preprocessed. If `None`, will use get_default_height_width()` to get the default width. 
+ resize_mode (`str`, *optional*, defaults to `default`): + The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit within + the specified width and height, and it may not maintaining the original aspect ratio. If `fill`, will + resize the image to fit within the specified width and height, maintaining the aspect ratio, and then + center the image within the dimensions, filling empty with data from image. If `crop`, will resize the + image to fit within the specified width and height, maintaining the aspect ratio, and then center the + image within the dimensions, cropping the excess. Note that resize_mode `fill` and `crop` are only + supported for PIL image input. + crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`): + The crop coordinates for each image in the batch. If `None`, will not crop the image. + """ + supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) + + # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image + if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3: + if isinstance(image, torch.Tensor): + # if image is a pytorch tensor could have 2 possible shapes: + # 1. batch x height x width: we should insert the channel dimension at position 1 + # 2. channel x height x width: we should insert batch dimension at position 0, + # however, since both channel and batch dimension has same size 1, it is same to insert at position 1 + # for simplicity, we insert a dimension of size 1 at position 1 for both cases + image = image.unsqueeze(1) + else: + # if it is a numpy array, it could have 2 possible shapes: + # 1. batch x height x width: insert channel dimension on last position + # 2. height x width x channel: insert batch dimension on first position + if image.shape[-1] == 1: + image = np.expand_dims(image, axis=0) + else: + image = np.expand_dims(image, axis=-1) + + if isinstance(image, list) and isinstance(image[0], np.ndarray) and image[0].ndim == 4: + warnings.warn( + "Passing `image` as a list of 4d np.ndarray is deprecated." + "Please concatenate the list along the batch dimension and pass it as a single 4d np.ndarray", + FutureWarning, + ) + image = np.concatenate(image, axis=0) + if isinstance(image, list) and isinstance(image[0], torch.Tensor) and image[0].ndim == 4: + warnings.warn( + "Passing `image` as a list of 4d torch.Tensor is deprecated." + "Please concatenate the list along the batch dimension and pass it as a single 4d torch.Tensor", + FutureWarning, + ) + image = torch.cat(image, axis=0) + + if not is_valid_image_imagelist(image): + raise ValueError( + f"Input is in incorrect format. 
Currently, we only support {', '.join(str(x) for x in supported_formats)}" + ) + if not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + if crops_coords is not None: + image = [i.crop(crops_coords) for i in image] + if self.config.do_resize: + height, width = self.get_default_height_width(image[0], height, width) + image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image] + if self.config.do_convert_rgb: + image = [self.convert_to_rgb(i) for i in image] + elif self.config.do_convert_grayscale: + image = [self.convert_to_grayscale(i) for i in image] + image = self.pil_to_numpy(image) # to np + image = self.numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + + image = self.numpy_to_pt(image) + + height, width = self.get_default_height_width(image, height, width) + if self.config.do_resize: + image = self.resize(image, height, width) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + if self.config.do_convert_grayscale and image.ndim == 3: + image = image.unsqueeze(1) + + channel = image.shape[1] + # don't need any preprocess if the image is latents + if channel == self.config.vae_latent_channels: + return image + + height, width = self.get_default_height_width(image, height, width) + if self.config.do_resize: + image = self.resize(image, height, width) + + # expected range [0,1], normalize to [-1,1] + do_normalize = self.config.do_normalize + if do_normalize and image.min() < 0: + warnings.warn( + "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] " + f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]", + FutureWarning, + ) + do_normalize = False + if do_normalize: + image = self.normalize(image) + + if self.config.do_binarize: + image = self.binarize(image) + + return image + + def postprocess( + self, + image: torch.Tensor, + output_type: str = "pil", + do_denormalize: Optional[List[bool]] = None, + ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: + """ + Postprocess the image output from tensor to `output_type`. + + Args: + image (`torch.Tensor`): + The image input, should be a pytorch tensor with shape `B x C x H x W`. + output_type (`str`, *optional*, defaults to `pil`): + The output type of the image, can be one of `pil`, `np`, `pt`, `latent`. + do_denormalize (`List[bool]`, *optional*, defaults to `None`): + Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the + `VaeImageProcessor` config. + + Returns: + `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`: + The postprocessed image. + """ + if not isinstance(image, torch.Tensor): + raise ValueError( + f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor" + ) + if output_type not in ["latent", "pt", "np", "pil"]: + deprecation_message = ( + f"the output_type {output_type} is outdated and has been set to `np`. 
Please make sure to set it to one of these instead: " + "`pil`, `np`, `pt`, `latent`" + ) + deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False) + output_type = "np" + + if output_type == "latent": + return image + + if do_denormalize is None: + do_denormalize = [self.config.do_normalize] * image.shape[0] + + image = torch.stack( + [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])] + ) + + if output_type == "pt": + return image + + image = self.pt_to_numpy(image) + + if output_type == "np": + return image + + if output_type == "pil": + return self.numpy_to_pil(image) + + def apply_overlay( + self, + mask: PIL.Image.Image, + init_image: PIL.Image.Image, + image: PIL.Image.Image, + crop_coords: Optional[Tuple[int, int, int, int]] = None, + ) -> PIL.Image.Image: + """ + overlay the inpaint output to the original image + """ + + width, height = image.width, image.height + + init_image = self.resize(init_image, width=width, height=height) + mask = self.resize(mask, width=width, height=height) + + init_image_masked = PIL.Image.new("RGBa", (width, height)) + init_image_masked.paste(init_image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert("L"))) + init_image_masked = init_image_masked.convert("RGBA") + + if crop_coords is not None: + x, y, x2, y2 = crop_coords + w = x2 - x + h = y2 - y + base_image = PIL.Image.new("RGBA", (width, height)) + image = self.resize(image, height=h, width=w, resize_mode="crop") + base_image.paste(image, (x, y)) + image = base_image.convert("RGB") + + image = image.convert("RGBA") + image.alpha_composite(init_image_masked) + image = image.convert("RGB") + + return image + + +class VaeImageProcessorLDM3D(VaeImageProcessor): + """ + Image processor for VAE LDM3D. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. + vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image to [-1,1]. + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + resample: str = "lanczos", + do_normalize: bool = True, + ): + super().__init__() + + @staticmethod + def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]: + """ + Convert a NumPy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image[:, :, :3]) for image in images] + + return pil_images + + @staticmethod + def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray: + """ + Convert a PIL image or a list of PIL images to NumPy arrays. 
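+        Depth inputs are assumed to be 16-bit images; pixel values are scaled to [0, 1] by dividing by 2**16 - 1.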
+ """ + if not isinstance(images, list): + images = [images] + + images = [np.array(image).astype(np.float32) / (2**16 - 1) for image in images] + images = np.stack(images, axis=0) + return images + + @staticmethod + def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]: + """ + Args: + image: RGB-like depth image + + Returns: depth map + + """ + return image[:, :, 1] * 2**8 + image[:, :, 2] + + def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]: + """ + Convert a NumPy depth image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images_depth = images[:, :, :, 3:] + if images.shape[-1] == 6: + images_depth = (images_depth * 255).round().astype("uint8") + pil_images = [ + Image.fromarray(self.rgblike_to_depthmap(image_depth), mode="I;16") for image_depth in images_depth + ] + elif images.shape[-1] == 4: + images_depth = (images_depth * 65535.0).astype(np.uint16) + pil_images = [Image.fromarray(image_depth, mode="I;16") for image_depth in images_depth] + else: + raise Exception("Not supported") + + return pil_images + + def postprocess( + self, + image: torch.Tensor, + output_type: str = "pil", + do_denormalize: Optional[List[bool]] = None, + ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]: + """ + Postprocess the image output from tensor to `output_type`. + + Args: + image (`torch.Tensor`): + The image input, should be a pytorch tensor with shape `B x C x H x W`. + output_type (`str`, *optional*, defaults to `pil`): + The output type of the image, can be one of `pil`, `np`, `pt`, `latent`. + do_denormalize (`List[bool]`, *optional*, defaults to `None`): + Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the + `VaeImageProcessor` config. + + Returns: + `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`: + The postprocessed image. + """ + if not isinstance(image, torch.Tensor): + raise ValueError( + f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor" + ) + if output_type not in ["latent", "pt", "np", "pil"]: + deprecation_message = ( + f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: " + "`pil`, `np`, `pt`, `latent`" + ) + deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False) + output_type = "np" + + if do_denormalize is None: + do_denormalize = [self.config.do_normalize] * image.shape[0] + + image = torch.stack( + [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])] + ) + + image = self.pt_to_numpy(image) + + if output_type == "np": + if image.shape[-1] == 6: + image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0) + else: + image_depth = image[:, :, :, 3:] + return image[:, :, :, :3], image_depth + + if output_type == "pil": + return self.numpy_to_pil(image), self.numpy_to_depth(image) + else: + raise Exception(f"This type {output_type} is not supported") + + def preprocess( + self, + rgb: Union[torch.Tensor, PIL.Image.Image, np.ndarray], + depth: Union[torch.Tensor, PIL.Image.Image, np.ndarray], + height: Optional[int] = None, + width: Optional[int] = None, + target_res: Optional[int] = None, + ) -> torch.Tensor: + """ + Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors. 
+ """ + supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor) + + # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image + if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and rgb.ndim == 3: + raise Exception("This is not yet supported") + + if isinstance(rgb, supported_formats): + rgb = [rgb] + depth = [depth] + elif not (isinstance(rgb, list) and all(isinstance(i, supported_formats) for i in rgb)): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(supported_formats)}" + ) + + if isinstance(rgb[0], PIL.Image.Image): + if self.config.do_convert_rgb: + raise Exception("This is not yet supported") + # rgb = [self.convert_to_rgb(i) for i in rgb] + # depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth + if self.config.do_resize or target_res: + height, width = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res + rgb = [self.resize(i, height, width) for i in rgb] + depth = [self.resize(i, height, width) for i in depth] + rgb = self.pil_to_numpy(rgb) # to np + rgb = self.numpy_to_pt(rgb) # to pt + + depth = self.depth_pil_to_numpy(depth) # to np + depth = self.numpy_to_pt(depth) # to pt + + elif isinstance(rgb[0], np.ndarray): + rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0) + rgb = self.numpy_to_pt(rgb) + height, width = self.get_default_height_width(rgb, height, width) + if self.config.do_resize: + rgb = self.resize(rgb, height, width) + + depth = np.concatenate(depth, axis=0) if rgb[0].ndim == 4 else np.stack(depth, axis=0) + depth = self.numpy_to_pt(depth) + height, width = self.get_default_height_width(depth, height, width) + if self.config.do_resize: + depth = self.resize(depth, height, width) + + elif isinstance(rgb[0], torch.Tensor): + raise Exception("This is not yet supported") + # rgb = torch.cat(rgb, axis=0) if rgb[0].ndim == 4 else torch.stack(rgb, axis=0) + + # if self.config.do_convert_grayscale and rgb.ndim == 3: + # rgb = rgb.unsqueeze(1) + + # channel = rgb.shape[1] + + # height, width = self.get_default_height_width(rgb, height, width) + # if self.config.do_resize: + # rgb = self.resize(rgb, height, width) + + # depth = torch.cat(depth, axis=0) if depth[0].ndim == 4 else torch.stack(depth, axis=0) + + # if self.config.do_convert_grayscale and depth.ndim == 3: + # depth = depth.unsqueeze(1) + + # channel = depth.shape[1] + # # don't need any preprocess if the image is latents + # if depth == 4: + # return rgb, depth + + # height, width = self.get_default_height_width(depth, height, width) + # if self.config.do_resize: + # depth = self.resize(depth, height, width) + # expected range [0,1], normalize to [-1,1] + do_normalize = self.config.do_normalize + if rgb.min() < 0 and do_normalize: + warnings.warn( + "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] " + f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{rgb.min()},{rgb.max()}]", + FutureWarning, + ) + do_normalize = False + + if do_normalize: + rgb = self.normalize(rgb) + depth = self.normalize(depth) + + if self.config.do_binarize: + rgb = self.binarize(rgb) + depth = self.binarize(depth) + + return rgb, depth + + +class IPAdapterMaskProcessor(VaeImageProcessor): + """ + Image processor for IP Adapter image masks. 
+ + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. + vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `False`): + Whether to normalize the image to [-1,1]. + do_binarize (`bool`, *optional*, defaults to `True`): + Whether to binarize the image to 0/1. + do_convert_grayscale (`bool`, *optional*, defaults to be `True`): + Whether to convert the images to grayscale format. + + """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + resample: str = "lanczos", + do_normalize: bool = False, + do_binarize: bool = True, + do_convert_grayscale: bool = True, + ): + super().__init__( + do_resize=do_resize, + vae_scale_factor=vae_scale_factor, + resample=resample, + do_normalize=do_normalize, + do_binarize=do_binarize, + do_convert_grayscale=do_convert_grayscale, + ) + + @staticmethod + def downsample(mask: torch.Tensor, batch_size: int, num_queries: int, value_embed_dim: int): + """ + Downsamples the provided mask tensor to match the expected dimensions for scaled dot-product attention. If the + aspect ratio of the mask does not match the aspect ratio of the output image, a warning is issued. + + Args: + mask (`torch.Tensor`): + The input mask tensor generated with `IPAdapterMaskProcessor.preprocess()`. + batch_size (`int`): + The batch size. + num_queries (`int`): + The number of queries. + value_embed_dim (`int`): + The dimensionality of the value embeddings. + + Returns: + `torch.Tensor`: + The downsampled mask tensor. + + """ + o_h = mask.shape[1] + o_w = mask.shape[2] + ratio = o_w / o_h + mask_h = int(math.sqrt(num_queries / ratio)) + mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0) + mask_w = num_queries // mask_h + + mask_downsample = F.interpolate(mask.unsqueeze(0), size=(mask_h, mask_w), mode="bicubic").squeeze(0) + + # Repeat batch_size times + if mask_downsample.shape[0] < batch_size: + mask_downsample = mask_downsample.repeat(batch_size, 1, 1) + + mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1) + + downsampled_area = mask_h * mask_w + # If the output image and the mask do not have the same aspect ratio, tensor shapes will not match + # Pad tensor if downsampled_mask.shape[1] is smaller than num_queries + if downsampled_area < num_queries: + warnings.warn( + "The aspect ratio of the mask does not match the aspect ratio of the output image. " + "Please update your masks or adjust the output size for optimal performance.", + UserWarning, + ) + mask_downsample = F.pad(mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0) + # Discard last embeddings if downsampled_mask.shape[1] is bigger than num_queries + if downsampled_area > num_queries: + warnings.warn( + "The aspect ratio of the mask does not match the aspect ratio of the output image. 
" + "Please update your masks or adjust the output size for optimal performance.", + UserWarning, + ) + mask_downsample = mask_downsample[:, :num_queries] + + # Repeat last dimension to match SDPA output shape + mask_downsample = mask_downsample.view(mask_downsample.shape[0], mask_downsample.shape[1], 1).repeat( + 1, 1, value_embed_dim + ) + + return mask_downsample + + +class PixArtImageProcessor(VaeImageProcessor): + """ + Image processor for PixArt image resize and crop. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept + `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method. + vae_scale_factor (`int`, *optional*, defaults to `8`): + VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. + resample (`str`, *optional*, defaults to `lanczos`): + Resampling filter to use when resizing the image. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image to [-1,1]. + do_binarize (`bool`, *optional*, defaults to `False`): + Whether to binarize the image to 0/1. + do_convert_rgb (`bool`, *optional*, defaults to be `False`): + Whether to convert the images to RGB format. + do_convert_grayscale (`bool`, *optional*, defaults to be `False`): + Whether to convert the images to grayscale format. + """ + + @register_to_config + def __init__( + self, + do_resize: bool = True, + vae_scale_factor: int = 8, + resample: str = "lanczos", + do_normalize: bool = True, + do_binarize: bool = False, + do_convert_grayscale: bool = False, + ): + super().__init__( + do_resize=do_resize, + vae_scale_factor=vae_scale_factor, + resample=resample, + do_normalize=do_normalize, + do_binarize=do_binarize, + do_convert_grayscale=do_convert_grayscale, + ) + + @staticmethod + def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]: + """Returns binned height and width.""" + ar = float(height / width) + closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) + default_hw = ratios[closest_ratio] + return int(default_hw[0]), int(default_hw[1]) + + @staticmethod + def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor: + orig_height, orig_width = samples.shape[2], samples.shape[3] + + # Check if resizing is needed + if orig_height != new_height or orig_width != new_width: + ratio = max(new_height / orig_height, new_width / orig_width) + resized_width = int(orig_width * ratio) + resized_height = int(orig_height * ratio) + + # Resize + samples = F.interpolate( + samples, size=(resized_height, resized_width), mode="bilinear", align_corners=False + ) + + # Center Crop + start_x = (resized_width - new_width) // 2 + end_x = start_x + new_width + start_y = (resized_height - new_height) // 2 + end_y = start_y + new_height + samples = samples[:, :, start_y:end_y, start_x:end_x] + + return samples diff --git a/diffusers3/loaders/__init__.py b/diffusers3/loaders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bccd37ddc42fec07ab2fa34798b060c56683aec1 --- /dev/null +++ b/diffusers3/loaders/__init__.py @@ -0,0 +1,100 @@ +from typing import TYPE_CHECKING + +from ..utils import DIFFUSERS_SLOW_IMPORT, _LazyModule, deprecate +from ..utils.import_utils import is_peft_available, is_torch_available, is_transformers_available + + +def text_encoder_lora_state_dict(text_encoder): + 
deprecate( + "text_encoder_load_state_dict in `models`", + "0.27.0", + "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", + ) + state_dict = {} + + for name, module in text_encoder_attn_modules(text_encoder): + for k, v in module.q_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v + + for k, v in module.k_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v + + for k, v in module.v_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v + + for k, v in module.out_proj.lora_linear_layer.state_dict().items(): + state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v + + return state_dict + + +if is_transformers_available(): + + def text_encoder_attn_modules(text_encoder): + deprecate( + "text_encoder_attn_modules in `models`", + "0.27.0", + "`text_encoder_lora_state_dict` is deprecated and will be removed in 0.27.0. Make sure to retrieve the weights using `get_peft_model`. See https://huggingface.co/docs/peft/v0.6.2/en/quicktour#peftmodel for more information.", + ) + from transformers import CLIPTextModel, CLIPTextModelWithProjection + + attn_modules = [] + + if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)): + for i, layer in enumerate(text_encoder.text_model.encoder.layers): + name = f"text_model.encoder.layers.{i}.self_attn" + mod = layer.self_attn + attn_modules.append((name, mod)) + else: + raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}") + + return attn_modules + + +_import_structure = {} + +if is_torch_available(): + _import_structure["single_file_model"] = ["FromOriginalModelMixin"] + + _import_structure["unet"] = ["UNet2DConditionLoadersMixin"] + _import_structure["utils"] = ["AttnProcsLayers"] + if is_transformers_available(): + _import_structure["single_file"] = ["FromSingleFileMixin"] + _import_structure["lora_pipeline"] = [ + "AmusedLoraLoaderMixin", + "StableDiffusionLoraLoaderMixin", + "SD3LoraLoaderMixin", + "StableDiffusionXLLoraLoaderMixin", + "LoraLoaderMixin", + "FluxLoraLoaderMixin", + ] + _import_structure["textual_inversion"] = ["TextualInversionLoaderMixin"] + _import_structure["ip_adapter"] = ["IPAdapterMixin"] + +_import_structure["peft"] = ["PeftAdapterMixin"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + if is_torch_available(): + from .single_file_model import FromOriginalModelMixin + from .unet import UNet2DConditionLoadersMixin + from .utils import AttnProcsLayers + + if is_transformers_available(): + from .ip_adapter import IPAdapterMixin + from .lora_pipeline import ( + AmusedLoraLoaderMixin, + FluxLoraLoaderMixin, + LoraLoaderMixin, + SD3LoraLoaderMixin, + StableDiffusionLoraLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + ) + from .single_file import FromSingleFileMixin + from .textual_inversion import TextualInversionLoaderMixin + + from .peft import PeftAdapterMixin +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/diffusers3/loaders/__pycache__/__init__.cpython-310.pyc b/diffusers3/loaders/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ac2a6d9d5fe4071563bd493a5fc291359f5c1bf Binary files /dev/null and 
b/diffusers3/loaders/__pycache__/__init__.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/__init__.cpython-38.pyc b/diffusers3/loaders/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cc8bab376abee22cb043c0d3e6428e0788bcb7d Binary files /dev/null and b/diffusers3/loaders/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/ip_adapter.cpython-310.pyc b/diffusers3/loaders/__pycache__/ip_adapter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1cf342136e86cb75426fdf5be562b2f73db11f3 Binary files /dev/null and b/diffusers3/loaders/__pycache__/ip_adapter.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/ip_adapter.cpython-38.pyc b/diffusers3/loaders/__pycache__/ip_adapter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ba933d89b275e99aae6294e434a3efd6cf6c7d8 Binary files /dev/null and b/diffusers3/loaders/__pycache__/ip_adapter.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/lora_base.cpython-310.pyc b/diffusers3/loaders/__pycache__/lora_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..251fdeb282d24f3fc62765d665d11b274e178cd5 Binary files /dev/null and b/diffusers3/loaders/__pycache__/lora_base.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/lora_base.cpython-38.pyc b/diffusers3/loaders/__pycache__/lora_base.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90fc610a2f1b2cec061b9c6f359422a8335a5c0f Binary files /dev/null and b/diffusers3/loaders/__pycache__/lora_base.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/lora_conversion_utils.cpython-310.pyc b/diffusers3/loaders/__pycache__/lora_conversion_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e85ba214d91b448b036b1c6b1c7d05dec892a59 Binary files /dev/null and b/diffusers3/loaders/__pycache__/lora_conversion_utils.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/lora_conversion_utils.cpython-38.pyc b/diffusers3/loaders/__pycache__/lora_conversion_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cd6d429aab0640383d08f4eec52ed4286a7910b Binary files /dev/null and b/diffusers3/loaders/__pycache__/lora_conversion_utils.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/lora_pipeline.cpython-310.pyc b/diffusers3/loaders/__pycache__/lora_pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..178bc176b2be14d4624195ad5b52a154faf58437 Binary files /dev/null and b/diffusers3/loaders/__pycache__/lora_pipeline.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/lora_pipeline.cpython-38.pyc b/diffusers3/loaders/__pycache__/lora_pipeline.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75a51f6898d779cdc52bf452e4de96b18f627282 Binary files /dev/null and b/diffusers3/loaders/__pycache__/lora_pipeline.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/peft.cpython-310.pyc b/diffusers3/loaders/__pycache__/peft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17a7bbdb3b7ab281a7c71e6763a74e4cb2928a02 Binary files /dev/null and b/diffusers3/loaders/__pycache__/peft.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/peft.cpython-38.pyc 
b/diffusers3/loaders/__pycache__/peft.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dff22eea21687b9fd010a5ecbe38ac6eb19375d Binary files /dev/null and b/diffusers3/loaders/__pycache__/peft.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/single_file.cpython-310.pyc b/diffusers3/loaders/__pycache__/single_file.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56c9edadc925ca7511db206380e1d0feb8f1834e Binary files /dev/null and b/diffusers3/loaders/__pycache__/single_file.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/single_file.cpython-38.pyc b/diffusers3/loaders/__pycache__/single_file.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c61b6f452b40d95235f35f7beb5e38cfd72d01f Binary files /dev/null and b/diffusers3/loaders/__pycache__/single_file.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/single_file_model.cpython-310.pyc b/diffusers3/loaders/__pycache__/single_file_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..618cd8055ba9ec8ab799dcfe68e44d3aa5682470 Binary files /dev/null and b/diffusers3/loaders/__pycache__/single_file_model.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/single_file_model.cpython-38.pyc b/diffusers3/loaders/__pycache__/single_file_model.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e31bb769d41daf61e2f89cb1527a33ea39622453 Binary files /dev/null and b/diffusers3/loaders/__pycache__/single_file_model.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/single_file_utils.cpython-310.pyc b/diffusers3/loaders/__pycache__/single_file_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19800cf244cb38d92c7afdf5732f2dbc591d59a8 Binary files /dev/null and b/diffusers3/loaders/__pycache__/single_file_utils.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/single_file_utils.cpython-38.pyc b/diffusers3/loaders/__pycache__/single_file_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7dbcead2b2aa52dd522bdf38d430708bb00011a Binary files /dev/null and b/diffusers3/loaders/__pycache__/single_file_utils.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/textual_inversion.cpython-310.pyc b/diffusers3/loaders/__pycache__/textual_inversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec4ce55f77985960c50772b23a868950c171dc76 Binary files /dev/null and b/diffusers3/loaders/__pycache__/textual_inversion.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/textual_inversion.cpython-38.pyc b/diffusers3/loaders/__pycache__/textual_inversion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c91f68458f6ae1a640f950ba8900d0f7267a26b Binary files /dev/null and b/diffusers3/loaders/__pycache__/textual_inversion.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/unet.cpython-310.pyc b/diffusers3/loaders/__pycache__/unet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c605357deb09c274c0906d71af0507a0b6f39b1f Binary files /dev/null and b/diffusers3/loaders/__pycache__/unet.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/unet.cpython-38.pyc b/diffusers3/loaders/__pycache__/unet.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..51cfe17dc5a5c01d7abd57c19fcc9635b0b29ee0 Binary files /dev/null and b/diffusers3/loaders/__pycache__/unet.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/unet_loader_utils.cpython-310.pyc b/diffusers3/loaders/__pycache__/unet_loader_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b977d7210c8118b280f7f96eff206c88a97e703 Binary files /dev/null and b/diffusers3/loaders/__pycache__/unet_loader_utils.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/unet_loader_utils.cpython-38.pyc b/diffusers3/loaders/__pycache__/unet_loader_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a834a0236ae7e2cdbae147216d51382cf2ca5913 Binary files /dev/null and b/diffusers3/loaders/__pycache__/unet_loader_utils.cpython-38.pyc differ diff --git a/diffusers3/loaders/__pycache__/utils.cpython-310.pyc b/diffusers3/loaders/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7540794cb4427ed669efad64b4b33a3ff3fd4659 Binary files /dev/null and b/diffusers3/loaders/__pycache__/utils.cpython-310.pyc differ diff --git a/diffusers3/loaders/__pycache__/utils.cpython-38.pyc b/diffusers3/loaders/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8407948b52801ac7b278f02a8d5467e12be76bbb Binary files /dev/null and b/diffusers3/loaders/__pycache__/utils.cpython-38.pyc differ diff --git a/diffusers3/loaders/ip_adapter.py b/diffusers3/loaders/ip_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..1006dab9e4b97d98903b7bdb40a5e152b9134824 --- /dev/null +++ b/diffusers3/loaders/ip_adapter.py @@ -0,0 +1,348 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pathlib import Path +from typing import Dict, List, Optional, Union + +import torch +import torch.nn.functional as F +from huggingface_hub.utils import validate_hf_hub_args +from safetensors import safe_open + +from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict +from ..utils import ( + USE_PEFT_BACKEND, + _get_model_file, + is_accelerate_available, + is_torch_version, + is_transformers_available, + logging, +) +from .unet_loader_utils import _maybe_expand_lora_scales + + +if is_transformers_available(): + from transformers import ( + CLIPImageProcessor, + CLIPVisionModelWithProjection, + ) + + from ..models.attention_processor import ( + AttnProcessor, + AttnProcessor2_0, + IPAdapterAttnProcessor, + IPAdapterAttnProcessor2_0, + ) + +logger = logging.get_logger(__name__) + + +class IPAdapterMixin: + """Mixin for handling IP Adapters.""" + + @validate_hf_hub_args + def load_ip_adapter( + self, + pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]], + subfolder: Union[str, List[str]], + weight_name: Union[str, List[str]], + image_encoder_folder: Optional[str] = "image_encoder", + **kwargs, + ): + """ + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + subfolder (`str` or `List[str]`): + The subfolder location of a model file within a larger model repository on the Hub or locally. If a + list is passed, it should have the same length as `weight_name`. + weight_name (`str` or `List[str]`): + The name of the weight file to load. If a list is passed, it should have the same length as + `weight_name`. + image_encoder_folder (`str`, *optional*, defaults to `image_encoder`): + The subfolder location of the image encoder within a larger model repository on the Hub or locally. + Pass `None` to not load the image encoder. If the image encoder is located in a folder inside + `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g. + `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than + `subfolder`, you should pass the path to the folder that contains image encoder weights, for example, + `image_encoder_folder="different_subfolder/image_encoder"`. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. 
+ token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + """ + + # handle the list inputs for multiple IP Adapters + if not isinstance(weight_name, list): + weight_name = [weight_name] + + if not isinstance(pretrained_model_name_or_path_or_dict, list): + pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict] + if len(pretrained_model_name_or_path_or_dict) == 1: + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name) + + if not isinstance(subfolder, list): + subfolder = [subfolder] + if len(subfolder) == 1: + subfolder = subfolder * len(weight_name) + + if len(weight_name) != len(pretrained_model_name_or_path_or_dict): + raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.") + + if len(weight_name) != len(subfolder): + raise ValueError("`weight_name` and `subfolder` must have the same length.") + + # Load the main state dict first. + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + + if low_cpu_mem_usage and not is_accelerate_available(): + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." 
+ ) + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + state_dicts = [] + for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip( + pretrained_model_name_or_path_or_dict, weight_name, subfolder + ): + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + if weight_name.endswith(".safetensors"): + state_dict = {"image_proj": {}, "ip_adapter": {}} + with safe_open(model_file, framework="pt", device="cpu") as f: + for key in f.keys(): + if key.startswith("image_proj."): + state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key) + elif key.startswith("ip_adapter."): + state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key) + else: + state_dict = load_state_dict(model_file) + else: + state_dict = pretrained_model_name_or_path_or_dict + + keys = list(state_dict.keys()) + if keys != ["image_proj", "ip_adapter"]: + raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.") + + state_dicts.append(state_dict) + + # load CLIP image encoder here if it has not been registered to the pipeline yet + if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None: + if image_encoder_folder is not None: + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}") + if image_encoder_folder.count("/") == 0: + image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix() + else: + image_encoder_subfolder = Path(image_encoder_folder).as_posix() + + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + pretrained_model_name_or_path_or_dict, + subfolder=image_encoder_subfolder, + low_cpu_mem_usage=low_cpu_mem_usage, + cache_dir=cache_dir, + local_files_only=local_files_only, + ).to(self.device, dtype=self.dtype) + self.register_modules(image_encoder=image_encoder) + else: + raise ValueError( + "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict." + ) + else: + logger.warning( + "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter." + "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead." 
+ ) + + # create feature extractor if it has not been registered to the pipeline yet + if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None: + # FaceID IP adapters don't need the image encoder so it's not present, in this case we default to 224 + default_clip_size = 224 + clip_image_size = ( + self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size + ) + feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size) + self.register_modules(feature_extractor=feature_extractor) + + # load ip-adapter into unet + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) + + extra_loras = unet._load_ip_adapter_loras(state_dicts) + if extra_loras != {}: + if not USE_PEFT_BACKEND: + logger.warning("PEFT backend is required to load these weights.") + else: + # apply the IP Adapter Face ID LoRA weights + peft_config = getattr(unet, "peft_config", {}) + for k, lora in extra_loras.items(): + if f"faceid_{k}" not in peft_config: + self.load_lora_weights(lora, adapter_name=f"faceid_{k}") + self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0]) + + def set_ip_adapter_scale(self, scale): + """ + Set IP-Adapter scales per-transformer block. Input `scale` could be a single config or a list of configs for + granular control over each IP-Adapter behavior. A config can be a float or a dictionary. + + Example: + + ```py + # To use original IP-Adapter + scale = 1.0 + pipeline.set_ip_adapter_scale(scale) + + # To use style block only + scale = { + "up": {"block_0": [0.0, 1.0, 0.0]}, + } + pipeline.set_ip_adapter_scale(scale) + + # To use style+layout blocks + scale = { + "down": {"block_2": [0.0, 1.0]}, + "up": {"block_0": [0.0, 1.0, 0.0]}, + } + pipeline.set_ip_adapter_scale(scale) + + # To use style and layout from 2 reference images + scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}] + pipeline.set_ip_adapter_scale(scales) + ``` + """ + unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet + if not isinstance(scale, list): + scale = [scale] + scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0) + + for attn_name, attn_processor in unet.attn_processors.items(): + if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): + if len(scale_configs) != len(attn_processor.scale): + raise ValueError( + f"Cannot assign {len(scale_configs)} scale_configs to " + f"{len(attn_processor.scale)} IP-Adapter." + ) + elif len(scale_configs) == 1: + scale_configs = scale_configs * len(attn_processor.scale) + for i, scale_config in enumerate(scale_configs): + if isinstance(scale_config, dict): + for k, s in scale_config.items(): + if attn_name.startswith(k): + attn_processor.scale[i] = s + else: + attn_processor.scale[i] = scale_config + + def unload_ip_adapter(self): + """ + Unloads the IP Adapter weights + + Examples: + + ```python + >>> # Assuming `pipeline` is already loaded with the IP Adapter weights. + >>> pipeline.unload_ip_adapter() + >>> ... 
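+        >>> # After unloading, the original attention processors are restored and the IP-Adapter
+        >>> # image inputs (`ip_adapter_image`, `ip_adapter_image_embeds`) can no longer be used.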
+ ``` + """ + # remove CLIP image encoder + if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None: + self.image_encoder = None + self.register_to_config(image_encoder=[None, None]) + + # remove feature extractor only when safety_checker is None as safety_checker uses + # the feature_extractor later + if not hasattr(self, "safety_checker"): + if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None: + self.feature_extractor = None + self.register_to_config(feature_extractor=[None, None]) + + # remove hidden encoder + self.unet.encoder_hid_proj = None + self.unet.config.encoder_hid_dim_type = None + + # Kolors: restore `encoder_hid_proj` with `text_encoder_hid_proj` + if hasattr(self.unet, "text_encoder_hid_proj") and self.unet.text_encoder_hid_proj is not None: + self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj + self.unet.text_encoder_hid_proj = None + self.unet.config.encoder_hid_dim_type = "text_proj" + + # restore original Unet attention processors layers + attn_procs = {} + for name, value in self.unet.attn_processors.items(): + attn_processor_class = ( + AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor() + ) + attn_procs[name] = ( + attn_processor_class + if isinstance(value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)) + else value.__class__() + ) + self.unet.set_attn_processor(attn_procs) diff --git a/diffusers3/loaders/lora_base.py b/diffusers3/loaders/lora_base.py new file mode 100644 index 0000000000000000000000000000000000000000..4b963270427bc87969b26a122e94c62813daba69 --- /dev/null +++ b/diffusers3/loaders/lora_base.py @@ -0,0 +1,752 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import inspect +import os +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +import safetensors +import torch +import torch.nn as nn +from huggingface_hub import model_info +from huggingface_hub.constants import HF_HUB_OFFLINE + +from ..models.modeling_utils import ModelMixin, load_state_dict +from ..utils import ( + USE_PEFT_BACKEND, + _get_model_file, + delete_adapter_layers, + deprecate, + is_accelerate_available, + is_peft_available, + is_transformers_available, + logging, + recurse_remove_peft_layers, + set_adapter_layers, + set_weights_and_activate_adapters, +) + + +if is_transformers_available(): + from transformers import PreTrainedModel + +if is_peft_available(): + from peft.tuners.tuners_utils import BaseTunerLayer + +if is_accelerate_available(): + from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module + +logger = logging.get_logger(__name__) + + +def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None): + """ + Fuses LoRAs for the text encoder. + + Args: + text_encoder (`torch.nn.Module`): + The text encoder module to set the adapter layers for. 
If `None`, it will try to get the `text_encoder` + attribute. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. + adapter_names (`List[str]` or `str`): + The names of the adapters to use. + """ + merge_kwargs = {"safe_merge": safe_fusing} + + for module in text_encoder.modules(): + if isinstance(module, BaseTunerLayer): + if lora_scale != 1.0: + module.scale_layer(lora_scale) + + # For BC with previous PEFT versions, we need to check the signature + # of the `merge` method to see if it supports the `adapter_names` argument. + supported_merge_kwargs = list(inspect.signature(module.merge).parameters) + if "adapter_names" in supported_merge_kwargs: + merge_kwargs["adapter_names"] = adapter_names + elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None: + raise ValueError( + "The `adapter_names` argument is not supported with your PEFT version. " + "Please upgrade to the latest version of PEFT. `pip install -U peft`" + ) + + module.merge(**merge_kwargs) + + +def unfuse_text_encoder_lora(text_encoder): + """ + Unfuses LoRAs for the text encoder. + + Args: + text_encoder (`torch.nn.Module`): + The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder` + attribute. + """ + for module in text_encoder.modules(): + if isinstance(module, BaseTunerLayer): + module.unmerge() + + +def set_adapters_for_text_encoder( + adapter_names: Union[List[str], str], + text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821 + text_encoder_weights: Optional[Union[float, List[float], List[None]]] = None, +): + """ + Sets the adapter layers for the text encoder. + + Args: + adapter_names (`List[str]` or `str`): + The names of the adapters to use. + text_encoder (`torch.nn.Module`, *optional*): + The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder` + attribute. + text_encoder_weights (`List[float]`, *optional*): + The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters. + """ + if text_encoder is None: + raise ValueError( + "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead." + ) + + def process_weights(adapter_names, weights): + # Expand weights into a list, one entry per adapter + # e.g. for 2 adapters: 7 -> [7,7] ; [3, None] -> [3, None] + if not isinstance(weights, list): + weights = [weights] * len(adapter_names) + + if len(adapter_names) != len(weights): + raise ValueError( + f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}" + ) + + # Set None values to default of 1.0 + # e.g. [7,7] -> [7,7] ; [3, None] -> [3,1] + weights = [w if w is not None else 1.0 for w in weights] + + return weights + + adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names + text_encoder_weights = process_weights(adapter_names, text_encoder_weights) + set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights) + + +def disable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None): + """ + Disables the LoRA layers for the text encoder. + + Args: + text_encoder (`torch.nn.Module`, *optional*): + The text encoder module to disable the LoRA layers for. 
If `None`, it will try to get the `text_encoder` + attribute. + """ + if text_encoder is None: + raise ValueError("Text Encoder not found.") + set_adapter_layers(text_encoder, enabled=False) + + +def enable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None): + """ + Enables the LoRA layers for the text encoder. + + Args: + text_encoder (`torch.nn.Module`, *optional*): + The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder` + attribute. + """ + if text_encoder is None: + raise ValueError("Text Encoder not found.") + set_adapter_layers(text_encoder, enabled=True) + + +def _remove_text_encoder_monkey_patch(text_encoder): + recurse_remove_peft_layers(text_encoder) + if getattr(text_encoder, "peft_config", None) is not None: + del text_encoder.peft_config + text_encoder._hf_peft_config_loaded = None + + +class LoraBaseMixin: + """Utility class for handling LoRAs.""" + + _lora_loadable_modules = [] + num_fused_loras = 0 + + def load_lora_weights(self, **kwargs): + raise NotImplementedError("`load_lora_weights()` is not implemented.") + + @classmethod + def save_lora_weights(cls, **kwargs): + raise NotImplementedError("`save_lora_weights()` not implemented.") + + @classmethod + def lora_state_dict(cls, **kwargs): + raise NotImplementedError("`lora_state_dict()` is not implemented.") + + @classmethod + def _optionally_disable_offloading(cls, _pipeline): + """ + Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU. + + Args: + _pipeline (`DiffusionPipeline`): + The pipeline to disable offloading for. + + Returns: + tuple: + A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True. + """ + is_model_cpu_offload = False + is_sequential_cpu_offload = False + + if _pipeline is not None and _pipeline.hf_device_map is None: + for _, component in _pipeline.components.items(): + if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"): + if not is_model_cpu_offload: + is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload) + if not is_sequential_cpu_offload: + is_sequential_cpu_offload = ( + isinstance(component._hf_hook, AlignDevicesHook) + or hasattr(component._hf_hook, "hooks") + and isinstance(component._hf_hook.hooks[0], AlignDevicesHook) + ) + + logger.info( + "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again." + ) + remove_hook_from_module(component, recurse=is_sequential_cpu_offload) + + return (is_model_cpu_offload, is_sequential_cpu_offload) + + @classmethod + def _fetch_state_dict( + cls, + pretrained_model_name_or_path_or_dict, + weight_name, + use_safetensors, + local_files_only, + cache_dir, + force_download, + proxies, + token, + revision, + subfolder, + user_agent, + allow_pickle, + ): + from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE + + model_file = None + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + # Let's first try to load .safetensors weights + if (use_safetensors and weight_name is None) or ( + weight_name is not None and weight_name.endswith(".safetensors") + ): + try: + # Here we're relaxing the loading check to enable more Inference API + # friendliness where sometimes, it's not at all possible to automatically + # determine `weight_name`. 
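+                    # If no `weight_name` was given, fall back to guessing it from the files in the Hub repo or local directory.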
+ if weight_name is None: + weight_name = cls._best_guess_weight_name( + pretrained_model_name_or_path_or_dict, + file_extension=".safetensors", + local_files_only=local_files_only, + ) + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = safetensors.torch.load_file(model_file, device="cpu") + except (IOError, safetensors.SafetensorError) as e: + if not allow_pickle: + raise e + # try loading non-safetensors weights + model_file = None + pass + + if model_file is None: + if weight_name is None: + weight_name = cls._best_guess_weight_name( + pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only + ) + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = load_state_dict(model_file) + else: + state_dict = pretrained_model_name_or_path_or_dict + + return state_dict + + @classmethod + def _best_guess_weight_name( + cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False + ): + from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE + + if local_files_only or HF_HUB_OFFLINE: + raise ValueError("When using the offline mode, you must specify a `weight_name`.") + + targeted_files = [] + + if os.path.isfile(pretrained_model_name_or_path_or_dict): + return + elif os.path.isdir(pretrained_model_name_or_path_or_dict): + targeted_files = [ + f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension) + ] + else: + files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings + targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)] + if len(targeted_files) == 0: + return + + # "scheduler" does not correspond to a LoRA checkpoint. + # "optimizer" does not correspond to a LoRA checkpoint + # only top-level checkpoints are considered and not the other ones, hence "checkpoint". + unallowed_substrings = {"scheduler", "optimizer", "checkpoint"} + targeted_files = list( + filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files) + ) + + if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files): + targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files)) + elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files): + targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files)) + + if len(targeted_files) > 1: + raise ValueError( + f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one `.safetensors` or `.bin` file in {pretrained_model_name_or_path_or_dict}." + ) + weight_name = targeted_files[0] + return weight_name + + def unload_lora_weights(self): + """ + Unloads the LoRA parameters. + + Examples: + + ```python + >>> # Assuming `pipeline` is already loaded with the LoRA parameters. + >>> pipeline.unload_lora_weights() + >>> ... 
+ ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if model is not None: + if issubclass(model.__class__, ModelMixin): + model.unload_lora() + elif issubclass(model.__class__, PreTrainedModel): + _remove_text_encoder_monkey_patch(model) + + def fuse_lora( + self, + components: List[str] = [], + lora_scale: float = 1.0, + safe_fusing: bool = False, + adapter_names: Optional[List[str]] = None, + **kwargs, + ): + r""" + Fuses the LoRA parameters into the original parameters of the corresponding blocks. + + + + This is an experimental API. + + + + Args: + components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. + adapter_names (`List[str]`, *optional*): + Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. + + Example: + + ```py + from diffusers import DiffusionPipeline + import torch + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.fuse_lora(lora_scale=0.7) + ``` + """ + if "fuse_unet" in kwargs: + depr_message = "Passing `fuse_unet` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_unet` will be removed in a future version." + deprecate( + "fuse_unet", + "1.0.0", + depr_message, + ) + if "fuse_transformer" in kwargs: + depr_message = "Passing `fuse_transformer` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_transformer` will be removed in a future version." + deprecate( + "fuse_transformer", + "1.0.0", + depr_message, + ) + if "fuse_text_encoder" in kwargs: + depr_message = "Passing `fuse_text_encoder` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_text_encoder` will be removed in a future version." + deprecate( + "fuse_text_encoder", + "1.0.0", + depr_message, + ) + + if len(components) == 0: + raise ValueError("`components` cannot be an empty list.") + + for fuse_component in components: + if fuse_component not in self._lora_loadable_modules: + raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.") + + model = getattr(self, fuse_component, None) + if model is not None: + # check if diffusers model + if issubclass(model.__class__, ModelMixin): + model.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) + # handle transformers models. 
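+                # Text encoder components are `transformers.PreTrainedModel`s, so their LoRA layers are merged via `fuse_text_encoder_lora`.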
+ if issubclass(model.__class__, PreTrainedModel): + fuse_text_encoder_lora( + model, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names + ) + + self.num_fused_loras += 1 + + def unfuse_lora(self, components: List[str] = [], **kwargs): + r""" + Reverses the effect of + [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). + + + + This is an experimental API. + + + + Args: + components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. + unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. + unfuse_text_encoder (`bool`, defaults to `True`): + Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the + LoRA parameters then it won't have any effect. + """ + if "unfuse_unet" in kwargs: + depr_message = "Passing `unfuse_unet` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_unet` will be removed in a future version." + deprecate( + "unfuse_unet", + "1.0.0", + depr_message, + ) + if "unfuse_transformer" in kwargs: + depr_message = "Passing `unfuse_transformer` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_transformer` will be removed in a future version." + deprecate( + "unfuse_transformer", + "1.0.0", + depr_message, + ) + if "unfuse_text_encoder" in kwargs: + depr_message = "Passing `unfuse_text_encoder` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_text_encoder` will be removed in a future version." + deprecate( + "unfuse_text_encoder", + "1.0.0", + depr_message, + ) + + if len(components) == 0: + raise ValueError("`components` cannot be an empty list.") + + for fuse_component in components: + if fuse_component not in self._lora_loadable_modules: + raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.") + + model = getattr(self, fuse_component, None) + if model is not None: + if issubclass(model.__class__, (ModelMixin, PreTrainedModel)): + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + module.unmerge() + + self.num_fused_loras -= 1 + + def set_adapters( + self, + adapter_names: Union[List[str], str], + adapter_weights: Optional[Union[float, Dict, List[float], List[Dict]]] = None, + ): + adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names + + adapter_weights = copy.deepcopy(adapter_weights) + + # Expand weights into a list, one entry per adapter + if not isinstance(adapter_weights, list): + adapter_weights = [adapter_weights] * len(adapter_names) + + if len(adapter_names) != len(adapter_weights): + raise ValueError( + f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(adapter_weights)}" + ) + + list_adapters = self.get_list_adapters() # eg {"unet": ["adapter1", "adapter2"], "text_encoder": ["adapter2"]} + all_adapters = { + adapter for adapters in list_adapters.values() for adapter in adapters + } # eg ["adapter1", "adapter2"] + invert_list_adapters = { + adapter: [part for part, adapters in list_adapters.items() if adapter in adapters] + for adapter in all_adapters + } # eg {"adapter1": ["unet"], "adapter2": ["unet", "text_encoder"]} + + # Decompose weights into weights for denoiser and text encoders. 
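+        # A float weight applies to every component; a dict is split per component below, e.g. (sketch; `pipe` is a
+        # loaded pipeline and "style" a hypothetical adapter name):
+        #     pipe.set_adapters(["style"], adapter_weights=[{"unet": 0.7, "text_encoder": 0.5}])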
+ _component_adapter_weights = {} + for component in self._lora_loadable_modules: + model = getattr(self, component) + + for adapter_name, weights in zip(adapter_names, adapter_weights): + if isinstance(weights, dict): + component_adapter_weights = weights.pop(component, None) + + if component_adapter_weights is not None and not hasattr(self, component): + logger.warning( + f"Lora weight dict contains {component} weights but will be ignored because pipeline does not have {component}." + ) + + if component_adapter_weights is not None and component not in invert_list_adapters[adapter_name]: + logger.warning( + ( + f"Lora weight dict for adapter '{adapter_name}' contains {component}," + f"but this will be ignored because {adapter_name} does not contain weights for {component}." + f"Valid parts for {adapter_name} are: {invert_list_adapters[adapter_name]}." + ) + ) + + else: + component_adapter_weights = weights + + _component_adapter_weights.setdefault(component, []) + _component_adapter_weights[component].append(component_adapter_weights) + + if issubclass(model.__class__, ModelMixin): + model.set_adapters(adapter_names, _component_adapter_weights[component]) + elif issubclass(model.__class__, PreTrainedModel): + set_adapters_for_text_encoder(adapter_names, model, _component_adapter_weights[component]) + + def disable_lora(self): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if model is not None: + if issubclass(model.__class__, ModelMixin): + model.disable_lora() + elif issubclass(model.__class__, PreTrainedModel): + disable_lora_for_text_encoder(model) + + def enable_lora(self): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if model is not None: + if issubclass(model.__class__, ModelMixin): + model.enable_lora() + elif issubclass(model.__class__, PreTrainedModel): + enable_lora_for_text_encoder(model) + + def delete_adapters(self, adapter_names: Union[List[str], str]): + """ + Args: + Deletes the LoRA layers of `adapter_name` for the unet and text-encoder(s). + adapter_names (`Union[List[str], str]`): + The names of the adapter to delete. Can be a single string or a list of strings + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + if isinstance(adapter_names, str): + adapter_names = [adapter_names] + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if model is not None: + if issubclass(model.__class__, ModelMixin): + model.delete_adapters(adapter_names) + elif issubclass(model.__class__, PreTrainedModel): + for adapter_name in adapter_names: + delete_adapter_layers(model, adapter_name) + + def get_active_adapters(self) -> List[str]: + """ + Gets the list of the current active adapters. + + Example: + + ```python + from diffusers import DiffusionPipeline + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + ).to("cuda") + pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy") + pipeline.get_active_adapters() + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError( + "PEFT backend is required for this method. 
Please install the latest version of PEFT `pip install -U peft`" + ) + + active_adapters = [] + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if model is not None and issubclass(model.__class__, ModelMixin): + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + active_adapters = module.active_adapters + break + + return active_adapters + + def get_list_adapters(self) -> Dict[str, List[str]]: + """ + Gets the current list of all available adapters in the pipeline. + """ + if not USE_PEFT_BACKEND: + raise ValueError( + "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`" + ) + + set_adapters = {} + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if ( + model is not None + and issubclass(model.__class__, (ModelMixin, PreTrainedModel)) + and hasattr(model, "peft_config") + ): + set_adapters[component] = list(model.peft_config.keys()) + + return set_adapters + + def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None: + """ + Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case + you want to load multiple adapters and free some GPU memory. + + Args: + adapter_names (`List[str]`): + List of adapters to send device to. + device (`Union[torch.device, str, int]`): + Device to send the adapters to. Can be either a torch device, a str or an integer. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + for component in self._lora_loadable_modules: + model = getattr(self, component, None) + if model is not None: + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + for adapter_name in adapter_names: + module.lora_A[adapter_name].to(device) + module.lora_B[adapter_name].to(device) + # this is a param, not a module, so device placement is not in-place -> re-assign + if hasattr(module, "lora_magnitude_vector") and module.lora_magnitude_vector is not None: + module.lora_magnitude_vector[adapter_name] = module.lora_magnitude_vector[ + adapter_name + ].to(device) + + @staticmethod + def pack_weights(layers, prefix): + layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers + layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} + return layers_state_dict + + @staticmethod + def write_lora_layers( + state_dict: Dict[str, torch.Tensor], + save_directory: str, + is_main_process: bool, + weight_name: str, + save_function: Callable, + safe_serialization: bool, + ): + from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + if save_function is None: + if safe_serialization: + + def save_function(weights, filename): + return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"}) + + else: + save_function = torch.save + + os.makedirs(save_directory, exist_ok=True) + + if weight_name is None: + if safe_serialization: + weight_name = LORA_WEIGHT_NAME_SAFE + else: + weight_name = LORA_WEIGHT_NAME + + save_path = Path(save_directory, weight_name).as_posix() + save_function(state_dict, save_path) + logger.info(f"Model weights saved in {save_path}") + + @property + def lora_scale(self) -> float: + # property function that returns the lora scale 
which can be set at run time by the pipeline. + # if _lora_scale has not been set, return 1 + return self._lora_scale if hasattr(self, "_lora_scale") else 1.0 diff --git a/diffusers3/loaders/lora_conversion_utils.py b/diffusers3/loaders/lora_conversion_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f6dea33e8e82d1634d3833d5cc3d9870a0e6590b --- /dev/null +++ b/diffusers3/loaders/lora_conversion_utils.py @@ -0,0 +1,623 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +import torch + +from ..utils import is_peft_version, logging + + +logger = logging.get_logger(__name__) + + +def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5): + # 1. get all state_dict_keys + all_keys = list(state_dict.keys()) + sgm_patterns = ["input_blocks", "middle_block", "output_blocks"] + + # 2. check if needs remapping, if not return original dict + is_in_sgm_format = False + for key in all_keys: + if any(p in key for p in sgm_patterns): + is_in_sgm_format = True + break + + if not is_in_sgm_format: + return state_dict + + # 3. Else remap from SGM patterns + new_state_dict = {} + inner_block_map = ["resnets", "attentions", "upsamplers"] + + # Retrieves # of down, mid and up blocks + input_block_ids, middle_block_ids, output_block_ids = set(), set(), set() + + for layer in all_keys: + if "text" in layer: + new_state_dict[layer] = state_dict.pop(layer) + else: + layer_id = int(layer.split(delimiter)[:block_slice_pos][-1]) + if sgm_patterns[0] in layer: + input_block_ids.add(layer_id) + elif sgm_patterns[1] in layer: + middle_block_ids.add(layer_id) + elif sgm_patterns[2] in layer: + output_block_ids.add(layer_id) + else: + raise ValueError(f"Checkpoint not supported because layer {layer} not supported.") + + input_blocks = { + layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key] + for layer_id in input_block_ids + } + middle_blocks = { + layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key] + for layer_id in middle_block_ids + } + output_blocks = { + layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key] + for layer_id in output_block_ids + } + + # Rename keys accordingly + for i in input_block_ids: + block_id = (i - 1) // (unet_config.layers_per_block + 1) + layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1) + + for key in input_blocks[i]: + inner_block_id = int(key.split(delimiter)[block_slice_pos]) + inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers" + inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0" + new_key = delimiter.join( + key.split(delimiter)[: block_slice_pos - 1] + + [str(block_id), inner_block_key, inner_layers_in_block] + + key.split(delimiter)[block_slice_pos + 1 :] + ) + new_state_dict[new_key] = state_dict.pop(key) + + for i in middle_block_ids: + key_part = None + if i == 0: + key_part 
= [inner_block_map[0], "0"] + elif i == 1: + key_part = [inner_block_map[1], "0"] + elif i == 2: + key_part = [inner_block_map[0], "1"] + else: + raise ValueError(f"Invalid middle block id {i}.") + + for key in middle_blocks[i]: + new_key = delimiter.join( + key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:] + ) + new_state_dict[new_key] = state_dict.pop(key) + + for i in output_block_ids: + block_id = i // (unet_config.layers_per_block + 1) + layer_in_block_id = i % (unet_config.layers_per_block + 1) + + for key in output_blocks[i]: + inner_block_id = int(key.split(delimiter)[block_slice_pos]) + inner_block_key = inner_block_map[inner_block_id] + inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0" + new_key = delimiter.join( + key.split(delimiter)[: block_slice_pos - 1] + + [str(block_id), inner_block_key, inner_layers_in_block] + + key.split(delimiter)[block_slice_pos + 1 :] + ) + new_state_dict[new_key] = state_dict.pop(key) + + if len(state_dict) > 0: + raise ValueError("At this point all state dict entries have to be converted.") + + return new_state_dict + + +def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"): + """ + Converts a non-Diffusers LoRA state dict to a Diffusers compatible state dict. + + Args: + state_dict (`dict`): The state dict to convert. + unet_name (`str`, optional): The name of the U-Net module in the Diffusers model. Defaults to "unet". + text_encoder_name (`str`, optional): The name of the text encoder module in the Diffusers model. Defaults to + "text_encoder". + + Returns: + `tuple`: A tuple containing the converted state dict and a dictionary of alphas. + """ + unet_state_dict = {} + te_state_dict = {} + te2_state_dict = {} + network_alphas = {} + + # Check for DoRA-enabled LoRAs. + dora_present_in_unet = any("dora_scale" in k and "lora_unet_" in k for k in state_dict) + dora_present_in_te = any("dora_scale" in k and ("lora_te_" in k or "lora_te1_" in k) for k in state_dict) + dora_present_in_te2 = any("dora_scale" in k and "lora_te2_" in k for k in state_dict) + if dora_present_in_unet or dora_present_in_te or dora_present_in_te2: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + + # Iterate over all LoRA weights. + all_lora_keys = list(state_dict.keys()) + for key in all_lora_keys: + if not key.endswith("lora_down.weight"): + continue + + # Extract LoRA name. + lora_name = key.split(".")[0] + + # Find corresponding up weight and alpha. + lora_name_up = lora_name + ".lora_up.weight" + lora_name_alpha = lora_name + ".alpha" + + # Handle U-Net LoRAs. + if lora_name.startswith("lora_unet_"): + diffusers_name = _convert_unet_lora_key(key) + + # Store down and up weights. + unet_state_dict[diffusers_name] = state_dict.pop(key) + unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + + # Store DoRA scale if present. + if dora_present_in_unet: + dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down." + unet_state_dict[ + diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.") + ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + + # Handle text encoder LoRAs. 
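+        # "lora_te_"/"lora_te1_" keys belong to the first text encoder; "lora_te2_" keys belong to the second text encoder used by SDXL.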
+ elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")): + diffusers_name = _convert_text_encoder_lora_key(key, lora_name) + + # Store down and up weights for te or te2. + if lora_name.startswith(("lora_te_", "lora_te1_")): + te_state_dict[diffusers_name] = state_dict.pop(key) + te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + else: + te2_state_dict[diffusers_name] = state_dict.pop(key) + te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up) + + # Store DoRA scale if present. + if dora_present_in_te or dora_present_in_te2: + dora_scale_key_to_replace_te = ( + "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer." + ) + if lora_name.startswith(("lora_te_", "lora_te1_")): + te_state_dict[ + diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") + ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + elif lora_name.startswith("lora_te2_"): + te2_state_dict[ + diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.") + ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale")) + + # Store alpha if present. + if lora_name_alpha in state_dict: + alpha = state_dict.pop(lora_name_alpha).item() + network_alphas.update(_get_alpha_name(lora_name_alpha, diffusers_name, alpha)) + + # Check if any keys remain. + if len(state_dict) > 0: + raise ValueError(f"The following keys have not been correctly renamed: \n\n {', '.join(state_dict.keys())}") + + logger.info("Non-diffusers checkpoint detected.") + + # Construct final state dict. + unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()} + te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()} + te2_state_dict = ( + {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()} + if len(te2_state_dict) > 0 + else None + ) + if te2_state_dict is not None: + te_state_dict.update(te2_state_dict) + + new_state_dict = {**unet_state_dict, **te_state_dict} + return new_state_dict, network_alphas + + +def _convert_unet_lora_key(key): + """ + Converts a U-Net LoRA key to a Diffusers compatible key. + """ + diffusers_name = key.replace("lora_unet_", "").replace("_", ".") + + # Replace common U-Net naming patterns. + diffusers_name = diffusers_name.replace("input.blocks", "down_blocks") + diffusers_name = diffusers_name.replace("down.blocks", "down_blocks") + diffusers_name = diffusers_name.replace("middle.block", "mid_block") + diffusers_name = diffusers_name.replace("mid.block", "mid_block") + diffusers_name = diffusers_name.replace("output.blocks", "up_blocks") + diffusers_name = diffusers_name.replace("up.blocks", "up_blocks") + diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks") + diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora") + diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora") + diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora") + diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora") + diffusers_name = diffusers_name.replace("proj.in", "proj_in") + diffusers_name = diffusers_name.replace("proj.out", "proj_out") + diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj") + + # SDXL specific conversions. 
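+    # For embedding-related keys, strip the last numeric sub-module index so the name matches the diffusers module layout.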
+ if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name: + pattern = r"\.\d+(?=\D*$)" + diffusers_name = re.sub(pattern, "", diffusers_name, count=1) + if ".in." in diffusers_name: + diffusers_name = diffusers_name.replace("in.layers.2", "conv1") + if ".out." in diffusers_name: + diffusers_name = diffusers_name.replace("out.layers.3", "conv2") + if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name: + diffusers_name = diffusers_name.replace("op", "conv") + if "skip" in diffusers_name: + diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut") + + # LyCORIS specific conversions. + if "time.emb.proj" in diffusers_name: + diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj") + if "conv.shortcut" in diffusers_name: + diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut") + + # General conversions. + if "transformer_blocks" in diffusers_name: + if "attn1" in diffusers_name or "attn2" in diffusers_name: + diffusers_name = diffusers_name.replace("attn1", "attn1.processor") + diffusers_name = diffusers_name.replace("attn2", "attn2.processor") + elif "ff" in diffusers_name: + pass + elif any(key in diffusers_name for key in ("proj_in", "proj_out")): + pass + else: + pass + + return diffusers_name + + +def _convert_text_encoder_lora_key(key, lora_name): + """ + Converts a text encoder LoRA key to a Diffusers compatible key. + """ + if lora_name.startswith(("lora_te_", "lora_te1_")): + key_to_replace = "lora_te_" if lora_name.startswith("lora_te_") else "lora_te1_" + else: + key_to_replace = "lora_te2_" + + diffusers_name = key.replace(key_to_replace, "").replace("_", ".") + diffusers_name = diffusers_name.replace("text.model", "text_model") + diffusers_name = diffusers_name.replace("self.attn", "self_attn") + diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") + diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") + diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") + diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") + diffusers_name = diffusers_name.replace("text.projection", "text_projection") + + if "self_attn" in diffusers_name or "text_projection" in diffusers_name: + pass + elif "mlp" in diffusers_name: + # Be aware that this is the new diffusers convention and the rest of the code might + # not utilize it yet. + diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") + return diffusers_name + + +def _get_alpha_name(lora_name_alpha, diffusers_name, alpha): + """ + Gets the correct alpha name for the Diffusers model. + """ + if lora_name_alpha.startswith("lora_unet_"): + prefix = "unet." + elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")): + prefix = "text_encoder." + else: + prefix = "text_encoder_2." + new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha" + return {new_name: alpha} + + +# The utilities under `_convert_kohya_flux_lora_to_diffusers()` +# are taken from https://github.com/kohya-ss/sd-scripts/blob/a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb/networks/convert_flux_lora.py +# All credits go to `kohya-ss`. 
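+# The converter below maps kohya-ss "lora_unet_double_blocks_*" / "lora_unet_single_blocks_*" keys onto the diffusers
+# Flux transformer layout and rescales the LoRA A/B matrices by alpha / rank. Rough usage sketch (the file name is
+# hypothetical):
+#     sds_sd = safetensors.torch.load_file("my_flux_lora.safetensors")
+#     diffusers_sd = _convert_kohya_flux_lora_to_diffusers(sds_sd)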
+def _convert_kohya_flux_lora_to_diffusers(state_dict): + def _convert_to_ai_toolkit(sds_sd, ait_sd, sds_key, ait_key): + if sds_key + ".lora_down.weight" not in sds_sd: + return + down_weight = sds_sd.pop(sds_key + ".lora_down.weight") + + # scale weight by alpha and dim + rank = down_weight.shape[0] + alpha = sds_sd.pop(sds_key + ".alpha").item() # alpha is scalar + scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here + + # calculate scale_down and scale_up to keep the same value. if scale is 4, scale_down is 2 and scale_up is 2 + scale_down = scale + scale_up = 1.0 + while scale_down * 2 < scale_up: + scale_down *= 2 + scale_up /= 2 + + ait_sd[ait_key + ".lora_A.weight"] = down_weight * scale_down + ait_sd[ait_key + ".lora_B.weight"] = sds_sd.pop(sds_key + ".lora_up.weight") * scale_up + + def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None): + if sds_key + ".lora_down.weight" not in sds_sd: + return + down_weight = sds_sd.pop(sds_key + ".lora_down.weight") + up_weight = sds_sd.pop(sds_key + ".lora_up.weight") + sd_lora_rank = down_weight.shape[0] + + # scale weight by alpha and dim + alpha = sds_sd.pop(sds_key + ".alpha") + scale = alpha / sd_lora_rank + + # calculate scale_down and scale_up + scale_down = scale + scale_up = 1.0 + while scale_down * 2 < scale_up: + scale_down *= 2 + scale_up /= 2 + + down_weight = down_weight * scale_down + up_weight = up_weight * scale_up + + # calculate dims if not provided + num_splits = len(ait_keys) + if dims is None: + dims = [up_weight.shape[0] // num_splits] * num_splits + else: + assert sum(dims) == up_weight.shape[0] + + # check upweight is sparse or not + is_sparse = False + if sd_lora_rank % num_splits == 0: + ait_rank = sd_lora_rank // num_splits + is_sparse = True + i = 0 + for j in range(len(dims)): + for k in range(len(dims)): + if j == k: + continue + is_sparse = is_sparse and torch.all( + up_weight[i : i + dims[j], k * ait_rank : (k + 1) * ait_rank] == 0 + ) + i += dims[j] + if is_sparse: + logger.info(f"weight is sparse: {sds_key}") + + # make ai-toolkit weight + ait_down_keys = [k + ".lora_A.weight" for k in ait_keys] + ait_up_keys = [k + ".lora_B.weight" for k in ait_keys] + if not is_sparse: + # down_weight is copied to each split + ait_sd.update({k: down_weight for k in ait_down_keys}) + + # up_weight is split to each split + ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) # noqa: C416 + else: + # down_weight is chunked to each split + ait_sd.update({k: v for k, v in zip(ait_down_keys, torch.chunk(down_weight, num_splits, dim=0))}) # noqa: C416 + + # up_weight is sparse: only non-zero values are copied to each split + i = 0 + for j in range(len(dims)): + ait_sd[ait_up_keys[j]] = up_weight[i : i + dims[j], j * ait_rank : (j + 1) * ait_rank].contiguous() + i += dims[j] + + def _convert_sd_scripts_to_ai_toolkit(sds_sd): + ait_sd = {} + for i in range(19): + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_img_attn_proj", + f"transformer.transformer_blocks.{i}.attn.to_out.0", + ) + _convert_to_ai_toolkit_cat( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_img_attn_qkv", + [ + f"transformer.transformer_blocks.{i}.attn.to_q", + f"transformer.transformer_blocks.{i}.attn.to_k", + f"transformer.transformer_blocks.{i}.attn.to_v", + ], + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_img_mlp_0", + f"transformer.transformer_blocks.{i}.ff.net.0.proj", + ) + 
_convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_img_mlp_2", + f"transformer.transformer_blocks.{i}.ff.net.2", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_img_mod_lin", + f"transformer.transformer_blocks.{i}.norm1.linear", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_txt_attn_proj", + f"transformer.transformer_blocks.{i}.attn.to_add_out", + ) + _convert_to_ai_toolkit_cat( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_txt_attn_qkv", + [ + f"transformer.transformer_blocks.{i}.attn.add_q_proj", + f"transformer.transformer_blocks.{i}.attn.add_k_proj", + f"transformer.transformer_blocks.{i}.attn.add_v_proj", + ], + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_txt_mlp_0", + f"transformer.transformer_blocks.{i}.ff_context.net.0.proj", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_txt_mlp_2", + f"transformer.transformer_blocks.{i}.ff_context.net.2", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_double_blocks_{i}_txt_mod_lin", + f"transformer.transformer_blocks.{i}.norm1_context.linear", + ) + + for i in range(38): + _convert_to_ai_toolkit_cat( + sds_sd, + ait_sd, + f"lora_unet_single_blocks_{i}_linear1", + [ + f"transformer.single_transformer_blocks.{i}.attn.to_q", + f"transformer.single_transformer_blocks.{i}.attn.to_k", + f"transformer.single_transformer_blocks.{i}.attn.to_v", + f"transformer.single_transformer_blocks.{i}.proj_mlp", + ], + dims=[3072, 3072, 3072, 12288], + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_single_blocks_{i}_linear2", + f"transformer.single_transformer_blocks.{i}.proj_out", + ) + _convert_to_ai_toolkit( + sds_sd, + ait_sd, + f"lora_unet_single_blocks_{i}_modulation_lin", + f"transformer.single_transformer_blocks.{i}.norm.linear", + ) + + if len(sds_sd) > 0: + logger.warning(f"Unsuppored keys for ai-toolkit: {sds_sd.keys()}") + + return ait_sd + + return _convert_sd_scripts_to_ai_toolkit(state_dict) + + +# Adapted from https://gist.github.com/Leommm-byte/6b331a1e9bd53271210b26543a7065d6 +# Some utilities were reused from +# https://github.com/kohya-ss/sd-scripts/blob/a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb/networks/convert_flux_lora.py +def _convert_xlabs_flux_lora_to_diffusers(old_state_dict): + new_state_dict = {} + orig_keys = list(old_state_dict.keys()) + + def handle_qkv(sds_sd, ait_sd, sds_key, ait_keys, dims=None): + down_weight = sds_sd.pop(sds_key) + up_weight = sds_sd.pop(sds_key.replace(".down.weight", ".up.weight")) + + # calculate dims if not provided + num_splits = len(ait_keys) + if dims is None: + dims = [up_weight.shape[0] // num_splits] * num_splits + else: + assert sum(dims) == up_weight.shape[0] + + # make ai-toolkit weight + ait_down_keys = [k + ".lora_A.weight" for k in ait_keys] + ait_up_keys = [k + ".lora_B.weight" for k in ait_keys] + + # down_weight is copied to each split + ait_sd.update({k: down_weight for k in ait_down_keys}) + + # up_weight is split to each split + ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) # noqa: C416 + + for old_key in orig_keys: + # Handle double_blocks + if old_key.startswith(("diffusion_model.double_blocks", "double_blocks")): + block_num = re.search(r"double_blocks\.(\d+)", old_key).group(1) + new_key = f"transformer.transformer_blocks.{block_num}" + + if "processor.proj_lora1" in old_key: + new_key += ".attn.to_out.0" + elif "processor.proj_lora2" in old_key: + 
new_key += ".attn.to_add_out"
+            # Handle text latents.
+            elif "processor.qkv_lora2" in old_key and "up" not in old_key:
+                handle_qkv(
+                    old_state_dict,
+                    new_state_dict,
+                    old_key,
+                    [
+                        f"transformer.transformer_blocks.{block_num}.attn.add_q_proj",
+                        f"transformer.transformer_blocks.{block_num}.attn.add_k_proj",
+                        f"transformer.transformer_blocks.{block_num}.attn.add_v_proj",
+                    ],
+                )
+                # continue
+            # Handle image latents.
+            elif "processor.qkv_lora1" in old_key and "up" not in old_key:
+                handle_qkv(
+                    old_state_dict,
+                    new_state_dict,
+                    old_key,
+                    [
+                        f"transformer.transformer_blocks.{block_num}.attn.to_q",
+                        f"transformer.transformer_blocks.{block_num}.attn.to_k",
+                        f"transformer.transformer_blocks.{block_num}.attn.to_v",
+                    ],
+                )
+                # continue
+
+            if "down" in old_key:
+                new_key += ".lora_A.weight"
+            elif "up" in old_key:
+                new_key += ".lora_B.weight"
+
+        # Handle single_blocks
+        elif old_key.startswith(("diffusion_model.single_blocks", "single_blocks")):
+            block_num = re.search(r"single_blocks\.(\d+)", old_key).group(1)
+            new_key = f"transformer.single_transformer_blocks.{block_num}"
+
+            if "proj_lora1" in old_key or "proj_lora2" in old_key:
+                new_key += ".proj_out"
+            elif "qkv_lora1" in old_key or "qkv_lora2" in old_key:
+                new_key += ".norm.linear"
+
+            if "down" in old_key:
+                new_key += ".lora_A.weight"
+            elif "up" in old_key:
+                new_key += ".lora_B.weight"
+
+        else:
+            # Handle other potential key patterns here
+            new_key = old_key
+
+        # Since we already handle qkv above.
+        if "qkv" not in old_key:
+            new_state_dict[new_key] = old_state_dict.pop(old_key)
+
+    if len(old_state_dict) > 0:
+        raise ValueError(f"`old_state_dict` should be empty at this point but has: {list(old_state_dict.keys())}.")
+
+    return new_state_dict
diff --git a/diffusers3/loaders/lora_pipeline.py b/diffusers3/loaders/lora_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d644d68415338e1fdc99c446f150e29ead0effd
--- /dev/null
+++ b/diffusers3/loaders/lora_pipeline.py
@@ -0,0 +1,2283 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os +from typing import Callable, Dict, List, Optional, Union + +import torch +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import ( + USE_PEFT_BACKEND, + convert_state_dict_to_diffusers, + convert_state_dict_to_peft, + convert_unet_state_dict_to_peft, + deprecate, + get_adapter_name, + get_peft_kwargs, + is_peft_version, + is_transformers_available, + logging, + scale_lora_layers, +) +from .lora_base import LoraBaseMixin +from .lora_conversion_utils import ( + _convert_kohya_flux_lora_to_diffusers, + _convert_non_diffusers_lora_to_diffusers, + _convert_xlabs_flux_lora_to_diffusers, + _maybe_map_sgm_blocks_to_diffusers, +) + + +if is_transformers_available(): + from ..models.lora import text_encoder_attn_modules, text_encoder_mlp_modules + +logger = logging.get_logger(__name__) + +TEXT_ENCODER_NAME = "text_encoder" +UNET_NAME = "unet" +TRANSFORMER_NAME = "transformer" + +LORA_WEIGHT_NAME = "pytorch_lora_weights.bin" +LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors" + + +class StableDiffusionLoraLoaderMixin(LoraBaseMixin): + r""" + Load LoRA layers into Stable Diffusion [`UNet2DConditionModel`] and + [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel). + """ + + _lora_loadable_modules = ["unet", "text_encoder"] + unet_name = UNET_NAME + text_encoder_name = TEXT_ENCODER_NAME + + def load_lora_weights( + self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs + ): + """ + Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and + `self.text_encoder`. + + All kwargs are forwarded to `self.lora_state_dict`. + + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is + loaded. + + See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is + loaded into `self.unet`. + + See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state + dict is loaded into `self.text_encoder`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + kwargs (`dict`, *optional*): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # if a dict is passed, copy it instead of modifying it inplace + if isinstance(pretrained_model_name_or_path_or_dict, dict): + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() + + # First, ensure that the checkpoint is a compatible one and can be successfully loaded. 
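+        # `lora_state_dict` also converts non-diffusers (e.g. kohya-ss / A1111-style) checkpoints into the diffusers key format.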
+ state_dict, network_alphas = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) + + is_correct_format = all("lora" in key or "dora_scale" in key for key in state_dict.keys()) + if not is_correct_format: + raise ValueError("Invalid LoRA checkpoint.") + + self.load_lora_into_unet( + state_dict, + network_alphas=network_alphas, + unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet, + adapter_name=adapter_name, + _pipeline=self, + ) + self.load_lora_into_text_encoder( + state_dict, + network_alphas=network_alphas, + text_encoder=getattr(self, self.text_encoder_name) + if not hasattr(self, "text_encoder") + else self.text_encoder, + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + @classmethod + @validate_hf_hub_args + def lora_state_dict( + cls, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + **kwargs, + ): + r""" + Return state dict for lora weights and the network alphas. + + + + We support loading A1111 formatted LoRA checkpoints in a limited capacity. + + This function is experimental and might change in the future. + + + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + weight_name (`str`, *optional*, defaults to None): + Name of the serialized state dict file. + """ + # Load the main state dict first which has the LoRA layers for either of + # UNet and text encoder or both. 
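+        # Pop the Hub download / caching options before fetching the checkpoint file.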
+ cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + unet_config = kwargs.pop("unet_config", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + state_dict = cls._fetch_state_dict( + pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, + weight_name=weight_name, + use_safetensors=use_safetensors, + local_files_only=local_files_only, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + allow_pickle=allow_pickle, + ) + + network_alphas = None + # TODO: replace it with a method from `state_dict_utils` + if all( + ( + k.startswith("lora_te_") + or k.startswith("lora_unet_") + or k.startswith("lora_te1_") + or k.startswith("lora_te2_") + ) + for k in state_dict.keys() + ): + # Map SDXL blocks correctly. + if unet_config is not None: + # use unet config to remap block numbers + state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) + state_dict, network_alphas = _convert_non_diffusers_lora_to_diffusers(state_dict) + + return state_dict, network_alphas + + @classmethod + def load_lora_into_unet(cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None): + """ + This will load the LoRA layers specified in `state_dict` into `unet`. + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + unet (`UNet2DConditionModel`): + The UNet model to load the LoRA layers into. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + only_text_encoder = all(key.startswith(cls.text_encoder_name) for key in keys) + if not only_text_encoder: + # Load the layers corresponding to UNet. 
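+            # `load_attn_procs` injects the LoRA layers into the UNet's attention modules under `adapter_name`.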
+ logger.info(f"Loading {cls.unet_name}.") + unet.load_attn_procs( + state_dict, network_alphas=network_alphas, adapter_name=adapter_name, _pipeline=_pipeline + ) + + @classmethod + def load_lora_into_text_encoder( + cls, + state_dict, + network_alphas, + text_encoder, + prefix=None, + lora_scale=1.0, + adapter_name=None, + _pipeline=None, + ): + """ + This will load the LoRA layers specified in `state_dict` into `text_encoder` + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The key should be prefixed with an + additional `text_encoder` to distinguish between unet lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + text_encoder (`CLIPTextModel`): + The text encoder model to load the LoRA layers into. + prefix (`str`): + Expected prefix of the `text_encoder` in the `state_dict`. + lora_scale (`float`): + How much to scale the output of the lora linear layer before it is added with the output of the regular + lora layer. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + prefix = cls.text_encoder_name if prefix is None else prefix + + # Safe prefix to check with. + if any(cls.text_encoder_name in key for key in keys): + # Load the layers corresponding to text encoder and make necessary adjustments. 
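+            # Keep only the keys under the expected prefix (e.g. "text_encoder.") and strip it before converting the dict for PEFT.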
+ text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix] + text_encoder_lora_state_dict = { + k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys + } + + if len(text_encoder_lora_state_dict) > 0: + logger.info(f"Loading {prefix}.") + rank = {} + text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) + + # convert state dict + text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) + + for name, _ in text_encoder_attn_modules(text_encoder): + for module in ("out_proj", "q_proj", "k_proj", "v_proj"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + for name, _ in text_encoder_mlp_modules(text_encoder): + for module in ("fc1", "fc2"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + if network_alphas is not None: + alpha_keys = [ + k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix + ] + network_alphas = { + k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + if is_peft_version("<", "0.9.0"): + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(text_encoder) + + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + # inject LoRA layers and load the state dict + # in transformers we automatically check whether the adapter name is already in use or not + text_encoder.load_adapter( + adapter_name=adapter_name, + adapter_state_dict=text_encoder_lora_state_dict, + peft_config=lora_config, + ) + + # scale LoRA layers with `lora_scale` + scale_lora_layers(text_encoder, weight=lora_scale) + + text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `unet`. + text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. 
Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + if not (unet_lora_layers or text_encoder_lora_layers): + raise ValueError("You must pass at least one of `unet_lora_layers` and `text_encoder_lora_layers`.") + + if unet_lora_layers: + state_dict.update(cls.pack_weights(unet_lora_layers, cls.unet_name)) + + if text_encoder_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) + + # Save the model + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + def fuse_lora( + self, + components: List[str] = ["unet", "text_encoder"], + lora_scale: float = 1.0, + safe_fusing: bool = False, + adapter_names: Optional[List[str]] = None, + **kwargs, + ): + r""" + Fuses the LoRA parameters into the original parameters of the corresponding blocks. + + + + This is an experimental API. + + + + Args: + components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. + adapter_names (`List[str]`, *optional*): + Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. + + Example: + + ```py + from diffusers import DiffusionPipeline + import torch + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.fuse_lora(lora_scale=0.7) + ``` + """ + super().fuse_lora( + components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names + ) + + def unfuse_lora(self, components: List[str] = ["unet", "text_encoder"], **kwargs): + r""" + Reverses the effect of + [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). + + + + This is an experimental API. + + + + Args: + components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. + unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. + unfuse_text_encoder (`bool`, defaults to `True`): + Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the + LoRA parameters then it won't have any effect. 
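+
+        Example (a minimal sketch mirroring the `fuse_lora` example above; the repository id and
+        weight name are illustrative):
+
+        ```py
+        from diffusers import DiffusionPipeline
+        import torch
+
+        pipeline = DiffusionPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+        ).to("cuda")
+        pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
+        pipeline.fuse_lora(lora_scale=0.7)
+        # ... run inference with the fused weights ...
+        pipeline.unfuse_lora()  # restore the original, unfused parameters
+        ```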
+ """ + super().unfuse_lora(components=components) + + +class StableDiffusionXLLoraLoaderMixin(LoraBaseMixin): + r""" + Load LoRA layers into Stable Diffusion XL [`UNet2DConditionModel`], + [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), and + [`CLIPTextModelWithProjection`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection). + """ + + _lora_loadable_modules = ["unet", "text_encoder", "text_encoder_2"] + unet_name = UNET_NAME + text_encoder_name = TEXT_ENCODER_NAME + + def load_lora_weights( + self, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + adapter_name: Optional[str] = None, + **kwargs, + ): + """ + Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and + `self.text_encoder`. + + All kwargs are forwarded to `self.lora_state_dict`. + + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is + loaded. + + See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is + loaded into `self.unet`. + + See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state + dict is loaded into `self.text_encoder`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + kwargs (`dict`, *optional*): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # We could have accessed the unet config from `lora_state_dict()` too. We pass + # it here explicitly to be able to tell that it's coming from an SDXL + # pipeline. + + # if a dict is passed, copy it instead of modifying it inplace + if isinstance(pretrained_model_name_or_path_or_dict, dict): + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() + + # First, ensure that the checkpoint is a compatible one and can be successfully loaded. + state_dict, network_alphas = self.lora_state_dict( + pretrained_model_name_or_path_or_dict, + unet_config=self.unet.config, + **kwargs, + ) + is_correct_format = all("lora" in key or "dora_scale" in key for key in state_dict.keys()) + if not is_correct_format: + raise ValueError("Invalid LoRA checkpoint.") + + self.load_lora_into_unet( + state_dict, network_alphas=network_alphas, unet=self.unet, adapter_name=adapter_name, _pipeline=self + ) + text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} + if len(text_encoder_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_state_dict, + network_alphas=network_alphas, + text_encoder=self.text_encoder, + prefix="text_encoder", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." 
in k} + if len(text_encoder_2_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_2_state_dict, + network_alphas=network_alphas, + text_encoder=self.text_encoder_2, + prefix="text_encoder_2", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + @classmethod + @validate_hf_hub_args + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.lora_state_dict + def lora_state_dict( + cls, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + **kwargs, + ): + r""" + Return state dict for lora weights and the network alphas. + + + + We support loading A1111 formatted LoRA checkpoints in a limited capacity. + + This function is experimental and might change in the future. + + + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + weight_name (`str`, *optional*, defaults to None): + Name of the serialized state dict file. + """ + # Load the main state dict first which has the LoRA layers for either of + # UNet and text encoder or both. 
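+        # Usage sketch (illustrative): the classmethod can be called directly on any pipeline class
+        # that uses this mixin (e.g. `StableDiffusionXLPipeline` in upstream diffusers) to inspect a
+        # checkpoint before loading it; the repository id and weight name mirror the examples in
+        # this file.
+        #
+        #     state_dict, network_alphas = StableDiffusionXLPipeline.lora_state_dict(
+        #         "nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors"
+        #     )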
+ cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + unet_config = kwargs.pop("unet_config", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + state_dict = cls._fetch_state_dict( + pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, + weight_name=weight_name, + use_safetensors=use_safetensors, + local_files_only=local_files_only, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + allow_pickle=allow_pickle, + ) + + network_alphas = None + # TODO: replace it with a method from `state_dict_utils` + if all( + ( + k.startswith("lora_te_") + or k.startswith("lora_unet_") + or k.startswith("lora_te1_") + or k.startswith("lora_te2_") + ) + for k in state_dict.keys() + ): + # Map SDXL blocks correctly. + if unet_config is not None: + # use unet config to remap block numbers + state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config) + state_dict, network_alphas = _convert_non_diffusers_lora_to_diffusers(state_dict) + + return state_dict, network_alphas + + @classmethod + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_unet + def load_lora_into_unet(cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None): + """ + This will load the LoRA layers specified in `state_dict` into `unet`. + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + unet (`UNet2DConditionModel`): + The UNet model to load the LoRA layers into. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `cls.unet_name` and/or `cls.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + only_text_encoder = all(key.startswith(cls.text_encoder_name) for key in keys) + if not only_text_encoder: + # Load the layers corresponding to UNet. 
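+            # Note: when `load_lora_weights` passes `unet_config`, any non-diffusers (kohya/SGM) key
+            # layout has already been remapped in `lora_state_dict`, so the keys arrive here in the
+            # diffusers `unet.*` layout that `load_attn_procs` expects.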
+ logger.info(f"Loading {cls.unet_name}.") + unet.load_attn_procs( + state_dict, network_alphas=network_alphas, adapter_name=adapter_name, _pipeline=_pipeline + ) + + @classmethod + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder + def load_lora_into_text_encoder( + cls, + state_dict, + network_alphas, + text_encoder, + prefix=None, + lora_scale=1.0, + adapter_name=None, + _pipeline=None, + ): + """ + This will load the LoRA layers specified in `state_dict` into `text_encoder` + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The key should be prefixed with an + additional `text_encoder` to distinguish between unet lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + text_encoder (`CLIPTextModel`): + The text encoder model to load the LoRA layers into. + prefix (`str`): + Expected prefix of the `text_encoder` in the `state_dict`. + lora_scale (`float`): + How much to scale the output of the lora linear layer before it is added with the output of the regular + lora layer. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + prefix = cls.text_encoder_name if prefix is None else prefix + + # Safe prefix to check with. + if any(cls.text_encoder_name in key for key in keys): + # Load the layers corresponding to text encoder and make necessary adjustments. 
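+            # Below: filter out the text-encoder keys, infer each module's LoRA rank from the
+            # `lora_B` weight shapes, and rebuild an equivalent PEFT `LoraConfig` before injecting
+            # the adapter via `text_encoder.load_adapter`.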
+ text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix] + text_encoder_lora_state_dict = { + k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys + } + + if len(text_encoder_lora_state_dict) > 0: + logger.info(f"Loading {prefix}.") + rank = {} + text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) + + # convert state dict + text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) + + for name, _ in text_encoder_attn_modules(text_encoder): + for module in ("out_proj", "q_proj", "k_proj", "v_proj"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + for name, _ in text_encoder_mlp_modules(text_encoder): + for module in ("fc1", "fc2"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + if network_alphas is not None: + alpha_keys = [ + k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix + ] + network_alphas = { + k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + if is_peft_version("<", "0.9.0"): + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(text_encoder) + + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + # inject LoRA layers and load the state dict + # in transformers we automatically check whether the adapter name is already in use or not + text_encoder.load_adapter( + adapter_name=adapter_name, + adapter_state_dict=text_encoder_lora_state_dict, + peft_config=lora_config, + ) + + # scale LoRA layers with `lora_scale` + scale_lora_layers(text_encoder, weight=lora_scale) + + text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `unet`. 
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): + raise ValueError( + "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`." + ) + + if unet_lora_layers: + state_dict.update(cls.pack_weights(unet_lora_layers, "unet")) + + if text_encoder_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_lora_layers, "text_encoder")) + + if text_encoder_2_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) + + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + def fuse_lora( + self, + components: List[str] = ["unet", "text_encoder", "text_encoder_2"], + lora_scale: float = 1.0, + safe_fusing: bool = False, + adapter_names: Optional[List[str]] = None, + **kwargs, + ): + r""" + Fuses the LoRA parameters into the original parameters of the corresponding blocks. + + + + This is an experimental API. + + + + Args: + components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. + adapter_names (`List[str]`, *optional*): + Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. 
+ + Example: + + ```py + from diffusers import DiffusionPipeline + import torch + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.fuse_lora(lora_scale=0.7) + ``` + """ + super().fuse_lora( + components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names + ) + + def unfuse_lora(self, components: List[str] = ["unet", "text_encoder", "text_encoder_2"], **kwargs): + r""" + Reverses the effect of + [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). + + + + This is an experimental API. + + + + Args: + components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. + unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. + unfuse_text_encoder (`bool`, defaults to `True`): + Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the + LoRA parameters then it won't have any effect. + """ + super().unfuse_lora(components=components) + + +class SD3LoraLoaderMixin(LoraBaseMixin): + r""" + Load LoRA layers into [`SD3Transformer2DModel`], + [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), and + [`CLIPTextModelWithProjection`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection). + + Specific to [`StableDiffusion3Pipeline`]. + """ + + _lora_loadable_modules = ["transformer", "text_encoder", "text_encoder_2"] + transformer_name = TRANSFORMER_NAME + text_encoder_name = TEXT_ENCODER_NAME + + @classmethod + @validate_hf_hub_args + def lora_state_dict( + cls, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + **kwargs, + ): + r""" + Return state dict for lora weights and the network alphas. + + + + We support loading A1111 formatted LoRA checkpoints in a limited capacity. + + This function is experimental and might change in the future. + + + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. 
+ token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + + """ + # Load the main state dict first which has the LoRA layers for either of + # transformer and text encoder or both. + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + state_dict = cls._fetch_state_dict( + pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, + weight_name=weight_name, + use_safetensors=use_safetensors, + local_files_only=local_files_only, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + allow_pickle=allow_pickle, + ) + + return state_dict + + def load_lora_weights( + self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs + ): + """ + Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and + `self.text_encoder`. + + All kwargs are forwarded to `self.lora_state_dict`. + + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is + loaded. + + See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state + dict is loaded into `self.transformer`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + kwargs (`dict`, *optional*): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # if a dict is passed, copy it instead of modifying it inplace + if isinstance(pretrained_model_name_or_path_or_dict, dict): + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() + + # First, ensure that the checkpoint is a compatible one and can be successfully loaded. 
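+        # The state dict is then split by prefix: `transformer.*` keys go to the SD3 transformer,
+        # while `text_encoder.*` / `text_encoder_2.*` keys are routed to the two CLIP text encoders.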
+ state_dict = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs) + + is_correct_format = all("lora" in key or "dora_scale" in key for key in state_dict.keys()) + if not is_correct_format: + raise ValueError("Invalid LoRA checkpoint.") + + self.load_lora_into_transformer( + state_dict, + transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, + adapter_name=adapter_name, + _pipeline=self, + ) + + text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} + if len(text_encoder_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_state_dict, + network_alphas=None, + text_encoder=self.text_encoder, + prefix="text_encoder", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k} + if len(text_encoder_2_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_2_state_dict, + network_alphas=None, + text_encoder=self.text_encoder_2, + prefix="text_encoder_2", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + @classmethod + def load_lora_into_transformer(cls, state_dict, transformer, adapter_name=None, _pipeline=None): + """ + This will load the LoRA layers specified in `state_dict` into `transformer`. + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + transformer (`SD3Transformer2DModel`): + The Transformer model to load the LoRA layers into. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + + keys = list(state_dict.keys()) + + transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] + state_dict = { + k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys + } + + if len(state_dict.keys()) > 0: + # check with first key if is not in peft format + first_key = next(iter(state_dict.keys())) + if "lora_A" not in first_key: + state_dict = convert_unet_state_dict_to_peft(state_dict) + + if adapter_name in getattr(transformer, "peft_config", {}): + raise ValueError( + f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name." + ) + + rank = {} + for key, val in state_dict.items(): + if "lora_B" in key: + rank[key] = val.shape[1] + + lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=None, peft_state_dict=state_dict) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"] and is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." 
+ ) + else: + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(transformer) + + # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks + # otherwise loading LoRA weights will lead to an error + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) + incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder + def load_lora_into_text_encoder( + cls, + state_dict, + network_alphas, + text_encoder, + prefix=None, + lora_scale=1.0, + adapter_name=None, + _pipeline=None, + ): + """ + This will load the LoRA layers specified in `state_dict` into `text_encoder` + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The key should be prefixed with an + additional `text_encoder` to distinguish between unet lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + text_encoder (`CLIPTextModel`): + The text encoder model to load the LoRA layers into. + prefix (`str`): + Expected prefix of the `text_encoder` in the `state_dict`. + lora_scale (`float`): + How much to scale the output of the lora linear layer before it is added with the output of the regular + lora layer. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + prefix = cls.text_encoder_name if prefix is None else prefix + + # Safe prefix to check with. + if any(cls.text_encoder_name in key for key in keys): + # Load the layers corresponding to text encoder and make necessary adjustments. 
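+            # Note: `load_lora_weights` above passes `network_alphas=None` for SD3 checkpoints, so
+            # the alpha-rescaling branch below is skipped in that path.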
+ text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix] + text_encoder_lora_state_dict = { + k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys + } + + if len(text_encoder_lora_state_dict) > 0: + logger.info(f"Loading {prefix}.") + rank = {} + text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) + + # convert state dict + text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) + + for name, _ in text_encoder_attn_modules(text_encoder): + for module in ("out_proj", "q_proj", "k_proj", "v_proj"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + for name, _ in text_encoder_mlp_modules(text_encoder): + for module in ("fc1", "fc2"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + if network_alphas is not None: + alpha_keys = [ + k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix + ] + network_alphas = { + k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + if is_peft_version("<", "0.9.0"): + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(text_encoder) + + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + # inject LoRA layers and load the state dict + # in transformers we automatically check whether the adapter name is already in use or not + text_encoder.load_adapter( + adapter_name=adapter_name, + adapter_state_dict=text_encoder_lora_state_dict, + peft_config=lora_config, + ) + + # scale LoRA layers with `lora_scale` + scale_lora_layers(text_encoder, weight=lora_scale) + + text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + transformer_lora_layers: Dict[str, torch.nn.Module] = None, + text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `transformer`. 
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + if not (transformer_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers): + raise ValueError( + "You must pass at least one of `transformer_lora_layers`, `text_encoder_lora_layers`, `text_encoder_2_lora_layers`." + ) + + if transformer_lora_layers: + state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) + + if text_encoder_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_lora_layers, "text_encoder")) + + if text_encoder_2_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) + + # Save the model + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + def fuse_lora( + self, + components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], + lora_scale: float = 1.0, + safe_fusing: bool = False, + adapter_names: Optional[List[str]] = None, + **kwargs, + ): + r""" + Fuses the LoRA parameters into the original parameters of the corresponding blocks. + + + + This is an experimental API. + + + + Args: + components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. + adapter_names (`List[str]`, *optional*): + Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. 
+ + Example: + + ```py + from diffusers import DiffusionPipeline + import torch + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.fuse_lora(lora_scale=0.7) + ``` + """ + super().fuse_lora( + components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names + ) + + def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], **kwargs): + r""" + Reverses the effect of + [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). + + + + This is an experimental API. + + + + Args: + components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. + unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. + unfuse_text_encoder (`bool`, defaults to `True`): + Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the + LoRA parameters then it won't have any effect. + """ + super().unfuse_lora(components=components) + + +class FluxLoraLoaderMixin(LoraBaseMixin): + r""" + Load LoRA layers into [`FluxTransformer2DModel`], + [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel). + + Specific to [`StableDiffusion3Pipeline`]. + """ + + _lora_loadable_modules = ["transformer", "text_encoder"] + transformer_name = TRANSFORMER_NAME + text_encoder_name = TEXT_ENCODER_NAME + + @classmethod + @validate_hf_hub_args + def lora_state_dict( + cls, + pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], + return_alphas: bool = False, + **kwargs, + ): + r""" + Return state dict for lora weights and the network alphas. + + + + We support loading A1111 formatted LoRA checkpoints in a limited capacity. + + This function is experimental and might change in the future. + + + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. 
+ revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + + """ + # Load the main state dict first which has the LoRA layers for either of + # transformer and text encoder or both. + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + state_dict = cls._fetch_state_dict( + pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict, + weight_name=weight_name, + use_safetensors=use_safetensors, + local_files_only=local_files_only, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + allow_pickle=allow_pickle, + ) + + # TODO (sayakpaul): to a follow-up to clean and try to unify the conditions. + + is_kohya = any(".lora_down.weight" in k for k in state_dict) + if is_kohya: + state_dict = _convert_kohya_flux_lora_to_diffusers(state_dict) + # Kohya already takes care of scaling the LoRA parameters with alpha. + return (state_dict, None) if return_alphas else state_dict + + is_xlabs = any("processor" in k for k in state_dict) + if is_xlabs: + state_dict = _convert_xlabs_flux_lora_to_diffusers(state_dict) + # xlabs doesn't use `alpha`. + return (state_dict, None) if return_alphas else state_dict + + # For state dicts like + # https://huggingface.co/TheLastBen/Jon_Snow_Flux_LoRA + keys = list(state_dict.keys()) + network_alphas = {} + for k in keys: + if "alpha" in k: + alpha_value = state_dict.get(k) + if (torch.is_tensor(alpha_value) and torch.is_floating_point(alpha_value)) or isinstance( + alpha_value, float + ): + network_alphas[k] = state_dict.pop(k) + else: + raise ValueError( + f"The alpha key ({k}) seems to be incorrect. If you think this error is unexpected, please open as issue." + ) + + if return_alphas: + return state_dict, network_alphas + else: + return state_dict + + def load_lora_weights( + self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs + ): + """ + Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and + `self.text_encoder`. + + All kwargs are forwarded to `self.lora_state_dict`. + + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is + loaded. + + See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state + dict is loaded into `self.transformer`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. + kwargs (`dict`, *optional*): + See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. 
+ adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + # if a dict is passed, copy it instead of modifying it inplace + if isinstance(pretrained_model_name_or_path_or_dict, dict): + pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy() + + # First, ensure that the checkpoint is a compatible one and can be successfully loaded. + state_dict, network_alphas = self.lora_state_dict( + pretrained_model_name_or_path_or_dict, return_alphas=True, **kwargs + ) + + is_correct_format = all("lora" in key or "dora_scale" in key for key in state_dict.keys()) + if not is_correct_format: + raise ValueError("Invalid LoRA checkpoint.") + + self.load_lora_into_transformer( + state_dict, + network_alphas=network_alphas, + transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer, + adapter_name=adapter_name, + _pipeline=self, + ) + + text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} + if len(text_encoder_state_dict) > 0: + self.load_lora_into_text_encoder( + text_encoder_state_dict, + network_alphas=network_alphas, + text_encoder=self.text_encoder, + prefix="text_encoder", + lora_scale=self.lora_scale, + adapter_name=adapter_name, + _pipeline=self, + ) + + @classmethod + def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, adapter_name=None, _pipeline=None): + """ + This will load the LoRA layers specified in `state_dict` into `transformer`. + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + transformer (`SD3Transformer2DModel`): + The Transformer model to load the LoRA layers into. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + + keys = list(state_dict.keys()) + + transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] + state_dict = { + k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys + } + + if len(state_dict.keys()) > 0: + # check with first key if is not in peft format + first_key = next(iter(state_dict.keys())) + if "lora_A" not in first_key: + state_dict = convert_unet_state_dict_to_peft(state_dict) + + if adapter_name in getattr(transformer, "peft_config", {}): + raise ValueError( + f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name." 
+ ) + + rank = {} + for key, val in state_dict.items(): + if "lora_B" in key: + rank[key] = val.shape[1] + + if network_alphas is not None and len(network_alphas) >= 1: + prefix = cls.transformer_name + alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix] + network_alphas = {k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys} + + lora_config_kwargs = get_peft_kwargs(rank, network_alpha_dict=network_alphas, peft_state_dict=state_dict) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"] and is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(transformer) + + # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks + # otherwise loading LoRA weights will lead to an error + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) + incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder + def load_lora_into_text_encoder( + cls, + state_dict, + network_alphas, + text_encoder, + prefix=None, + lora_scale=1.0, + adapter_name=None, + _pipeline=None, + ): + """ + This will load the LoRA layers specified in `state_dict` into `text_encoder` + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The key should be prefixed with an + additional `text_encoder` to distinguish between unet lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + text_encoder (`CLIPTextModel`): + The text encoder model to load the LoRA layers into. + prefix (`str`): + Expected prefix of the `text_encoder` in the `state_dict`. + lora_scale (`float`): + How much to scale the output of the lora linear layer before it is added with the output of the regular + lora layer. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. 
+ """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + prefix = cls.text_encoder_name if prefix is None else prefix + + # Safe prefix to check with. + if any(cls.text_encoder_name in key for key in keys): + # Load the layers corresponding to text encoder and make necessary adjustments. + text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix] + text_encoder_lora_state_dict = { + k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys + } + + if len(text_encoder_lora_state_dict) > 0: + logger.info(f"Loading {prefix}.") + rank = {} + text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) + + # convert state dict + text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) + + for name, _ in text_encoder_attn_modules(text_encoder): + for module in ("out_proj", "q_proj", "k_proj", "v_proj"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + for name, _ in text_encoder_mlp_modules(text_encoder): + for module in ("fc1", "fc2"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + if network_alphas is not None: + alpha_keys = [ + k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix + ] + network_alphas = { + k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + if is_peft_version("<", "0.9.0"): + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(text_encoder) + + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + # inject LoRA layers and load the state dict + # in transformers we automatically check whether the adapter name is already in use or not + text_encoder.load_adapter( + adapter_name=adapter_name, + adapter_state_dict=text_encoder_lora_state_dict, + peft_config=lora_config, + ) + + # scale LoRA layers with `lora_scale` + scale_lora_layers(text_encoder, weight=lora_scale) + + text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) + + # Offload back. 
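Both loading paths above derive a per-layer rank map from the `lora_B` matrices before building a `LoraConfig` through `get_peft_kwargs`. A minimal, self-contained sketch of that bookkeeping (the tensor names and the alpha value are invented, not taken from a real checkpoint):

```py
import torch

# Invented PEFT-format LoRA entries for two layers with different ranks.
state_dict = {
    "transformer_blocks.0.attn.to_q.lora_A.weight": torch.zeros(4, 64),
    "transformer_blocks.0.attn.to_q.lora_B.weight": torch.zeros(64, 4),
    "transformer_blocks.1.attn.to_k.lora_A.weight": torch.zeros(8, 64),
    "transformer_blocks.1.attn.to_k.lora_B.weight": torch.zeros(64, 8),
}

# Each lora_B weight is (out_features, rank), so its second dimension is the rank.
rank = {k: v.shape[1] for k, v in state_dict.items() if "lora_B" in k}

# A network alpha rescales the update: the effective LoRA scaling is alpha / rank.
network_alpha = 4.0
scaling = {k: network_alpha / r for k, r in rank.items()}
print(rank, scaling)
```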
+ if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.save_lora_weights with unet->transformer + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, + text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `transformer`. + text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + if not (transformer_lora_layers or text_encoder_lora_layers): + raise ValueError("You must pass at least one of `transformer_lora_layers` and `text_encoder_lora_layers`.") + + if transformer_lora_layers: + state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) + + if text_encoder_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) + + # Save the model + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.fuse_lora with unet->transformer + def fuse_lora( + self, + components: List[str] = ["transformer", "text_encoder"], + lora_scale: float = 1.0, + safe_fusing: bool = False, + adapter_names: Optional[List[str]] = None, + **kwargs, + ): + r""" + Fuses the LoRA parameters into the original parameters of the corresponding blocks. + + + + This is an experimental API. + + + + Args: + components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. + lora_scale (`float`, defaults to 1.0): + Controls how much to influence the outputs with the LoRA parameters. + safe_fusing (`bool`, defaults to `False`): + Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. 
+ adapter_names (`List[str]`, *optional*): + Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused. + + Example: + + ```py + from diffusers import DiffusionPipeline + import torch + + pipeline = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.fuse_lora(lora_scale=0.7) + ``` + """ + super().fuse_lora( + components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names + ) + + def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder"], **kwargs): + r""" + Reverses the effect of + [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). + + + + This is an experimental API. + + + + Args: + components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. + """ + super().unfuse_lora(components=components) + + +# The reason why we subclass from `StableDiffusionLoraLoaderMixin` here is because Amused initially +# relied on `StableDiffusionLoraLoaderMixin` for its LoRA support. +class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin): + _lora_loadable_modules = ["transformer", "text_encoder"] + transformer_name = TRANSFORMER_NAME + text_encoder_name = TEXT_ENCODER_NAME + + @classmethod + def load_lora_into_transformer(cls, state_dict, network_alphas, transformer, adapter_name=None, _pipeline=None): + """ + This will load the LoRA layers specified in `state_dict` into `transformer`. + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The keys can either be indexed directly + into the unet or prefixed with an additional `unet` which can be used to distinguish between text + encoder lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + unet (`UNet2DConditionModel`): + The UNet model to load the LoRA layers into. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + + keys = list(state_dict.keys()) + + transformer_keys = [k for k in keys if k.startswith(cls.transformer_name)] + state_dict = { + k.replace(f"{cls.transformer_name}.", ""): v for k, v in state_dict.items() if k in transformer_keys + } + + if network_alphas is not None: + alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.transformer_name)] + network_alphas = { + k.replace(f"{cls.transformer_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + if len(state_dict.keys()) > 0: + if adapter_name in getattr(transformer, "peft_config", {}): + raise ValueError( + f"Adapter name {adapter_name} already in use in the transformer - please select a new adapter name." 
+ ) + + rank = {} + for key, val in state_dict.items(): + if "lora_B" in key: + rank[key] = val.shape[1] + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"] and is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(transformer) + + # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks + # otherwise loading LoRA weights will lead to an error + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + inject_adapter_in_model(lora_config, transformer, adapter_name=adapter_name) + incompatible_keys = set_peft_model_state_dict(transformer, state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder + def load_lora_into_text_encoder( + cls, + state_dict, + network_alphas, + text_encoder, + prefix=None, + lora_scale=1.0, + adapter_name=None, + _pipeline=None, + ): + """ + This will load the LoRA layers specified in `state_dict` into `text_encoder` + + Parameters: + state_dict (`dict`): + A standard state dict containing the lora layer parameters. The key should be prefixed with an + additional `text_encoder` to distinguish between unet lora layers. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + text_encoder (`CLIPTextModel`): + The text encoder model to load the LoRA layers into. + prefix (`str`): + Expected prefix of the `text_encoder` in the `state_dict`. + lora_scale (`float`): + How much to scale the output of the lora linear layer before it is added with the output of the regular + lora layer. + adapter_name (`str`, *optional*): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig + + # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), + # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as + # their prefixes. + keys = list(state_dict.keys()) + prefix = cls.text_encoder_name if prefix is None else prefix + + # Safe prefix to check with. 
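The transformer path just above reduces to two PEFT primitives: `inject_adapter_in_model` creates empty LoRA layers for the targeted modules, and `set_peft_model_state_dict` copies the checkpoint weights into them, reporting incompatibilities instead of raising (the loader only warns on `unexpected_keys`). A toy, self-contained sketch of that pattern, with an invented module and adapter name and assuming `peft` is installed:

```py
import torch
from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(16, 16)


model = TinyModel()

# 1) Add (empty) LoRA layers for the targeted module.
inject_adapter_in_model(
    LoraConfig(r=4, lora_alpha=4, target_modules=["proj"]), model, adapter_name="demo"
)

# 2) Copy pretrained LoRA weights into those layers.
state_dict = {
    "proj.lora_A.weight": torch.zeros(4, 16),  # (rank, in_features)
    "proj.lora_B.weight": torch.zeros(16, 4),  # (out_features, rank)
}
result = set_peft_model_state_dict(model, state_dict, adapter_name="demo")
print(result.unexpected_keys)  # keys in the state dict that found no matching layer
```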
+ if any(cls.text_encoder_name in key for key in keys): + # Load the layers corresponding to text encoder and make necessary adjustments. + text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix] + text_encoder_lora_state_dict = { + k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys + } + + if len(text_encoder_lora_state_dict) > 0: + logger.info(f"Loading {prefix}.") + rank = {} + text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict) + + # convert state dict + text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict) + + for name, _ in text_encoder_attn_modules(text_encoder): + for module in ("out_proj", "q_proj", "k_proj", "v_proj"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + for name, _ in text_encoder_mlp_modules(text_encoder): + for module in ("fc1", "fc2"): + rank_key = f"{name}.{module}.lora_B.weight" + if rank_key not in text_encoder_lora_state_dict: + continue + rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1] + + if network_alphas is not None: + alpha_keys = [ + k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix + ] + network_alphas = { + k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict, is_unet=False) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + if is_peft_version("<", "0.9.0"): + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(text_encoder) + + is_model_cpu_offload, is_sequential_cpu_offload = cls._optionally_disable_offloading(_pipeline) + + # inject LoRA layers and load the state dict + # in transformers we automatically check whether the adapter name is already in use or not + text_encoder.load_adapter( + adapter_name=adapter_name, + adapter_state_dict=text_encoder_lora_state_dict, + peft_config=lora_config, + ) + + # scale LoRA layers with `lora_scale` + scale_lora_layers(text_encoder, weight=lora_scale) + + text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) + + # Offload back. + if is_model_cpu_offload: + _pipeline.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + _pipeline.enable_sequential_cpu_offload() + # Unsafe code /> + + @classmethod + def save_lora_weights( + cls, + save_directory: Union[str, os.PathLike], + text_encoder_lora_layers: Dict[str, torch.nn.Module] = None, + transformer_lora_layers: Dict[str, torch.nn.Module] = None, + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + ): + r""" + Save the LoRA parameters corresponding to the UNet and text encoder. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save LoRA parameters to. Will be created if it doesn't exist. + unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `unet`. 
+ text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`): + State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text + encoder LoRA state dict because it comes from ๐Ÿค— Transformers. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + """ + state_dict = {} + + if not (transformer_lora_layers or text_encoder_lora_layers): + raise ValueError("You must pass at least one of `transformer_lora_layers` or `text_encoder_lora_layers`.") + + if transformer_lora_layers: + state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name)) + + if text_encoder_lora_layers: + state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name)) + + # Save the model + cls.write_lora_layers( + state_dict=state_dict, + save_directory=save_directory, + is_main_process=is_main_process, + weight_name=weight_name, + save_function=save_function, + safe_serialization=safe_serialization, + ) + + +class LoraLoaderMixin(StableDiffusionLoraLoaderMixin): + def __init__(self, *args, **kwargs): + deprecation_message = "LoraLoaderMixin is deprecated and this will be removed in a future version. Please use `StableDiffusionLoraLoaderMixin`, instead." + deprecate("LoraLoaderMixin", "1.0.0", deprecation_message) + super().__init__(*args, **kwargs) diff --git a/diffusers3/loaders/peft.py b/diffusers3/loaders/peft.py new file mode 100644 index 0000000000000000000000000000000000000000..89d6a28b14dd937f99a6ea9e77d46eac3f4511f3 --- /dev/null +++ b/diffusers3/loaders/peft.py @@ -0,0 +1,395 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
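The new `diffusers3/loaders/peft.py` module below adds `PeftAdapterMixin`, which manages PEFT adapters directly on a model, complementing the pipeline-level LoRA mixins above. A hedged usage sketch of the typical flow (assuming `peft` is installed; the base checkpoint and adapter name are only illustrative):

```py
from diffusers import UNet2DConditionModel
from peft import LoraConfig

# Any diffusers model that inherits PeftAdapterMixin exposes the same methods.
unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
)

# Attach a fresh, trainable LoRA adapter to the attention projections.
unet.add_adapter(
    LoraConfig(r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"]),
    adapter_name="my_lora",
)

print(unet.active_adapters())                   # adapters currently in use
unet.set_adapters(["my_lora"], weights=[0.8])   # re-weight the adapter
unet.disable_adapters()                         # run the base weights only
unet.enable_adapters()                          # switch the adapter back on
```

Adapter state lives on the model itself (`peft_config` and the injected tuner layers), which is what the `set_adapters`, `disable_adapters`, and `enable_adapters` methods below operate on.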
+import inspect +from functools import partial +from typing import Dict, List, Optional, Union + +from ..utils import ( + MIN_PEFT_VERSION, + USE_PEFT_BACKEND, + check_peft_version, + delete_adapter_layers, + is_peft_available, + set_adapter_layers, + set_weights_and_activate_adapters, +) +from .unet_loader_utils import _maybe_expand_lora_scales + + +_SET_ADAPTER_SCALE_FN_MAPPING = { + "UNet2DConditionModel": _maybe_expand_lora_scales, + "UNetMotionModel": _maybe_expand_lora_scales, + "SD3Transformer2DModel": lambda model_cls, weights: weights, + "FluxTransformer2DModel": lambda model_cls, weights: weights, +} + + +class PeftAdapterMixin: + """ + A class containing all functions for loading and using adapters weights that are supported in PEFT library. For + more details about adapters and injecting them in a base model, check out the PEFT + [documentation](https://huggingface.co/docs/peft/index). + + Install the latest version of PEFT, and use this mixin to: + + - Attach new adapters in the model. + - Attach multiple adapters and iteratively activate/deactivate them. + - Activate/deactivate all adapters from the model. + - Get a list of the active adapters. + """ + + _hf_peft_config_loaded = False + + def set_adapters( + self, + adapter_names: Union[List[str], str], + weights: Optional[Union[float, Dict, List[float], List[Dict], List[None]]] = None, + ): + """ + Set the currently active adapters for use in the UNet. + + Args: + adapter_names (`List[str]` or `str`): + The names of the adapters to use. + adapter_weights (`Union[List[float], float]`, *optional*): + The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the + adapters. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5]) + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for `set_adapters()`.") + + adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names + + # Expand weights into a list, one entry per adapter + # examples for e.g. 2 adapters: [{...}, 7] -> [7,7] ; None -> [None, None] + if not isinstance(weights, list): + weights = [weights] * len(adapter_names) + + if len(adapter_names) != len(weights): + raise ValueError( + f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}." + ) + + # Set None values to default of 1.0 + # e.g. [{...}, 7] -> [{...}, 7] ; [None, None] -> [1.0, 1.0] + weights = [w if w is not None else 1.0 for w in weights] + + # e.g. [{...}, 7] -> [{expanded dict...}, 7] + scale_expansion_fn = _SET_ADAPTER_SCALE_FN_MAPPING[self.__class__.__name__] + weights = scale_expansion_fn(self, weights) + + set_weights_and_activate_adapters(self, adapter_names, weights) + + def add_adapter(self, adapter_config, adapter_name: str = "default") -> None: + r""" + Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned + to the adapter to follow the convention of the PEFT library. 
+ + If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT + [documentation](https://huggingface.co/docs/peft). + + Args: + adapter_config (`[~peft.PeftConfig]`): + The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt + methods. + adapter_name (`str`, *optional*, defaults to `"default"`): + The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not is_peft_available(): + raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") + + from peft import PeftConfig, inject_adapter_in_model + + if not self._hf_peft_config_loaded: + self._hf_peft_config_loaded = True + elif adapter_name in self.peft_config: + raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") + + if not isinstance(adapter_config, PeftConfig): + raise ValueError( + f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." + ) + + # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is + # handled by the `load_lora_layers` or `StableDiffusionLoraLoaderMixin`. Therefore we set it to `None` here. + adapter_config.base_model_name_or_path = None + inject_adapter_in_model(adapter_config, self, adapter_name) + self.set_adapter(adapter_name) + + def set_adapter(self, adapter_name: Union[str, List[str]]) -> None: + """ + Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + + Args: + adapter_name (Union[str, List[str]])): + The list of adapters to set or the adapter name in the case of a single adapter. + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + if isinstance(adapter_name, str): + adapter_name = [adapter_name] + + missing = set(adapter_name) - set(self.peft_config) + if len(missing) > 0: + raise ValueError( + f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." + f" current loaded adapters are: {list(self.peft_config.keys())}" + ) + + from peft.tuners.tuners_utils import BaseTunerLayer + + _adapters_has_been_set = False + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_name) + # Previous versions of PEFT does not support multi-adapter inference + elif not hasattr(module, "set_adapter") and len(adapter_name) != 1: + raise ValueError( + "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT." + " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`" + ) + else: + module.active_adapter = adapter_name + _adapters_has_been_set = True + + if not _adapters_has_been_set: + raise ValueError( + "Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters." 
+ ) + + def disable_adapters(self) -> None: + r""" + Disable all adapters attached to the model and fallback to inference with the base model only. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=False) + else: + # support for older PEFT versions + module.disable_adapters = True + + def enable_adapters(self) -> None: + """ + Enable adapters that are attached to the model. The model uses `self.active_adapters()` to retrieve the list of + adapters to enable. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=True) + else: + # support for older PEFT versions + module.disable_adapters = False + + def active_adapters(self) -> List[str]: + """ + Gets the current list of active adapters of the model. + + If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT + [documentation](https://huggingface.co/docs/peft). + """ + check_peft_version(min_version=MIN_PEFT_VERSION) + + if not is_peft_available(): + raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") + + if not self._hf_peft_config_loaded: + raise ValueError("No adapter loaded. Please load an adapter first.") + + from peft.tuners.tuners_utils import BaseTunerLayer + + for _, module in self.named_modules(): + if isinstance(module, BaseTunerLayer): + return module.active_adapter + + def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for `fuse_lora()`.") + + self.lora_scale = lora_scale + self._safe_fusing = safe_fusing + self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names)) + + def _fuse_lora_apply(self, module, adapter_names=None): + from peft.tuners.tuners_utils import BaseTunerLayer + + merge_kwargs = {"safe_merge": self._safe_fusing} + + if isinstance(module, BaseTunerLayer): + if self.lora_scale != 1.0: + module.scale_layer(self.lora_scale) + + # For BC with prevous PEFT versions, we need to check the signature + # of the `merge` method to see if it supports the `adapter_names` argument. + supported_merge_kwargs = list(inspect.signature(module.merge).parameters) + if "adapter_names" in supported_merge_kwargs: + merge_kwargs["adapter_names"] = adapter_names + elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None: + raise ValueError( + "The `adapter_names` argument is not supported with your PEFT version. Please upgrade" + " to the latest version of PEFT. 
`pip install -U peft`" + ) + + module.merge(**merge_kwargs) + + def unfuse_lora(self): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for `unfuse_lora()`.") + self.apply(self._unfuse_lora_apply) + + def _unfuse_lora_apply(self, module): + from peft.tuners.tuners_utils import BaseTunerLayer + + if isinstance(module, BaseTunerLayer): + module.unmerge() + + def unload_lora(self): + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for `unload_lora()`.") + + from ..utils import recurse_remove_peft_layers + + recurse_remove_peft_layers(self) + if hasattr(self, "peft_config"): + del self.peft_config + + def disable_lora(self): + """ + Disables the active LoRA layers of the underlying model. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + pipeline.disable_lora() + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + set_adapter_layers(self, enabled=False) + + def enable_lora(self): + """ + Enables the active LoRA layers of the underlying model. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + pipeline.enable_lora() + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + set_adapter_layers(self, enabled=True) + + def delete_adapters(self, adapter_names: Union[List[str], str]): + """ + Delete an adapter's LoRA layers from the underlying model. + + Args: + adapter_names (`Union[List[str], str]`): + The names (single string or list of strings) of the adapter to delete. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.load_lora_weights( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic" + ) + pipeline.delete_adapters("cinematic") + ``` + """ + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + if isinstance(adapter_names, str): + adapter_names = [adapter_names] + + for adapter_name in adapter_names: + delete_adapter_layers(self, adapter_name) + + # Pop also the corresponding adapter from the config + if hasattr(self, "peft_config"): + self.peft_config.pop(adapter_name, None) diff --git a/diffusers3/loaders/single_file.py b/diffusers3/loaders/single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..c0cbfc7138572d0c6aa74e51776920eeed29d864 --- /dev/null +++ b/diffusers3/loaders/single_file.py @@ -0,0 +1,550 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import inspect +import os + +import torch +from huggingface_hub import snapshot_download +from huggingface_hub.utils import LocalEntryNotFoundError, validate_hf_hub_args +from packaging import version + +from ..utils import deprecate, is_transformers_available, logging +from .single_file_utils import ( + SingleFileComponentError, + _is_legacy_scheduler_kwargs, + _is_model_weights_in_cached_folder, + _legacy_load_clip_tokenizer, + _legacy_load_safety_checker, + _legacy_load_scheduler, + create_diffusers_clip_model_from_ldm, + create_diffusers_t5_model_from_checkpoint, + fetch_diffusers_config, + fetch_original_config, + is_clip_model_in_single_file, + is_t5_in_single_file, + load_single_file_checkpoint, +) + + +logger = logging.get_logger(__name__) + +# Legacy behaviour. `from_single_file` does not load the safety checker unless explicitly provided +SINGLE_FILE_OPTIONAL_COMPONENTS = ["safety_checker"] + +if is_transformers_available(): + import transformers + from transformers import PreTrainedModel, PreTrainedTokenizer + + +def load_single_file_sub_model( + library_name, + class_name, + name, + checkpoint, + pipelines, + is_pipeline_module, + cached_model_config_path, + original_config=None, + local_files_only=False, + torch_dtype=None, + is_legacy_loading=False, + **kwargs, +): + if is_pipeline_module: + pipeline_module = getattr(pipelines, library_name) + class_obj = getattr(pipeline_module, class_name) + else: + # else we just import it from the library. 
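When a component class does not live in a pipeline module, the `else` branch below resolves it with a plain dynamic import of the `(library, class)` pair recorded in the pipeline config. A stand-alone illustration of that resolution step (assumes `transformers` is installed):

```py
import importlib


def resolve_class(library_name: str, class_name: str):
    """Turn a (library, class) pair from a model_index.json entry into a Python class."""
    library = importlib.import_module(library_name)
    return getattr(library, class_name)


# e.g. the "text_encoder" entry of a Stable Diffusion model_index.json
print(resolve_class("transformers", "CLIPTextModel"))
```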
+ library = importlib.import_module(library_name) + class_obj = getattr(library, class_name) + + if is_transformers_available(): + transformers_version = version.parse(version.parse(transformers.__version__).base_version) + else: + transformers_version = "N/A" + + is_transformers_model = ( + is_transformers_available() + and issubclass(class_obj, PreTrainedModel) + and transformers_version >= version.parse("4.20.0") + ) + is_tokenizer = ( + is_transformers_available() + and issubclass(class_obj, PreTrainedTokenizer) + and transformers_version >= version.parse("4.20.0") + ) + + diffusers_module = importlib.import_module(__name__.split(".")[0]) + is_diffusers_single_file_model = issubclass(class_obj, diffusers_module.FromOriginalModelMixin) + is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) + is_diffusers_scheduler = issubclass(class_obj, diffusers_module.SchedulerMixin) + + if is_diffusers_single_file_model: + load_method = getattr(class_obj, "from_single_file") + + # We cannot provide two different config options to the `from_single_file` method + # Here we have to ignore loading the config from `cached_model_config_path` if `original_config` is provided + if original_config: + cached_model_config_path = None + + loaded_sub_model = load_method( + pretrained_model_link_or_path_or_dict=checkpoint, + original_config=original_config, + config=cached_model_config_path, + subfolder=name, + torch_dtype=torch_dtype, + local_files_only=local_files_only, + **kwargs, + ) + + elif is_transformers_model and is_clip_model_in_single_file(class_obj, checkpoint): + loaded_sub_model = create_diffusers_clip_model_from_ldm( + class_obj, + checkpoint=checkpoint, + config=cached_model_config_path, + subfolder=name, + torch_dtype=torch_dtype, + local_files_only=local_files_only, + is_legacy_loading=is_legacy_loading, + ) + + elif is_transformers_model and is_t5_in_single_file(checkpoint): + loaded_sub_model = create_diffusers_t5_model_from_checkpoint( + class_obj, + checkpoint=checkpoint, + config=cached_model_config_path, + subfolder=name, + torch_dtype=torch_dtype, + local_files_only=local_files_only, + ) + + elif is_tokenizer and is_legacy_loading: + loaded_sub_model = _legacy_load_clip_tokenizer( + class_obj, checkpoint=checkpoint, config=cached_model_config_path, local_files_only=local_files_only + ) + + elif is_diffusers_scheduler and (is_legacy_loading or _is_legacy_scheduler_kwargs(kwargs)): + loaded_sub_model = _legacy_load_scheduler( + class_obj, checkpoint=checkpoint, component_name=name, original_config=original_config, **kwargs + ) + + else: + if not hasattr(class_obj, "from_pretrained"): + raise ValueError( + ( + f"The component {class_obj.__name__} cannot be loaded as it does not seem to have" + " a supported loading method." + ) + ) + + loading_kwargs = {} + loading_kwargs.update( + { + "pretrained_model_name_or_path": cached_model_config_path, + "subfolder": name, + "local_files_only": local_files_only, + } + ) + + # Schedulers and Tokenizers don't make use of torch_dtype + # Skip passing it to those objects + if issubclass(class_obj, torch.nn.Module): + loading_kwargs.update({"torch_dtype": torch_dtype}) + + if is_diffusers_model or is_transformers_model: + if not _is_model_weights_in_cached_folder(cached_model_config_path, name): + raise SingleFileComponentError( + f"Failed to load {class_name}. Weights for this component appear to be missing in the checkpoint." 
+ ) + + load_method = getattr(class_obj, "from_pretrained") + loaded_sub_model = load_method(**loading_kwargs) + + return loaded_sub_model + + +def _map_component_types_to_config_dict(component_types): + diffusers_module = importlib.import_module(__name__.split(".")[0]) + config_dict = {} + component_types.pop("self", None) + + if is_transformers_available(): + transformers_version = version.parse(version.parse(transformers.__version__).base_version) + else: + transformers_version = "N/A" + + for component_name, component_value in component_types.items(): + is_diffusers_model = issubclass(component_value[0], diffusers_module.ModelMixin) + is_scheduler_enum = component_value[0].__name__ == "KarrasDiffusionSchedulers" + is_scheduler = issubclass(component_value[0], diffusers_module.SchedulerMixin) + + is_transformers_model = ( + is_transformers_available() + and issubclass(component_value[0], PreTrainedModel) + and transformers_version >= version.parse("4.20.0") + ) + is_transformers_tokenizer = ( + is_transformers_available() + and issubclass(component_value[0], PreTrainedTokenizer) + and transformers_version >= version.parse("4.20.0") + ) + + if is_diffusers_model and component_name not in SINGLE_FILE_OPTIONAL_COMPONENTS: + config_dict[component_name] = ["diffusers", component_value[0].__name__] + + elif is_scheduler_enum or is_scheduler: + if is_scheduler_enum: + # Since we cannot fetch a scheduler config from the hub, we default to DDIMScheduler + # if the type hint is a KarrassDiffusionSchedulers enum + config_dict[component_name] = ["diffusers", "DDIMScheduler"] + + elif is_scheduler: + config_dict[component_name] = ["diffusers", component_value[0].__name__] + + elif ( + is_transformers_model or is_transformers_tokenizer + ) and component_name not in SINGLE_FILE_OPTIONAL_COMPONENTS: + config_dict[component_name] = ["transformers", component_value[0].__name__] + + else: + config_dict[component_name] = [None, None] + + return config_dict + + +def _infer_pipeline_config_dict(pipeline_class): + parameters = inspect.signature(pipeline_class.__init__).parameters + required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} + component_types = pipeline_class._get_signature_types() + + # Ignore parameters that are not required for the pipeline + component_types = {k: v for k, v in component_types.items() if k in required_parameters} + config_dict = _map_component_types_to_config_dict(component_types) + + return config_dict + + +def _download_diffusers_model_config_from_hub( + pretrained_model_name_or_path, + cache_dir, + revision, + proxies, + force_download=None, + local_files_only=None, + token=None, +): + allow_patterns = ["**/*.json", "*.json", "*.txt", "**/*.txt", "**/*.model"] + cached_model_path = snapshot_download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + revision=revision, + proxies=proxies, + force_download=force_download, + local_files_only=local_files_only, + token=token, + allow_patterns=allow_patterns, + ) + + return cached_model_path + + +class FromSingleFileMixin: + """ + Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`]. + """ + + @classmethod + @validate_hf_hub_args + def from_single_file(cls, pretrained_model_link_or_path, **kwargs): + r""" + Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors` + format. The pipeline is set in evaluation mode (`model.eval()`) by default. 
+ + Parameters: + pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + - A link to the `.ckpt` file (for example + `"https://huggingface.co//blob/main/.ckpt"`) on the Hub. + - A path to a *file* containing all pipeline weights. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + original_config_file (`str`, *optional*): + The path to the original config file that was used to train the model. If not provided, the config file + will be inferred from the checkpoint file. + config (`str`, *optional*): + Can be either: + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing the pipeline + component configs in Diffusers format. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + + Examples: + + ```py + >>> from diffusers import StableDiffusionPipeline + + >>> # Download pipeline from huggingface.co and cache. + >>> pipeline = StableDiffusionPipeline.from_single_file( + ... "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors" + ... ) + + >>> # Download pipeline from local file + >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt + >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly.ckpt") + + >>> # Enable float16 and move to GPU + >>> pipeline = StableDiffusionPipeline.from_single_file( + ... "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt", + ... torch_dtype=torch.float16, + ... ) + >>> pipeline.to("cuda") + ``` + + """ + original_config_file = kwargs.pop("original_config_file", None) + config = kwargs.pop("config", None) + original_config = kwargs.pop("original_config", None) + + if original_config_file is not None: + deprecation_message = ( + "`original_config_file` argument is deprecated and will be removed in future versions." + "please use the `original_config` argument instead." 
+ ) + deprecate("original_config_file", "1.0.0", deprecation_message) + original_config = original_config_file + + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + cache_dir = kwargs.pop("cache_dir", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + + is_legacy_loading = False + + # We shouldn't allow configuring individual models components through a Pipeline creation method + # These model kwargs should be deprecated + scaling_factor = kwargs.get("scaling_factor", None) + if scaling_factor is not None: + deprecation_message = ( + "Passing the `scaling_factor` argument to `from_single_file is deprecated " + "and will be ignored in future versions." + ) + deprecate("scaling_factor", "1.0.0", deprecation_message) + + if original_config is not None: + original_config = fetch_original_config(original_config, local_files_only=local_files_only) + + from ..pipelines.pipeline_utils import _get_pipeline_class + + pipeline_class = _get_pipeline_class(cls, config=None) + + checkpoint = load_single_file_checkpoint( + pretrained_model_link_or_path, + force_download=force_download, + proxies=proxies, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + revision=revision, + ) + + if config is None: + config = fetch_diffusers_config(checkpoint) + default_pretrained_model_config_name = config["pretrained_model_name_or_path"] + else: + default_pretrained_model_config_name = config + + if not os.path.isdir(default_pretrained_model_config_name): + # Provided config is a repo_id + if default_pretrained_model_config_name.count("/") > 1: + raise ValueError( + f'The provided config "{config}"' + " is neither a valid local path nor a valid repo id. Please check the parameter." + ) + try: + # Attempt to download the config files for the pipeline + cached_model_config_path = _download_diffusers_model_config_from_hub( + default_pretrained_model_config_name, + cache_dir=cache_dir, + revision=revision, + proxies=proxies, + force_download=force_download, + local_files_only=local_files_only, + token=token, + ) + config_dict = pipeline_class.load_config(cached_model_config_path) + + except LocalEntryNotFoundError: + # `local_files_only=True` but a local diffusers format model config is not available in the cache + # If `original_config` is not provided, we need override `local_files_only` to False + # to fetch the config files from the hub so that we have a way + # to configure the pipeline components. + + if original_config is None: + logger.warning( + "`local_files_only` is True but no local configs were found for this checkpoint.\n" + "Attempting to download the necessary config files for this pipeline.\n" + ) + cached_model_config_path = _download_diffusers_model_config_from_hub( + default_pretrained_model_config_name, + cache_dir=cache_dir, + revision=revision, + proxies=proxies, + force_download=force_download, + local_files_only=False, + token=token, + ) + config_dict = pipeline_class.load_config(cached_model_config_path) + + else: + # For backwards compatibility + # If `original_config` is provided, then we need to assume we are using legacy loading for pipeline components + logger.warning( + "Detected legacy `from_single_file` loading behavior. Attempting to create the pipeline based on inferred components.\n" + "This may lead to errors if the model components are not correctly inferred. 
\n" + "To avoid this warning, please explicity pass the `config` argument to `from_single_file` with a path to a local diffusers model repo \n" + "e.g. `from_single_file(, config=) \n" + "or run `from_single_file` with `local_files_only=False` first to update the local cache directory with " + "the necessary config files.\n" + ) + is_legacy_loading = True + cached_model_config_path = None + + config_dict = _infer_pipeline_config_dict(pipeline_class) + config_dict["_class_name"] = pipeline_class.__name__ + + else: + # Provided config is a path to a local directory attempt to load directly. + cached_model_config_path = default_pretrained_model_config_name + config_dict = pipeline_class.load_config(cached_model_config_path) + + # pop out "_ignore_files" as it is only needed for download + config_dict.pop("_ignore_files", None) + + expected_modules, optional_kwargs = pipeline_class._get_signature_keys(cls) + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + + init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) + init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} + init_kwargs = {**init_kwargs, **passed_pipe_kwargs} + + from diffusers import pipelines + + # remove `null` components + def load_module(name, value): + if value[0] is None: + return False + if name in passed_class_obj and passed_class_obj[name] is None: + return False + if name in SINGLE_FILE_OPTIONAL_COMPONENTS: + return False + + return True + + init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} + + for name, (library_name, class_name) in logging.tqdm( + sorted(init_dict.items()), desc="Loading pipeline components..." + ): + loaded_sub_model = None + is_pipeline_module = hasattr(pipelines, library_name) + + if name in passed_class_obj: + loaded_sub_model = passed_class_obj[name] + + else: + try: + loaded_sub_model = load_single_file_sub_model( + library_name=library_name, + class_name=class_name, + name=name, + checkpoint=checkpoint, + is_pipeline_module=is_pipeline_module, + cached_model_config_path=cached_model_config_path, + pipelines=pipelines, + torch_dtype=torch_dtype, + original_config=original_config, + local_files_only=local_files_only, + is_legacy_loading=is_legacy_loading, + **kwargs, + ) + except SingleFileComponentError as e: + raise SingleFileComponentError( + ( + f"{e.message}\n" + f"Please load the component before passing it in as an argument to `from_single_file`.\n" + f"\n" + f"{name} = {class_name}.from_pretrained('...')\n" + f"pipe = {pipeline_class.__name__}.from_single_file(, {name}={name})\n" + f"\n" + ) + ) + + init_kwargs[name] = loaded_sub_model + + missing_modules = set(expected_modules) - set(init_kwargs.keys()) + passed_modules = list(passed_class_obj.keys()) + optional_modules = pipeline_class._optional_components + + if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): + for module in missing_modules: + init_kwargs[module] = passed_class_obj.get(module, None) + elif len(missing_modules) > 0: + passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs + raise ValueError( + f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." 
+ ) + + # deprecated kwargs + load_safety_checker = kwargs.pop("load_safety_checker", None) + if load_safety_checker is not None: + deprecation_message = ( + "Please pass instances of `StableDiffusionSafetyChecker` and `AutoImageProcessor`" + "using the `safety_checker` and `feature_extractor` arguments in `from_single_file`" + ) + deprecate("load_safety_checker", "1.0.0", deprecation_message) + + safety_checker_components = _legacy_load_safety_checker(local_files_only, torch_dtype) + init_kwargs.update(safety_checker_components) + + pipe = pipeline_class(**init_kwargs) + + return pipe diff --git a/diffusers3/loaders/single_file_model.py b/diffusers3/loaders/single_file_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe1abfbead51be31035d2f3cdcf55dca48c8e94 --- /dev/null +++ b/diffusers3/loaders/single_file_model.py @@ -0,0 +1,318 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import inspect +import re +from contextlib import nullcontext +from typing import Optional + +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import deprecate, is_accelerate_available, logging +from .single_file_utils import ( + SingleFileComponentError, + convert_animatediff_checkpoint_to_diffusers, + convert_controlnet_checkpoint, + convert_flux_transformer_checkpoint_to_diffusers, + convert_ldm_unet_checkpoint, + convert_ldm_vae_checkpoint, + convert_sd3_transformer_checkpoint_to_diffusers, + convert_stable_cascade_unet_single_file_to_diffusers, + create_controlnet_diffusers_config_from_ldm, + create_unet_diffusers_config_from_ldm, + create_vae_diffusers_config_from_ldm, + fetch_diffusers_config, + fetch_original_config, + load_single_file_checkpoint, +) + + +logger = logging.get_logger(__name__) + + +if is_accelerate_available(): + from accelerate import init_empty_weights + + from ..models.modeling_utils import load_model_dict_into_meta + + +SINGLE_FILE_LOADABLE_CLASSES = { + "StableCascadeUNet": { + "checkpoint_mapping_fn": convert_stable_cascade_unet_single_file_to_diffusers, + }, + "UNet2DConditionModel": { + "checkpoint_mapping_fn": convert_ldm_unet_checkpoint, + "config_mapping_fn": create_unet_diffusers_config_from_ldm, + "default_subfolder": "unet", + "legacy_kwargs": { + "num_in_channels": "in_channels", # Legacy kwargs supported by `from_single_file` mapped to new args + }, + }, + "AutoencoderKL": { + "checkpoint_mapping_fn": convert_ldm_vae_checkpoint, + "config_mapping_fn": create_vae_diffusers_config_from_ldm, + "default_subfolder": "vae", + }, + "ControlNetModel": { + "checkpoint_mapping_fn": convert_controlnet_checkpoint, + "config_mapping_fn": create_controlnet_diffusers_config_from_ldm, + }, + "SD3Transformer2DModel": { + "checkpoint_mapping_fn": convert_sd3_transformer_checkpoint_to_diffusers, + "default_subfolder": "transformer", + }, + "MotionAdapter": { + "checkpoint_mapping_fn": convert_animatediff_checkpoint_to_diffusers, + }, + "SparseControlNetModel": { + 
"checkpoint_mapping_fn": convert_animatediff_checkpoint_to_diffusers, + }, + "FluxTransformer2DModel": { + "checkpoint_mapping_fn": convert_flux_transformer_checkpoint_to_diffusers, + "default_subfolder": "transformer", + }, +} + + +def _get_single_file_loadable_mapping_class(cls): + diffusers_module = importlib.import_module(__name__.split(".")[0]) + for loadable_class_str in SINGLE_FILE_LOADABLE_CLASSES: + loadable_class = getattr(diffusers_module, loadable_class_str) + + if issubclass(cls, loadable_class): + return loadable_class_str + + return None + + +def _get_mapping_function_kwargs(mapping_fn, **kwargs): + parameters = inspect.signature(mapping_fn).parameters + + mapping_kwargs = {} + for parameter in parameters: + if parameter in kwargs: + mapping_kwargs[parameter] = kwargs[parameter] + + return mapping_kwargs + + +class FromOriginalModelMixin: + """ + Load pretrained weights saved in the `.ckpt` or `.safetensors` format into a model. + """ + + @classmethod + @validate_hf_hub_args + def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] = None, **kwargs): + r""" + Instantiate a model from pretrained weights saved in the original `.ckpt` or `.safetensors` format. The model + is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pretrained_model_link_or_path_or_dict (`str`, *optional*): + Can be either: + - A link to the `.safetensors` or `.ckpt` file (for example + `"https://huggingface.co//blob/main/.safetensors"`) on the Hub. + - A path to a local *file* containing the weights of the component model. + - A state dict containing the component model weights. + config (`str`, *optional*): + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted + on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing the pipeline component + configs in Diffusers format. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + original_config (`str`, *optional*): + Dict or path to a yaml file containing the configuration for the model in its original format. + If a dict is provided, it will be used to initialize the model configuration. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to True, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. 
+ revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (for example the pipeline components of the + specific pipeline class). The overwritten components are directly passed to the pipelines `__init__` + method. See example below for more information. + + ```py + >>> from diffusers import StableCascadeUNet + + >>> ckpt_path = "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite.safetensors" + >>> model = StableCascadeUNet.from_single_file(ckpt_path) + ``` + """ + + mapping_class_name = _get_single_file_loadable_mapping_class(cls) + # if class_name not in SINGLE_FILE_LOADABLE_CLASSES: + if mapping_class_name is None: + raise ValueError( + f"FromOriginalModelMixin is currently only compatible with {', '.join(SINGLE_FILE_LOADABLE_CLASSES.keys())}" + ) + + pretrained_model_link_or_path = kwargs.get("pretrained_model_link_or_path", None) + if pretrained_model_link_or_path is not None: + deprecation_message = ( + "Please use `pretrained_model_link_or_path_or_dict` argument instead for model classes" + ) + deprecate("pretrained_model_link_or_path", "1.0.0", deprecation_message) + pretrained_model_link_or_path_or_dict = pretrained_model_link_or_path + + config = kwargs.pop("config", None) + original_config = kwargs.pop("original_config", None) + + if config is not None and original_config is not None: + raise ValueError( + "`from_single_file` cannot accept both `config` and `original_config` arguments. Please provide only one of these arguments" + ) + + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + cache_dir = kwargs.pop("cache_dir", None) + local_files_only = kwargs.pop("local_files_only", None) + subfolder = kwargs.pop("subfolder", None) + revision = kwargs.pop("revision", None) + torch_dtype = kwargs.pop("torch_dtype", None) + + if isinstance(pretrained_model_link_or_path_or_dict, dict): + checkpoint = pretrained_model_link_or_path_or_dict + else: + checkpoint = load_single_file_checkpoint( + pretrained_model_link_or_path_or_dict, + force_download=force_download, + proxies=proxies, + token=token, + cache_dir=cache_dir, + local_files_only=local_files_only, + revision=revision, + ) + + mapping_functions = SINGLE_FILE_LOADABLE_CLASSES[mapping_class_name] + + checkpoint_mapping_fn = mapping_functions["checkpoint_mapping_fn"] + if original_config: + if "config_mapping_fn" in mapping_functions: + config_mapping_fn = mapping_functions["config_mapping_fn"] + else: + config_mapping_fn = None + + if config_mapping_fn is None: + raise ValueError( + ( + f"`original_config` has been provided for {mapping_class_name} but no mapping function" + "was found to convert the original config to a Diffusers config in" + "`diffusers.loaders.single_file_utils`" + ) + ) + + if isinstance(original_config, str): + # If original_config is a URL or filepath fetch the original_config dict + original_config = fetch_original_config(original_config, local_files_only=local_files_only) + + config_mapping_kwargs = _get_mapping_function_kwargs(config_mapping_fn, **kwargs) + diffusers_model_config = config_mapping_fn( + original_config=original_config, checkpoint=checkpoint, **config_mapping_kwargs + ) + else: + if config: + if isinstance(config, str): + 
default_pretrained_model_config_name = config + else: + raise ValueError( + ( + "Invalid `config` argument. Please provide a string representing a repo id" + "or path to a local Diffusers model repo." + ) + ) + + else: + config = fetch_diffusers_config(checkpoint) + default_pretrained_model_config_name = config["pretrained_model_name_or_path"] + + if "default_subfolder" in mapping_functions: + subfolder = mapping_functions["default_subfolder"] + + subfolder = subfolder or config.pop( + "subfolder", None + ) # some configs contain a subfolder key, e.g. StableCascadeUNet + + diffusers_model_config = cls.load_config( + pretrained_model_name_or_path=default_pretrained_model_config_name, + subfolder=subfolder, + local_files_only=local_files_only, + ) + expected_kwargs, optional_kwargs = cls._get_signature_keys(cls) + + # Map legacy kwargs to new kwargs + if "legacy_kwargs" in mapping_functions: + legacy_kwargs = mapping_functions["legacy_kwargs"] + for legacy_key, new_key in legacy_kwargs.items(): + if legacy_key in kwargs: + kwargs[new_key] = kwargs.pop(legacy_key) + + model_kwargs = {k: kwargs.get(k) for k in kwargs if k in expected_kwargs or k in optional_kwargs} + diffusers_model_config.update(model_kwargs) + + checkpoint_mapping_kwargs = _get_mapping_function_kwargs(checkpoint_mapping_fn, **kwargs) + diffusers_format_checkpoint = checkpoint_mapping_fn( + config=diffusers_model_config, checkpoint=checkpoint, **checkpoint_mapping_kwargs + ) + if not diffusers_format_checkpoint: + raise SingleFileComponentError( + f"Failed to load {mapping_class_name}. Weights for this component appear to be missing in the checkpoint." + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + model = cls.from_config(diffusers_model_config) + + if is_accelerate_available(): + unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) + + else: + _, unexpected_keys = model.load_state_dict(diffusers_format_checkpoint, strict=False) + + if model._keys_to_ignore_on_load_unexpected is not None: + for pat in model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + + if torch_dtype is not None: + model.to(torch_dtype) + + model.eval() + + return model diff --git a/diffusers3/loaders/single_file_utils.py b/diffusers3/loaders/single_file_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d620c15e8377158109b76e4e929bae398a0346e6 --- /dev/null +++ b/diffusers3/loaders/single_file_utils.py @@ -0,0 +1,2098 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
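The `from_single_file` entry point above resolves a checkpoint to one of the classes in `SINGLE_FILE_LOADABLE_CLASSES`, fetches or builds a Diffusers-format config, and remaps the original state dict before loading the weights. A minimal, hedged usage sketch follows; the checkpoint URL is only a placeholder, and the import assumes the vendored `diffusers3` package mirrors upstream `diffusers` top-level exports (which the loader itself relies on when resolving the mapping class).

```py
# Illustrative sketch only: load a ControlNet from a single original-format checkpoint
# with the vendored single-file loader. The URL is a placeholder; any Hub-hosted
# .safetensors/.ckpt ControlNet file or a local file path should work the same way.
import torch

from diffusers3.models.controlnet import ControlNetModel

ckpt = "https://huggingface.co/<repo_id>/blob/main/<weights>.safetensors"  # placeholder
controlnet = ControlNetModel.from_single_file(ckpt, torch_dtype=torch.float16)
# from_single_file already returns the model in eval mode; just move it to the target device.
controlnet.to("cuda" if torch.cuda.is_available() else "cpu")
```

Everything else (config lookup via `fetch_diffusers_config`, key remapping via `convert_controlnet_checkpoint`) is inferred from the checkpoint keys by the utilities defined below.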
+"""Conversion script for the Stable Diffusion checkpoints.""" + +import os +import re +from contextlib import nullcontext +from io import BytesIO +from urllib.parse import urlparse + +import requests +import torch +import yaml + +from ..models.modeling_utils import load_state_dict +from ..schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EDMDPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ..utils import ( + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, + deprecate, + is_accelerate_available, + is_transformers_available, + logging, +) +from ..utils.hub_utils import _get_model_file + + +if is_transformers_available(): + from transformers import AutoImageProcessor + +if is_accelerate_available(): + from accelerate import init_empty_weights + + from ..models.modeling_utils import load_model_dict_into_meta + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +CHECKPOINT_KEY_NAMES = { + "v2": "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", + "xl_base": "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias", + "xl_refiner": "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias", + "upscale": "model.diffusion_model.input_blocks.10.0.skip_connection.bias", + "controlnet": "control_model.time_embed.0.weight", + "playground-v2-5": "edm_mean", + "inpainting": "model.diffusion_model.input_blocks.0.0.weight", + "clip": "cond_stage_model.transformer.text_model.embeddings.position_embedding.weight", + "clip_sdxl": "conditioner.embedders.0.transformer.text_model.embeddings.position_embedding.weight", + "clip_sd3": "text_encoders.clip_l.transformer.text_model.embeddings.position_embedding.weight", + "open_clip": "cond_stage_model.model.token_embedding.weight", + "open_clip_sdxl": "conditioner.embedders.1.model.positional_embedding", + "open_clip_sdxl_refiner": "conditioner.embedders.0.model.text_projection", + "open_clip_sd3": "text_encoders.clip_g.transformer.text_model.embeddings.position_embedding.weight", + "stable_cascade_stage_b": "down_blocks.1.0.channelwise.0.weight", + "stable_cascade_stage_c": "clip_txt_mapper.weight", + "sd3": "model.diffusion_model.joint_blocks.0.context_block.adaLN_modulation.1.bias", + "animatediff": "down_blocks.0.motion_modules.0.temporal_transformer.transformer_blocks.0.attention_blocks.0.pos_encoder.pe", + "animatediff_v2": "mid_block.motion_modules.0.temporal_transformer.norm.bias", + "animatediff_sdxl_beta": "up_blocks.2.motion_modules.0.temporal_transformer.norm.weight", + "animatediff_scribble": "controlnet_cond_embedding.conv_in.weight", + "animatediff_rgb": "controlnet_cond_embedding.weight", + "flux": [ + "double_blocks.0.img_attn.norm.key_norm.scale", + "model.diffusion_model.double_blocks.0.img_attn.norm.key_norm.scale", + ], +} + +DIFFUSERS_DEFAULT_PIPELINE_PATHS = { + "xl_base": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0"}, + "xl_refiner": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-xl-refiner-1.0"}, + "xl_inpaint": {"pretrained_model_name_or_path": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1"}, + "playground-v2-5": {"pretrained_model_name_or_path": "playgroundai/playground-v2.5-1024px-aesthetic"}, + "upscale": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-x4-upscaler"}, + "inpainting": {"pretrained_model_name_or_path": "Lykon/dreamshaper-8-inpainting"}, + 
"inpainting_v2": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-2-inpainting"}, + "controlnet": {"pretrained_model_name_or_path": "lllyasviel/control_v11p_sd15_canny"}, + "v2": {"pretrained_model_name_or_path": "stabilityai/stable-diffusion-2-1"}, + "v1": {"pretrained_model_name_or_path": "Lykon/dreamshaper-8"}, + "stable_cascade_stage_b": {"pretrained_model_name_or_path": "stabilityai/stable-cascade", "subfolder": "decoder"}, + "stable_cascade_stage_b_lite": { + "pretrained_model_name_or_path": "stabilityai/stable-cascade", + "subfolder": "decoder_lite", + }, + "stable_cascade_stage_c": { + "pretrained_model_name_or_path": "stabilityai/stable-cascade-prior", + "subfolder": "prior", + }, + "stable_cascade_stage_c_lite": { + "pretrained_model_name_or_path": "stabilityai/stable-cascade-prior", + "subfolder": "prior_lite", + }, + "sd3": { + "pretrained_model_name_or_path": "stabilityai/stable-diffusion-3-medium-diffusers", + }, + "animatediff_v1": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-v1-5"}, + "animatediff_v2": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-v1-5-2"}, + "animatediff_v3": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-v1-5-3"}, + "animatediff_sdxl_beta": {"pretrained_model_name_or_path": "guoyww/animatediff-motion-adapter-sdxl-beta"}, + "animatediff_scribble": {"pretrained_model_name_or_path": "guoyww/animatediff-sparsectrl-scribble"}, + "animatediff_rgb": {"pretrained_model_name_or_path": "guoyww/animatediff-sparsectrl-rgb"}, + "flux-dev": {"pretrained_model_name_or_path": "black-forest-labs/FLUX.1-dev"}, + "flux-schnell": {"pretrained_model_name_or_path": "black-forest-labs/FLUX.1-schnell"}, +} + +# Use to configure model sample size when original config is provided +DIFFUSERS_TO_LDM_DEFAULT_IMAGE_SIZE_MAP = { + "xl_base": 1024, + "xl_refiner": 1024, + "xl_inpaint": 1024, + "playground-v2-5": 1024, + "upscale": 512, + "inpainting": 512, + "inpainting_v2": 512, + "controlnet": 512, + "v2": 768, + "v1": 512, +} + + +DIFFUSERS_TO_LDM_MAPPING = { + "unet": { + "layers": { + "time_embedding.linear_1.weight": "time_embed.0.weight", + "time_embedding.linear_1.bias": "time_embed.0.bias", + "time_embedding.linear_2.weight": "time_embed.2.weight", + "time_embedding.linear_2.bias": "time_embed.2.bias", + "conv_in.weight": "input_blocks.0.0.weight", + "conv_in.bias": "input_blocks.0.0.bias", + "conv_norm_out.weight": "out.0.weight", + "conv_norm_out.bias": "out.0.bias", + "conv_out.weight": "out.2.weight", + "conv_out.bias": "out.2.bias", + }, + "class_embed_type": { + "class_embedding.linear_1.weight": "label_emb.0.0.weight", + "class_embedding.linear_1.bias": "label_emb.0.0.bias", + "class_embedding.linear_2.weight": "label_emb.0.2.weight", + "class_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + "addition_embed_type": { + "add_embedding.linear_1.weight": "label_emb.0.0.weight", + "add_embedding.linear_1.bias": "label_emb.0.0.bias", + "add_embedding.linear_2.weight": "label_emb.0.2.weight", + "add_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + }, + "controlnet": { + "layers": { + "time_embedding.linear_1.weight": "time_embed.0.weight", + "time_embedding.linear_1.bias": "time_embed.0.bias", + "time_embedding.linear_2.weight": "time_embed.2.weight", + "time_embedding.linear_2.bias": "time_embed.2.bias", + "conv_in.weight": "input_blocks.0.0.weight", + "conv_in.bias": "input_blocks.0.0.bias", + "controlnet_cond_embedding.conv_in.weight": "input_hint_block.0.weight", + 
"controlnet_cond_embedding.conv_in.bias": "input_hint_block.0.bias", + "controlnet_cond_embedding.conv_out.weight": "input_hint_block.14.weight", + "controlnet_cond_embedding.conv_out.bias": "input_hint_block.14.bias", + }, + "class_embed_type": { + "class_embedding.linear_1.weight": "label_emb.0.0.weight", + "class_embedding.linear_1.bias": "label_emb.0.0.bias", + "class_embedding.linear_2.weight": "label_emb.0.2.weight", + "class_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + "addition_embed_type": { + "add_embedding.linear_1.weight": "label_emb.0.0.weight", + "add_embedding.linear_1.bias": "label_emb.0.0.bias", + "add_embedding.linear_2.weight": "label_emb.0.2.weight", + "add_embedding.linear_2.bias": "label_emb.0.2.bias", + }, + }, + "vae": { + "encoder.conv_in.weight": "encoder.conv_in.weight", + "encoder.conv_in.bias": "encoder.conv_in.bias", + "encoder.conv_out.weight": "encoder.conv_out.weight", + "encoder.conv_out.bias": "encoder.conv_out.bias", + "encoder.conv_norm_out.weight": "encoder.norm_out.weight", + "encoder.conv_norm_out.bias": "encoder.norm_out.bias", + "decoder.conv_in.weight": "decoder.conv_in.weight", + "decoder.conv_in.bias": "decoder.conv_in.bias", + "decoder.conv_out.weight": "decoder.conv_out.weight", + "decoder.conv_out.bias": "decoder.conv_out.bias", + "decoder.conv_norm_out.weight": "decoder.norm_out.weight", + "decoder.conv_norm_out.bias": "decoder.norm_out.bias", + "quant_conv.weight": "quant_conv.weight", + "quant_conv.bias": "quant_conv.bias", + "post_quant_conv.weight": "post_quant_conv.weight", + "post_quant_conv.bias": "post_quant_conv.bias", + }, + "openclip": { + "layers": { + "text_model.embeddings.position_embedding.weight": "positional_embedding", + "text_model.embeddings.token_embedding.weight": "token_embedding.weight", + "text_model.final_layer_norm.weight": "ln_final.weight", + "text_model.final_layer_norm.bias": "ln_final.bias", + "text_projection.weight": "text_projection", + }, + "transformer": { + "text_model.encoder.layers.": "resblocks.", + "layer_norm1": "ln_1", + "layer_norm2": "ln_2", + ".fc1.": ".c_fc.", + ".fc2.": ".c_proj.", + ".self_attn": ".attn", + "transformer.text_model.final_layer_norm.": "ln_final.", + "transformer.text_model.embeddings.token_embedding.weight": "token_embedding.weight", + "transformer.text_model.embeddings.position_embedding.weight": "positional_embedding", + }, + }, +} + +SD_2_TEXT_ENCODER_KEYS_TO_IGNORE = [ + "cond_stage_model.model.transformer.resblocks.23.attn.in_proj_bias", + "cond_stage_model.model.transformer.resblocks.23.attn.in_proj_weight", + "cond_stage_model.model.transformer.resblocks.23.attn.out_proj.bias", + "cond_stage_model.model.transformer.resblocks.23.attn.out_proj.weight", + "cond_stage_model.model.transformer.resblocks.23.ln_1.bias", + "cond_stage_model.model.transformer.resblocks.23.ln_1.weight", + "cond_stage_model.model.transformer.resblocks.23.ln_2.bias", + "cond_stage_model.model.transformer.resblocks.23.ln_2.weight", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.bias", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_fc.weight", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.bias", + "cond_stage_model.model.transformer.resblocks.23.mlp.c_proj.weight", + "cond_stage_model.model.text_projection", +] + +# To support legacy scheduler_type argument +SCHEDULER_DEFAULT_CONFIG = { + "beta_schedule": "scaled_linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "interpolation_type": "linear", + "num_train_timesteps": 1000, + "prediction_type": 
"epsilon", + "sample_max_value": 1.0, + "set_alpha_to_one": False, + "skip_prk_steps": True, + "steps_offset": 1, + "timestep_spacing": "leading", +} + +LDM_VAE_KEYS = ["first_stage_model.", "vae."] +LDM_VAE_DEFAULT_SCALING_FACTOR = 0.18215 +PLAYGROUND_VAE_SCALING_FACTOR = 0.5 +LDM_UNET_KEY = "model.diffusion_model." +LDM_CONTROLNET_KEY = "control_model." +LDM_CLIP_PREFIX_TO_REMOVE = [ + "cond_stage_model.transformer.", + "conditioner.embedders.0.transformer.", +] +LDM_OPEN_CLIP_TEXT_PROJECTION_DIM = 1024 +SCHEDULER_LEGACY_KWARGS = ["prediction_type", "scheduler_type"] + +VALID_URL_PREFIXES = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"] + + +class SingleFileComponentError(Exception): + def __init__(self, message=None): + self.message = message + super().__init__(self.message) + + +def is_valid_url(url): + result = urlparse(url) + if result.scheme and result.netloc: + return True + + return False + + +def _extract_repo_id_and_weights_name(pretrained_model_name_or_path): + if not is_valid_url(pretrained_model_name_or_path): + raise ValueError("Invalid `pretrained_model_name_or_path` provided. Please set it to a valid URL.") + + pattern = r"([^/]+)/([^/]+)/(?:blob/main/)?(.+)" + weights_name = None + repo_id = (None,) + for prefix in VALID_URL_PREFIXES: + pretrained_model_name_or_path = pretrained_model_name_or_path.replace(prefix, "") + match = re.match(pattern, pretrained_model_name_or_path) + if not match: + logger.warning("Unable to identify the repo_id and weights_name from the provided URL.") + return repo_id, weights_name + + repo_id = f"{match.group(1)}/{match.group(2)}" + weights_name = match.group(3) + + return repo_id, weights_name + + +def _is_model_weights_in_cached_folder(cached_folder, name): + pretrained_model_name_or_path = os.path.join(cached_folder, name) + weights_exist = False + + for weights_name in [WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME]: + if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): + weights_exist = True + + return weights_exist + + +def _is_legacy_scheduler_kwargs(kwargs): + return any(k in SCHEDULER_LEGACY_KWARGS for k in kwargs.keys()) + + +def load_single_file_checkpoint( + pretrained_model_link_or_path, + force_download=False, + proxies=None, + token=None, + cache_dir=None, + local_files_only=None, + revision=None, +): + if os.path.isfile(pretrained_model_link_or_path): + pretrained_model_link_or_path = pretrained_model_link_or_path + + else: + repo_id, weights_name = _extract_repo_id_and_weights_name(pretrained_model_link_or_path) + pretrained_model_link_or_path = _get_model_file( + repo_id, + weights_name=weights_name, + force_download=force_download, + cache_dir=cache_dir, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + ) + + checkpoint = load_state_dict(pretrained_model_link_or_path) + + # some checkpoints contain the model state dict under a "state_dict" key + while "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + return checkpoint + + +def fetch_original_config(original_config_file, local_files_only=False): + if os.path.isfile(original_config_file): + with open(original_config_file, "r") as fp: + original_config_file = fp.read() + + elif is_valid_url(original_config_file): + if local_files_only: + raise ValueError( + "`local_files_only` is set to True, but a URL was provided as `original_config_file`. " + "Please provide a valid local file path." 
+ ) + + original_config_file = BytesIO(requests.get(original_config_file).content) + + else: + raise ValueError("Invalid `original_config_file` provided. Please set it to a valid file path or URL.") + + original_config = yaml.safe_load(original_config_file) + + return original_config + + +def is_clip_model(checkpoint): + if CHECKPOINT_KEY_NAMES["clip"] in checkpoint: + return True + + return False + + +def is_clip_sdxl_model(checkpoint): + if CHECKPOINT_KEY_NAMES["clip_sdxl"] in checkpoint: + return True + + return False + + +def is_clip_sd3_model(checkpoint): + if CHECKPOINT_KEY_NAMES["clip_sd3"] in checkpoint: + return True + + return False + + +def is_open_clip_model(checkpoint): + if CHECKPOINT_KEY_NAMES["open_clip"] in checkpoint: + return True + + return False + + +def is_open_clip_sdxl_model(checkpoint): + if CHECKPOINT_KEY_NAMES["open_clip_sdxl"] in checkpoint: + return True + + return False + + +def is_open_clip_sd3_model(checkpoint): + if CHECKPOINT_KEY_NAMES["open_clip_sd3"] in checkpoint: + return True + + return False + + +def is_open_clip_sdxl_refiner_model(checkpoint): + if CHECKPOINT_KEY_NAMES["open_clip_sdxl_refiner"] in checkpoint: + return True + + return False + + +def is_clip_model_in_single_file(class_obj, checkpoint): + is_clip_in_checkpoint = any( + [ + is_clip_model(checkpoint), + is_clip_sd3_model(checkpoint), + is_open_clip_model(checkpoint), + is_open_clip_sdxl_model(checkpoint), + is_open_clip_sdxl_refiner_model(checkpoint), + is_open_clip_sd3_model(checkpoint), + ] + ) + if ( + class_obj.__name__ == "CLIPTextModel" or class_obj.__name__ == "CLIPTextModelWithProjection" + ) and is_clip_in_checkpoint: + return True + + return False + + +def infer_diffusers_model_type(checkpoint): + if ( + CHECKPOINT_KEY_NAMES["inpainting"] in checkpoint + and checkpoint[CHECKPOINT_KEY_NAMES["inpainting"]].shape[1] == 9 + ): + if CHECKPOINT_KEY_NAMES["v2"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["v2"]].shape[-1] == 1024: + model_type = "inpainting_v2" + elif CHECKPOINT_KEY_NAMES["xl_base"] in checkpoint: + model_type = "xl_inpaint" + else: + model_type = "inpainting" + + elif CHECKPOINT_KEY_NAMES["v2"] in checkpoint and checkpoint[CHECKPOINT_KEY_NAMES["v2"]].shape[-1] == 1024: + model_type = "v2" + + elif CHECKPOINT_KEY_NAMES["playground-v2-5"] in checkpoint: + model_type = "playground-v2-5" + + elif CHECKPOINT_KEY_NAMES["xl_base"] in checkpoint: + model_type = "xl_base" + + elif CHECKPOINT_KEY_NAMES["xl_refiner"] in checkpoint: + model_type = "xl_refiner" + + elif CHECKPOINT_KEY_NAMES["upscale"] in checkpoint: + model_type = "upscale" + + elif CHECKPOINT_KEY_NAMES["controlnet"] in checkpoint: + model_type = "controlnet" + + elif ( + CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"] in checkpoint + and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"]].shape[0] == 1536 + ): + model_type = "stable_cascade_stage_c_lite" + + elif ( + CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"] in checkpoint + and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_c"]].shape[0] == 2048 + ): + model_type = "stable_cascade_stage_c" + + elif ( + CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"] in checkpoint + and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"]].shape[-1] == 576 + ): + model_type = "stable_cascade_stage_b_lite" + + elif ( + CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"] in checkpoint + and checkpoint[CHECKPOINT_KEY_NAMES["stable_cascade_stage_b"]].shape[-1] == 640 + ): + model_type = "stable_cascade_stage_b" + + elif CHECKPOINT_KEY_NAMES["sd3"] in checkpoint: 
+ model_type = "sd3" + + elif CHECKPOINT_KEY_NAMES["animatediff"] in checkpoint: + if CHECKPOINT_KEY_NAMES["animatediff_scribble"] in checkpoint: + model_type = "animatediff_scribble" + + elif CHECKPOINT_KEY_NAMES["animatediff_rgb"] in checkpoint: + model_type = "animatediff_rgb" + + elif CHECKPOINT_KEY_NAMES["animatediff_v2"] in checkpoint: + model_type = "animatediff_v2" + + elif checkpoint[CHECKPOINT_KEY_NAMES["animatediff_sdxl_beta"]].shape[-1] == 320: + model_type = "animatediff_sdxl_beta" + + elif checkpoint[CHECKPOINT_KEY_NAMES["animatediff"]].shape[1] == 24: + model_type = "animatediff_v1" + + else: + model_type = "animatediff_v3" + + elif any(key in checkpoint for key in CHECKPOINT_KEY_NAMES["flux"]): + if any( + g in checkpoint for g in ["guidance_in.in_layer.bias", "model.diffusion_model.guidance_in.in_layer.bias"] + ): + model_type = "flux-dev" + else: + model_type = "flux-schnell" + else: + model_type = "v1" + + return model_type + + +def fetch_diffusers_config(checkpoint): + model_type = infer_diffusers_model_type(checkpoint) + model_path = DIFFUSERS_DEFAULT_PIPELINE_PATHS[model_type] + + return model_path + + +def set_image_size(checkpoint, image_size=None): + if image_size: + return image_size + + model_type = infer_diffusers_model_type(checkpoint) + image_size = DIFFUSERS_TO_LDM_DEFAULT_IMAGE_SIZE_MAP[model_type] + + return image_size + + +# Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.conv_attn_to_linear +def conv_attn_to_linear(checkpoint): + keys = list(checkpoint.keys()) + attn_keys = ["query.weight", "key.weight", "value.weight"] + for key in keys: + if ".".join(key.split(".")[-2:]) in attn_keys: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0, 0] + elif "proj_attn.weight" in key: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0] + + +def create_unet_diffusers_config_from_ldm( + original_config, checkpoint, image_size=None, upcast_attention=None, num_in_channels=None +): + """ + Creates a config for the diffusers based on the config of the LDM model. + """ + if image_size is not None: + deprecation_message = ( + "Configuring UNet2DConditionModel with the `image_size` argument to `from_single_file`" + "is deprecated and will be ignored in future versions." + ) + deprecate("image_size", "1.0.0", deprecation_message) + + image_size = set_image_size(checkpoint, image_size=image_size) + + if ( + "unet_config" in original_config["model"]["params"] + and original_config["model"]["params"]["unet_config"] is not None + ): + unet_params = original_config["model"]["params"]["unet_config"]["params"] + else: + unet_params = original_config["model"]["params"]["network_config"]["params"] + + if num_in_channels is not None: + deprecation_message = ( + "Configuring UNet2DConditionModel with the `num_in_channels` argument to `from_single_file`" + "is deprecated and will be ignored in future versions." 
+ ) + deprecate("image_size", "1.0.0", deprecation_message) + in_channels = num_in_channels + else: + in_channels = unet_params["in_channels"] + + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] + + down_block_types = [] + resolution = 1 + for i in range(len(block_out_channels)): + block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" + down_block_types.append(block_type) + if i != len(block_out_channels) - 1: + resolution *= 2 + + up_block_types = [] + for i in range(len(block_out_channels)): + block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" + up_block_types.append(block_type) + resolution //= 2 + + if unet_params["transformer_depth"] is not None: + transformer_layers_per_block = ( + unet_params["transformer_depth"] + if isinstance(unet_params["transformer_depth"], int) + else list(unet_params["transformer_depth"]) + ) + else: + transformer_layers_per_block = 1 + + vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) + + head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None + use_linear_projection = ( + unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False + ) + if use_linear_projection: + # stable diffusion 2-base-512 and 2-768 + if head_dim is None: + head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"] + head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])] + + class_embed_type = None + addition_embed_type = None + addition_time_embed_dim = None + projection_class_embeddings_input_dim = None + context_dim = None + + if unet_params["context_dim"] is not None: + context_dim = ( + unet_params["context_dim"] + if isinstance(unet_params["context_dim"], int) + else unet_params["context_dim"][0] + ) + + if "num_classes" in unet_params: + if unet_params["num_classes"] == "sequential": + if context_dim in [2048, 1280]: + # SDXL + addition_embed_type = "text_time" + addition_time_embed_dim = 256 + else: + class_embed_type = "projection" + assert "adm_in_channels" in unet_params + projection_class_embeddings_input_dim = unet_params["adm_in_channels"] + + config = { + "sample_size": image_size // vae_scale_factor, + "in_channels": in_channels, + "down_block_types": down_block_types, + "block_out_channels": block_out_channels, + "layers_per_block": unet_params["num_res_blocks"], + "cross_attention_dim": context_dim, + "attention_head_dim": head_dim, + "use_linear_projection": use_linear_projection, + "class_embed_type": class_embed_type, + "addition_embed_type": addition_embed_type, + "addition_time_embed_dim": addition_time_embed_dim, + "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, + "transformer_layers_per_block": transformer_layers_per_block, + } + + if upcast_attention is not None: + deprecation_message = ( + "Configuring UNet2DConditionModel with the `upcast_attention` argument to `from_single_file`" + "is deprecated and will be ignored in future versions." 
+ ) + deprecate("image_size", "1.0.0", deprecation_message) + config["upcast_attention"] = upcast_attention + + if "disable_self_attentions" in unet_params: + config["only_cross_attention"] = unet_params["disable_self_attentions"] + + if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int): + config["num_class_embeds"] = unet_params["num_classes"] + + config["out_channels"] = unet_params["out_channels"] + config["up_block_types"] = up_block_types + + return config + + +def create_controlnet_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, **kwargs): + if image_size is not None: + deprecation_message = ( + "Configuring ControlNetModel with the `image_size` argument" + "is deprecated and will be ignored in future versions." + ) + deprecate("image_size", "1.0.0", deprecation_message) + + image_size = set_image_size(checkpoint, image_size=image_size) + + unet_params = original_config["model"]["params"]["control_stage_config"]["params"] + diffusers_unet_config = create_unet_diffusers_config_from_ldm(original_config, image_size=image_size) + + controlnet_config = { + "conditioning_channels": unet_params["hint_channels"], + "in_channels": diffusers_unet_config["in_channels"], + "down_block_types": diffusers_unet_config["down_block_types"], + "block_out_channels": diffusers_unet_config["block_out_channels"], + "layers_per_block": diffusers_unet_config["layers_per_block"], + "cross_attention_dim": diffusers_unet_config["cross_attention_dim"], + "attention_head_dim": diffusers_unet_config["attention_head_dim"], + "use_linear_projection": diffusers_unet_config["use_linear_projection"], + "class_embed_type": diffusers_unet_config["class_embed_type"], + "addition_embed_type": diffusers_unet_config["addition_embed_type"], + "addition_time_embed_dim": diffusers_unet_config["addition_time_embed_dim"], + "projection_class_embeddings_input_dim": diffusers_unet_config["projection_class_embeddings_input_dim"], + "transformer_layers_per_block": diffusers_unet_config["transformer_layers_per_block"], + } + + return controlnet_config + + +def create_vae_diffusers_config_from_ldm(original_config, checkpoint, image_size=None, scaling_factor=None): + """ + Creates a config for the diffusers based on the config of the LDM model. + """ + if image_size is not None: + deprecation_message = ( + "Configuring AutoencoderKL with the `image_size` argument" + "is deprecated and will be ignored in future versions." 
+ ) + deprecate("image_size", "1.0.0", deprecation_message) + + image_size = set_image_size(checkpoint, image_size=image_size) + + if "edm_mean" in checkpoint and "edm_std" in checkpoint: + latents_mean = checkpoint["edm_mean"] + latents_std = checkpoint["edm_std"] + else: + latents_mean = None + latents_std = None + + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + if (scaling_factor is None) and (latents_mean is not None) and (latents_std is not None): + scaling_factor = PLAYGROUND_VAE_SCALING_FACTOR + + elif (scaling_factor is None) and ("scale_factor" in original_config["model"]["params"]): + scaling_factor = original_config["model"]["params"]["scale_factor"] + + elif scaling_factor is None: + scaling_factor = LDM_VAE_DEFAULT_SCALING_FACTOR + + block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] + down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) + up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) + + config = { + "sample_size": image_size, + "in_channels": vae_params["in_channels"], + "out_channels": vae_params["out_ch"], + "down_block_types": down_block_types, + "up_block_types": up_block_types, + "block_out_channels": block_out_channels, + "latent_channels": vae_params["z_channels"], + "layers_per_block": vae_params["num_res_blocks"], + "scaling_factor": scaling_factor, + } + if latents_mean is not None and latents_std is not None: + config.update({"latents_mean": latents_mean, "latents_std": latents_std}) + + return config + + +def update_unet_resnet_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping=None): + for ldm_key in ldm_keys: + diffusers_key = ( + ldm_key.replace("in_layers.0", "norm1") + .replace("in_layers.2", "conv1") + .replace("out_layers.0", "norm2") + .replace("out_layers.3", "conv2") + .replace("emb_layers.1", "time_emb_proj") + .replace("skip_connection", "conv_shortcut") + ) + if mapping: + diffusers_key = diffusers_key.replace(mapping["old"], mapping["new"]) + new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) + + +def update_unet_attention_ldm_to_diffusers(ldm_keys, new_checkpoint, checkpoint, mapping): + for ldm_key in ldm_keys: + diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]) + new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) + + +def update_vae_resnet_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): + for ldm_key in keys: + diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]).replace("nin_shortcut", "conv_shortcut") + new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) + + +def update_vae_attentions_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): + for ldm_key in keys: + diffusers_key = ( + ldm_key.replace(mapping["old"], mapping["new"]) + .replace("norm.weight", "group_norm.weight") + .replace("norm.bias", "group_norm.bias") + .replace("q.weight", "to_q.weight") + .replace("q.bias", "to_q.bias") + .replace("k.weight", "to_k.weight") + .replace("k.bias", "to_k.bias") + .replace("v.weight", "to_v.weight") + .replace("v.bias", "to_v.bias") + .replace("proj_out.weight", "to_out.0.weight") + .replace("proj_out.bias", "to_out.0.bias") + ) + new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) + + # proj_attn.weight has to be converted from conv 1D to linear + shape = new_checkpoint[diffusers_key].shape + + if len(shape) == 3: + new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0] + elif len(shape) == 4: + new_checkpoint[diffusers_key] = 
new_checkpoint[diffusers_key][:, :, 0, 0] + + +def convert_stable_cascade_unet_single_file_to_diffusers(checkpoint, **kwargs): + is_stage_c = "clip_txt_mapper.weight" in checkpoint + + if is_stage_c: + state_dict = {} + for key in checkpoint.keys(): + if key.endswith("in_proj_weight"): + weights = checkpoint[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] + state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] + state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] + elif key.endswith("in_proj_bias"): + weights = checkpoint[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] + state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] + state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] + elif key.endswith("out_proj.weight"): + weights = checkpoint[key] + state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights + elif key.endswith("out_proj.bias"): + weights = checkpoint[key] + state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights + else: + state_dict[key] = checkpoint[key] + else: + state_dict = {} + for key in checkpoint.keys(): + if key.endswith("in_proj_weight"): + weights = checkpoint[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] + state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] + state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] + elif key.endswith("in_proj_bias"): + weights = checkpoint[key].chunk(3, 0) + state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] + state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] + state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] + elif key.endswith("out_proj.weight"): + weights = checkpoint[key] + state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights + elif key.endswith("out_proj.bias"): + weights = checkpoint[key] + state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights + # rename clip_mapper to clip_txt_pooled_mapper + elif key.endswith("clip_mapper.weight"): + weights = checkpoint[key] + state_dict[key.replace("clip_mapper.weight", "clip_txt_pooled_mapper.weight")] = weights + elif key.endswith("clip_mapper.bias"): + weights = checkpoint[key] + state_dict[key.replace("clip_mapper.bias", "clip_txt_pooled_mapper.bias")] = weights + else: + state_dict[key] = checkpoint[key] + + return state_dict + + +def convert_ldm_unet_checkpoint(checkpoint, config, extract_ema=False, **kwargs): + """ + Takes a state dict and a config, and returns a converted checkpoint. + """ + # extract state_dict for UNet + unet_state_dict = {} + keys = list(checkpoint.keys()) + unet_key = LDM_UNET_KEY + + # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA + if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: + logger.warning("Checkpoint has both EMA and non-EMA weights.") + logger.warning( + "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" + " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." + ) + for key in keys: + if key.startswith("model.diffusion_model"): + flat_ema_key = "model_ema." 
+ "".join(key.split(".")[1:]) + unet_state_dict[key.replace(unet_key, "")] = checkpoint.get(flat_ema_key) + else: + if sum(k.startswith("model_ema") for k in keys) > 100: + logger.warning( + "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" + " weights (usually better for inference), please make sure to add the `--extract_ema` flag." + ) + for key in keys: + if key.startswith(unet_key): + unet_state_dict[key.replace(unet_key, "")] = checkpoint.get(key) + + new_checkpoint = {} + ldm_unet_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["layers"] + for diffusers_key, ldm_key in ldm_unet_keys.items(): + if ldm_key not in unet_state_dict: + continue + new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] + + if ("class_embed_type" in config) and (config["class_embed_type"] in ["timestep", "projection"]): + class_embed_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["class_embed_type"] + for diffusers_key, ldm_key in class_embed_keys.items(): + new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] + + if ("addition_embed_type" in config) and (config["addition_embed_type"] == "text_time"): + addition_embed_keys = DIFFUSERS_TO_LDM_MAPPING["unet"]["addition_embed_type"] + for diffusers_key, ldm_key in addition_embed_keys.items(): + new_checkpoint[diffusers_key] = unet_state_dict[ldm_key] + + # Relevant to StableDiffusionUpscalePipeline + if "num_class_embeds" in config: + if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict): + new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"] + + # Retrieves the keys for the input blocks only + num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) + input_blocks = { + layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] + for layer_id in range(num_input_blocks) + } + + # Retrieves the keys for the middle blocks only + num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) + middle_blocks = { + layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] + for layer_id in range(num_middle_blocks) + } + + # Retrieves the keys for the output blocks only + num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) + output_blocks = { + layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] + for layer_id in range(num_output_blocks) + } + + # Down blocks + for i in range(1, num_input_blocks): + block_id = (i - 1) // (config["layers_per_block"] + 1) + layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) + + resnets = [ + key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + ] + update_unet_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + unet_state_dict, + {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}, + ) + + if f"input_blocks.{i}.0.op.weight" in unet_state_dict: + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.get( + f"input_blocks.{i}.0.op.weight" + ) + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.get( + f"input_blocks.{i}.0.op.bias" + ) + + attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] + if attentions: + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + unet_state_dict, + 
{"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}, + ) + + # Mid blocks + for key in middle_blocks.keys(): + diffusers_key = max(key - 1, 0) + if key % 2 == 0: + update_unet_resnet_ldm_to_diffusers( + middle_blocks[key], + new_checkpoint, + unet_state_dict, + mapping={"old": f"middle_block.{key}", "new": f"mid_block.resnets.{diffusers_key}"}, + ) + else: + update_unet_attention_ldm_to_diffusers( + middle_blocks[key], + new_checkpoint, + unet_state_dict, + mapping={"old": f"middle_block.{key}", "new": f"mid_block.attentions.{diffusers_key}"}, + ) + + # Up Blocks + for i in range(num_output_blocks): + block_id = i // (config["layers_per_block"] + 1) + layer_in_block_id = i % (config["layers_per_block"] + 1) + + resnets = [ + key for key in output_blocks[i] if f"output_blocks.{i}.0" in key and f"output_blocks.{i}.0.op" not in key + ] + update_unet_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + unet_state_dict, + {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}, + ) + + attentions = [ + key for key in output_blocks[i] if f"output_blocks.{i}.1" in key and f"output_blocks.{i}.1.conv" not in key + ] + if attentions: + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + unet_state_dict, + {"old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}"}, + ) + + if f"output_blocks.{i}.1.conv.weight" in unet_state_dict: + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ + f"output_blocks.{i}.1.conv.weight" + ] + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ + f"output_blocks.{i}.1.conv.bias" + ] + if f"output_blocks.{i}.2.conv.weight" in unet_state_dict: + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ + f"output_blocks.{i}.2.conv.weight" + ] + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ + f"output_blocks.{i}.2.conv.bias" + ] + + return new_checkpoint + + +def convert_controlnet_checkpoint( + checkpoint, + config, + **kwargs, +): + # Some controlnet ckpt files are distributed independently from the rest of the + # model components i.e. 
https://huggingface.co/thibaud/controlnet-sd21/ + if "time_embed.0.weight" in checkpoint: + controlnet_state_dict = checkpoint + + else: + controlnet_state_dict = {} + keys = list(checkpoint.keys()) + controlnet_key = LDM_CONTROLNET_KEY + for key in keys: + if key.startswith(controlnet_key): + controlnet_state_dict[key.replace(controlnet_key, "")] = checkpoint.get(key) + + new_checkpoint = {} + ldm_controlnet_keys = DIFFUSERS_TO_LDM_MAPPING["controlnet"]["layers"] + for diffusers_key, ldm_key in ldm_controlnet_keys.items(): + if ldm_key not in controlnet_state_dict: + continue + new_checkpoint[diffusers_key] = controlnet_state_dict[ldm_key] + + # Retrieves the keys for the input blocks only + num_input_blocks = len( + {".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "input_blocks" in layer} + ) + input_blocks = { + layer_id: [key for key in controlnet_state_dict if f"input_blocks.{layer_id}" in key] + for layer_id in range(num_input_blocks) + } + + # Down blocks + for i in range(1, num_input_blocks): + block_id = (i - 1) // (config["layers_per_block"] + 1) + layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) + + resnets = [ + key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + ] + update_unet_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + controlnet_state_dict, + {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}, + ) + + if f"input_blocks.{i}.0.op.weight" in controlnet_state_dict: + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = controlnet_state_dict.get( + f"input_blocks.{i}.0.op.weight" + ) + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = controlnet_state_dict.get( + f"input_blocks.{i}.0.op.bias" + ) + + attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] + if attentions: + update_unet_attention_ldm_to_diffusers( + attentions, + new_checkpoint, + controlnet_state_dict, + {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}, + ) + + # controlnet down blocks + for i in range(num_input_blocks): + new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = controlnet_state_dict.get(f"zero_convs.{i}.0.weight") + new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = controlnet_state_dict.get(f"zero_convs.{i}.0.bias") + + # Retrieves the keys for the middle blocks only + num_middle_blocks = len( + {".".join(layer.split(".")[:2]) for layer in controlnet_state_dict if "middle_block" in layer} + ) + middle_blocks = { + layer_id: [key for key in controlnet_state_dict if f"middle_block.{layer_id}" in key] + for layer_id in range(num_middle_blocks) + } + + # Mid blocks + for key in middle_blocks.keys(): + diffusers_key = max(key - 1, 0) + if key % 2 == 0: + update_unet_resnet_ldm_to_diffusers( + middle_blocks[key], + new_checkpoint, + controlnet_state_dict, + mapping={"old": f"middle_block.{key}", "new": f"mid_block.resnets.{diffusers_key}"}, + ) + else: + update_unet_attention_ldm_to_diffusers( + middle_blocks[key], + new_checkpoint, + controlnet_state_dict, + mapping={"old": f"middle_block.{key}", "new": f"mid_block.attentions.{diffusers_key}"}, + ) + + # mid block + new_checkpoint["controlnet_mid_block.weight"] = controlnet_state_dict.get("middle_block_out.0.weight") + new_checkpoint["controlnet_mid_block.bias"] = controlnet_state_dict.get("middle_block_out.0.bias") + + # controlnet cond embedding blocks + cond_embedding_blocks = { + 
".".join(layer.split(".")[:2]) + for layer in controlnet_state_dict + if "input_hint_block" in layer and ("input_hint_block.0" not in layer) and ("input_hint_block.14" not in layer) + } + num_cond_embedding_blocks = len(cond_embedding_blocks) + + for idx in range(1, num_cond_embedding_blocks + 1): + diffusers_idx = idx - 1 + cond_block_id = 2 * idx + + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_idx}.weight"] = controlnet_state_dict.get( + f"input_hint_block.{cond_block_id}.weight" + ) + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_idx}.bias"] = controlnet_state_dict.get( + f"input_hint_block.{cond_block_id}.bias" + ) + + return new_checkpoint + + +def convert_ldm_vae_checkpoint(checkpoint, config): + # extract state dict for VAE + # remove the LDM_VAE_KEY prefix from the ldm checkpoint keys so that it is easier to map them to diffusers keys + vae_state_dict = {} + keys = list(checkpoint.keys()) + vae_key = "" + for ldm_vae_key in LDM_VAE_KEYS: + if any(k.startswith(ldm_vae_key) for k in keys): + vae_key = ldm_vae_key + + for key in keys: + if key.startswith(vae_key): + vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) + + new_checkpoint = {} + vae_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING["vae"] + for diffusers_key, ldm_key in vae_diffusers_ldm_map.items(): + if ldm_key not in vae_state_dict: + continue + new_checkpoint[diffusers_key] = vae_state_dict[ldm_key] + + # Retrieves the keys for the encoder down blocks only + num_down_blocks = len(config["down_block_types"]) + down_blocks = { + layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) + } + + for i in range(num_down_blocks): + resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}, + ) + if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.get( + f"encoder.down.{i}.downsample.conv.weight" + ) + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.get( + f"encoder.down.{i}.downsample.conv.bias" + ) + + mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, + ) + + mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] + update_vae_attentions_ldm_to_diffusers( + mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} + ) + + # Retrieves the keys for the decoder up blocks only + num_up_blocks = len(config["up_block_types"]) + up_blocks = { + layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) + } + + for i in range(num_up_blocks): + block_id = num_up_blocks - 1 - i + resnets = [ + key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key + ] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}, + ) + if 
f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.weight" + ] + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.bias" + ] + + mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] + update_vae_resnet_ldm_to_diffusers( + resnets, + new_checkpoint, + vae_state_dict, + mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, + ) + + mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] + update_vae_attentions_ldm_to_diffusers( + mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} + ) + conv_attn_to_linear(new_checkpoint) + + return new_checkpoint + + +def convert_ldm_clip_checkpoint(checkpoint, remove_prefix=None): + keys = list(checkpoint.keys()) + text_model_dict = {} + + remove_prefixes = [] + remove_prefixes.extend(LDM_CLIP_PREFIX_TO_REMOVE) + if remove_prefix: + remove_prefixes.append(remove_prefix) + + for key in keys: + for prefix in remove_prefixes: + if key.startswith(prefix): + diffusers_key = key.replace(prefix, "") + text_model_dict[diffusers_key] = checkpoint.get(key) + + return text_model_dict + + +def convert_open_clip_checkpoint( + text_model, + checkpoint, + prefix="cond_stage_model.model.", +): + text_model_dict = {} + text_proj_key = prefix + "text_projection" + + if text_proj_key in checkpoint: + text_proj_dim = int(checkpoint[text_proj_key].shape[0]) + elif hasattr(text_model.config, "projection_dim"): + text_proj_dim = text_model.config.projection_dim + else: + text_proj_dim = LDM_OPEN_CLIP_TEXT_PROJECTION_DIM + + keys = list(checkpoint.keys()) + keys_to_ignore = SD_2_TEXT_ENCODER_KEYS_TO_IGNORE + + openclip_diffusers_ldm_map = DIFFUSERS_TO_LDM_MAPPING["openclip"]["layers"] + for diffusers_key, ldm_key in openclip_diffusers_ldm_map.items(): + ldm_key = prefix + ldm_key + if ldm_key not in checkpoint: + continue + if ldm_key in keys_to_ignore: + continue + if ldm_key.endswith("text_projection"): + text_model_dict[diffusers_key] = checkpoint[ldm_key].T.contiguous() + else: + text_model_dict[diffusers_key] = checkpoint[ldm_key] + + for key in keys: + if key in keys_to_ignore: + continue + + if not key.startswith(prefix + "transformer."): + continue + + diffusers_key = key.replace(prefix + "transformer.", "") + transformer_diffusers_to_ldm_map = DIFFUSERS_TO_LDM_MAPPING["openclip"]["transformer"] + for new_key, old_key in transformer_diffusers_to_ldm_map.items(): + diffusers_key = ( + diffusers_key.replace(old_key, new_key).replace(".in_proj_weight", "").replace(".in_proj_bias", "") + ) + + if key.endswith(".in_proj_weight"): + weight_value = checkpoint.get(key) + + text_model_dict[diffusers_key + ".q_proj.weight"] = weight_value[:text_proj_dim, :].clone().detach() + text_model_dict[diffusers_key + ".k_proj.weight"] = ( + weight_value[text_proj_dim : text_proj_dim * 2, :].clone().detach() + ) + text_model_dict[diffusers_key + ".v_proj.weight"] = weight_value[text_proj_dim * 2 :, :].clone().detach() + + elif key.endswith(".in_proj_bias"): + weight_value = checkpoint.get(key) + text_model_dict[diffusers_key + ".q_proj.bias"] = weight_value[:text_proj_dim].clone().detach() + text_model_dict[diffusers_key + 
".k_proj.bias"] = ( + weight_value[text_proj_dim : text_proj_dim * 2].clone().detach() + ) + text_model_dict[diffusers_key + ".v_proj.bias"] = weight_value[text_proj_dim * 2 :].clone().detach() + else: + text_model_dict[diffusers_key] = checkpoint.get(key) + + return text_model_dict + + +def create_diffusers_clip_model_from_ldm( + cls, + checkpoint, + subfolder="", + config=None, + torch_dtype=None, + local_files_only=None, + is_legacy_loading=False, +): + if config: + config = {"pretrained_model_name_or_path": config} + else: + config = fetch_diffusers_config(checkpoint) + + # For backwards compatibility + # Older versions of `from_single_file` expected CLIP configs to be placed in their original transformers model repo + # in the cache_dir, rather than in a subfolder of the Diffusers model + if is_legacy_loading: + logger.warning( + ( + "Detected legacy CLIP loading behavior. Please run `from_single_file` with `local_files_only=False once to update " + "the local cache directory with the necessary CLIP model config files. " + "Attempting to load CLIP model from legacy cache directory." + ) + ) + + if is_clip_model(checkpoint) or is_clip_sdxl_model(checkpoint): + clip_config = "openai/clip-vit-large-patch14" + config["pretrained_model_name_or_path"] = clip_config + subfolder = "" + + elif is_open_clip_model(checkpoint): + clip_config = "stabilityai/stable-diffusion-2" + config["pretrained_model_name_or_path"] = clip_config + subfolder = "text_encoder" + + else: + clip_config = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + config["pretrained_model_name_or_path"] = clip_config + subfolder = "" + + model_config = cls.config_class.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + model = cls(model_config) + + position_embedding_dim = model.text_model.embeddings.position_embedding.weight.shape[-1] + + if is_clip_model(checkpoint): + diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint) + + elif ( + is_clip_sdxl_model(checkpoint) + and checkpoint[CHECKPOINT_KEY_NAMES["clip_sdxl"]].shape[-1] == position_embedding_dim + ): + diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint) + + elif ( + is_clip_sd3_model(checkpoint) + and checkpoint[CHECKPOINT_KEY_NAMES["clip_sd3"]].shape[-1] == position_embedding_dim + ): + diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, "text_encoders.clip_l.transformer.") + diffusers_format_checkpoint["text_projection.weight"] = torch.eye(position_embedding_dim) + + elif is_open_clip_model(checkpoint): + prefix = "cond_stage_model.model." + diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) + + elif ( + is_open_clip_sdxl_model(checkpoint) + and checkpoint[CHECKPOINT_KEY_NAMES["open_clip_sdxl"]].shape[-1] == position_embedding_dim + ): + prefix = "conditioner.embedders.1.model." + diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) + + elif is_open_clip_sdxl_refiner_model(checkpoint): + prefix = "conditioner.embedders.0.model." 
+ diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix) + + elif ( + is_open_clip_sd3_model(checkpoint) + and checkpoint[CHECKPOINT_KEY_NAMES["open_clip_sd3"]].shape[-1] == position_embedding_dim + ): + diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, "text_encoders.clip_g.transformer.") + + else: + raise ValueError("The provided checkpoint does not seem to contain a valid CLIP model.") + + if is_accelerate_available(): + unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) + else: + _, unexpected_keys = model.load_state_dict(diffusers_format_checkpoint, strict=False) + + if model._keys_to_ignore_on_load_unexpected is not None: + for pat in model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + + if torch_dtype is not None: + model.to(torch_dtype) + + model.eval() + + return model + + +def _legacy_load_scheduler( + cls, + checkpoint, + component_name, + original_config=None, + **kwargs, +): + scheduler_type = kwargs.get("scheduler_type", None) + prediction_type = kwargs.get("prediction_type", None) + + if scheduler_type is not None: + deprecation_message = ( + "Please pass an instance of a Scheduler object directly to the `scheduler` argument in `from_single_file`\n\n" + "Example:\n\n" + "from diffusers import StableDiffusionPipeline, DDIMScheduler\n\n" + "scheduler = DDIMScheduler()\n" + "pipe = StableDiffusionPipeline.from_single_file(, scheduler=scheduler)\n" + ) + deprecate("scheduler_type", "1.0.0", deprecation_message) + + if prediction_type is not None: + deprecation_message = ( + "Please configure an instance of a Scheduler with the appropriate `prediction_type` and " + "pass the object directly to the `scheduler` argument in `from_single_file`.\n\n" + "Example:\n\n" + "from diffusers import StableDiffusionPipeline, DDIMScheduler\n\n" + 'scheduler = DDIMScheduler(prediction_type="v_prediction")\n' + "pipe = StableDiffusionPipeline.from_single_file(, scheduler=scheduler)\n" + ) + deprecate("prediction_type", "1.0.0", deprecation_message) + + scheduler_config = SCHEDULER_DEFAULT_CONFIG + model_type = infer_diffusers_model_type(checkpoint=checkpoint) + + global_step = checkpoint["global_step"] if "global_step" in checkpoint else None + + if original_config: + num_train_timesteps = getattr(original_config["model"]["params"], "timesteps", 1000) + else: + num_train_timesteps = 1000 + + scheduler_config["num_train_timesteps"] = num_train_timesteps + + if model_type == "v2": + if prediction_type is None: + # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"` # as it relies on a brittle global step parameter here + prediction_type = "epsilon" if global_step == 875000 else "v_prediction" + + else: + prediction_type = prediction_type or "epsilon" + + scheduler_config["prediction_type"] = prediction_type + + if model_type in ["xl_base", "xl_refiner"]: + scheduler_type = "euler" + elif model_type == "playground": + scheduler_type = "edm_dpm_solver_multistep" + else: + if original_config: + beta_start = original_config["model"]["params"].get("linear_start") + beta_end = original_config["model"]["params"].get("linear_end") + + else: + beta_start = 0.02 + beta_end = 0.085 + + scheduler_config["beta_start"] = 
beta_start + scheduler_config["beta_end"] = beta_end + scheduler_config["beta_schedule"] = "scaled_linear" + scheduler_config["clip_sample"] = False + scheduler_config["set_alpha_to_one"] = False + + # to deal with an edge case StableDiffusionUpscale pipeline has two schedulers + if component_name == "low_res_scheduler": + return cls.from_config( + { + "beta_end": 0.02, + "beta_schedule": "scaled_linear", + "beta_start": 0.0001, + "clip_sample": True, + "num_train_timesteps": 1000, + "prediction_type": "epsilon", + "trained_betas": None, + "variance_type": "fixed_small", + } + ) + + if scheduler_type is None: + return cls.from_config(scheduler_config) + + elif scheduler_type == "pndm": + scheduler_config["skip_prk_steps"] = True + scheduler = PNDMScheduler.from_config(scheduler_config) + + elif scheduler_type == "lms": + scheduler = LMSDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "heun": + scheduler = HeunDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "euler": + scheduler = EulerDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "euler-ancestral": + scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config) + + elif scheduler_type == "dpm": + scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config) + + elif scheduler_type == "ddim": + scheduler = DDIMScheduler.from_config(scheduler_config) + + elif scheduler_type == "edm_dpm_solver_multistep": + scheduler_config = { + "algorithm_type": "dpmsolver++", + "dynamic_thresholding_ratio": 0.995, + "euler_at_final": False, + "final_sigmas_type": "zero", + "lower_order_final": True, + "num_train_timesteps": 1000, + "prediction_type": "epsilon", + "rho": 7.0, + "sample_max_value": 1.0, + "sigma_data": 0.5, + "sigma_max": 80.0, + "sigma_min": 0.002, + "solver_order": 2, + "solver_type": "midpoint", + "thresholding": False, + } + scheduler = EDMDPMSolverMultistepScheduler(**scheduler_config) + + else: + raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") + + return scheduler + + +def _legacy_load_clip_tokenizer(cls, checkpoint, config=None, local_files_only=False): + if config: + config = {"pretrained_model_name_or_path": config} + else: + config = fetch_diffusers_config(checkpoint) + + if is_clip_model(checkpoint) or is_clip_sdxl_model(checkpoint): + clip_config = "openai/clip-vit-large-patch14" + config["pretrained_model_name_or_path"] = clip_config + subfolder = "" + + elif is_open_clip_model(checkpoint): + clip_config = "stabilityai/stable-diffusion-2" + config["pretrained_model_name_or_path"] = clip_config + subfolder = "tokenizer" + + else: + clip_config = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + config["pretrained_model_name_or_path"] = clip_config + subfolder = "" + + tokenizer = cls.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) + + return tokenizer + + +def _legacy_load_safety_checker(local_files_only, torch_dtype): + # Support for loading safety checker components using the deprecated + # `load_safety_checker` argument. 
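+    # Both components come from the `CompVis/stable-diffusion-safety-checker` repo and are
+    # returned as a dict so the caller can merge them into the pipeline's init kwargs.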
+ + from ..pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + feature_extractor = AutoImageProcessor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only, torch_dtype=torch_dtype + ) + safety_checker = StableDiffusionSafetyChecker.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only, torch_dtype=torch_dtype + ) + + return {"safety_checker": safety_checker, "feature_extractor": feature_extractor} + + +# in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; +# while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation +def swap_scale_shift(weight, dim): + shift, scale = weight.chunk(2, dim=0) + new_weight = torch.cat([scale, shift], dim=0) + return new_weight + + +def convert_sd3_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): + converted_state_dict = {} + keys = list(checkpoint.keys()) + for k in keys: + if "model.diffusion_model." in k: + checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) + + num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "joint_blocks" in k))[-1] + 1 # noqa: C401 + caption_projection_dim = 1536 + + # Positional and patch embeddings. + converted_state_dict["pos_embed.pos_embed"] = checkpoint.pop("pos_embed") + converted_state_dict["pos_embed.proj.weight"] = checkpoint.pop("x_embedder.proj.weight") + converted_state_dict["pos_embed.proj.bias"] = checkpoint.pop("x_embedder.proj.bias") + + # Timestep embeddings. + converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = checkpoint.pop( + "t_embedder.mlp.0.weight" + ) + converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = checkpoint.pop("t_embedder.mlp.0.bias") + converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = checkpoint.pop( + "t_embedder.mlp.2.weight" + ) + converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = checkpoint.pop("t_embedder.mlp.2.bias") + + # Context projections. + converted_state_dict["context_embedder.weight"] = checkpoint.pop("context_embedder.weight") + converted_state_dict["context_embedder.bias"] = checkpoint.pop("context_embedder.bias") + + # Pooled context projection. + converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = checkpoint.pop("y_embedder.mlp.0.weight") + converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = checkpoint.pop("y_embedder.mlp.0.bias") + converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = checkpoint.pop("y_embedder.mlp.2.weight") + converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = checkpoint.pop("y_embedder.mlp.2.bias") + + # Transformer blocks ๐ŸŽธ. 
+ for i in range(num_layers): + # Q, K, V + sample_q, sample_k, sample_v = torch.chunk( + checkpoint.pop(f"joint_blocks.{i}.x_block.attn.qkv.weight"), 3, dim=0 + ) + context_q, context_k, context_v = torch.chunk( + checkpoint.pop(f"joint_blocks.{i}.context_block.attn.qkv.weight"), 3, dim=0 + ) + sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( + checkpoint.pop(f"joint_blocks.{i}.x_block.attn.qkv.bias"), 3, dim=0 + ) + context_q_bias, context_k_bias, context_v_bias = torch.chunk( + checkpoint.pop(f"joint_blocks.{i}.context_block.attn.qkv.bias"), 3, dim=0 + ) + + converted_state_dict[f"transformer_blocks.{i}.attn.to_q.weight"] = torch.cat([sample_q]) + converted_state_dict[f"transformer_blocks.{i}.attn.to_q.bias"] = torch.cat([sample_q_bias]) + converted_state_dict[f"transformer_blocks.{i}.attn.to_k.weight"] = torch.cat([sample_k]) + converted_state_dict[f"transformer_blocks.{i}.attn.to_k.bias"] = torch.cat([sample_k_bias]) + converted_state_dict[f"transformer_blocks.{i}.attn.to_v.weight"] = torch.cat([sample_v]) + converted_state_dict[f"transformer_blocks.{i}.attn.to_v.bias"] = torch.cat([sample_v_bias]) + + converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.weight"] = torch.cat([context_q]) + converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.bias"] = torch.cat([context_q_bias]) + converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.weight"] = torch.cat([context_k]) + converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.bias"] = torch.cat([context_k_bias]) + converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.weight"] = torch.cat([context_v]) + converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.bias"] = torch.cat([context_v_bias]) + + # output projections. + converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.weight"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.attn.proj.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.bias"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.attn.proj.bias" + ) + if not (i == num_layers - 1): + converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.weight"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.attn.proj.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.bias"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.attn.proj.bias" + ) + + # norms. + converted_state_dict[f"transformer_blocks.{i}.norm1.linear.weight"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.adaLN_modulation.1.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.norm1.linear.bias"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.adaLN_modulation.1.bias" + ) + if not (i == num_layers - 1): + converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias" + ) + else: + converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = swap_scale_shift( + checkpoint.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight"), + dim=caption_projection_dim, + ) + converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = swap_scale_shift( + checkpoint.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias"), + dim=caption_projection_dim, + ) + + # ffs. 
+ converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.weight"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.mlp.fc1.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.bias"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.mlp.fc1.bias" + ) + converted_state_dict[f"transformer_blocks.{i}.ff.net.2.weight"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.mlp.fc2.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.ff.net.2.bias"] = checkpoint.pop( + f"joint_blocks.{i}.x_block.mlp.fc2.bias" + ) + if not (i == num_layers - 1): + converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.weight"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.mlp.fc1.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.bias"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.mlp.fc1.bias" + ) + converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.weight"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.mlp.fc2.weight" + ) + converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.bias"] = checkpoint.pop( + f"joint_blocks.{i}.context_block.mlp.fc2.bias" + ) + + # Final blocks. + converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") + converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") + converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( + checkpoint.pop("final_layer.adaLN_modulation.1.weight"), dim=caption_projection_dim + ) + converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( + checkpoint.pop("final_layer.adaLN_modulation.1.bias"), dim=caption_projection_dim + ) + + return converted_state_dict + + +def is_t5_in_single_file(checkpoint): + if "text_encoders.t5xxl.transformer.shared.weight" in checkpoint: + return True + + return False + + +def convert_sd3_t5_checkpoint_to_diffusers(checkpoint): + keys = list(checkpoint.keys()) + text_model_dict = {} + + remove_prefixes = ["text_encoders.t5xxl.transformer."] + + for key in keys: + for prefix in remove_prefixes: + if key.startswith(prefix): + diffusers_key = key.replace(prefix, "") + text_model_dict[diffusers_key] = checkpoint.get(key) + + return text_model_dict + + +def create_diffusers_t5_model_from_checkpoint( + cls, + checkpoint, + subfolder="", + config=None, + torch_dtype=None, + local_files_only=None, +): + if config: + config = {"pretrained_model_name_or_path": config} + else: + config = fetch_diffusers_config(checkpoint) + + model_config = cls.config_class.from_pretrained(**config, subfolder=subfolder, local_files_only=local_files_only) + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + model = cls(model_config) + + diffusers_format_checkpoint = convert_sd3_t5_checkpoint_to_diffusers(checkpoint) + + if is_accelerate_available(): + unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype) + if model._keys_to_ignore_on_load_unexpected is not None: + for pat in model._keys_to_ignore_on_load_unexpected: + unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] + + if len(unexpected_keys) > 0: + logger.warning( + f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}" + ) + + else: + model.load_state_dict(diffusers_format_checkpoint) + + use_keep_in_fp32_modules = (cls._keep_in_fp32_modules is not None) and (torch_dtype == torch.float16) + if use_keep_in_fp32_modules: + keep_in_fp32_modules = 
model._keep_in_fp32_modules + else: + keep_in_fp32_modules = [] + + if keep_in_fp32_modules is not None: + for name, param in model.named_parameters(): + if any(module_to_keep_in_fp32 in name.split(".") for module_to_keep_in_fp32 in keep_in_fp32_modules): + # param = param.to(torch.float32) does not work here as only in the local scope. + param.data = param.data.to(torch.float32) + + return model + + +def convert_animatediff_checkpoint_to_diffusers(checkpoint, **kwargs): + converted_state_dict = {} + for k, v in checkpoint.items(): + if "pos_encoder" in k: + continue + + else: + converted_state_dict[ + k.replace(".norms.0", ".norm1") + .replace(".norms.1", ".norm2") + .replace(".ff_norm", ".norm3") + .replace(".attention_blocks.0", ".attn1") + .replace(".attention_blocks.1", ".attn2") + .replace(".temporal_transformer", "") + ] = v + + return converted_state_dict + + +def convert_flux_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): + converted_state_dict = {} + keys = list(checkpoint.keys()) + for k in keys: + if "model.diffusion_model." in k: + checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) + + num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "double_blocks." in k))[-1] + 1 # noqa: C401 + num_single_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "single_blocks." in k))[-1] + 1 # noqa: C401 + mlp_ratio = 4.0 + inner_dim = 3072 + + # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; + # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation + def swap_scale_shift(weight): + shift, scale = weight.chunk(2, dim=0) + new_weight = torch.cat([scale, shift], dim=0) + return new_weight + + ## time_text_embed.timestep_embedder <- time_in + converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = checkpoint.pop( + "time_in.in_layer.weight" + ) + converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = checkpoint.pop("time_in.in_layer.bias") + converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = checkpoint.pop( + "time_in.out_layer.weight" + ) + converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = checkpoint.pop("time_in.out_layer.bias") + + ## time_text_embed.text_embedder <- vector_in + converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = checkpoint.pop("vector_in.in_layer.weight") + converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = checkpoint.pop("vector_in.in_layer.bias") + converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = checkpoint.pop( + "vector_in.out_layer.weight" + ) + converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = checkpoint.pop("vector_in.out_layer.bias") + + # guidance + has_guidance = any("guidance" in k for k in checkpoint) + if has_guidance: + converted_state_dict["time_text_embed.guidance_embedder.linear_1.weight"] = checkpoint.pop( + "guidance_in.in_layer.weight" + ) + converted_state_dict["time_text_embed.guidance_embedder.linear_1.bias"] = checkpoint.pop( + "guidance_in.in_layer.bias" + ) + converted_state_dict["time_text_embed.guidance_embedder.linear_2.weight"] = checkpoint.pop( + "guidance_in.out_layer.weight" + ) + converted_state_dict["time_text_embed.guidance_embedder.linear_2.bias"] = checkpoint.pop( + "guidance_in.out_layer.bias" + ) + + # context_embedder + 
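+    # Flux names the text-token projection `txt_in` and the latent patch projection `img_in`;
+    # in diffusers these become `context_embedder` and `x_embedder` respectively.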
converted_state_dict["context_embedder.weight"] = checkpoint.pop("txt_in.weight") + converted_state_dict["context_embedder.bias"] = checkpoint.pop("txt_in.bias") + + # x_embedder + converted_state_dict["x_embedder.weight"] = checkpoint.pop("img_in.weight") + converted_state_dict["x_embedder.bias"] = checkpoint.pop("img_in.bias") + + # double transformer blocks + for i in range(num_layers): + block_prefix = f"transformer_blocks.{i}." + # norms. + ## norm1 + converted_state_dict[f"{block_prefix}norm1.linear.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_mod.lin.weight" + ) + converted_state_dict[f"{block_prefix}norm1.linear.bias"] = checkpoint.pop( + f"double_blocks.{i}.img_mod.lin.bias" + ) + ## norm1_context + converted_state_dict[f"{block_prefix}norm1_context.linear.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_mod.lin.weight" + ) + converted_state_dict[f"{block_prefix}norm1_context.linear.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_mod.lin.bias" + ) + # Q, K, V + sample_q, sample_k, sample_v = torch.chunk(checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.weight"), 3, dim=0) + context_q, context_k, context_v = torch.chunk( + checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.weight"), 3, dim=0 + ) + sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( + checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.bias"), 3, dim=0 + ) + context_q_bias, context_k_bias, context_v_bias = torch.chunk( + checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.bias"), 3, dim=0 + ) + converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([sample_q]) + converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([sample_q_bias]) + converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([sample_k]) + converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([sample_k_bias]) + converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([sample_v]) + converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([sample_v_bias]) + converted_state_dict[f"{block_prefix}attn.add_q_proj.weight"] = torch.cat([context_q]) + converted_state_dict[f"{block_prefix}attn.add_q_proj.bias"] = torch.cat([context_q_bias]) + converted_state_dict[f"{block_prefix}attn.add_k_proj.weight"] = torch.cat([context_k]) + converted_state_dict[f"{block_prefix}attn.add_k_proj.bias"] = torch.cat([context_k_bias]) + converted_state_dict[f"{block_prefix}attn.add_v_proj.weight"] = torch.cat([context_v]) + converted_state_dict[f"{block_prefix}attn.add_v_proj.bias"] = torch.cat([context_v_bias]) + # qk_norm + converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.norm.query_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.norm.key_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.norm.query_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.norm.key_norm.scale" + ) + # ff img_mlp + converted_state_dict[f"{block_prefix}ff.net.0.proj.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_mlp.0.weight" + ) + converted_state_dict[f"{block_prefix}ff.net.0.proj.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.0.bias") + converted_state_dict[f"{block_prefix}ff.net.2.weight"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.weight") + converted_state_dict[f"{block_prefix}ff.net.2.bias"] = 
checkpoint.pop(f"double_blocks.{i}.img_mlp.2.bias") + converted_state_dict[f"{block_prefix}ff_context.net.0.proj.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.0.weight" + ) + converted_state_dict[f"{block_prefix}ff_context.net.0.proj.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.0.bias" + ) + converted_state_dict[f"{block_prefix}ff_context.net.2.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.2.weight" + ) + converted_state_dict[f"{block_prefix}ff_context.net.2.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.2.bias" + ) + # output projections. + converted_state_dict[f"{block_prefix}attn.to_out.0.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.proj.weight" + ) + converted_state_dict[f"{block_prefix}attn.to_out.0.bias"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.proj.bias" + ) + converted_state_dict[f"{block_prefix}attn.to_add_out.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.proj.weight" + ) + converted_state_dict[f"{block_prefix}attn.to_add_out.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.proj.bias" + ) + + # single transfomer blocks + for i in range(num_single_layers): + block_prefix = f"single_transformer_blocks.{i}." + # norm.linear <- single_blocks.0.modulation.lin + converted_state_dict[f"{block_prefix}norm.linear.weight"] = checkpoint.pop( + f"single_blocks.{i}.modulation.lin.weight" + ) + converted_state_dict[f"{block_prefix}norm.linear.bias"] = checkpoint.pop( + f"single_blocks.{i}.modulation.lin.bias" + ) + # Q, K, V, mlp + mlp_hidden_dim = int(inner_dim * mlp_ratio) + split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) + q, k, v, mlp = torch.split(checkpoint.pop(f"single_blocks.{i}.linear1.weight"), split_size, dim=0) + q_bias, k_bias, v_bias, mlp_bias = torch.split( + checkpoint.pop(f"single_blocks.{i}.linear1.bias"), split_size, dim=0 + ) + converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([q]) + converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([q_bias]) + converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([k]) + converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([k_bias]) + converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([v]) + converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([v_bias]) + converted_state_dict[f"{block_prefix}proj_mlp.weight"] = torch.cat([mlp]) + converted_state_dict[f"{block_prefix}proj_mlp.bias"] = torch.cat([mlp_bias]) + # qk norm + converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( + f"single_blocks.{i}.norm.query_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( + f"single_blocks.{i}.norm.key_norm.scale" + ) + # output projections. 
+ converted_state_dict[f"{block_prefix}proj_out.weight"] = checkpoint.pop(f"single_blocks.{i}.linear2.weight") + converted_state_dict[f"{block_prefix}proj_out.bias"] = checkpoint.pop(f"single_blocks.{i}.linear2.bias") + + converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight") + converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias") + converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( + checkpoint.pop("final_layer.adaLN_modulation.1.weight") + ) + converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( + checkpoint.pop("final_layer.adaLN_modulation.1.bias") + ) + + return converted_state_dict diff --git a/diffusers3/loaders/textual_inversion.py b/diffusers3/loaders/textual_inversion.py new file mode 100644 index 0000000000000000000000000000000000000000..574b89233cc15cb15f0fad56de74fdc29bd77fba --- /dev/null +++ b/diffusers3/loaders/textual_inversion.py @@ -0,0 +1,578 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, List, Optional, Union + +import safetensors +import torch +from huggingface_hub.utils import validate_hf_hub_args +from torch import nn + +from ..models.modeling_utils import load_state_dict +from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging + + +if is_transformers_available(): + from transformers import PreTrainedModel, PreTrainedTokenizer + +if is_accelerate_available(): + from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module + +logger = logging.get_logger(__name__) + +TEXT_INVERSION_NAME = "learned_embeds.bin" +TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors" + + +@validate_hf_hub_args +def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs): + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + use_safetensors = kwargs.pop("use_safetensors", None) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "text_inversion", + "framework": "pytorch", + } + state_dicts = [] + for pretrained_model_name_or_path in pretrained_model_name_or_paths: + if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)): + # 3.1. 
Load textual inversion file + model_file = None + + # Let's first try to load .safetensors weights + if (use_safetensors and weight_name is None) or ( + weight_name is not None and weight_name.endswith(".safetensors") + ): + try: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=weight_name or TEXT_INVERSION_NAME_SAFE, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = safetensors.torch.load_file(model_file, device="cpu") + except Exception as e: + if not allow_pickle: + raise e + + model_file = None + + if model_file is None: + model_file = _get_model_file( + pretrained_model_name_or_path, + weights_name=weight_name or TEXT_INVERSION_NAME, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = load_state_dict(model_file) + else: + state_dict = pretrained_model_name_or_path + + state_dicts.append(state_dict) + + return state_dicts + + +class TextualInversionLoaderMixin: + r""" + Load Textual Inversion tokens and embeddings to the tokenizer and text encoder. + """ + + def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"): # noqa: F821 + r""" + Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to + be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual + inversion token or if the textual inversion token is a single vector, the input prompt is returned. + + Parameters: + prompt (`str` or list of `str`): + The prompt or prompts to guide the image generation. + tokenizer (`PreTrainedTokenizer`): + The tokenizer responsible for encoding the prompt into input tokens. + + Returns: + `str` or list of `str`: The converted prompt + """ + if not isinstance(prompt, List): + prompts = [prompt] + else: + prompts = prompt + + prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts] + + if not isinstance(prompt, List): + return prompts[0] + + return prompts + + def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"): # noqa: F821 + r""" + Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds + to a multi-vector textual inversion embedding, this function will process the prompt so that the special token + is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual + inversion token or a textual inversion token that is a single vector, the input prompt is simply returned. + + Parameters: + prompt (`str`): + The prompt to guide the image generation. + tokenizer (`PreTrainedTokenizer`): + The tokenizer responsible for encoding the prompt into input tokens. 
+ + Returns: + `str`: The converted prompt + """ + tokens = tokenizer.tokenize(prompt) + unique_tokens = set(tokens) + for token in unique_tokens: + if token in tokenizer.added_tokens_encoder: + replacement = token + i = 1 + while f"{token}_{i}" in tokenizer.added_tokens_encoder: + replacement += f" {token}_{i}" + i += 1 + + prompt = prompt.replace(token, replacement) + + return prompt + + def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens): + if tokenizer is None: + raise ValueError( + f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling" + f" `{self.load_textual_inversion.__name__}`" + ) + + if text_encoder is None: + raise ValueError( + f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling" + f" `{self.load_textual_inversion.__name__}`" + ) + + if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens): + raise ValueError( + f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} " + f"Make sure both lists have the same length." + ) + + valid_tokens = [t for t in tokens if t is not None] + if len(set(valid_tokens)) < len(valid_tokens): + raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}") + + @staticmethod + def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer): + all_tokens = [] + all_embeddings = [] + for state_dict, token in zip(state_dicts, tokens): + if isinstance(state_dict, torch.Tensor): + if token is None: + raise ValueError( + "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`." + ) + loaded_token = token + embedding = state_dict + elif len(state_dict) == 1: + # diffusers + loaded_token, embedding = next(iter(state_dict.items())) + elif "string_to_param" in state_dict: + # A1111 + loaded_token = state_dict["name"] + embedding = state_dict["string_to_param"]["*"] + else: + raise ValueError( + f"Loaded state dictionary is incorrect: {state_dict}. \n\n" + "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`" + " input key." + ) + + if token is not None and loaded_token != token: + logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.") + else: + token = loaded_token + + if token in tokenizer.get_vocab(): + raise ValueError( + f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder." + ) + + all_tokens.append(token) + all_embeddings.append(embedding) + + return all_tokens, all_embeddings + + @staticmethod + def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer): + all_tokens = [] + all_embeddings = [] + + for embedding, token in zip(embeddings, tokens): + if f"{token}_1" in tokenizer.get_vocab(): + multi_vector_tokens = [token] + i = 1 + while f"{token}_{i}" in tokenizer.added_tokens_encoder: + multi_vector_tokens.append(f"{token}_{i}") + i += 1 + + raise ValueError( + f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder." 
+ ) + + is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1 + if is_multi_vector: + all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])] + all_embeddings += [e for e in embedding] # noqa: C416 + else: + all_tokens += [token] + all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding] + + return all_tokens, all_embeddings + + @validate_hf_hub_args + def load_textual_inversion( + self, + pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]], + token: Optional[Union[str, List[str]]] = None, + tokenizer: Optional["PreTrainedTokenizer"] = None, # noqa: F821 + text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821 + **kwargs, + ): + r""" + Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both ๐Ÿค— Diffusers and + Automatic1111 formats are supported). + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`): + Can be either one of the following or a list of them: + + - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a + pretrained model hosted on the Hub. + - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual + inversion weights. + - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + token (`str` or `List[str]`, *optional*): + Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a + list, then `token` must also be a list of equal length. + text_encoder ([`~transformers.CLIPTextModel`], *optional*): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + If not specified, function will take self.tokenizer. + tokenizer ([`~transformers.CLIPTokenizer`], *optional*): + A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer. + weight_name (`str`, *optional*): + Name of a custom weight file. This should be used when: + + - The saved textual inversion file is in ๐Ÿค— Diffusers format, but was saved under a specific weight + name such as `text_inv.bin`. + - The saved textual inversion file is in the Automatic1111 format. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + + Example: + + To load a Textual Inversion embedding vector in ๐Ÿค— Diffusers format: + + ```py + from diffusers import StableDiffusionPipeline + import torch + + model_id = "runwayml/stable-diffusion-v1-5" + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") + + pipe.load_textual_inversion("sd-concepts-library/cat-toy") + + prompt = "A backpack" + + image = pipe(prompt, num_inference_steps=50).images[0] + image.save("cat-backpack.png") + ``` + + To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first + (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector + locally: + + ```py + from diffusers import StableDiffusionPipeline + import torch + + model_id = "runwayml/stable-diffusion-v1-5" + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") + + pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2") + + prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details." + + image = pipe(prompt, num_inference_steps=50).images[0] + image.save("character.png") + ``` + + """ + # 1. Set correct tokenizer and text encoder + tokenizer = tokenizer or getattr(self, "tokenizer", None) + text_encoder = text_encoder or getattr(self, "text_encoder", None) + + # 2. Normalize inputs + pretrained_model_name_or_paths = ( + [pretrained_model_name_or_path] + if not isinstance(pretrained_model_name_or_path, list) + else pretrained_model_name_or_path + ) + tokens = [token] if not isinstance(token, list) else token + if tokens[0] is None: + tokens = tokens * len(pretrained_model_name_or_paths) + + # 3. Check inputs + self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens) + + # 4. Load state dicts of textual embeddings + state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs) + + # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens + if len(tokens) > 1 and len(state_dicts) == 1: + if isinstance(state_dicts[0], torch.Tensor): + state_dicts = list(state_dicts[0]) + if len(tokens) != len(state_dicts): + raise ValueError( + f"You have passed a state_dict contains {len(state_dicts)} embeddings, and list of tokens of length {len(tokens)} " + f"Make sure both have the same length." + ) + + # 4. Retrieve tokens and embeddings + tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer) + + # 5. Extend tokens and embeddings for multi vector + tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer) + + # 6. Make sure all embeddings have the correct size + expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1] + if any(expected_emb_dim != emb.shape[-1] for emb in embeddings): + raise ValueError( + "Loaded embeddings are of incorrect shape. 
Expected each textual inversion embedding " + "to be of shape {input_embeddings.shape[-1]}, but are {embeddings.shape[-1]} " + ) + + # 7. Now we can be sure that loading the embedding matrix works + # < Unsafe code: + + # 7.1 Offload all hooks in case the pipeline was cpu offloaded before make sure, we offload and onload again + is_model_cpu_offload = False + is_sequential_cpu_offload = False + if self.hf_device_map is None: + for _, component in self.components.items(): + if isinstance(component, nn.Module): + if hasattr(component, "_hf_hook"): + is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload) + is_sequential_cpu_offload = ( + isinstance(getattr(component, "_hf_hook"), AlignDevicesHook) + or hasattr(component._hf_hook, "hooks") + and isinstance(component._hf_hook.hooks[0], AlignDevicesHook) + ) + logger.info( + "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again." + ) + remove_hook_from_module(component, recurse=is_sequential_cpu_offload) + + # 7.2 save expected device and dtype + device = text_encoder.device + dtype = text_encoder.dtype + + # 7.3 Increase token embedding matrix + text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens)) + input_embeddings = text_encoder.get_input_embeddings().weight + + # 7.4 Load token and embedding + for token, embedding in zip(tokens, embeddings): + # add tokens and get ids + tokenizer.add_tokens(token) + token_id = tokenizer.convert_tokens_to_ids(token) + input_embeddings.data[token_id] = embedding + logger.info(f"Loaded textual inversion embedding for {token}.") + + input_embeddings.to(dtype=dtype, device=device) + + # 7.5 Offload the model again + if is_model_cpu_offload: + self.enable_model_cpu_offload() + elif is_sequential_cpu_offload: + self.enable_sequential_cpu_offload() + + # / Unsafe Code > + + def unload_textual_inversion( + self, + tokens: Optional[Union[str, List[str]]] = None, + tokenizer: Optional["PreTrainedTokenizer"] = None, + text_encoder: Optional["PreTrainedModel"] = None, + ): + r""" + Unload Textual Inversion embeddings from the text encoder of [`StableDiffusionPipeline`] + + Example: + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5") + + # Example 1 + pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") + pipeline.load_textual_inversion("sd-concepts-library/moeb-style") + + # Remove all token embeddings + pipeline.unload_textual_inversion() + + # Example 2 + pipeline.load_textual_inversion("sd-concepts-library/moeb-style") + pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") + + # Remove just one token + pipeline.unload_textual_inversion("") + + # Example 3: unload from SDXL + pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + embedding_path = hf_hub_download( + repo_id="linoyts/web_y2k", filename="web_y2k_emb.safetensors", repo_type="model" + ) + + # load embeddings to the text encoders + state_dict = load_file(embedding_path) + + # load embeddings of text_encoder 1 (CLIP ViT-L/14) + pipeline.load_textual_inversion( + state_dict["clip_l"], + token=["", ""], + text_encoder=pipeline.text_encoder, + tokenizer=pipeline.tokenizer, + ) + # load embeddings of text_encoder 2 (CLIP ViT-G/14) + pipeline.load_textual_inversion( + state_dict["clip_g"], + 
token=["", ""], + text_encoder=pipeline.text_encoder_2, + tokenizer=pipeline.tokenizer_2, + ) + + # Unload explicitly from both text encoders abd tokenizers + pipeline.unload_textual_inversion( + tokens=["", ""], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer + ) + pipeline.unload_textual_inversion( + tokens=["", ""], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2 + ) + ``` + """ + + tokenizer = tokenizer or getattr(self, "tokenizer", None) + text_encoder = text_encoder or getattr(self, "text_encoder", None) + + # Get textual inversion tokens and ids + token_ids = [] + last_special_token_id = None + + if tokens: + if isinstance(tokens, str): + tokens = [tokens] + for added_token_id, added_token in tokenizer.added_tokens_decoder.items(): + if not added_token.special: + if added_token.content in tokens: + token_ids.append(added_token_id) + else: + last_special_token_id = added_token_id + if len(token_ids) == 0: + raise ValueError("No tokens to remove found") + else: + tokens = [] + for added_token_id, added_token in tokenizer.added_tokens_decoder.items(): + if not added_token.special: + token_ids.append(added_token_id) + tokens.append(added_token.content) + else: + last_special_token_id = added_token_id + + # Delete from tokenizer + for token_id, token_to_remove in zip(token_ids, tokens): + del tokenizer._added_tokens_decoder[token_id] + del tokenizer._added_tokens_encoder[token_to_remove] + + # Make all token ids sequential in tokenizer + key_id = 1 + for token_id in tokenizer.added_tokens_decoder: + if token_id > last_special_token_id and token_id > last_special_token_id + key_id: + token = tokenizer._added_tokens_decoder[token_id] + tokenizer._added_tokens_decoder[last_special_token_id + key_id] = token + del tokenizer._added_tokens_decoder[token_id] + tokenizer._added_tokens_encoder[token.content] = last_special_token_id + key_id + key_id += 1 + tokenizer._update_trie() + + # Delete from text encoder + text_embedding_dim = text_encoder.get_input_embeddings().embedding_dim + temp_text_embedding_weights = text_encoder.get_input_embeddings().weight + text_embedding_weights = temp_text_embedding_weights[: last_special_token_id + 1] + to_append = [] + for i in range(last_special_token_id + 1, temp_text_embedding_weights.shape[0]): + if i not in token_ids: + to_append.append(temp_text_embedding_weights[i].unsqueeze(0)) + if len(to_append) > 0: + to_append = torch.cat(to_append, dim=0) + text_embedding_weights = torch.cat([text_embedding_weights, to_append], dim=0) + text_embeddings_filtered = nn.Embedding(text_embedding_weights.shape[0], text_embedding_dim) + text_embeddings_filtered.weight.data = text_embedding_weights + text_encoder.set_input_embeddings(text_embeddings_filtered) diff --git a/diffusers3/loaders/unet.py b/diffusers3/loaders/unet.py new file mode 100644 index 0000000000000000000000000000000000000000..32ace77b62246938ea263cc28f70593bcff51205 --- /dev/null +++ b/diffusers3/loaders/unet.py @@ -0,0 +1,906 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import os +from collections import defaultdict +from contextlib import nullcontext +from pathlib import Path +from typing import Callable, Dict, Union + +import safetensors +import torch +import torch.nn.functional as F +from huggingface_hub.utils import validate_hf_hub_args +from torch import nn + +from ..models.embeddings import ( + ImageProjection, + IPAdapterFaceIDImageProjection, + IPAdapterFaceIDPlusImageProjection, + IPAdapterFullImageProjection, + IPAdapterPlusImageProjection, + MultiIPAdapterImageProjection, +) +from ..models.modeling_utils import load_model_dict_into_meta, load_state_dict +from ..utils import ( + USE_PEFT_BACKEND, + _get_model_file, + convert_unet_state_dict_to_peft, + get_adapter_name, + get_peft_kwargs, + is_accelerate_available, + is_peft_version, + is_torch_version, + logging, +) +from .lora_pipeline import LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE, TEXT_ENCODER_NAME, UNET_NAME +from .utils import AttnProcsLayers + + +if is_accelerate_available(): + from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module + +logger = logging.get_logger(__name__) + + +CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin" +CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors" + + +class UNet2DConditionLoadersMixin: + """ + Load LoRA layers into a [`UNet2DCondtionModel`]. + """ + + text_encoder_name = TEXT_ENCODER_NAME + unet_name = UNET_NAME + + @validate_hf_hub_args + def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): + r""" + Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be + defined in + [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py) + and be a `torch.nn.Module` class. Currently supported: LoRA, Custom Diffusion. For LoRA, one must install + `peft`: `pip install -U peft`. + + Parameters: + pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): + Can be either: + + - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a directory (for example `./my_model_directory`) containing the model weights saved + with [`ModelMixin.save_pretrained`]. + - A [torch state + dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. 
If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + subfolder (`str`, *optional*, defaults to `""`): + The subfolder location of a model file within a larger model repository on the Hub or locally. + network_alphas (`Dict[str, float]`): + The value of the network alpha used for stable learning and preventing underflow. This value has the + same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this + link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning). + adapter_name (`str`, *optional*, defaults to None): + Adapter name to be used for referencing the loaded adapter model. If not specified, it will use + `default_{i}` where i is the total number of adapters being loaded. + weight_name (`str`, *optional*, defaults to None): + Name of the serialized state dict file. + + Example: + + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipeline = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ).to("cuda") + pipeline.unet.load_attn_procs( + "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" + ) + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + subfolder = kwargs.pop("subfolder", None) + weight_name = kwargs.pop("weight_name", None) + use_safetensors = kwargs.pop("use_safetensors", None) + adapter_name = kwargs.pop("adapter_name", None) + _pipeline = kwargs.pop("_pipeline", None) + network_alphas = kwargs.pop("network_alphas", None) + allow_pickle = False + + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + user_agent = { + "file_type": "attn_procs_weights", + "framework": "pytorch", + } + + model_file = None + if not isinstance(pretrained_model_name_or_path_or_dict, dict): + # Let's first try to load .safetensors weights + if (use_safetensors and weight_name is None) or ( + weight_name is not None and weight_name.endswith(".safetensors") + ): + try: + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = safetensors.torch.load_file(model_file, device="cpu") + except IOError as e: + if not allow_pickle: + raise e + # try loading non-safetensors weights + pass + if model_file is None: + model_file = _get_model_file( + pretrained_model_name_or_path_or_dict, + weights_name=weight_name or LORA_WEIGHT_NAME, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + subfolder=subfolder, + user_agent=user_agent, + ) + state_dict = load_state_dict(model_file) + else: + state_dict = pretrained_model_name_or_path_or_dict + + is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys()) + is_lora = 
all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys()) + is_model_cpu_offload = False + is_sequential_cpu_offload = False + + if is_custom_diffusion: + attn_processors = self._process_custom_diffusion(state_dict=state_dict) + elif is_lora: + is_model_cpu_offload, is_sequential_cpu_offload = self._process_lora( + state_dict=state_dict, + unet_identifier_key=self.unet_name, + network_alphas=network_alphas, + adapter_name=adapter_name, + _pipeline=_pipeline, + ) + else: + raise ValueError( + f"{model_file} does not seem to be in the correct format expected by Custom Diffusion training." + ) + + # + + def _process_custom_diffusion(self, state_dict): + from ..models.attention_processor import CustomDiffusionAttnProcessor + + attn_processors = {} + custom_diffusion_grouped_dict = defaultdict(dict) + for key, value in state_dict.items(): + if len(value) == 0: + custom_diffusion_grouped_dict[key] = {} + else: + if "to_out" in key: + attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:]) + else: + attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:]) + custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value + + for key, value_dict in custom_diffusion_grouped_dict.items(): + if len(value_dict) == 0: + attn_processors[key] = CustomDiffusionAttnProcessor( + train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None + ) + else: + cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1] + hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0] + train_q_out = True if "to_q_custom_diffusion.weight" in value_dict else False + attn_processors[key] = CustomDiffusionAttnProcessor( + train_kv=True, + train_q_out=train_q_out, + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ) + attn_processors[key].load_state_dict(value_dict) + + return attn_processors + + def _process_lora(self, state_dict, unet_identifier_key, network_alphas, adapter_name, _pipeline): + # This method does the following things: + # 1. Filters the `state_dict` with keys matching `unet_identifier_key` when using the non-legacy + # format. For legacy format no filtering is applied. + # 2. Converts the `state_dict` to the `peft` compatible format. + # 3. Creates a `LoraConfig` and then injects the converted `state_dict` into the UNet per the + # `LoraConfig` specs. + # 4. It also reports if the underlying `_pipeline` has any kind of offloading inside of it. + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for this method.") + + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + + keys = list(state_dict.keys()) + + unet_keys = [k for k in keys if k.startswith(unet_identifier_key)] + unet_state_dict = { + k.replace(f"{unet_identifier_key}.", ""): v for k, v in state_dict.items() if k in unet_keys + } + + if network_alphas is not None: + alpha_keys = [k for k in network_alphas.keys() if k.startswith(unet_identifier_key)] + network_alphas = { + k.replace(f"{unet_identifier_key}.", ""): v for k, v in network_alphas.items() if k in alpha_keys + } + + is_model_cpu_offload = False + is_sequential_cpu_offload = False + state_dict_to_be_used = unet_state_dict if len(unet_state_dict) > 0 else state_dict + + if len(state_dict_to_be_used) > 0: + if adapter_name in getattr(self, "peft_config", {}): + raise ValueError( + f"Adapter name {adapter_name} already in use in the Unet - please select a new adapter name." 
+ ) + + state_dict = convert_unet_state_dict_to_peft(state_dict_to_be_used) + + if network_alphas is not None: + # The alphas state dict have the same structure as Unet, thus we convert it to peft format using + # `convert_unet_state_dict_to_peft` method. + network_alphas = convert_unet_state_dict_to_peft(network_alphas) + + rank = {} + for key, val in state_dict.items(): + if "lora_B" in key: + rank[key] = val.shape[1] + + lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True) + if "use_dora" in lora_config_kwargs: + if lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError( + "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`." + ) + else: + if is_peft_version("<", "0.9.0"): + lora_config_kwargs.pop("use_dora") + lora_config = LoraConfig(**lora_config_kwargs) + + # adapter_name + if adapter_name is None: + adapter_name = get_adapter_name(self) + + # In case the pipeline has been already offloaded to CPU - temporarily remove the hooks + # otherwise loading LoRA weights will lead to an error + is_model_cpu_offload, is_sequential_cpu_offload = self._optionally_disable_offloading(_pipeline) + + inject_adapter_in_model(lora_config, self, adapter_name=adapter_name) + incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name) + + if incompatible_keys is not None: + # check only for unexpected keys + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + logger.warning( + f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " + f" {unexpected_keys}. " + ) + + return is_model_cpu_offload, is_sequential_cpu_offload + + @classmethod + # Copied from diffusers.loaders.lora_base.LoraBaseMixin._optionally_disable_offloading + def _optionally_disable_offloading(cls, _pipeline): + """ + Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU. + + Args: + _pipeline (`DiffusionPipeline`): + The pipeline to disable offloading for. + + Returns: + tuple: + A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` is True. + """ + is_model_cpu_offload = False + is_sequential_cpu_offload = False + + if _pipeline is not None and _pipeline.hf_device_map is None: + for _, component in _pipeline.components.items(): + if isinstance(component, nn.Module) and hasattr(component, "_hf_hook"): + if not is_model_cpu_offload: + is_model_cpu_offload = isinstance(component._hf_hook, CpuOffload) + if not is_sequential_cpu_offload: + is_sequential_cpu_offload = ( + isinstance(component._hf_hook, AlignDevicesHook) + or hasattr(component._hf_hook, "hooks") + and isinstance(component._hf_hook.hooks[0], AlignDevicesHook) + ) + + logger.info( + "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again." + ) + remove_hook_from_module(component, recurse=is_sequential_cpu_offload) + + return (is_model_cpu_offload, is_sequential_cpu_offload) + + def save_attn_procs( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + weight_name: str = None, + save_function: Callable = None, + safe_serialization: bool = True, + **kwargs, + ): + r""" + Save attention processor layers to a directory so that it can be reloaded with the + [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method. 
+ + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save an attention processor to (will be created if it doesn't exist). + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful during distributed training and you + need to call this function on all processes. In this case, set `is_main_process=True` only on the main + process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful during distributed training when you need to + replace `torch.save` with another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or with `pickle`. + + Example: + + ```py + import torch + from diffusers import DiffusionPipeline + + pipeline = DiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + torch_dtype=torch.float16, + ).to("cuda") + pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin") + pipeline.unet.save_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin") + ``` + """ + from ..models.attention_processor import ( + CustomDiffusionAttnProcessor, + CustomDiffusionAttnProcessor2_0, + CustomDiffusionXFormersAttnProcessor, + ) + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + is_custom_diffusion = any( + isinstance( + x, + (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor), + ) + for (_, x) in self.attn_processors.items() + ) + if is_custom_diffusion: + state_dict = self._get_custom_diffusion_state_dict() + if save_function is None and safe_serialization: + # safetensors does not support saving dicts with non-tensor values + empty_state_dict = {k: v for k, v in state_dict.items() if not isinstance(v, torch.Tensor)} + if len(empty_state_dict) > 0: + logger.warning( + f"Safetensors does not support saving dicts with non-tensor values. 
" + f"The following keys will be ignored: {empty_state_dict.keys()}" + ) + state_dict = {k: v for k, v in state_dict.items() if isinstance(v, torch.Tensor)} + else: + if not USE_PEFT_BACKEND: + raise ValueError("PEFT backend is required for saving LoRAs using the `save_attn_procs()` method.") + + from peft.utils import get_peft_model_state_dict + + state_dict = get_peft_model_state_dict(self) + + if save_function is None: + if safe_serialization: + + def save_function(weights, filename): + return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"}) + + else: + save_function = torch.save + + os.makedirs(save_directory, exist_ok=True) + + if weight_name is None: + if safe_serialization: + weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE + else: + weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME + + # Save the model + save_path = Path(save_directory, weight_name).as_posix() + save_function(state_dict, save_path) + logger.info(f"Model weights saved in {save_path}") + + def _get_custom_diffusion_state_dict(self): + from ..models.attention_processor import ( + CustomDiffusionAttnProcessor, + CustomDiffusionAttnProcessor2_0, + CustomDiffusionXFormersAttnProcessor, + ) + + model_to_save = AttnProcsLayers( + { + y: x + for (y, x) in self.attn_processors.items() + if isinstance( + x, + ( + CustomDiffusionAttnProcessor, + CustomDiffusionAttnProcessor2_0, + CustomDiffusionXFormersAttnProcessor, + ), + ) + } + ) + state_dict = model_to_save.state_dict() + for name, attn in self.attn_processors.items(): + if len(attn.state_dict()) == 0: + state_dict[name] = {} + + return state_dict + + def _convert_ip_adapter_image_proj_to_diffusers(self, state_dict, low_cpu_mem_usage=False): + if low_cpu_mem_usage: + if is_accelerate_available(): + from accelerate import init_empty_weights + + else: + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." 
+ ) + + updated_state_dict = {} + image_projection = None + init_context = init_empty_weights if low_cpu_mem_usage else nullcontext + + if "proj.weight" in state_dict: + # IP-Adapter + num_image_text_embeds = 4 + clip_embeddings_dim = state_dict["proj.weight"].shape[-1] + cross_attention_dim = state_dict["proj.weight"].shape[0] // 4 + + with init_context(): + image_projection = ImageProjection( + cross_attention_dim=cross_attention_dim, + image_embed_dim=clip_embeddings_dim, + num_image_text_embeds=num_image_text_embeds, + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("proj", "image_embeds") + updated_state_dict[diffusers_name] = value + + elif "proj.3.weight" in state_dict: + # IP-Adapter Full + clip_embeddings_dim = state_dict["proj.0.weight"].shape[0] + cross_attention_dim = state_dict["proj.3.weight"].shape[0] + + with init_context(): + image_projection = IPAdapterFullImageProjection( + cross_attention_dim=cross_attention_dim, image_embed_dim=clip_embeddings_dim + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("proj.0", "ff.net.0.proj") + diffusers_name = diffusers_name.replace("proj.2", "ff.net.2") + diffusers_name = diffusers_name.replace("proj.3", "norm") + updated_state_dict[diffusers_name] = value + + elif "perceiver_resampler.proj_in.weight" in state_dict: + # IP-Adapter Face ID Plus + id_embeddings_dim = state_dict["proj.0.weight"].shape[1] + embed_dims = state_dict["perceiver_resampler.proj_in.weight"].shape[0] + hidden_dims = state_dict["perceiver_resampler.proj_in.weight"].shape[1] + output_dims = state_dict["perceiver_resampler.proj_out.weight"].shape[0] + heads = state_dict["perceiver_resampler.layers.0.0.to_q.weight"].shape[0] // 64 + + with init_context(): + image_projection = IPAdapterFaceIDPlusImageProjection( + embed_dims=embed_dims, + output_dims=output_dims, + hidden_dims=hidden_dims, + heads=heads, + id_embeddings_dim=id_embeddings_dim, + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("perceiver_resampler.", "") + diffusers_name = diffusers_name.replace("0.to", "attn.to") + diffusers_name = diffusers_name.replace("0.1.0.", "0.ff.0.") + diffusers_name = diffusers_name.replace("0.1.1.weight", "0.ff.1.net.0.proj.weight") + diffusers_name = diffusers_name.replace("0.1.3.weight", "0.ff.1.net.2.weight") + diffusers_name = diffusers_name.replace("1.1.0.", "1.ff.0.") + diffusers_name = diffusers_name.replace("1.1.1.weight", "1.ff.1.net.0.proj.weight") + diffusers_name = diffusers_name.replace("1.1.3.weight", "1.ff.1.net.2.weight") + diffusers_name = diffusers_name.replace("2.1.0.", "2.ff.0.") + diffusers_name = diffusers_name.replace("2.1.1.weight", "2.ff.1.net.0.proj.weight") + diffusers_name = diffusers_name.replace("2.1.3.weight", "2.ff.1.net.2.weight") + diffusers_name = diffusers_name.replace("3.1.0.", "3.ff.0.") + diffusers_name = diffusers_name.replace("3.1.1.weight", "3.ff.1.net.0.proj.weight") + diffusers_name = diffusers_name.replace("3.1.3.weight", "3.ff.1.net.2.weight") + diffusers_name = diffusers_name.replace("layers.0.0", "layers.0.ln0") + diffusers_name = diffusers_name.replace("layers.0.1", "layers.0.ln1") + diffusers_name = diffusers_name.replace("layers.1.0", "layers.1.ln0") + diffusers_name = diffusers_name.replace("layers.1.1", "layers.1.ln1") + diffusers_name = diffusers_name.replace("layers.2.0", "layers.2.ln0") + diffusers_name = diffusers_name.replace("layers.2.1", "layers.2.ln1") + diffusers_name = diffusers_name.replace("layers.3.0", "layers.3.ln0") + 
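+                # Illustrative note on the net effect of this renaming chain: a Face ID Plus checkpoint key such as
+                # "perceiver_resampler.layers.0.0.to_q.weight" ends up as "layers.0.attn.to_q.weight", matching the
+                # `IPAdapterFaceIDPlusImageProjection` module instantiated above.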
diffusers_name = diffusers_name.replace("layers.3.1", "layers.3.ln1") + + if "norm1" in diffusers_name: + updated_state_dict[diffusers_name.replace("0.norm1", "0")] = value + elif "norm2" in diffusers_name: + updated_state_dict[diffusers_name.replace("0.norm2", "1")] = value + elif "to_kv" in diffusers_name: + v_chunk = value.chunk(2, dim=0) + updated_state_dict[diffusers_name.replace("to_kv", "to_k")] = v_chunk[0] + updated_state_dict[diffusers_name.replace("to_kv", "to_v")] = v_chunk[1] + elif "to_out" in diffusers_name: + updated_state_dict[diffusers_name.replace("to_out", "to_out.0")] = value + elif "proj.0.weight" == diffusers_name: + updated_state_dict["proj.net.0.proj.weight"] = value + elif "proj.0.bias" == diffusers_name: + updated_state_dict["proj.net.0.proj.bias"] = value + elif "proj.2.weight" == diffusers_name: + updated_state_dict["proj.net.2.weight"] = value + elif "proj.2.bias" == diffusers_name: + updated_state_dict["proj.net.2.bias"] = value + else: + updated_state_dict[diffusers_name] = value + + elif "norm.weight" in state_dict: + # IP-Adapter Face ID + id_embeddings_dim_in = state_dict["proj.0.weight"].shape[1] + id_embeddings_dim_out = state_dict["proj.0.weight"].shape[0] + multiplier = id_embeddings_dim_out // id_embeddings_dim_in + norm_layer = "norm.weight" + cross_attention_dim = state_dict[norm_layer].shape[0] + num_tokens = state_dict["proj.2.weight"].shape[0] // cross_attention_dim + + with init_context(): + image_projection = IPAdapterFaceIDImageProjection( + cross_attention_dim=cross_attention_dim, + image_embed_dim=id_embeddings_dim_in, + mult=multiplier, + num_tokens=num_tokens, + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("proj.0", "ff.net.0.proj") + diffusers_name = diffusers_name.replace("proj.2", "ff.net.2") + updated_state_dict[diffusers_name] = value + + else: + # IP-Adapter Plus + num_image_text_embeds = state_dict["latents"].shape[1] + embed_dims = state_dict["proj_in.weight"].shape[1] + output_dims = state_dict["proj_out.weight"].shape[0] + hidden_dims = state_dict["latents"].shape[2] + attn_key_present = any("attn" in k for k in state_dict) + heads = ( + state_dict["layers.0.attn.to_q.weight"].shape[0] // 64 + if attn_key_present + else state_dict["layers.0.0.to_q.weight"].shape[0] // 64 + ) + + with init_context(): + image_projection = IPAdapterPlusImageProjection( + embed_dims=embed_dims, + output_dims=output_dims, + hidden_dims=hidden_dims, + heads=heads, + num_queries=num_image_text_embeds, + ) + + for key, value in state_dict.items(): + diffusers_name = key.replace("0.to", "2.to") + + diffusers_name = diffusers_name.replace("0.0.norm1", "0.ln0") + diffusers_name = diffusers_name.replace("0.0.norm2", "0.ln1") + diffusers_name = diffusers_name.replace("1.0.norm1", "1.ln0") + diffusers_name = diffusers_name.replace("1.0.norm2", "1.ln1") + diffusers_name = diffusers_name.replace("2.0.norm1", "2.ln0") + diffusers_name = diffusers_name.replace("2.0.norm2", "2.ln1") + diffusers_name = diffusers_name.replace("3.0.norm1", "3.ln0") + diffusers_name = diffusers_name.replace("3.0.norm2", "3.ln1") + + if "to_kv" in diffusers_name: + parts = diffusers_name.split(".") + parts[2] = "attn" + diffusers_name = ".".join(parts) + v_chunk = value.chunk(2, dim=0) + updated_state_dict[diffusers_name.replace("to_kv", "to_k")] = v_chunk[0] + updated_state_dict[diffusers_name.replace("to_kv", "to_v")] = v_chunk[1] + elif "to_q" in diffusers_name: + parts = diffusers_name.split(".") + parts[2] = "attn" + diffusers_name = ".".join(parts) + 
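+                    # e.g. a resampler key "layers.0.0.to_q.weight" has by now been renamed to
+                    # "layers.0.attn.to_q.weight", the layout expected by the `IPAdapterPlusImageProjection` above.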
updated_state_dict[diffusers_name] = value + elif "to_out" in diffusers_name: + parts = diffusers_name.split(".") + parts[2] = "attn" + diffusers_name = ".".join(parts) + updated_state_dict[diffusers_name.replace("to_out", "to_out.0")] = value + else: + diffusers_name = diffusers_name.replace("0.1.0", "0.ff.0") + diffusers_name = diffusers_name.replace("0.1.1", "0.ff.1.net.0.proj") + diffusers_name = diffusers_name.replace("0.1.3", "0.ff.1.net.2") + + diffusers_name = diffusers_name.replace("1.1.0", "1.ff.0") + diffusers_name = diffusers_name.replace("1.1.1", "1.ff.1.net.0.proj") + diffusers_name = diffusers_name.replace("1.1.3", "1.ff.1.net.2") + + diffusers_name = diffusers_name.replace("2.1.0", "2.ff.0") + diffusers_name = diffusers_name.replace("2.1.1", "2.ff.1.net.0.proj") + diffusers_name = diffusers_name.replace("2.1.3", "2.ff.1.net.2") + + diffusers_name = diffusers_name.replace("3.1.0", "3.ff.0") + diffusers_name = diffusers_name.replace("3.1.1", "3.ff.1.net.0.proj") + diffusers_name = diffusers_name.replace("3.1.3", "3.ff.1.net.2") + updated_state_dict[diffusers_name] = value + + if not low_cpu_mem_usage: + image_projection.load_state_dict(updated_state_dict, strict=True) + else: + load_model_dict_into_meta(image_projection, updated_state_dict, device=self.device, dtype=self.dtype) + + return image_projection + + def _convert_ip_adapter_attn_to_diffusers(self, state_dicts, low_cpu_mem_usage=False): + from ..models.attention_processor import ( + IPAdapterAttnProcessor, + IPAdapterAttnProcessor2_0, + ) + + if low_cpu_mem_usage: + if is_accelerate_available(): + from accelerate import init_empty_weights + + else: + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." 
+ ) + + # set ip-adapter cross-attention processors & load state_dict + attn_procs = {} + key_id = 1 + init_context = init_empty_weights if low_cpu_mem_usage else nullcontext + for name in self.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else self.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = self.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(self.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = self.config.block_out_channels[block_id] + + if cross_attention_dim is None or "motion_modules" in name: + attn_processor_class = self.attn_processors[name].__class__ + attn_procs[name] = attn_processor_class() + + else: + attn_processor_class = ( + IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor + ) + num_image_text_embeds = [] + for state_dict in state_dicts: + if "proj.weight" in state_dict["image_proj"]: + # IP-Adapter + num_image_text_embeds += [4] + elif "proj.3.weight" in state_dict["image_proj"]: + # IP-Adapter Full Face + num_image_text_embeds += [257] # 256 CLIP tokens + 1 CLS token + elif "perceiver_resampler.proj_in.weight" in state_dict["image_proj"]: + # IP-Adapter Face ID Plus + num_image_text_embeds += [4] + elif "norm.weight" in state_dict["image_proj"]: + # IP-Adapter Face ID + num_image_text_embeds += [4] + else: + # IP-Adapter Plus + num_image_text_embeds += [state_dict["image_proj"]["latents"].shape[1]] + + with init_context(): + attn_procs[name] = attn_processor_class( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + scale=1.0, + num_tokens=num_image_text_embeds, + ) + + value_dict = {} + for i, state_dict in enumerate(state_dicts): + value_dict.update({f"to_k_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_k_ip.weight"]}) + value_dict.update({f"to_v_ip.{i}.weight": state_dict["ip_adapter"][f"{key_id}.to_v_ip.weight"]}) + + if not low_cpu_mem_usage: + attn_procs[name].load_state_dict(value_dict) + else: + device = next(iter(value_dict.values())).device + dtype = next(iter(value_dict.values())).dtype + load_model_dict_into_meta(attn_procs[name], value_dict, device=device, dtype=dtype) + + key_id += 2 + + return attn_procs + + def _load_ip_adapter_weights(self, state_dicts, low_cpu_mem_usage=False): + if not isinstance(state_dicts, list): + state_dicts = [state_dicts] + + # Kolors Unet already has a `encoder_hid_proj` + if ( + self.encoder_hid_proj is not None + and self.config.encoder_hid_dim_type == "text_proj" + and not hasattr(self, "text_encoder_hid_proj") + ): + self.text_encoder_hid_proj = self.encoder_hid_proj + + # Set encoder_hid_proj after loading ip_adapter weights, + # because `IPAdapterPlusImageProjection` also has `attn_processors`. 
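+        # Detaching it here means `set_attn_processor` below only touches the UNet's own attention layers;
+        # the projection is re-attached further down, wrapped in `MultiIPAdapterImageProjection`.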
+ self.encoder_hid_proj = None + + attn_procs = self._convert_ip_adapter_attn_to_diffusers(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage) + self.set_attn_processor(attn_procs) + + # convert IP-Adapter Image Projection layers to diffusers + image_projection_layers = [] + for state_dict in state_dicts: + image_projection_layer = self._convert_ip_adapter_image_proj_to_diffusers( + state_dict["image_proj"], low_cpu_mem_usage=low_cpu_mem_usage + ) + image_projection_layers.append(image_projection_layer) + + self.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers) + self.config.encoder_hid_dim_type = "ip_image_proj" + + self.to(dtype=self.dtype, device=self.device) + + def _load_ip_adapter_loras(self, state_dicts): + lora_dicts = {} + for key_id, name in enumerate(self.attn_processors.keys()): + for i, state_dict in enumerate(state_dicts): + if f"{key_id}.to_k_lora.down.weight" in state_dict["ip_adapter"]: + if i not in lora_dicts: + lora_dicts[i] = {} + lora_dicts[i].update( + { + f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][ + f"{key_id}.to_k_lora.down.weight" + ] + } + ) + lora_dicts[i].update( + { + f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][ + f"{key_id}.to_q_lora.down.weight" + ] + } + ) + lora_dicts[i].update( + { + f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][ + f"{key_id}.to_v_lora.down.weight" + ] + } + ) + lora_dicts[i].update( + { + f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][ + f"{key_id}.to_out_lora.down.weight" + ] + } + ) + lora_dicts[i].update( + {f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]} + ) + lora_dicts[i].update( + {f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]} + ) + lora_dicts[i].update( + {f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]} + ) + lora_dicts[i].update( + { + f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][ + f"{key_id}.to_out_lora.up.weight" + ] + } + ) + return lora_dicts diff --git a/diffusers3/loaders/unet_loader_utils.py b/diffusers3/loaders/unet_loader_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8f202ed4d44bdf0965dcf6e02efbfe26c42c8705 --- /dev/null +++ b/diffusers3/loaders/unet_loader_utils.py @@ -0,0 +1,163 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +from typing import TYPE_CHECKING, Dict, List, Union + +from ..utils import logging + + +if TYPE_CHECKING: + # import here to avoid circular imports + from ..models import UNet2DConditionModel + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def _translate_into_actual_layer_name(name): + """Translate user-friendly name (e.g. 'mid') into actual layer name (e.g. 
'mid_block.attentions.0')""" + if name == "mid": + return "mid_block.attentions.0" + + updown, block, attn = name.split(".") + + updown = updown.replace("down", "down_blocks").replace("up", "up_blocks") + block = block.replace("block_", "") + attn = "attentions." + attn + + return ".".join((updown, block, attn)) + + +def _maybe_expand_lora_scales( + unet: "UNet2DConditionModel", weight_scales: List[Union[float, Dict]], default_scale=1.0 +): + blocks_with_transformer = { + "down": [i for i, block in enumerate(unet.down_blocks) if hasattr(block, "attentions")], + "up": [i for i, block in enumerate(unet.up_blocks) if hasattr(block, "attentions")], + } + transformer_per_block = {"down": unet.config.layers_per_block, "up": unet.config.layers_per_block + 1} + + expanded_weight_scales = [ + _maybe_expand_lora_scales_for_one_adapter( + weight_for_adapter, + blocks_with_transformer, + transformer_per_block, + unet.state_dict(), + default_scale=default_scale, + ) + for weight_for_adapter in weight_scales + ] + + return expanded_weight_scales + + +def _maybe_expand_lora_scales_for_one_adapter( + scales: Union[float, Dict], + blocks_with_transformer: Dict[str, int], + transformer_per_block: Dict[str, int], + state_dict: None, + default_scale: float = 1.0, +): + """ + Expands the inputs into a more granular dictionary. See the example below for more details. + + Parameters: + scales (`Union[float, Dict]`): + Scales dict to expand. + blocks_with_transformer (`Dict[str, int]`): + Dict with keys 'up' and 'down', showing which blocks have transformer layers + transformer_per_block (`Dict[str, int]`): + Dict with keys 'up' and 'down', showing how many transformer layers each block has + + E.g. turns + ```python + scales = {"down": 2, "mid": 3, "up": {"block_0": 4, "block_1": [5, 6, 7]}} + blocks_with_transformer = {"down": [1, 2], "up": [0, 1]} + transformer_per_block = {"down": 2, "up": 3} + ``` + into + ```python + { + "down.block_1.0": 2, + "down.block_1.1": 2, + "down.block_2.0": 2, + "down.block_2.1": 2, + "mid": 3, + "up.block_0.0": 4, + "up.block_0.1": 4, + "up.block_0.2": 4, + "up.block_1.0": 5, + "up.block_1.1": 6, + "up.block_1.2": 7, + } + ``` + """ + if sorted(blocks_with_transformer.keys()) != ["down", "up"]: + raise ValueError("blocks_with_transformer needs to be a dict with keys `'down' and `'up'`") + + if sorted(transformer_per_block.keys()) != ["down", "up"]: + raise ValueError("transformer_per_block needs to be a dict with keys `'down' and `'up'`") + + if not isinstance(scales, dict): + # don't expand if scales is a single number + return scales + + scales = copy.deepcopy(scales) + + if "mid" not in scales: + scales["mid"] = default_scale + elif isinstance(scales["mid"], list): + if len(scales["mid"]) == 1: + scales["mid"] = scales["mid"][0] + else: + raise ValueError(f"Expected 1 scales for mid, got {len(scales['mid'])}.") + + for updown in ["up", "down"]: + if updown not in scales: + scales[updown] = default_scale + + # eg {"down": 1} to {"down": {"block_1": 1, "block_2": 1}}} + if not isinstance(scales[updown], dict): + scales[updown] = {f"block_{i}": copy.deepcopy(scales[updown]) for i in blocks_with_transformer[updown]} + + # eg {"down": {"block_1": 1}} to {"down": {"block_1": [1, 1]}} + for i in blocks_with_transformer[updown]: + block = f"block_{i}" + # set not assigned blocks to default scale + if block not in scales[updown]: + scales[updown][block] = default_scale + if not isinstance(scales[updown][block], list): + scales[updown][block] = [scales[updown][block] for _ in 
range(transformer_per_block[updown])] + elif len(scales[updown][block]) == 1: + # a list specifying scale to each masked IP input + scales[updown][block] = scales[updown][block] * transformer_per_block[updown] + elif len(scales[updown][block]) != transformer_per_block[updown]: + raise ValueError( + f"Expected {transformer_per_block[updown]} scales for {updown}.{block}, got {len(scales[updown][block])}." + ) + + # eg {"down": "block_1": [1, 1]}} to {"down.block_1.0": 1, "down.block_1.1": 1} + for i in blocks_with_transformer[updown]: + block = f"block_{i}" + for tf_idx, value in enumerate(scales[updown][block]): + scales[f"{updown}.{block}.{tf_idx}"] = value + + del scales[updown] + + for layer in scales.keys(): + if not any(_translate_into_actual_layer_name(layer) in module for module in state_dict.keys()): + raise ValueError( + f"Can't set lora scale for layer {layer}. It either doesn't exist in this unet or it has no attentions." + ) + + return {_translate_into_actual_layer_name(name): weight for name, weight in scales.items()} diff --git a/diffusers3/loaders/utils.py b/diffusers3/loaders/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..142d72bf6b77edf4af72ee0d30d3d190cd4b3eef --- /dev/null +++ b/diffusers3/loaders/utils.py @@ -0,0 +1,59 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict + +import torch + + +class AttnProcsLayers(torch.nn.Module): + def __init__(self, state_dict: Dict[str, torch.Tensor]): + super().__init__() + self.layers = torch.nn.ModuleList(state_dict.values()) + self.mapping = dict(enumerate(state_dict.keys())) + self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())} + + # .processor for unet, .self_attn for text encoder + self.split_keys = [".processor", ".self_attn"] + + # we add a hook to state_dict() and load_state_dict() so that the + # naming fits with `unet.attn_processors` + def map_to(module, state_dict, *args, **kwargs): + new_state_dict = {} + for key, value in state_dict.items(): + num = int(key.split(".")[1]) # 0 is always "layers" + new_key = key.replace(f"layers.{num}", module.mapping[num]) + new_state_dict[new_key] = value + + return new_state_dict + + def remap_key(key, state_dict): + for k in self.split_keys: + if k in key: + return key.split(k)[0] + k + + raise ValueError( + f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}." 
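+                # (keys are expected to look like "<module path>.processor.<param>" for UNet attention
+                # processors or "<module path>.self_attn.<param>" for text-encoder attention layers)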
+ ) + + def map_from(module, state_dict, *args, **kwargs): + all_keys = list(state_dict.keys()) + for key in all_keys: + replace_key = remap_key(key, state_dict) + new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}") + state_dict[new_key] = state_dict[key] + del state_dict[key] + + self._register_state_dict_hook(map_to) + self._register_load_state_dict_pre_hook(map_from, with_module=True) diff --git a/diffusers3/optimization.py b/diffusers3/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..f20bd94edffaa9b569d3fb395b2a4e826b96b146 --- /dev/null +++ b/diffusers3/optimization.py @@ -0,0 +1,361 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch optimization for diffusion models.""" + +import math +from enum import Enum +from typing import Optional, Union + +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR + +from .utils import logging + + +logger = logging.get_logger(__name__) + + +class SchedulerType(Enum): + LINEAR = "linear" + COSINE = "cosine" + COSINE_WITH_RESTARTS = "cosine_with_restarts" + POLYNOMIAL = "polynomial" + CONSTANT = "constant" + CONSTANT_WITH_WARMUP = "constant_with_warmup" + PIECEWISE_CONSTANT = "piecewise_constant" + + +def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR: + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) + + +def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR: + """ + Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate + increases linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) + return 1.0 + + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + + +def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR: + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. 
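+
+    Note that, as implemented, the number after each colon in `step_rules` is an absolute step threshold rather than
+    a duration: for example, `step_rules="1:10,0.1:20,0.01:30,0.005"` multiplies the base learning rate by 1 up to
+    step 10, by 0.1 up to step 20, by 0.01 up to step 30, and by 0.005 for every step after that (the final entry is
+    the multiplier used beyond the last threshold).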
+ + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + step_rules (`string`): + The rules for the learning rate. ex: rule_steps="1:10,0.1:20,0.01:30,0.005" it means that the learning rate + if multiple 1 for the first 10 steps, multiple 0.1 for the next 20 steps, multiple 0.01 for the next 30 + steps and multiple 0.005 for the other steps. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + rules_dict = {} + rule_list = step_rules.split(",") + for rule_str in rule_list[:-1]: + value_str, steps_str = rule_str.split(":") + steps = int(steps_str) + value = float(value_str) + rules_dict[steps] = value + last_lr_multiple = float(rule_list[-1]) + + def create_rules_function(rules_dict, last_lr_multiple): + def rule_func(steps: int) -> float: + sorted_steps = sorted(rules_dict.keys()) + for i, sorted_step in enumerate(sorted_steps): + if steps < sorted_step: + return rules_dict[sorted_steps[i]] + return last_lr_multiple + + return rule_func + + rules_func = create_rules_function(rules_dict, last_lr_multiple) + + return LambdaLR(optimizer, rules_func, last_epoch=last_epoch) + + +def get_linear_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1 +) -> LambdaLR: + """ + Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after + a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + return max( + 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) + ) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_cosine_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 +) -> LambdaLR: + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_periods (`float`, *optional*, defaults to 0.5): + The number of periods of the cosine function in a schedule (the default is to just decrease from the max + value to 0 following a half-cosine). + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
+ """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_cosine_with_hard_restarts_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 +) -> LambdaLR: + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases + linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`int`, *optional*, defaults to 1): + The number of hard restarts to use. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + if progress >= 1.0: + return 0.0 + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_polynomial_decay_schedule_with_warmup( + optimizer: Optimizer, + num_warmup_steps: int, + num_training_steps: int, + lr_end: float = 1e-7, + power: float = 1.0, + last_epoch: int = -1, +) -> LambdaLR: + """ + Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the + optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + lr_end (`float`, *optional*, defaults to 1e-7): + The end LR. + power (`float`, *optional*, defaults to 1.0): + Power factor. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT + implementation at + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
+ + """ + + lr_init = optimizer.defaults["lr"] + if not (lr_init > lr_end): + raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})") + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + elif current_step > num_training_steps: + return lr_end / lr_init # as LambdaLR multiplies by lr_init + else: + lr_range = lr_init - lr_end + decay_steps = num_training_steps - num_warmup_steps + pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps + decay = lr_range * pct_remaining**power + lr_end + return decay / lr_init # as LambdaLR multiplies by lr_init + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +TYPE_TO_SCHEDULER_FUNCTION = { + SchedulerType.LINEAR: get_linear_schedule_with_warmup, + SchedulerType.COSINE: get_cosine_schedule_with_warmup, + SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, + SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, + SchedulerType.CONSTANT: get_constant_schedule, + SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, + SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, +} + + +def get_scheduler( + name: Union[str, SchedulerType], + optimizer: Optimizer, + step_rules: Optional[str] = None, + num_warmup_steps: Optional[int] = None, + num_training_steps: Optional[int] = None, + num_cycles: int = 1, + power: float = 1.0, + last_epoch: int = -1, +) -> LambdaLR: + """ + Unified API to get any scheduler from its name. + + Args: + name (`str` or `SchedulerType`): + The name of the scheduler to use. + optimizer (`torch.optim.Optimizer`): + The optimizer that will be used during training. + step_rules (`str`, *optional*): + A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler. + num_warmup_steps (`int`, *optional*): + The number of warmup steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_training_steps (`int``, *optional*): + The number of training steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_cycles (`int`, *optional*): + The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler. + power (`float`, *optional*, defaults to 1.0): + Power factor. See `POLYNOMIAL` scheduler + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. 
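+
+    Example (a minimal sketch; the optimizer, `model`, and step counts are illustrative):
+
+    ```py
+    from torch.optim import AdamW
+
+    optimizer = AdamW(model.parameters(), lr=1e-4)
+    lr_scheduler = get_scheduler(
+        "cosine",
+        optimizer,
+        num_warmup_steps=500,
+        num_training_steps=10_000,
+    )
+    for _ in range(10_000):
+        ...  # forward/backward pass
+        optimizer.step()
+        lr_scheduler.step()
+        optimizer.zero_grad()
+    ```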
+ """ + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + if name == SchedulerType.CONSTANT: + return schedule_func(optimizer, last_epoch=last_epoch) + + if name == SchedulerType.PIECEWISE_CONSTANT: + return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + if name == SchedulerType.COSINE_WITH_RESTARTS: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + last_epoch=last_epoch, + ) + + if name == SchedulerType.POLYNOMIAL: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + power=power, + last_epoch=last_epoch, + ) + + return schedule_func( + optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch + ) diff --git a/diffusers3/pipelines/.ipynb_checkpoints/Untitled-checkpoint.ipynb b/diffusers3/pipelines/.ipynb_checkpoints/Untitled-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..363fcab7ed6e9634e198cf5555ceb88932c9a245 --- /dev/null +++ b/diffusers3/pipelines/.ipynb_checkpoints/Untitled-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/.ipynb_checkpoints/__init__-checkpoint.py b/diffusers3/pipelines/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..e4d37a905b86724cd1a0a33eb5e6e63d11fa50ed --- /dev/null +++ b/diffusers3/pipelines/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1,721 @@ +from typing import TYPE_CHECKING + +from ..utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_k_diffusion_available, + is_librosa_available, + is_note_seq_available, + is_onnx_available, + is_sentencepiece_available, + is_torch_available, + is_torch_npu_available, + is_transformers_available, +) + + +# These modules contain pipelines from multiple libraries/frameworks +_dummy_objects = {} +_import_structure = { + "controlnet": [], + "controlnet_hunyuandit": [], + "controlnet_sd3": [], + "controlnet_xs": [], + "deprecated": [], + "latent_diffusion": [], + "ledits_pp": [], + "marigold": [], + "pag": [], + "stable_diffusion": [], + "stable_diffusion_xl": [], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_pt_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["auto_pipeline"] = [ + "AutoPipelineForImage2Image", + "AutoPipelineForInpainting", + "AutoPipelineForText2Image", + ] + _import_structure["consistency_models"] = ["ConsistencyModelPipeline"] + _import_structure["dance_diffusion"] = ["DanceDiffusionPipeline"] + _import_structure["ddim"] = ["DDIMPipeline"] + _import_structure["ddpm"] = ["DDPMPipeline"] + 
_import_structure["dit"] = ["DiTPipeline"] + _import_structure["latent_diffusion"].extend(["LDMSuperResolutionPipeline"]) + _import_structure["pipeline_utils"] = [ + "AudioPipelineOutput", + "DiffusionPipeline", + "StableDiffusionMixin", + "ImagePipelineOutput", + ] + _import_structure["deprecated"].extend( + [ + "PNDMPipeline", + "LDMPipeline", + "RePaintPipeline", + "ScoreSdeVePipeline", + "KarrasVePipeline", + ] + ) +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_librosa_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) +else: + _import_structure["deprecated"].extend(["AudioDiffusionPipeline", "Mel"]) + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) +else: + _import_structure["deprecated"].extend( + [ + "MidiProcessor", + "SpectrogramDiffusionPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["deprecated"].extend( + [ + "VQDiffusionPipeline", + "AltDiffusionPipeline", + "AltDiffusionImg2ImgPipeline", + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + ) + _import_structure["amused"] = ["AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline"] + _import_structure["animatediff"] = [ + "AnimateDiffPipeline", + "AnimateDiffControlNetPipeline", + "AnimateDiffSDXLPipeline", + "AnimateDiffSparseControlNetPipeline", + "AnimateDiffVideoToVideoPipeline", + "AnimateDiffVideoToVideoControlNetPipeline", + ] + _import_structure["flux"] = [ + "FluxControlNetPipeline", + "FluxImg2ImgPipeline", + "FluxInpaintPipeline", + "FluxPipeline", + ] + _import_structure["audioldm"] = ["AudioLDMPipeline"] + _import_structure["audioldm2"] = [ + "AudioLDM2Pipeline", + "AudioLDM2ProjectionModel", + "AudioLDM2UNet2DConditionModel", + ] + _import_structure["blip_diffusion"] = ["BlipDiffusionPipeline"] + _import_structure["cogvideo"] = ["CogVideoXPipeline", "CogVideoXVideoToVideoPipeline"] + _import_structure["controlnet"].extend( + [ + "BlipDiffusionControlNetPipeline", + "StableDiffusionControlNetImg2ImgPipeline", + "StableDiffusionControlNetInpaintPipeline", + "StableDiffusionControlNetPipeline", + "StableDiffusionXLControlNetImg2ImgPipeline", + "StableDiffusionXLControlNetInpaintPipeline", + "StableDiffusionXLControlNetPipeline", + ] + ) + _import_structure["pag"].extend( + [ + "AnimateDiffPAGPipeline", + "KolorsPAGPipeline", + "HunyuanDiTPAGPipeline", + "StableDiffusion3PAGPipeline", + "StableDiffusionPAGPipeline", + "StableDiffusionControlNetPAGPipeline", + "StableDiffusionXLPAGPipeline", + 
"StableDiffusionXLPAGInpaintPipeline", + "StableDiffusionXLControlNetPAGImg2ImgPipeline", + "StableDiffusionXLControlNetPAGPipeline", + "StableDiffusionXLPAGImg2ImgPipeline", + "PixArtSigmaPAGPipeline", + ] + ) + _import_structure["controlnet_xs"].extend( + [ + "StableDiffusionControlNetXSPipeline", + "StableDiffusionXLControlNetXSPipeline", + ] + ) + _import_structure["controlnet_hunyuandit"].extend( + [ + "HunyuanDiTControlNetPipeline", + ] + ) + _import_structure["controlnet_sd3"].extend( + [ + "StableDiffusion3ControlNetPipeline", + "StableDiffusion3ControlNetInpaintingPipeline", + ] + ) + _import_structure["deepfloyd_if"] = [ + "IFImg2ImgPipeline", + "IFImg2ImgSuperResolutionPipeline", + "IFInpaintingPipeline", + "IFInpaintingSuperResolutionPipeline", + "IFPipeline", + "IFSuperResolutionPipeline", + ] + _import_structure["hunyuandit"] = ["HunyuanDiTPipeline"] + _import_structure["kandinsky"] = [ + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyImg2ImgPipeline", + "KandinskyInpaintCombinedPipeline", + "KandinskyInpaintPipeline", + "KandinskyPipeline", + "KandinskyPriorPipeline", + ] + _import_structure["kandinsky2_2"] = [ + "KandinskyV22CombinedPipeline", + "KandinskyV22ControlnetImg2ImgPipeline", + "KandinskyV22ControlnetPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22Img2ImgPipeline", + "KandinskyV22InpaintCombinedPipeline", + "KandinskyV22InpaintPipeline", + "KandinskyV22Pipeline", + "KandinskyV22PriorEmb2EmbPipeline", + "KandinskyV22PriorPipeline", + ] + _import_structure["kandinsky3"] = [ + "Kandinsky3Img2ImgPipeline", + "Kandinsky3Pipeline", + ] + _import_structure["latent_consistency_models"] = [ + "LatentConsistencyModelImg2ImgPipeline", + "LatentConsistencyModelPipeline", + ] + _import_structure["latent_diffusion"].extend(["LDMTextToImagePipeline"]) + _import_structure["ledits_pp"].extend( + [ + "LEditsPPPipelineStableDiffusion", + "LEditsPPPipelineStableDiffusionXL", + ] + ) + _import_structure["latte"] = ["LattePipeline"] + _import_structure["lumina"] = ["LuminaText2ImgPipeline"] + _import_structure["marigold"].extend( + [ + "MarigoldDepthPipeline", + "MarigoldNormalsPipeline", + ] + ) + _import_structure["musicldm"] = ["MusicLDMPipeline"] + _import_structure["paint_by_example"] = ["PaintByExamplePipeline"] + _import_structure["pia"] = ["PIAPipeline"] + _import_structure["pixart_alpha"] = ["PixArtAlphaPipeline", "PixArtSigmaPipeline"] + _import_structure["semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] + _import_structure["shap_e"] = ["ShapEImg2ImgPipeline", "ShapEPipeline"] + _import_structure["stable_audio"] = [ + "StableAudioProjectionModel", + "StableAudioPipeline", + ] + _import_structure["stable_cascade"] = [ + "StableCascadeCombinedPipeline", + "StableCascadeDecoderPipeline", + "StableCascadePriorPipeline", + ] + _import_structure["stable_diffusion"].extend( + [ + "CLIPImageProjection", + "StableDiffusionDepth2ImgPipeline", + "StableDiffusionImageVariationPipeline", + "StableDiffusionImg2ImgPipeline", + "StableDiffusionInpaintPipeline", + "StableDiffusionInstructPix2PixPipeline", + "StableDiffusionLatentUpscalePipeline", + "StableDiffusionPipeline", + "StableDiffusionUpscalePipeline", + "StableUnCLIPImg2ImgPipeline", + "StableUnCLIPPipeline", + "StableDiffusionLDM3DPipeline", + ] + ) + _import_structure["aura_flow"] = ["AuraFlowPipeline"] + _import_structure["stable_diffusion_3"] = [ + "StableDiffusion3Pipeline", + "StableDiffusion3Img2ImgPipeline", + "StableDiffusion3InpaintPipeline", + ] + 
_import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] + _import_structure["stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] + _import_structure["stable_diffusion_gligen"] = [ + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + ] + _import_structure["stable_video_diffusion"] = ["StableVideoDiffusionPipeline"] + _import_structure["stable_diffusion_xl"].extend( + [ + "StableDiffusionXLImg2ImgPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLInstructPix2PixPipeline", + "StableDiffusionXLPipeline", + ] + ) + _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + _import_structure["t2i_adapter"] = [ + "StableDiffusionAdapterPipeline", + "StableDiffusionXLAdapterPipeline", + ] + _import_structure["text_to_video_synthesis"] = [ + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "VideoToVideoSDPipeline", + ] + _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] + _import_structure["unclip"] = ["UnCLIPImageVariationPipeline", "UnCLIPPipeline"] + _import_structure["unidiffuser"] = [ + "ImageTextPipelineOutput", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + ] + _import_structure["wuerstchen"] = [ + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", + ] +try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) +else: + _import_structure["onnx_utils"] = ["OnnxRuntimeModel"] +try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_onnx_objects)) +else: + _import_structure["stable_diffusion"].extend( + [ + "OnnxStableDiffusionImg2ImgPipeline", + "OnnxStableDiffusionInpaintPipeline", + "OnnxStableDiffusionPipeline", + "OnnxStableDiffusionUpscalePipeline", + "StableDiffusionOnnxPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_k_diffusion_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) +else: + _import_structure["stable_diffusion_k_diffusion"] = [ + "StableDiffusionKDiffusionPipeline", + "StableDiffusionXLKDiffusionPipeline", + ] + +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_sentencepiece_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) +else: + _import_structure["kolors"] = [ + "KolorsPipeline", + "KolorsImg2ImgPipeline", + 
] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_objects)) +else: + _import_structure["pipeline_flax_utils"] = ["FlaxDiffusionPipeline"] +try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + _import_structure["controlnet"].extend(["FlaxStableDiffusionControlNetPipeline"]) + _import_structure["stable_diffusion"].extend( + [ + "FlaxStableDiffusionImg2ImgPipeline", + "FlaxStableDiffusionInpaintPipeline", + "FlaxStableDiffusionPipeline", + ] + ) + _import_structure["stable_diffusion_xl"].extend( + [ + "FlaxStableDiffusionXLPipeline", + ] + ) + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_pt_objects import * # noqa F403 + + else: + from .auto_pipeline import ( + AutoPipelineForImage2Image, + AutoPipelineForInpainting, + AutoPipelineForText2Image, + ) + from .consistency_models import ConsistencyModelPipeline + from .dance_diffusion import DanceDiffusionPipeline + from .ddim import DDIMPipeline + from .ddpm import DDPMPipeline + from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline + from .dit import DiTPipeline + from .latent_diffusion import LDMSuperResolutionPipeline + from .pipeline_utils import ( + AudioPipelineOutput, + DiffusionPipeline, + ImagePipelineOutput, + StableDiffusionMixin, + ) + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_librosa_objects import * + else: + from .deprecated import AudioDiffusionPipeline, Mel + + try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_objects import * + else: + from .amused import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline + from .animatediff import ( + AnimateDiffControlNetPipeline, + AnimateDiffPipeline, + AnimateDiffSDXLPipeline, + AnimateDiffSparseControlNetPipeline, + AnimateDiffVideoToVideoControlNetPipeline, + AnimateDiffVideoToVideoPipeline, + ) + from .audioldm import AudioLDMPipeline + from .audioldm2 import ( + AudioLDM2Pipeline, + AudioLDM2ProjectionModel, + AudioLDM2UNet2DConditionModel, + ) + from .aura_flow import AuraFlowPipeline + from .blip_diffusion import BlipDiffusionPipeline + from .cogvideo import CogVideoXPipeline, CogVideoXVideoToVideoPipeline + from .controlnet import ( + BlipDiffusionControlNetPipeline, + StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPipeline, + ) + from .controlnet_hunyuandit import ( + HunyuanDiTControlNetPipeline, + ) + from .controlnet_sd3 import StableDiffusion3ControlNetInpaintingPipeline, StableDiffusion3ControlNetPipeline + from .controlnet_xs import ( + 
StableDiffusionControlNetXSPipeline, + StableDiffusionXLControlNetXSPipeline, + ) + from .deepfloyd_if import ( + IFImg2ImgPipeline, + IFImg2ImgSuperResolutionPipeline, + IFInpaintingPipeline, + IFInpaintingSuperResolutionPipeline, + IFPipeline, + IFSuperResolutionPipeline, + ) + from .deprecated import ( + AltDiffusionImg2ImgPipeline, + AltDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZeroPipeline, + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + VQDiffusionPipeline, + ) + from .flux import FluxControlNetPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxPipeline + from .hunyuandit import HunyuanDiTPipeline + from .i2vgen_xl import I2VGenXLPipeline + from .kandinsky import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, + KandinskyPriorPipeline, + ) + from .kandinsky2_2 import ( + KandinskyV22CombinedPipeline, + KandinskyV22ControlnetImg2ImgPipeline, + KandinskyV22ControlnetPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, + KandinskyV22PriorEmb2EmbPipeline, + KandinskyV22PriorPipeline, + ) + from .kandinsky3 import ( + Kandinsky3Img2ImgPipeline, + Kandinsky3Pipeline, + ) + from .latent_consistency_models import ( + LatentConsistencyModelImg2ImgPipeline, + LatentConsistencyModelPipeline, + ) + from .latent_diffusion import LDMTextToImagePipeline + from .latte import LattePipeline + from .ledits_pp import ( + LEditsPPDiffusionPipelineOutput, + LEditsPPInversionPipelineOutput, + LEditsPPPipelineStableDiffusion, + LEditsPPPipelineStableDiffusionXL, + ) + from .lumina import LuminaText2ImgPipeline + from .marigold import ( + MarigoldDepthPipeline, + MarigoldNormalsPipeline, + ) + from .musicldm import MusicLDMPipeline + from .pag import ( + AnimateDiffPAGPipeline, + HunyuanDiTPAGPipeline, + KolorsPAGPipeline, + PixArtSigmaPAGPipeline, + StableDiffusion3PAGPipeline, + StableDiffusionControlNetPAGPipeline, + StableDiffusionPAGPipeline, + StableDiffusionXLControlNetPAGImg2ImgPipeline, + StableDiffusionXLControlNetPAGPipeline, + StableDiffusionXLPAGImg2ImgPipeline, + StableDiffusionXLPAGInpaintPipeline, + StableDiffusionXLPAGPipeline, + ) + from .paint_by_example import PaintByExamplePipeline + from .pia import PIAPipeline + from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline + from .semantic_stable_diffusion import SemanticStableDiffusionPipeline + from .shap_e import ShapEImg2ImgPipeline, ShapEPipeline + from .stable_audio import StableAudioPipeline, StableAudioProjectionModel + from .stable_cascade import ( + StableCascadeCombinedPipeline, + StableCascadeDecoderPipeline, + StableCascadePriorPipeline, + ) + from .stable_diffusion import ( + CLIPImageProjection, + StableDiffusionDepth2ImgPipeline, + StableDiffusionImageVariationPipeline, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionInstructPix2PixPipeline, + StableDiffusionLatentUpscalePipeline, + StableDiffusionPipeline, + StableDiffusionUpscalePipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + ) + from .stable_diffusion_3 import ( + StableDiffusion3Img2ImgPipeline, + 
StableDiffusion3InpaintPipeline, + StableDiffusion3Pipeline, + ) + from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline + from .stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline + from .stable_diffusion_safe import StableDiffusionPipelineSafe + from .stable_diffusion_sag import StableDiffusionSAGPipeline + from .stable_diffusion_xl import ( + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLInstructPix2PixPipeline, + StableDiffusionXLPipeline, + ) + from .stable_video_diffusion import StableVideoDiffusionPipeline + from .t2i_adapter import ( + StableDiffusionAdapterPipeline, + StableDiffusionXLAdapterPipeline, + ) + from .text_to_video_synthesis import ( + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + VideoToVideoSDPipeline, + ) + from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline + from .unidiffuser import ( + ImageTextPipelineOutput, + UniDiffuserModel, + UniDiffuserPipeline, + UniDiffuserTextDecoder, + ) + from .wuerstchen import ( + WuerstchenCombinedPipeline, + WuerstchenDecoderPipeline, + WuerstchenPriorPipeline, + ) + + try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_onnx_objects import * # noqa F403 + + else: + from .onnx_utils import OnnxRuntimeModel + + try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_onnx_objects import * + else: + from .stable_diffusion import ( + OnnxStableDiffusionImg2ImgPipeline, + OnnxStableDiffusionInpaintPipeline, + OnnxStableDiffusionPipeline, + OnnxStableDiffusionUpscalePipeline, + StableDiffusionOnnxPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_k_diffusion_objects import * + else: + from .stable_diffusion_k_diffusion import ( + StableDiffusionKDiffusionPipeline, + StableDiffusionXLKDiffusionPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import * + else: + from .kolors import ( + KolorsImg2ImgPipeline, + KolorsPipeline, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_objects import * # noqa F403 + else: + from .pipeline_flax_utils import FlaxDiffusionPipeline + + try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_and_transformers_objects import * + else: + from .controlnet import FlaxStableDiffusionControlNetPipeline + from .stable_diffusion import ( + FlaxStableDiffusionImg2ImgPipeline, + FlaxStableDiffusionInpaintPipeline, + FlaxStableDiffusionPipeline, + ) + from 
.stable_diffusion_xl import ( + FlaxStableDiffusionXLPipeline, + ) + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + + else: + from .deprecated import ( + MidiProcessor, + SpectrogramDiffusionPipeline, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/.ipynb_checkpoints/free_noise_utils-checkpoint.py b/diffusers3/pipelines/.ipynb_checkpoints/free_noise_utils-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..dc0071a494e30d135f80b611def70c3d967ecf90 --- /dev/null +++ b/diffusers3/pipelines/.ipynb_checkpoints/free_noise_utils-checkpoint.py @@ -0,0 +1,596 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ..models.attention import BasicTransformerBlock, FreeNoiseTransformerBlock +from ..models.resnet import Downsample2D, ResnetBlock2D, Upsample2D +from ..models.transformers.transformer_2d import Transformer2DModel +from ..models.unets.unet_motion_model import ( + AnimateDiffTransformer3D, + CrossAttnDownBlockMotion, + DownBlockMotion, + UpBlockMotion, +) +from ..pipelines.pipeline_utils import DiffusionPipeline +from ..utils import logging +from ..utils.torch_utils import randn_tensor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class SplitInferenceModule(nn.Module): + r""" + A wrapper module class that splits inputs along a specified dimension before performing a forward pass. + + This module is useful when you need to perform inference on large tensors in a memory-efficient way by breaking + them into smaller chunks, processing each chunk separately, and then reassembling the results. + + Args: + module (`nn.Module`): + The underlying PyTorch module that will be applied to each chunk of split inputs. + split_size (`int`, defaults to `1`): + The size of each chunk after splitting the input tensor. + split_dim (`int`, defaults to `0`): + The dimension along which the input tensors are split. + input_kwargs_to_split (`List[str]`, defaults to `["hidden_states"]`): + A list of keyword arguments (strings) that represent the input tensors to be split. + + Workflow: + 1. The keyword arguments specified in `input_kwargs_to_split` are split into smaller chunks using + `torch.split()` along the dimension `split_dim` and with a chunk size of `split_size`. + 2. The `module` is invoked once for each split with both the split inputs and any unchanged arguments + that were passed. + 3. 
The output tensors from each split are concatenated back together along `split_dim` before returning. + + Example: + ```python + >>> import torch + >>> import torch.nn as nn + + >>> model = nn.Linear(1000, 1000) + >>> split_module = SplitInferenceModule(model, split_size=2, split_dim=0, input_kwargs_to_split=["input"]) + + >>> input_tensor = torch.randn(42, 1000) + >>> # Will split the tensor into 21 slices of shape [2, 1000]. + >>> output = split_module(input=input_tensor) + ``` + + It is also possible to nest `SplitInferenceModule` across different split dimensions for more complex + multi-dimensional splitting. + """ + + def __init__( + self, + module: nn.Module, + split_size: int = 1, + split_dim: int = 0, + input_kwargs_to_split: List[str] = ["hidden_states"], + ) -> None: + super().__init__() + + self.module = module + self.split_size = split_size + self.split_dim = split_dim + self.input_kwargs_to_split = set(input_kwargs_to_split) + + def forward(self, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + r"""Forward method for the `SplitInferenceModule`. + + This method processes the input by splitting specified keyword arguments along a given dimension, running the + underlying module on each split, and then concatenating the results. The splitting is controlled by the + `split_size` and `split_dim` parameters specified during initialization. + + Args: + *args (`Any`): + Positional arguments that are passed directly to the `module` without modification. + **kwargs (`Dict[str, torch.Tensor]`): + Keyword arguments passed to the underlying `module`. Only keyword arguments whose names match the + entries in `input_kwargs_to_split` and are of type `torch.Tensor` will be split. The remaining keyword + arguments are passed unchanged. + + Returns: + `Union[torch.Tensor, Tuple[torch.Tensor]]`: + The outputs obtained from `SplitInferenceModule` are the same as if the underlying module was inferred + without it. + - If the underlying module returns a single tensor, the result will be a single concatenated tensor + along the same `split_dim` after processing all splits. + - If the underlying module returns a tuple of tensors, each element of the tuple will be concatenated + along the `split_dim` across all splits, and the final result will be a tuple of concatenated tensors. + """ + split_inputs = {} + + # 1. Split inputs that were specified during initialization and also present in passed kwargs + for key in list(kwargs.keys()): + if key not in self.input_kwargs_to_split or not torch.is_tensor(kwargs[key]): + continue + split_inputs[key] = torch.split(kwargs[key], self.split_size, self.split_dim) + kwargs.pop(key) + + # 2. Invoke forward pass across each split + results = [] + for split_input in zip(*split_inputs.values()): + inputs = dict(zip(split_inputs.keys(), split_input)) + inputs.update(kwargs) + + intermediate_tensor_or_tensor_tuple = self.module(*args, **inputs) + results.append(intermediate_tensor_or_tensor_tuple) + + # 3. Concatenate split restuls to obtain final outputs + if isinstance(results[0], torch.Tensor): + return torch.cat(results, dim=self.split_dim) + elif isinstance(results[0], tuple): + return tuple([torch.cat(x, dim=self.split_dim) for x in zip(*results)]) + else: + raise ValueError( + "In order to use the SplitInferenceModule, it is necessary for the underlying `module` to either return a torch.Tensor or a tuple of torch.Tensor's." 
+ ) + + +class AnimateDiffFreeNoiseMixin: + r"""Mixin class for [FreeNoise](https://arxiv.org/abs/2310.15169).""" + + def _enable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): + r"""Helper function to enable FreeNoise in transformer blocks.""" + + for motion_module in block.motion_modules: + num_transformer_blocks = len(motion_module.transformer_blocks) + + for i in range(num_transformer_blocks): + if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): + motion_module.transformer_blocks[i].set_free_noise_properties( + self._free_noise_context_length, + self._free_noise_context_stride, + self._free_noise_weighting_scheme, + ) + else: + assert isinstance(motion_module.transformer_blocks[i], BasicTransformerBlock) + basic_transfomer_block = motion_module.transformer_blocks[i] + + motion_module.transformer_blocks[i] = FreeNoiseTransformerBlock( + dim=basic_transfomer_block.dim, + num_attention_heads=basic_transfomer_block.num_attention_heads, + attention_head_dim=basic_transfomer_block.attention_head_dim, + dropout=basic_transfomer_block.dropout, + cross_attention_dim=basic_transfomer_block.cross_attention_dim, + activation_fn=basic_transfomer_block.activation_fn, + attention_bias=basic_transfomer_block.attention_bias, + only_cross_attention=basic_transfomer_block.only_cross_attention, + double_self_attention=basic_transfomer_block.double_self_attention, + positional_embeddings=basic_transfomer_block.positional_embeddings, + num_positional_embeddings=basic_transfomer_block.num_positional_embeddings, + context_length=self._free_noise_context_length, + context_stride=self._free_noise_context_stride, + weighting_scheme=self._free_noise_weighting_scheme, + ).to(device=self.device, dtype=self.dtype) + + motion_module.transformer_blocks[i].load_state_dict( + basic_transfomer_block.state_dict(), strict=True + ) + motion_module.transformer_blocks[i].set_chunk_feed_forward( + basic_transfomer_block._chunk_size, basic_transfomer_block._chunk_dim + ) + + def _disable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): + r"""Helper function to disable FreeNoise in transformer blocks.""" + + for motion_module in block.motion_modules: + num_transformer_blocks = len(motion_module.transformer_blocks) + + for i in range(num_transformer_blocks): + if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): + free_noise_transfomer_block = motion_module.transformer_blocks[i] + + motion_module.transformer_blocks[i] = BasicTransformerBlock( + dim=free_noise_transfomer_block.dim, + num_attention_heads=free_noise_transfomer_block.num_attention_heads, + attention_head_dim=free_noise_transfomer_block.attention_head_dim, + dropout=free_noise_transfomer_block.dropout, + cross_attention_dim=free_noise_transfomer_block.cross_attention_dim, + activation_fn=free_noise_transfomer_block.activation_fn, + attention_bias=free_noise_transfomer_block.attention_bias, + only_cross_attention=free_noise_transfomer_block.only_cross_attention, + double_self_attention=free_noise_transfomer_block.double_self_attention, + positional_embeddings=free_noise_transfomer_block.positional_embeddings, + num_positional_embeddings=free_noise_transfomer_block.num_positional_embeddings, + ).to(device=self.device, dtype=self.dtype) + + motion_module.transformer_blocks[i].load_state_dict( + free_noise_transfomer_block.state_dict(), strict=True + ) + motion_module.transformer_blocks[i].set_chunk_feed_forward( + 
free_noise_transfomer_block._chunk_size, free_noise_transfomer_block._chunk_dim + ) + + def _check_inputs_free_noise( + self, + prompt, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + num_frames, + ) -> None: + if not isinstance(prompt, (str, dict)): + raise ValueError(f"Expected `prompt` to have type `str` or `dict` but found {type(prompt)=}") + + if negative_prompt is not None: + if not isinstance(negative_prompt, (str, dict)): + raise ValueError( + f"Expected `negative_prompt` to have type `str` or `dict` but found {type(negative_prompt)=}" + ) + + if prompt_embeds is not None or negative_prompt_embeds is not None: + raise ValueError("`prompt_embeds` and `negative_prompt_embeds` is not supported in FreeNoise yet.") + + frame_indices = [isinstance(x, int) for x in prompt.keys()] + frame_prompts = [isinstance(x, str) for x in prompt.values()] + min_frame = min(list(prompt.keys())) + max_frame = max(list(prompt.keys())) + + if not all(frame_indices): + raise ValueError("Expected integer keys in `prompt` dict for FreeNoise.") + if not all(frame_prompts): + raise ValueError("Expected str values in `prompt` dict for FreeNoise.") + if min_frame != 0: + raise ValueError("The minimum frame index in `prompt` dict must be 0 as a starting prompt is necessary.") + if max_frame >= num_frames: + raise ValueError( + f"The maximum frame index in `prompt` dict must be lesser than {num_frames=} and follow 0-based indexing." + ) + + def _encode_prompt_free_noise( + self, + prompt: Union[str, Dict[int, str]], + num_frames: int, + device: torch.device, + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + negative_prompt: Optional[Union[str, Dict[int, str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ) -> torch.Tensor: + if negative_prompt is None: + negative_prompt = "" + + # Ensure that we have a dictionary of prompts + if isinstance(prompt, str): + prompt = {0: prompt} + if isinstance(negative_prompt, str): + negative_prompt = {0: negative_prompt} + + self._check_inputs_free_noise(prompt, negative_prompt, prompt_embeds, negative_prompt_embeds, num_frames) + + # Sort the prompts based on frame indices + prompt = dict(sorted(prompt.items())) + negative_prompt = dict(sorted(negative_prompt.items())) + + # Ensure that we have a prompt for the last frame index + prompt[num_frames - 1] = prompt[list(prompt.keys())[-1]] + negative_prompt[num_frames - 1] = negative_prompt[list(negative_prompt.keys())[-1]] + + frame_indices = list(prompt.keys()) + frame_prompts = list(prompt.values()) + frame_negative_indices = list(negative_prompt.keys()) + frame_negative_prompts = list(negative_prompt.values()) + + # Generate and interpolate positive prompts + prompt_embeds, _ = self.encode_prompt( + prompt=frame_prompts, + device=device, + num_images_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=False, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=clip_skip, + ) + + shape = (num_frames, *prompt_embeds.shape[1:]) + prompt_interpolation_embeds = prompt_embeds.new_zeros(shape) + + for i in range(len(frame_indices) - 1): + start_frame = frame_indices[i] + end_frame = frame_indices[i + 1] + start_tensor = prompt_embeds[i].unsqueeze(0) + end_tensor = prompt_embeds[i + 1].unsqueeze(0) + + prompt_interpolation_embeds[start_frame : end_frame + 1] = 
self._free_noise_prompt_interpolation_callback( + start_frame, end_frame, start_tensor, end_tensor + ) + + # Generate and interpolate negative prompts + negative_prompt_embeds = None + negative_prompt_interpolation_embeds = None + + if do_classifier_free_guidance: + _, negative_prompt_embeds = self.encode_prompt( + prompt=[""] * len(frame_negative_prompts), + device=device, + num_images_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=True, + negative_prompt=frame_negative_prompts, + prompt_embeds=None, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=clip_skip, + ) + + negative_prompt_interpolation_embeds = negative_prompt_embeds.new_zeros(shape) + + for i in range(len(frame_negative_indices) - 1): + start_frame = frame_negative_indices[i] + end_frame = frame_negative_indices[i + 1] + start_tensor = negative_prompt_embeds[i].unsqueeze(0) + end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0) + + negative_prompt_interpolation_embeds[ + start_frame : end_frame + 1 + ] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor) + + prompt_embeds = prompt_interpolation_embeds + negative_prompt_embeds = negative_prompt_interpolation_embeds + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds, negative_prompt_embeds + + def _prepare_latents_free_noise( + self, + batch_size: int, + num_channels_latents: int, + num_frames: int, + height: int, + width: int, + dtype: torch.dtype, + device: torch.device, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.Tensor] = None, + ): + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + context_num_frames = ( + self._free_noise_context_length if self._free_noise_context_length == "repeat_context" else num_frames + ) + + shape = ( + batch_size, + num_channels_latents, + context_num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + if self._free_noise_noise_type == "random": + return latents + else: + if latents.size(2) == num_frames: + return latents + elif latents.size(2) != self._free_noise_context_length: + raise ValueError( + f"You have passed `latents` as a parameter to FreeNoise. 
The expected number of frames is either {num_frames} or {self._free_noise_context_length}, but found {latents.size(2)}" + ) + latents = latents.to(device) + + if self._free_noise_noise_type == "shuffle_context": + for i in range(self._free_noise_context_length, num_frames, self._free_noise_context_stride): + # ensure window is within bounds + window_start = max(0, i - self._free_noise_context_length) + window_end = min(num_frames, window_start + self._free_noise_context_stride) + window_length = window_end - window_start + + if window_length == 0: + break + + indices = torch.LongTensor(list(range(window_start, window_end))) + shuffled_indices = indices[torch.randperm(window_length, generator=generator)] + + current_start = i + current_end = min(num_frames, current_start + window_length) + if current_end == current_start + window_length: + # batch of frames perfectly fits the window + latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] + else: + # handle the case where the last batch of frames does not fit perfectly with the window + prefix_length = current_end - current_start + shuffled_indices = shuffled_indices[:prefix_length] + latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] + + elif self._free_noise_noise_type == "repeat_context": + num_repeats = (num_frames + self._free_noise_context_length - 1) // self._free_noise_context_length + latents = torch.cat([latents] * num_repeats, dim=2) + + latents = latents[:, :, :num_frames] + return latents + + def _lerp( + self, start_index: int, end_index: int, start_tensor: torch.Tensor, end_tensor: torch.Tensor + ) -> torch.Tensor: + num_indices = end_index - start_index + 1 + interpolated_tensors = [] + + for i in range(num_indices): + alpha = i / (num_indices - 1) + interpolated_tensor = (1 - alpha) * start_tensor + alpha * end_tensor + interpolated_tensors.append(interpolated_tensor) + + interpolated_tensors = torch.cat(interpolated_tensors) + return interpolated_tensors + + def enable_free_noise( + self, + context_length: Optional[int] = 16, + context_stride: int = 4, + weighting_scheme: str = "pyramid", + noise_type: str = "shuffle_context", + prompt_interpolation_callback: Optional[ + Callable[[DiffusionPipeline, int, int, torch.Tensor, torch.Tensor], torch.Tensor] + ] = None, + ) -> None: + r""" + Enable long video generation using FreeNoise. + + Args: + context_length (`int`, defaults to `16`, *optional*): + The number of video frames to process at once. It's recommended to set this to the maximum frames the + Motion Adapter was trained with (usually 16/24/32). If `None`, the default value from the motion + adapter config is used. + context_stride (`int`, *optional*): + Long videos are generated by processing many frames. FreeNoise processes these frames in sliding + windows of size `context_length`. Context stride allows you to specify how many frames to skip between + each window. For example, a context length of 16 and context stride of 4 would process 24 frames as: + [0, 15], [4, 19], [8, 23] (0-based indexing) + weighting_scheme (`str`, defaults to `pyramid`): + Weighting scheme for averaging latents after accumulation in FreeNoise blocks. The following weighting + schemes are supported currently: + - "flat" + Performs weighting averaging with a flat weight pattern: [1, 1, 1, 1, 1]. + - "pyramid" + Performs weighted averaging with a pyramid like weight pattern: [1, 2, 3, 2, 1]. 
+ - "delayed_reverse_sawtooth" + Performs weighted averaging with low weights for earlier frames and high-to-low weights for + later frames: [0.01, 0.01, 3, 2, 1]. + noise_type (`str`, defaults to "shuffle_context"): + Must be one of ["shuffle_context", "repeat_context", "random"]. + - "shuffle_context" + Shuffles a fixed batch of `context_length` latents to create a final latent of size + `num_frames`. This is usually the best setting for most generation scenarious. However, there + might be visible repetition noticeable in the kinds of motion/animation generated. + - "repeated_context" + Repeats a fixed batch of `context_length` latents to create a final latent of size + `num_frames`. + - "random" + The final latents are random without any repetition. + """ + + allowed_weighting_scheme = ["flat", "pyramid", "delayed_reverse_sawtooth"] + allowed_noise_type = ["shuffle_context", "repeat_context", "random"] + + if context_length > self.motion_adapter.config.motion_max_seq_length: + logger.warning( + f"You have set {context_length=} which is greater than {self.motion_adapter.config.motion_max_seq_length=}. This can lead to bad generation results." + ) + if weighting_scheme not in allowed_weighting_scheme: + raise ValueError( + f"The parameter `weighting_scheme` must be one of {allowed_weighting_scheme}, but got {weighting_scheme=}" + ) + if noise_type not in allowed_noise_type: + raise ValueError(f"The parameter `noise_type` must be one of {allowed_noise_type}, but got {noise_type=}") + + self._free_noise_context_length = context_length or self.motion_adapter.config.motion_max_seq_length + self._free_noise_context_stride = context_stride + self._free_noise_weighting_scheme = weighting_scheme + self._free_noise_noise_type = noise_type + self._free_noise_prompt_interpolation_callback = prompt_interpolation_callback or self._lerp + + if hasattr(self.unet.mid_block, "motion_modules"): + blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] + else: + blocks = [*self.unet.down_blocks, *self.unet.up_blocks] + + for block in blocks: + self._enable_free_noise_in_block(block) + + def disable_free_noise(self) -> None: + r"""Disable the FreeNoise sampling mechanism.""" + self._free_noise_context_length = None + + if hasattr(self.unet.mid_block, "motion_modules"): + blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] + else: + blocks = [*self.unet.down_blocks, *self.unet.up_blocks] + + blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] + for block in blocks: + self._disable_free_noise_in_block(block) + + def _enable_split_inference_motion_modules_( + self, motion_modules: List[AnimateDiffTransformer3D], spatial_split_size: int + ) -> None: + for motion_module in motion_modules: + motion_module.proj_in = SplitInferenceModule(motion_module.proj_in, spatial_split_size, 0, ["input"]) + + for i in range(len(motion_module.transformer_blocks)): + motion_module.transformer_blocks[i] = SplitInferenceModule( + motion_module.transformer_blocks[i], + spatial_split_size, + 0, + ["hidden_states", "encoder_hidden_states"], + ) + + motion_module.proj_out = SplitInferenceModule(motion_module.proj_out, spatial_split_size, 0, ["input"]) + + def _enable_split_inference_attentions_( + self, attentions: List[Transformer2DModel], temporal_split_size: int + ) -> None: + for i in range(len(attentions)): + attentions[i] = SplitInferenceModule( + attentions[i], temporal_split_size, 0, ["hidden_states", "encoder_hidden_states"] + ) + + def 
_enable_split_inference_resnets_(self, resnets: List[ResnetBlock2D], temporal_split_size: int) -> None: + for i in range(len(resnets)): + resnets[i] = SplitInferenceModule(resnets[i], temporal_split_size, 0, ["input_tensor", "temb"]) + + def _enable_split_inference_samplers_( + self, samplers: Union[List[Downsample2D], List[Upsample2D]], temporal_split_size: int + ) -> None: + for i in range(len(samplers)): + samplers[i] = SplitInferenceModule(samplers[i], temporal_split_size, 0, ["hidden_states"]) + + def enable_free_noise_split_inference(self, spatial_split_size: int = 256, temporal_split_size: int = 16) -> None: + r""" + Enable FreeNoise memory optimizations by utilizing + [`~diffusers.pipelines.free_noise_utils.SplitInferenceModule`] across different intermediate modeling blocks. + + Args: + spatial_split_size (`int`, defaults to `256`): + The split size across spatial dimensions for internal blocks. This is used in facilitating split + inference across the effective batch dimension (`[B x H x W, F, C]`) of intermediate tensors in motion + modeling blocks. + temporal_split_size (`int`, defaults to `16`): + The split size across temporal dimensions for internal blocks. This is used in facilitating split + inference across the effective batch dimension (`[B x F, H x W, C]`) of intermediate tensors in spatial + attention, resnets, downsampling and upsampling blocks. + """ + # TODO(aryan): Discuss on what's the best way to provide more control to users + blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] + for block in blocks: + if getattr(block, "motion_modules", None) is not None: + self._enable_split_inference_motion_modules_(block.motion_modules, spatial_split_size) + if getattr(block, "attentions", None) is not None: + self._enable_split_inference_attentions_(block.attentions, temporal_split_size) + if getattr(block, "resnets", None) is not None: + self._enable_split_inference_resnets_(block.resnets, temporal_split_size) + if getattr(block, "downsamplers", None) is not None: + self._enable_split_inference_samplers_(block.downsamplers, temporal_split_size) + if getattr(block, "upsamplers", None) is not None: + self._enable_split_inference_samplers_(block.upsamplers, temporal_split_size) + + @property + def free_noise_enabled(self): + return hasattr(self, "_free_noise_context_length") and self._free_noise_context_length is not None diff --git a/diffusers3/pipelines/.ipynb_checkpoints/minus-checkpoint.py b/diffusers3/pipelines/.ipynb_checkpoints/minus-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf57cc6ce88817569605e3df34eb90920a528ae --- /dev/null +++ b/diffusers3/pipelines/.ipynb_checkpoints/minus-checkpoint.py @@ -0,0 +1,3 @@ +class minus_test(): + def minus(a,b): + return a - b \ No newline at end of file diff --git a/diffusers3/pipelines/README.md b/diffusers3/pipelines/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d5125ae5caf244425a0a372f054b069c98f27670 --- /dev/null +++ b/diffusers3/pipelines/README.md @@ -0,0 +1,171 @@ +# ๐Ÿงจ Diffusers Pipelines + +Pipelines provide a simple way to run state-of-the-art diffusion models in inference. +Most diffusion systems consist of multiple independently-trained models and highly adaptable scheduler +components - all of which are needed to have a functioning end-to-end diffusion system. 
+
+As an example, [Stable Diffusion](https://huggingface.co/blog/stable_diffusion) consists of three independently trained models and several additional components:
+- [Autoencoder](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/models/vae.py#L392)
+- [Conditional Unet](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/models/unet_2d_condition.py#L12)
+- [CLIP text encoder](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPTextModel)
+- a scheduler component, [scheduler](https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_pndm.py),
+- a [CLIPImageProcessor](https://huggingface.co/docs/transformers/main/en/model_doc/clip#transformers.CLIPImageProcessor),
+- as well as a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py).
+All of these components are necessary to run Stable Diffusion in inference even though they were trained
+or created independently from each other.
+
+To that end, we strive to offer all open-sourced, state-of-the-art diffusion systems under a unified API.
+More specifically, we strive to provide pipelines that
+- 1. can load the officially published weights and yield 1-to-1 the same outputs as the original implementation according to the corresponding paper (*e.g.* [LDMTextToImagePipeline](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/latent_diffusion), uses the officially released weights of [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)),
+- 2. have a simple user interface to run the model in inference (see the [Pipelines API](#pipelines-api) section),
+- 3. are easy to understand with code that is self-explanatory and can be read alongside the official paper (see [Pipelines summary](#pipelines-summary)),
+- 4. can easily be contributed by the community (see the [Contribution](#contribution) section).
+
+**Note** that pipelines do not (and should not) offer any training functionality.
+If you are looking for *official* training examples, please have a look at [examples](https://github.com/huggingface/diffusers/tree/main/examples).
+
+
+## Pipelines Summary
+
+The following table summarizes all officially supported pipelines, their corresponding paper, and, if
+available, a colab notebook to directly try them out.
+ +| Pipeline | Source | Tasks | Colab +|-------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|:---:|:---:| +| [dance diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/Harmonai-org/sample-generator) | *Unconditional Audio Generation* | +| [ddpm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | *Unconditional Image Generation* | +| [ddim](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | *Unconditional Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) +| [latent_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | *Text-to-Image Generation* | +| [latent_diffusion_uncond](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | *Unconditional Image Generation* | +| [pndm](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | *Unconditional Image Generation* | +| [score_sde_ve](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | *Unconditional Image Generation* | +| [score_sde_vp](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | *Unconditional Image Generation* | +| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Text-to-Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb) +| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Image-to-Image Text-Guided Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb) +| [stable_diffusion](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | *Text-Guided Image Inpainting* | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
+| [stochastic_karras_ve](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | *Unconditional Image Generation* |
+
+**Note**: Pipelines are simple examples of how to play around with the diffusion systems as described in the corresponding papers.
+However, most of them can be adapted to use different scheduler components or even different model components. Some pipeline examples are shown in the [Examples](#examples) below.
+
+## Pipelines API
+
+Diffusion models often consist of multiple independently-trained models or other previously existing components.
+
+Each model has been trained independently on a different task and the scheduler can easily be swapped out and replaced with a different one.
+During inference, however, we want to be able to easily load all components and use them together - even if one component, *e.g.* CLIP's text encoder, originates from a different library, such as [Transformers](https://github.com/huggingface/transformers). To that end, all pipelines provide the following functionality:
+
+- [`from_pretrained` method](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L139) that accepts a Hugging Face Hub repository id, *e.g.* [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), or a path to a local directory, *e.g.*
+`"./stable-diffusion"`. To correctly retrieve which models and components should be loaded, one has to provide a `model_index.json` file, *e.g.* [runwayml/stable-diffusion-v1-5/model_index.json](https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/model_index.json), which defines all components that should be
+loaded into the pipelines. More specifically, for each model/component one needs to define the format `<name>: ["<library>", "<class name>"]`. `<name>` is the attribute name given to the loaded instance of `<class name>`, which can be found in the library or pipeline folder called `"<library>"`.
+- [`save_pretrained`](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L90) that accepts a local path, *e.g.* `./stable-diffusion`, under which all models/components of the pipeline will be saved. For each component/model a folder is created inside the local path that is named after the given attribute name, *e.g.* `./stable_diffusion/unet`.
+In addition, a `model_index.json` file is created at the root of the local path, *e.g.* `./stable_diffusion/model_index.json`, so that the complete pipeline can again be instantiated
+from the local path.
+- [`to`](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L118) which accepts a `string` or `torch.device` to move all models that are of type `torch.nn.Module` to the passed device. The behavior is fully analogous to [PyTorch's `to` method](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.to).
+- [`__call__`] method to use the pipeline in inference.
`__call__` defines the inference logic of the pipeline and should ideally encompass all aspects of it, from pre-processing to forwarding tensors to the different models and schedulers, as well as post-processing. The API of the `__call__` method can vary strongly from pipeline to pipeline. *E.g.* a text-to-image pipeline, such as [`StableDiffusionPipeline`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py), should accept among other things the text prompt to generate the image. A pure image generation pipeline, such as [DDPMPipeline](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/ddpm), on the other hand can be run without providing any inputs. To better understand what inputs can be adapted for
+each pipeline, one should look directly into the respective pipeline. A short sketch tying `from_pretrained`, `save_pretrained` and `to` together is given just before the [Examples](#examples) section.
+
+**Note**: All pipelines have PyTorch's autograd disabled by decorating the `__call__` method with a [`torch.no_grad`](https://pytorch.org/docs/stable/generated/torch.no_grad.html) decorator because pipelines should
+not be used for training. If you want to store the gradients during the forward pass, we recommend writing your own pipeline; see also our [community-examples](https://github.com/huggingface/diffusers/tree/main/examples/community).
+
+## Contribution
+
+We are more than happy to accept any contribution to the officially supported pipelines 🤗. We aspire for
+all of our pipelines to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and **one-purpose-only**.
+
+- **Self-contained**: A pipeline shall be as self-contained as possible. More specifically, this means that all functionality should be either directly defined in the pipeline file itself, should be inherited from (and only from) the [`DiffusionPipeline` class](https://github.com/huggingface/diffusers/blob/5cbed8e0d157f65d3ddc2420dfd09f2df630e978/src/diffusers/pipeline_utils.py#L56) or be directly attached to the model and scheduler components of the pipeline.
+- **Easy-to-use**: Pipelines should be extremely easy to use: one should be able to load the pipeline and
+use it for its designated task, *e.g.* text-to-image generation, in just a couple of lines of code. Most
+logic, including pre-processing, an unrolled diffusion loop, and post-processing, should all happen inside the `__call__` method.
+- **Easy-to-tweak**: Certain pipelines will not be able to handle all use cases and tasks that you might like them to. If you want to use a certain pipeline for a specific use case that is not yet supported, you might have to copy the pipeline file and tweak the code to your needs. We try to make the pipeline code as readable as possible so that each part, from pre-processing to diffusing to post-processing, can easily be adapted. If you would like the community to benefit from your customized pipeline, we would love to see a contribution to our [community-examples](https://github.com/huggingface/diffusers/tree/main/examples/community). If you feel that an important pipeline should be part of the official pipelines but isn't, a contribution to the [official pipelines](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines) would be even better.
+- **One-purpose-only**: Pipelines should be used for one task and one task only. Even if two tasks are very similar from a modeling point of view, *e.g.* image2image translation and in-painting, pipelines shall be used for one task only to keep them *easy-to-tweak* and *readable*.
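+
+Before diving into the task-specific examples below, here is a minimal sketch that ties together the `from_pretrained`, `save_pretrained` and `to` methods described in the [Pipelines API](#pipelines-api) section. The checkpoint id is the same one used elsewhere in this document; the local directory name is only illustrative.
+
+```python
+import torch
+
+from diffusers import DiffusionPipeline
+
+# download all components listed in model_index.json from the Hub
+pipe = DiffusionPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5",
+    torch_dtype=torch.float16,
+)
+
+# save every component (plus model_index.json) to a local folder ...
+pipe.save_pretrained("./stable-diffusion-local")
+
+# ... from which the exact same pipeline can be re-instantiated later
+pipe = DiffusionPipeline.from_pretrained(
+    "./stable-diffusion-local",
+    torch_dtype=torch.float16,
+)
+
+# move all torch.nn.Module components to the GPU, analogous to nn.Module.to
+pipe = pipe.to("cuda")
+```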
+
+## Examples
+
+### Text-to-Image generation with Stable Diffusion
+
+```python
+# make sure you're logged in with `huggingface-cli login`
+from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
+
+pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+pipe = pipe.to("cuda")
+
+prompt = "a photo of an astronaut riding a horse on mars"
+image = pipe(prompt).images[0]
+
+image.save("astronaut_rides_horse.png")
+```
+
+### Image-to-Image text-guided generation with Stable Diffusion
+
+The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.
+
+```python
+import requests
+import torch
+from PIL import Image
+from io import BytesIO
+
+from diffusers import StableDiffusionImg2ImgPipeline
+
+# load the pipeline
+device = "cuda"
+pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5",
+    torch_dtype=torch.float16,
+).to(device)
+
+# let's download an initial image
+url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
+
+response = requests.get(url)
+init_image = Image.open(BytesIO(response.content)).convert("RGB")
+init_image = init_image.resize((768, 512))
+
+prompt = "A fantasy landscape, trending on artstation"
+
+images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
+
+images[0].save("fantasy_landscape.png")
+```
+You can also run this example on colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
+
+### Tweak prompts reusing seeds and latents
+
+You can generate your own latents to reproduce results, or tweak your prompt on a specific result you liked. [This notebook](https://github.com/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb) shows how to do it step by step. You can also run it in Google Colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pcuenca/diffusers-examples/blob/main/notebooks/stable-diffusion-seeds.ipynb).
+
+
+### In-painting using Stable Diffusion
+
+The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by providing a mask and text prompt.
+ +```python +import PIL +import requests +import torch +from io import BytesIO + +from diffusers import StableDiffusionInpaintPipeline + +def download_image(url): + response = requests.get(url) + return PIL.Image.open(BytesIO(response.content)).convert("RGB") + +img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" +mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + +init_image = download_image(img_url).resize((512, 512)) +mask_image = download_image(mask_url).resize((512, 512)) + +pipe = StableDiffusionInpaintPipeline.from_pretrained( + "runwayml/stable-diffusion-inpainting", + torch_dtype=torch.float16, +) +pipe = pipe.to("cuda") + +prompt = "Face of a yellow cat, high resolution, sitting on a park bench" +image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] +``` + +You can also run this example on colab [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb) diff --git a/diffusers3/pipelines/Untitled.ipynb b/diffusers3/pipelines/Untitled.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..69c0cba8bd645a52f95c356f1969b64f41fe68d1 --- /dev/null +++ b/diffusers3/pipelines/Untitled.ipynb @@ -0,0 +1,89 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "cefd16a3-ea3a-40b7-90a3-bcf63e957596", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "567a9a48-3db8-423b-aa3d-750e80825eb4", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "!export PYTHONPATH=/pipelines" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f0d33586-8572-4da3-b90e-bfbd1173dcd7", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/root/data/diffusers/src/diffusers/pipelines\n" + ] + } + ], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "4f5cf2a4-d3c0-43c9-90dd-1a04fcee06b9", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "!set PYTHONPATH=C:\\root\\data\\diffusers\\src\\diffusers\\pipelines" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "04d9ba91-798f-4994-b4ef-26e3c359b5f6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/__init__.py b/diffusers3/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e4d37a905b86724cd1a0a33eb5e6e63d11fa50ed --- /dev/null +++ b/diffusers3/pipelines/__init__.py @@ -0,0 +1,721 @@ +from typing import TYPE_CHECKING + +from ..utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, 
+ get_objects_from_module, + is_flax_available, + is_k_diffusion_available, + is_librosa_available, + is_note_seq_available, + is_onnx_available, + is_sentencepiece_available, + is_torch_available, + is_torch_npu_available, + is_transformers_available, +) + + +# These modules contain pipelines from multiple libraries/frameworks +_dummy_objects = {} +_import_structure = { + "controlnet": [], + "controlnet_hunyuandit": [], + "controlnet_sd3": [], + "controlnet_xs": [], + "deprecated": [], + "latent_diffusion": [], + "ledits_pp": [], + "marigold": [], + "pag": [], + "stable_diffusion": [], + "stable_diffusion_xl": [], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_pt_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["auto_pipeline"] = [ + "AutoPipelineForImage2Image", + "AutoPipelineForInpainting", + "AutoPipelineForText2Image", + ] + _import_structure["consistency_models"] = ["ConsistencyModelPipeline"] + _import_structure["dance_diffusion"] = ["DanceDiffusionPipeline"] + _import_structure["ddim"] = ["DDIMPipeline"] + _import_structure["ddpm"] = ["DDPMPipeline"] + _import_structure["dit"] = ["DiTPipeline"] + _import_structure["latent_diffusion"].extend(["LDMSuperResolutionPipeline"]) + _import_structure["pipeline_utils"] = [ + "AudioPipelineOutput", + "DiffusionPipeline", + "StableDiffusionMixin", + "ImagePipelineOutput", + ] + _import_structure["deprecated"].extend( + [ + "PNDMPipeline", + "LDMPipeline", + "RePaintPipeline", + "ScoreSdeVePipeline", + "KarrasVePipeline", + ] + ) +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_librosa_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) +else: + _import_structure["deprecated"].extend(["AudioDiffusionPipeline", "Mel"]) + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) +else: + _import_structure["deprecated"].extend( + [ + "MidiProcessor", + "SpectrogramDiffusionPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["deprecated"].extend( + [ + "VQDiffusionPipeline", + "AltDiffusionPipeline", + "AltDiffusionImg2ImgPipeline", + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + ) + _import_structure["amused"] = ["AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline"] + _import_structure["animatediff"] = [ + "AnimateDiffPipeline", + "AnimateDiffControlNetPipeline", + 
"AnimateDiffSDXLPipeline", + "AnimateDiffSparseControlNetPipeline", + "AnimateDiffVideoToVideoPipeline", + "AnimateDiffVideoToVideoControlNetPipeline", + ] + _import_structure["flux"] = [ + "FluxControlNetPipeline", + "FluxImg2ImgPipeline", + "FluxInpaintPipeline", + "FluxPipeline", + ] + _import_structure["audioldm"] = ["AudioLDMPipeline"] + _import_structure["audioldm2"] = [ + "AudioLDM2Pipeline", + "AudioLDM2ProjectionModel", + "AudioLDM2UNet2DConditionModel", + ] + _import_structure["blip_diffusion"] = ["BlipDiffusionPipeline"] + _import_structure["cogvideo"] = ["CogVideoXPipeline", "CogVideoXVideoToVideoPipeline"] + _import_structure["controlnet"].extend( + [ + "BlipDiffusionControlNetPipeline", + "StableDiffusionControlNetImg2ImgPipeline", + "StableDiffusionControlNetInpaintPipeline", + "StableDiffusionControlNetPipeline", + "StableDiffusionXLControlNetImg2ImgPipeline", + "StableDiffusionXLControlNetInpaintPipeline", + "StableDiffusionXLControlNetPipeline", + ] + ) + _import_structure["pag"].extend( + [ + "AnimateDiffPAGPipeline", + "KolorsPAGPipeline", + "HunyuanDiTPAGPipeline", + "StableDiffusion3PAGPipeline", + "StableDiffusionPAGPipeline", + "StableDiffusionControlNetPAGPipeline", + "StableDiffusionXLPAGPipeline", + "StableDiffusionXLPAGInpaintPipeline", + "StableDiffusionXLControlNetPAGImg2ImgPipeline", + "StableDiffusionXLControlNetPAGPipeline", + "StableDiffusionXLPAGImg2ImgPipeline", + "PixArtSigmaPAGPipeline", + ] + ) + _import_structure["controlnet_xs"].extend( + [ + "StableDiffusionControlNetXSPipeline", + "StableDiffusionXLControlNetXSPipeline", + ] + ) + _import_structure["controlnet_hunyuandit"].extend( + [ + "HunyuanDiTControlNetPipeline", + ] + ) + _import_structure["controlnet_sd3"].extend( + [ + "StableDiffusion3ControlNetPipeline", + "StableDiffusion3ControlNetInpaintingPipeline", + ] + ) + _import_structure["deepfloyd_if"] = [ + "IFImg2ImgPipeline", + "IFImg2ImgSuperResolutionPipeline", + "IFInpaintingPipeline", + "IFInpaintingSuperResolutionPipeline", + "IFPipeline", + "IFSuperResolutionPipeline", + ] + _import_structure["hunyuandit"] = ["HunyuanDiTPipeline"] + _import_structure["kandinsky"] = [ + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyImg2ImgPipeline", + "KandinskyInpaintCombinedPipeline", + "KandinskyInpaintPipeline", + "KandinskyPipeline", + "KandinskyPriorPipeline", + ] + _import_structure["kandinsky2_2"] = [ + "KandinskyV22CombinedPipeline", + "KandinskyV22ControlnetImg2ImgPipeline", + "KandinskyV22ControlnetPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22Img2ImgPipeline", + "KandinskyV22InpaintCombinedPipeline", + "KandinskyV22InpaintPipeline", + "KandinskyV22Pipeline", + "KandinskyV22PriorEmb2EmbPipeline", + "KandinskyV22PriorPipeline", + ] + _import_structure["kandinsky3"] = [ + "Kandinsky3Img2ImgPipeline", + "Kandinsky3Pipeline", + ] + _import_structure["latent_consistency_models"] = [ + "LatentConsistencyModelImg2ImgPipeline", + "LatentConsistencyModelPipeline", + ] + _import_structure["latent_diffusion"].extend(["LDMTextToImagePipeline"]) + _import_structure["ledits_pp"].extend( + [ + "LEditsPPPipelineStableDiffusion", + "LEditsPPPipelineStableDiffusionXL", + ] + ) + _import_structure["latte"] = ["LattePipeline"] + _import_structure["lumina"] = ["LuminaText2ImgPipeline"] + _import_structure["marigold"].extend( + [ + "MarigoldDepthPipeline", + "MarigoldNormalsPipeline", + ] + ) + _import_structure["musicldm"] = ["MusicLDMPipeline"] + _import_structure["paint_by_example"] = 
["PaintByExamplePipeline"] + _import_structure["pia"] = ["PIAPipeline"] + _import_structure["pixart_alpha"] = ["PixArtAlphaPipeline", "PixArtSigmaPipeline"] + _import_structure["semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] + _import_structure["shap_e"] = ["ShapEImg2ImgPipeline", "ShapEPipeline"] + _import_structure["stable_audio"] = [ + "StableAudioProjectionModel", + "StableAudioPipeline", + ] + _import_structure["stable_cascade"] = [ + "StableCascadeCombinedPipeline", + "StableCascadeDecoderPipeline", + "StableCascadePriorPipeline", + ] + _import_structure["stable_diffusion"].extend( + [ + "CLIPImageProjection", + "StableDiffusionDepth2ImgPipeline", + "StableDiffusionImageVariationPipeline", + "StableDiffusionImg2ImgPipeline", + "StableDiffusionInpaintPipeline", + "StableDiffusionInstructPix2PixPipeline", + "StableDiffusionLatentUpscalePipeline", + "StableDiffusionPipeline", + "StableDiffusionUpscalePipeline", + "StableUnCLIPImg2ImgPipeline", + "StableUnCLIPPipeline", + "StableDiffusionLDM3DPipeline", + ] + ) + _import_structure["aura_flow"] = ["AuraFlowPipeline"] + _import_structure["stable_diffusion_3"] = [ + "StableDiffusion3Pipeline", + "StableDiffusion3Img2ImgPipeline", + "StableDiffusion3InpaintPipeline", + ] + _import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] + _import_structure["stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] + _import_structure["stable_diffusion_gligen"] = [ + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + ] + _import_structure["stable_video_diffusion"] = ["StableVideoDiffusionPipeline"] + _import_structure["stable_diffusion_xl"].extend( + [ + "StableDiffusionXLImg2ImgPipeline", + "StableDiffusionXLInpaintPipeline", + "StableDiffusionXLInstructPix2PixPipeline", + "StableDiffusionXLPipeline", + ] + ) + _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + _import_structure["t2i_adapter"] = [ + "StableDiffusionAdapterPipeline", + "StableDiffusionXLAdapterPipeline", + ] + _import_structure["text_to_video_synthesis"] = [ + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "VideoToVideoSDPipeline", + ] + _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] + _import_structure["unclip"] = ["UnCLIPImageVariationPipeline", "UnCLIPPipeline"] + _import_structure["unidiffuser"] = [ + "ImageTextPipelineOutput", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + ] + _import_structure["wuerstchen"] = [ + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", + ] +try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) +else: + _import_structure["onnx_utils"] = ["OnnxRuntimeModel"] +try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403 + + 
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_onnx_objects)) +else: + _import_structure["stable_diffusion"].extend( + [ + "OnnxStableDiffusionImg2ImgPipeline", + "OnnxStableDiffusionInpaintPipeline", + "OnnxStableDiffusionPipeline", + "OnnxStableDiffusionUpscalePipeline", + "StableDiffusionOnnxPipeline", + ] + ) + +try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_k_diffusion_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) +else: + _import_structure["stable_diffusion_k_diffusion"] = [ + "StableDiffusionKDiffusionPipeline", + "StableDiffusionXLKDiffusionPipeline", + ] + +try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import ( + dummy_torch_and_transformers_and_sentencepiece_objects, + ) + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) +else: + _import_structure["kolors"] = [ + "KolorsPipeline", + "KolorsImg2ImgPipeline", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_objects)) +else: + _import_structure["pipeline_flax_utils"] = ["FlaxDiffusionPipeline"] +try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + _import_structure["controlnet"].extend(["FlaxStableDiffusionControlNetPipeline"]) + _import_structure["stable_diffusion"].extend( + [ + "FlaxStableDiffusionImg2ImgPipeline", + "FlaxStableDiffusionInpaintPipeline", + "FlaxStableDiffusionPipeline", + ] + ) + _import_structure["stable_diffusion_xl"].extend( + [ + "FlaxStableDiffusionXLPipeline", + ] + ) + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_pt_objects import * # noqa F403 + + else: + from .auto_pipeline import ( + AutoPipelineForImage2Image, + AutoPipelineForInpainting, + AutoPipelineForText2Image, + ) + from .consistency_models import ConsistencyModelPipeline + from .dance_diffusion import DanceDiffusionPipeline + from .ddim import DDIMPipeline + from .ddpm import DDPMPipeline + from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline + from .dit import DiTPipeline + from .latent_diffusion import LDMSuperResolutionPipeline + from .pipeline_utils import ( + AudioPipelineOutput, + DiffusionPipeline, + ImagePipelineOutput, + StableDiffusionMixin, + ) + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_librosa_objects import * + else: + from .deprecated import AudioDiffusionPipeline, Mel + + try: + if not (is_torch_available() and is_transformers_available()): + raise 
OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_objects import * + else: + from .amused import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline + from .animatediff import ( + AnimateDiffControlNetPipeline, + AnimateDiffPipeline, + AnimateDiffSDXLPipeline, + AnimateDiffSparseControlNetPipeline, + AnimateDiffVideoToVideoControlNetPipeline, + AnimateDiffVideoToVideoPipeline, + ) + from .audioldm import AudioLDMPipeline + from .audioldm2 import ( + AudioLDM2Pipeline, + AudioLDM2ProjectionModel, + AudioLDM2UNet2DConditionModel, + ) + from .aura_flow import AuraFlowPipeline + from .blip_diffusion import BlipDiffusionPipeline + from .cogvideo import CogVideoXPipeline, CogVideoXVideoToVideoPipeline + from .controlnet import ( + BlipDiffusionControlNetPipeline, + StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPipeline, + ) + from .controlnet_hunyuandit import ( + HunyuanDiTControlNetPipeline, + ) + from .controlnet_sd3 import StableDiffusion3ControlNetInpaintingPipeline, StableDiffusion3ControlNetPipeline + from .controlnet_xs import ( + StableDiffusionControlNetXSPipeline, + StableDiffusionXLControlNetXSPipeline, + ) + from .deepfloyd_if import ( + IFImg2ImgPipeline, + IFImg2ImgSuperResolutionPipeline, + IFInpaintingPipeline, + IFInpaintingSuperResolutionPipeline, + IFPipeline, + IFSuperResolutionPipeline, + ) + from .deprecated import ( + AltDiffusionImg2ImgPipeline, + AltDiffusionPipeline, + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZeroPipeline, + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + VQDiffusionPipeline, + ) + from .flux import FluxControlNetPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxPipeline + from .hunyuandit import HunyuanDiTPipeline + from .i2vgen_xl import I2VGenXLPipeline + from .kandinsky import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, + KandinskyPriorPipeline, + ) + from .kandinsky2_2 import ( + KandinskyV22CombinedPipeline, + KandinskyV22ControlnetImg2ImgPipeline, + KandinskyV22ControlnetPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, + KandinskyV22PriorEmb2EmbPipeline, + KandinskyV22PriorPipeline, + ) + from .kandinsky3 import ( + Kandinsky3Img2ImgPipeline, + Kandinsky3Pipeline, + ) + from .latent_consistency_models import ( + LatentConsistencyModelImg2ImgPipeline, + LatentConsistencyModelPipeline, + ) + from .latent_diffusion import LDMTextToImagePipeline + from .latte import LattePipeline + from .ledits_pp import ( + LEditsPPDiffusionPipelineOutput, + LEditsPPInversionPipelineOutput, + LEditsPPPipelineStableDiffusion, + LEditsPPPipelineStableDiffusionXL, + ) + from .lumina import LuminaText2ImgPipeline + from .marigold import ( + MarigoldDepthPipeline, + MarigoldNormalsPipeline, + ) + from .musicldm import MusicLDMPipeline + from .pag import ( + AnimateDiffPAGPipeline, + HunyuanDiTPAGPipeline, + 
KolorsPAGPipeline, + PixArtSigmaPAGPipeline, + StableDiffusion3PAGPipeline, + StableDiffusionControlNetPAGPipeline, + StableDiffusionPAGPipeline, + StableDiffusionXLControlNetPAGImg2ImgPipeline, + StableDiffusionXLControlNetPAGPipeline, + StableDiffusionXLPAGImg2ImgPipeline, + StableDiffusionXLPAGInpaintPipeline, + StableDiffusionXLPAGPipeline, + ) + from .paint_by_example import PaintByExamplePipeline + from .pia import PIAPipeline + from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline + from .semantic_stable_diffusion import SemanticStableDiffusionPipeline + from .shap_e import ShapEImg2ImgPipeline, ShapEPipeline + from .stable_audio import StableAudioPipeline, StableAudioProjectionModel + from .stable_cascade import ( + StableCascadeCombinedPipeline, + StableCascadeDecoderPipeline, + StableCascadePriorPipeline, + ) + from .stable_diffusion import ( + CLIPImageProjection, + StableDiffusionDepth2ImgPipeline, + StableDiffusionImageVariationPipeline, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionInstructPix2PixPipeline, + StableDiffusionLatentUpscalePipeline, + StableDiffusionPipeline, + StableDiffusionUpscalePipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + ) + from .stable_diffusion_3 import ( + StableDiffusion3Img2ImgPipeline, + StableDiffusion3InpaintPipeline, + StableDiffusion3Pipeline, + ) + from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline + from .stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline + from .stable_diffusion_safe import StableDiffusionPipelineSafe + from .stable_diffusion_sag import StableDiffusionSAGPipeline + from .stable_diffusion_xl import ( + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLInstructPix2PixPipeline, + StableDiffusionXLPipeline, + ) + from .stable_video_diffusion import StableVideoDiffusionPipeline + from .t2i_adapter import ( + StableDiffusionAdapterPipeline, + StableDiffusionXLAdapterPipeline, + ) + from .text_to_video_synthesis import ( + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + VideoToVideoSDPipeline, + ) + from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline + from .unidiffuser import ( + ImageTextPipelineOutput, + UniDiffuserModel, + UniDiffuserPipeline, + UniDiffuserTextDecoder, + ) + from .wuerstchen import ( + WuerstchenCombinedPipeline, + WuerstchenDecoderPipeline, + WuerstchenPriorPipeline, + ) + + try: + if not is_onnx_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_onnx_objects import * # noqa F403 + + else: + from .onnx_utils import OnnxRuntimeModel + + try: + if not (is_torch_available() and is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_onnx_objects import * + else: + from .stable_diffusion import ( + OnnxStableDiffusionImg2ImgPipeline, + OnnxStableDiffusionInpaintPipeline, + OnnxStableDiffusionPipeline, + OnnxStableDiffusionUpscalePipeline, + StableDiffusionOnnxPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): 
+ raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_k_diffusion_objects import * + else: + from .stable_diffusion_k_diffusion import ( + StableDiffusionKDiffusionPipeline, + StableDiffusionXLKDiffusionPipeline, + ) + + try: + if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import * + else: + from .kolors import ( + KolorsImg2ImgPipeline, + KolorsPipeline, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_objects import * # noqa F403 + else: + from .pipeline_flax_utils import FlaxDiffusionPipeline + + try: + if not (is_flax_available() and is_transformers_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_and_transformers_objects import * + else: + from .controlnet import FlaxStableDiffusionControlNetPipeline + from .stable_diffusion import ( + FlaxStableDiffusionImg2ImgPipeline, + FlaxStableDiffusionInpaintPipeline, + FlaxStableDiffusionPipeline, + ) + from .stable_diffusion_xl import ( + FlaxStableDiffusionXLPipeline, + ) + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + + else: + from .deprecated import ( + MidiProcessor, + SpectrogramDiffusionPipeline, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/__pycache__/__init__.cpython-310.pyc b/diffusers3/pipelines/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e9aa4b68ad4793610596ae049ad75cb9c28efe6 Binary files /dev/null and b/diffusers3/pipelines/__pycache__/__init__.cpython-310.pyc differ diff --git a/diffusers3/pipelines/__pycache__/__init__.cpython-38.pyc b/diffusers3/pipelines/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c82dcf0eaacf898b1e847e874c0cf27fb6f9099d Binary files /dev/null and b/diffusers3/pipelines/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/pipelines/__pycache__/onnx_utils.cpython-38.pyc b/diffusers3/pipelines/__pycache__/onnx_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7bdcad1b8edbaaa4c1d810aaecb48b054838366 Binary files /dev/null and b/diffusers3/pipelines/__pycache__/onnx_utils.cpython-38.pyc differ diff --git a/diffusers3/pipelines/__pycache__/pipeline_loading_utils.cpython-310.pyc b/diffusers3/pipelines/__pycache__/pipeline_loading_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e57dbcbe7a0960ac964850971d345b9befa7cc57 Binary files /dev/null and b/diffusers3/pipelines/__pycache__/pipeline_loading_utils.cpython-310.pyc differ diff --git a/diffusers3/pipelines/__pycache__/pipeline_loading_utils.cpython-38.pyc b/diffusers3/pipelines/__pycache__/pipeline_loading_utils.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b25643600a2c5e3846f25a28818becfd2ab5d5fb Binary files /dev/null and b/diffusers3/pipelines/__pycache__/pipeline_loading_utils.cpython-38.pyc differ diff --git a/diffusers3/pipelines/__pycache__/pipeline_utils.cpython-310.pyc b/diffusers3/pipelines/__pycache__/pipeline_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5289274d6cec42df3b2b522b832ef979af6b1a10 Binary files /dev/null and b/diffusers3/pipelines/__pycache__/pipeline_utils.cpython-310.pyc differ diff --git a/diffusers3/pipelines/__pycache__/pipeline_utils.cpython-38.pyc b/diffusers3/pipelines/__pycache__/pipeline_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da9a76d261afa881ff156c4839da918010d529e8 Binary files /dev/null and b/diffusers3/pipelines/__pycache__/pipeline_utils.cpython-38.pyc differ diff --git a/diffusers3/pipelines/amused/__init__.py b/diffusers3/pipelines/amused/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4d07a426b54fabfcdf35bfb8e4486cd828b3b3 --- /dev/null +++ b/diffusers3/pipelines/amused/__init__.py @@ -0,0 +1,62 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AmusedImg2ImgPipeline, + AmusedInpaintPipeline, + AmusedPipeline, + ) + + _dummy_objects.update( + { + "AmusedPipeline": AmusedPipeline, + "AmusedImg2ImgPipeline": AmusedImg2ImgPipeline, + "AmusedInpaintPipeline": AmusedInpaintPipeline, + } + ) +else: + _import_structure["pipeline_amused"] = ["AmusedPipeline"] + _import_structure["pipeline_amused_img2img"] = ["AmusedImg2ImgPipeline"] + _import_structure["pipeline_amused_inpaint"] = ["AmusedInpaintPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AmusedPipeline, + ) + else: + from .pipeline_amused import AmusedPipeline + from .pipeline_amused_img2img import AmusedImg2ImgPipeline + from .pipeline_amused_inpaint import AmusedInpaintPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/amused/pipeline_amused.py b/diffusers3/pipelines/amused/pipeline_amused.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c24b0aeeccc8e1980e98b4c3b7bd62900ff630 --- /dev/null +++ b/diffusers3/pipelines/amused/pipeline_amused.py @@ -0,0 +1,328 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...models import UVit2DModel, VQModel +from ...schedulers import AmusedScheduler +from ...utils import replace_example_docstring +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AmusedPipeline + + >>> pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +class AmusedPipeline(DiffusionPipeline): + image_processor: VaeImageProcessor + vqvae: VQModel + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModelWithProjection + transformer: UVit2DModel + scheduler: AmusedScheduler + + model_cpu_offload_seq = "text_encoder->transformer->vqvae" + + def __init__( + self, + vqvae: VQModel, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + transformer: UVit2DModel, + scheduler: AmusedScheduler, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + tokenizer=tokenizer, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[List[str], str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 12, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.IntTensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_encoder_hidden_states: Optional[torch.Tensor] = None, + output_type="pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + micro_conditioning_aesthetic_score: int = 6, + micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. 
+            width (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
+                The width in pixels of the generated image.
+            num_inference_steps (`int`, *optional*, defaults to 12):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 10.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.IntTensor`, *optional*):
+                Pre-generated tokens representing latent vectors in `self.vqvae`, to be used as inputs for image
+                generation. If not provided, the starting latents will be completely masked.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument. A single vector from the
+                pooled and projected final hidden states.
+            encoder_hidden_states (`torch.Tensor`, *optional*):
+                Pre-generated penultimate hidden states from the text encoder providing additional text conditioning.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            negative_encoder_hidden_states (`torch.Tensor`, *optional*):
+                Analogous to `encoder_hidden_states` for the positive prompt.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            callback (`Callable`, *optional*):
+                A function that calls every `callback_steps` steps during inference. The function is called with the
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called at
+                every step.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6):
+                The targeted aesthetic score according to the laion aesthetic classifier. See
+                https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of
+                https://arxiv.org/abs/2307.01952.
+            micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                The targeted height, width crop coordinates.
See the micro-conditioning section of + https://arxiv.org/abs/2307.01952. + temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): + Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. + + Examples: + + Returns: + [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images. + """ + if (prompt_embeds is not None and encoder_hidden_states is None) or ( + prompt_embeds is None and encoder_hidden_states is not None + ): + raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") + + if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( + negative_prompt_embeds is None and negative_encoder_hidden_states is not None + ): + raise ValueError( + "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither" + ) + + if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): + raise ValueError("pass only one of `prompt` or `prompt_embeds`") + + if isinstance(prompt, str): + prompt = [prompt] + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + if height is None: + height = self.transformer.config.sample_size * self.vae_scale_factor + + if width is None: + width = self.transformer.config.sample_size * self.vae_scale_factor + + if prompt_embeds is None: + input_ids = self.tokenizer( + prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + prompt_embeds = outputs.text_embeds + encoder_hidden_states = outputs.hidden_states[-2] + + prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) + encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + if guidance_scale > 1.0: + if negative_prompt_embeds is None: + if negative_prompt is None: + negative_prompt = [""] * len(prompt) + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + input_ids = self.tokenizer( + negative_prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + negative_prompt_embeds = outputs.text_embeds + negative_encoder_hidden_states = outputs.hidden_states[-2] + + negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) + negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) + encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) + + # Note that the micro conditionings _do_ flip the order of width, height for the original size + # and the crop coordinates. 
This is how it was done in the original code base + micro_conds = torch.tensor( + [ + width, + height, + micro_conditioning_crop_coord[0], + micro_conditioning_crop_coord[1], + micro_conditioning_aesthetic_score, + ], + device=self._execution_device, + dtype=encoder_hidden_states.dtype, + ) + micro_conds = micro_conds.unsqueeze(0) + micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) + + shape = (batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor) + + if latents is None: + latents = torch.full( + shape, self.scheduler.config.mask_token_id, dtype=torch.long, device=self._execution_device + ) + + self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) + + num_warmup_steps = len(self.scheduler.timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, timestep in enumerate(self.scheduler.timesteps): + if guidance_scale > 1.0: + model_input = torch.cat([latents] * 2) + else: + model_input = latents + + model_output = self.transformer( + model_input, + micro_conds=micro_conds, + pooled_text_emb=prompt_embeds, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if guidance_scale > 1.0: + uncond_logits, cond_logits = model_output.chunk(2) + model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) + + latents = self.scheduler.step( + model_output=model_output, + timestep=timestep, + sample=latents, + generator=generator, + ).prev_sample + + if i == len(self.scheduler.timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, timestep, latents) + + if output_type == "latent": + output = latents + else: + needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast + + if needs_upcasting: + self.vqvae.float() + + output = self.vqvae.decode( + latents, + force_not_quantize=True, + shape=( + batch_size, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + self.vqvae.config.latent_channels, + ), + ).sample.clip(0, 1) + output = self.image_processor.postprocess(output, output_type) + + if needs_upcasting: + self.vqvae.half() + + self.maybe_free_model_hooks() + + if not return_dict: + return (output,) + + return ImagePipelineOutput(output) diff --git a/diffusers3/pipelines/amused/pipeline_amused_img2img.py b/diffusers3/pipelines/amused/pipeline_amused_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..c74275b414d4283634b2aecb8037d6946c338244 --- /dev/null +++ b/diffusers3/pipelines/amused/pipeline_amused_img2img.py @@ -0,0 +1,349 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import UVit2DModel, VQModel +from ...schedulers import AmusedScheduler +from ...utils import replace_example_docstring +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AmusedImg2ImgPipeline + >>> from diffusers.utils import load_image + + >>> pipe = AmusedImg2ImgPipeline.from_pretrained( + ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "winter mountains" + >>> input_image = ( + ... load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg" + ... ) + ... .resize((512, 512)) + ... .convert("RGB") + ... ) + >>> image = pipe(prompt, input_image).images[0] + ``` +""" + + +class AmusedImg2ImgPipeline(DiffusionPipeline): + image_processor: VaeImageProcessor + vqvae: VQModel + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModelWithProjection + transformer: UVit2DModel + scheduler: AmusedScheduler + + model_cpu_offload_seq = "text_encoder->transformer->vqvae" + + # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before + # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter + # off the meta device. There should be a way to fix this instead of just not offloading it + _exclude_from_cpu_offload = ["vqvae"] + + def __init__( + self, + vqvae: VQModel, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + transformer: UVit2DModel, + scheduler: AmusedScheduler, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + tokenizer=tokenizer, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[List[str], str]] = None, + image: PipelineImageInput = None, + strength: float = 0.5, + num_inference_steps: int = 12, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[torch.Generator] = None, + prompt_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_encoder_hidden_states: Optional[torch.Tensor] = None, + output_type="pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + micro_conditioning_aesthetic_score: int = 6, + micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.5): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 12): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. A single vector from the + pooled and projected final hidden states. + encoder_hidden_states (`torch.Tensor`, *optional*): + Pre-generated penultimate hidden states from the text encoder providing additional text conditioning. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_encoder_hidden_states (`torch.Tensor`, *optional*): + Analogous to `encoder_hidden_states` for the positive prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6): + The targeted aesthetic score according to the laion aesthetic classifier. See + https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of + https://arxiv.org/abs/2307.01952. + micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)): + The targeted height, width crop coordinates. See the micro-conditioning section of + https://arxiv.org/abs/2307.01952. + temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): + Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. + + Examples: + + Returns: + [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images. + """ + + if (prompt_embeds is not None and encoder_hidden_states is None) or ( + prompt_embeds is None and encoder_hidden_states is not None + ): + raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") + + if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( + negative_prompt_embeds is None and negative_encoder_hidden_states is not None + ): + raise ValueError( + "pass either both `negative_prompt_embeds` and `negative_encoder_hidden_states` or neither" + ) + + if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): + raise ValueError("pass only one of `prompt` or `prompt_embeds`") + + if isinstance(prompt, str): + prompt = [prompt] + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + if prompt_embeds is None: + input_ids = self.tokenizer( + prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + prompt_embeds = outputs.text_embeds + encoder_hidden_states = outputs.hidden_states[-2] + + prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) + encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + if guidance_scale > 1.0: + if negative_prompt_embeds is None: + if negative_prompt is None: + negative_prompt = [""] * len(prompt) + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + input_ids = self.tokenizer( + negative_prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + negative_prompt_embeds = outputs.text_embeds + negative_encoder_hidden_states = outputs.hidden_states[-2] + + negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) + 
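+                # Classifier-free guidance: the negative embeddings are repeated once per requested image here and
+                # concatenated in front of the positive embeddings below, so a single forward pass of the transformer
+                # yields both the unconditional and the conditional logits.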
negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) + encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) + + image = self.image_processor.preprocess(image) + + height, width = image.shape[-2:] + + # Note that the micro conditionings _do_ flip the order of width, height for the original size + # and the crop coordinates. This is how it was done in the original code base + micro_conds = torch.tensor( + [ + width, + height, + micro_conditioning_crop_coord[0], + micro_conditioning_crop_coord[1], + micro_conditioning_aesthetic_score, + ], + device=self._execution_device, + dtype=encoder_hidden_states.dtype, + ) + + micro_conds = micro_conds.unsqueeze(0) + micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) + + self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) + num_inference_steps = int(len(self.scheduler.timesteps) * strength) + start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps + + needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast + + if needs_upcasting: + self.vqvae.float() + + latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents + latents_bsz, channels, latents_height, latents_width = latents.shape + latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) + latents = self.scheduler.add_noise( + latents, self.scheduler.timesteps[start_timestep_idx - 1], generator=generator + ) + latents = latents.repeat(num_images_per_prompt, 1, 1) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i in range(start_timestep_idx, len(self.scheduler.timesteps)): + timestep = self.scheduler.timesteps[i] + + if guidance_scale > 1.0: + model_input = torch.cat([latents] * 2) + else: + model_input = latents + + model_output = self.transformer( + model_input, + micro_conds=micro_conds, + pooled_text_emb=prompt_embeds, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if guidance_scale > 1.0: + uncond_logits, cond_logits = model_output.chunk(2) + model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) + + latents = self.scheduler.step( + model_output=model_output, + timestep=timestep, + sample=latents, + generator=generator, + ).prev_sample + + if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, timestep, latents) + + if output_type == "latent": + output = latents + else: + output = self.vqvae.decode( + latents, + force_not_quantize=True, + shape=( + batch_size, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + self.vqvae.config.latent_channels, + ), + ).sample.clip(0, 1) + output = self.image_processor.postprocess(output, output_type) + + if needs_upcasting: + self.vqvae.half() + + self.maybe_free_model_hooks() + + if not return_dict: + return (output,) + + return ImagePipelineOutput(output) diff --git a/diffusers3/pipelines/amused/pipeline_amused_inpaint.py b/diffusers3/pipelines/amused/pipeline_amused_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..24801e0ef977f40280b7af35c21cd427a8f2cf4b --- 
/dev/null +++ b/diffusers3/pipelines/amused/pipeline_amused_inpaint.py @@ -0,0 +1,380 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import UVit2DModel, VQModel +from ...schedulers import AmusedScheduler +from ...utils import replace_example_docstring +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AmusedInpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = AmusedInpaintPipeline.from_pretrained( + ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "fall mountains" + >>> input_image = ( + ... load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg" + ... ) + ... .resize((512, 512)) + ... .convert("RGB") + ... ) + >>> mask = ( + ... load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png" + ... ) + ... .resize((512, 512)) + ... .convert("L") + ... ) + >>> pipe(prompt, input_image, mask).images[0].save("out.png") + ``` +""" + + +class AmusedInpaintPipeline(DiffusionPipeline): + image_processor: VaeImageProcessor + vqvae: VQModel + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModelWithProjection + transformer: UVit2DModel + scheduler: AmusedScheduler + + model_cpu_offload_seq = "text_encoder->transformer->vqvae" + + # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before + # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter + # off the meta device. 
There should be a way to fix this instead of just not offloading it + _exclude_from_cpu_offload = ["vqvae"] + + def __init__( + self, + vqvae: VQModel, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + transformer: UVit2DModel, + scheduler: AmusedScheduler, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + tokenizer=tokenizer, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, + do_normalize=False, + do_binarize=True, + do_convert_grayscale=True, + do_resize=True, + ) + self.scheduler.register_to_config(masking_schedule="linear") + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[List[str], str]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + strength: float = 1.0, + num_inference_steps: int = 12, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[torch.Generator] = None, + prompt_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_encoder_hidden_states: Optional[torch.Tensor] = None, + output_type="pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + micro_conditioning_aesthetic_score: int = 6, + micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask + are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a + single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one + color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, + H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, + 1)`, or `(H, W)`. 
+ strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 16): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. A single vector from the + pooled and projected final hidden states. + encoder_hidden_states (`torch.Tensor`, *optional*): + Pre-generated penultimate hidden states from the text encoder providing additional text conditioning. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_encoder_hidden_states (`torch.Tensor`, *optional*): + Analogous to `encoder_hidden_states` for the positive prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6): + The targeted aesthetic score according to the laion aesthetic classifier. See + https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of + https://arxiv.org/abs/2307.01952. 
+ micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)): + The targeted height, width crop coordinates. See the micro-conditioning section of + https://arxiv.org/abs/2307.01952. + temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): + Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. + + Examples: + + Returns: + [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images. + """ + + if (prompt_embeds is not None and encoder_hidden_states is None) or ( + prompt_embeds is None and encoder_hidden_states is not None + ): + raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") + + if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( + negative_prompt_embeds is None and negative_encoder_hidden_states is not None + ): + raise ValueError( + "pass either both `negatve_prompt_embeds` and `negative_encoder_hidden_states` or neither" + ) + + if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): + raise ValueError("pass only one of `prompt` or `prompt_embeds`") + + if isinstance(prompt, str): + prompt = [prompt] + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + if prompt_embeds is None: + input_ids = self.tokenizer( + prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + prompt_embeds = outputs.text_embeds + encoder_hidden_states = outputs.hidden_states[-2] + + prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) + encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + if guidance_scale > 1.0: + if negative_prompt_embeds is None: + if negative_prompt is None: + negative_prompt = [""] * len(prompt) + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + input_ids = self.tokenizer( + negative_prompt, + return_tensors="pt", + padding="max_length", + truncation=True, + max_length=self.tokenizer.model_max_length, + ).input_ids.to(self._execution_device) + + outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) + negative_prompt_embeds = outputs.text_embeds + negative_encoder_hidden_states = outputs.hidden_states[-2] + + negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) + negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) + + prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) + encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) + + image = self.image_processor.preprocess(image) + + height, width = image.shape[-2:] + + # Note that the micro conditionings _do_ flip the order of width, height for the original size + # and the crop coordinates. 
This is how it was done in the original code base + micro_conds = torch.tensor( + [ + width, + height, + micro_conditioning_crop_coord[0], + micro_conditioning_crop_coord[1], + micro_conditioning_aesthetic_score, + ], + device=self._execution_device, + dtype=encoder_hidden_states.dtype, + ) + + micro_conds = micro_conds.unsqueeze(0) + micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) + + self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) + num_inference_steps = int(len(self.scheduler.timesteps) * strength) + start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps + + needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast + + if needs_upcasting: + self.vqvae.float() + + latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents + latents_bsz, channels, latents_height, latents_width = latents.shape + latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) + + mask = self.mask_processor.preprocess( + mask_image, height // self.vae_scale_factor, width // self.vae_scale_factor + ) + mask = mask.reshape(mask.shape[0], latents_height, latents_width).bool().to(latents.device) + latents[mask] = self.scheduler.config.mask_token_id + + starting_mask_ratio = mask.sum() / latents.numel() + + latents = latents.repeat(num_images_per_prompt, 1, 1) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i in range(start_timestep_idx, len(self.scheduler.timesteps)): + timestep = self.scheduler.timesteps[i] + + if guidance_scale > 1.0: + model_input = torch.cat([latents] * 2) + else: + model_input = latents + + model_output = self.transformer( + model_input, + micro_conds=micro_conds, + pooled_text_emb=prompt_embeds, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + ) + + if guidance_scale > 1.0: + uncond_logits, cond_logits = model_output.chunk(2) + model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) + + latents = self.scheduler.step( + model_output=model_output, + timestep=timestep, + sample=latents, + generator=generator, + starting_mask_ratio=starting_mask_ratio, + ).prev_sample + + if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, timestep, latents) + + if output_type == "latent": + output = latents + else: + output = self.vqvae.decode( + latents, + force_not_quantize=True, + shape=( + batch_size, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + self.vqvae.config.latent_channels, + ), + ).sample.clip(0, 1) + output = self.image_processor.postprocess(output, output_type) + + if needs_upcasting: + self.vqvae.half() + + self.maybe_free_model_hooks() + + if not return_dict: + return (output,) + + return ImagePipelineOutput(output) diff --git a/diffusers3/pipelines/animatediff/__init__.py b/diffusers3/pipelines/animatediff/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d916abf2d85dfeee217479f14711dc2d33002ae6 --- /dev/null +++ b/diffusers3/pipelines/animatediff/__init__.py @@ -0,0 +1,57 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, 
+ is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {"pipeline_output": ["AnimateDiffPipelineOutput"]} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_animatediff"] = ["AnimateDiffPipeline"] + _import_structure["pipeline_animatediff_controlnet"] = ["AnimateDiffControlNetPipeline"] + _import_structure["pipeline_animatediff_sdxl"] = ["AnimateDiffSDXLPipeline"] + _import_structure["pipeline_animatediff_sparsectrl"] = ["AnimateDiffSparseControlNetPipeline"] + _import_structure["pipeline_animatediff_video2video"] = ["AnimateDiffVideoToVideoPipeline"] + _import_structure["pipeline_animatediff_video2video_controlnet"] = ["AnimateDiffVideoToVideoControlNetPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_animatediff import AnimateDiffPipeline + from .pipeline_animatediff_controlnet import AnimateDiffControlNetPipeline + from .pipeline_animatediff_sdxl import AnimateDiffSDXLPipeline + from .pipeline_animatediff_sparsectrl import AnimateDiffSparseControlNetPipeline + from .pipeline_animatediff_video2video import AnimateDiffVideoToVideoPipeline + from .pipeline_animatediff_video2video_controlnet import AnimateDiffVideoToVideoControlNetPipeline + from .pipeline_output import AnimateDiffPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/animatediff/pipeline_animatediff.py b/diffusers3/pipelines/animatediff/pipeline_animatediff.py new file mode 100644 index 0000000000000000000000000000000000000000..cb6f50f43c4fd958abb9d90f40b418361fa7a18c --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_animatediff.py @@ -0,0 +1,860 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
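(Editorial note, not part of the vendored file that follows: the AnimateDiff text-to-video pipeline below keeps its latents as 5D tensors of shape `(batch, channels, num_frames, height // vae_scale_factor, width // vae_scale_factor)` and repeats the text embeddings once per frame before the `UNetMotionModel` forward pass. A minimal shape walk-through, assuming SD-1.5-style values of 4 latent channels, a VAE scale factor of 8, CLIP embeddings of shape `[2, 77, 768]` under classifier-free guidance, and a 512x512, 16-frame generation:)

```py
import torch

# Hypothetical shape check for the AnimateDiff latent layout used in the pipeline below.
batch_size, num_channels_latents, num_frames = 1, 4, 16
height = width = 512
vae_scale_factor = 8  # 2 ** (len(vae.config.block_out_channels) - 1) for SD-1.5-style VAEs

latents = torch.randn(
    batch_size, num_channels_latents, num_frames,
    height // vae_scale_factor, width // vae_scale_factor,
)
print(latents.shape)  # torch.Size([1, 4, 16, 64, 64])

# Prompt embeddings are repeat_interleave'd across frames so every frame
# receives the same text conditioning.
prompt_embeds = torch.randn(2, 77, 768)
prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0)
print(prompt_embeds.shape)  # torch.Size([32, 77, 768])
```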
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..free_init_utils import FreeInitMixin +from ..free_noise_utils import AnimateDiffFreeNoiseMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler + >>> from diffusers.utils import export_to_gif + + >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") + >>> pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) + >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False) + >>> output = pipe(prompt="A corgi walking in the park") + >>> frames = output.frames[0] + >>> export_to_gif(frames, "animation.gif") + ``` +""" + + +class AnimateDiffPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FreeInitMixin, + AnimateDiffFreeNoiseMixin, +): + r""" + Pipeline for text-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. 
+ scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + motion_adapter: MotionAdapter, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def decode_latents(self, latents, decode_chunk_size: int = 16): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + video = [] + for i in range(0, latents.shape[0], decode_chunk_size): + batch_latents = latents[i : i + decode_chunk_size] + batch_latents = self.vae.decode(batch_latents).sample + video.append(batch_latents) + + video = torch.cat(video) + video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant 
overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and not isinstance(prompt, (str, list, dict)): + raise ValueError(f"`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)=}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + # If FreeNoise is enabled, generate latents as described in Equation (7) of [FreeNoise](https://arxiv.org/abs/2310.15169) + if self.free_noise_enabled: + latents = self._prepare_latents_free_noise( + batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + num_frames: Optional[int] = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + decode_chunk_size: int = 16, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. 
If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + decode_chunk_size (`int`, defaults to `16`): + The number of frames to decode at a time when calling `decode_latents` method. + + Examples: + + Returns: + [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, (str, dict)): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + if self.free_noise_enabled: + prompt_embeds, negative_prompt_embeds = self._encode_prompt_free_noise( + prompt=prompt, + num_frames=num_frames, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + else: + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8. 
Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 10. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/animatediff/pipeline_animatediff_controlnet.py b/diffusers3/pipelines/animatediff/pipeline_animatediff_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..5357d6d5b8d96609a8ecb5d81892dfc5371dc53f --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_animatediff_controlnet.py @@ -0,0 +1,1100 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
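(Editorial note, not part of the vendored files: the denoising loops in the aMUSEd and AnimateDiff pipelines above all merge the unconditional and conditional model outputs with the same classifier-free guidance formula, `uncond + guidance_scale * (cond - uncond)`. A tiny numeric illustration with made-up values:)

```py
import torch

# Classifier-free guidance combination as used in the loops above.
guidance_scale = 7.5
noise_pred_uncond = torch.tensor([0.10, -0.20])
noise_pred_text = torch.tensor([0.30, -0.05])

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([1.6000, 0.9250])
```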
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ...video_processor import VideoProcessor +from ..controlnet.multicontrolnet import MultiControlNetModel +from ..free_init_utils import FreeInitMixin +from ..free_noise_utils import AnimateDiffFreeNoiseMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import ( + ... AnimateDiffControlNetPipeline, + ... AutoencoderKL, + ... ControlNetModel, + ... MotionAdapter, + ... LCMScheduler, + ... ) + >>> from diffusers.utils import export_to_gif, load_video + + >>> # Additionally, you will need a preprocess videos before they can be used with the ControlNet + >>> # HF maintains just the right package for it: `pip install controlnet_aux` + >>> from controlnet_aux.processor import ZoeDetector + + >>> # Download controlnets from https://huggingface.co/lllyasviel/ControlNet-v1-1 to use .from_single_file + >>> # Download Diffusers-format controlnets, such as https://huggingface.co/lllyasviel/sd-controlnet-depth, to use .from_pretrained() + >>> controlnet = ControlNetModel.from_single_file("control_v11f1p_sd15_depth.pth", torch_dtype=torch.float16) + + >>> # We use AnimateLCM for this example but one can use the original motion adapters as well (for example, https://huggingface.co/guoyww/animatediff-motion-adapter-v1-5-3) + >>> motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") + + >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16) + >>> pipe: AnimateDiffControlNetPipeline = AnimateDiffControlNetPipeline.from_pretrained( + ... "SG161222/Realistic_Vision_V5.1_noVAE", + ... motion_adapter=motion_adapter, + ... controlnet=controlnet, + ... vae=vae, + ... ).to(device="cuda", dtype=torch.float16) + >>> pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") + >>> pipe.load_lora_weights( + ... "wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora" + ... ) + >>> pipe.set_adapters(["lcm-lora"], [0.8]) + + >>> depth_detector = ZoeDetector.from_pretrained("lllyasviel/Annotators").to("cuda") + >>> video = load_video( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif" + ... ) + >>> conditioning_frames = [] + + >>> with pipe.progress_bar(total=len(video)) as progress_bar: + ... for frame in video: + ... conditioning_frames.append(depth_detector(frame)) + ... 
progress_bar.update() + + >>> prompt = "a panda, playing a guitar, sitting in a pink boat, in the ocean, mountains in background, realistic, high quality" + >>> negative_prompt = "bad quality, worst quality" + + >>> video = pipe( + ... prompt=prompt, + ... negative_prompt=negative_prompt, + ... num_frames=len(video), + ... num_inference_steps=10, + ... guidance_scale=2.0, + ... conditioning_frames=conditioning_frames, + ... generator=torch.Generator().manual_seed(42), + ... ).frames[0] + + >>> export_to_gif(video, "animatediff_controlnet.gif", fps=8) + ``` +""" + + +class AnimateDiffControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FreeInitMixin, + AnimateDiffFreeNoiseMixin, +): + r""" + Pipeline for text-to-video generation with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + motion_adapter: MotionAdapter, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + feature_extractor: Optional[CLIPImageProcessor] = None, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) + self.control_video_processor = VideoProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
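+                # e.g. `clip_skip=1` selects `hidden_states[-2]` (the penultimate encoder layer),
+                # which is then passed through the final LayerNorm below.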
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.decode_latents + def decode_latents(self, latents, decode_chunk_size: int = 16): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + video = [] + for i in range(0, latents.shape[0], decode_chunk_size): + batch_latents = latents[i : i + decode_chunk_size] + batch_latents = self.vae.decode(batch_latents).sample + video.append(batch_latents) + + video = torch.cat(video) + video = video[None, :].reshape((batch_size, num_frames, -1) + 
video.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + num_frames, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + video=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and not isinstance(prompt, (str, list, dict)): + raise ValueError(f"`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(video, list): + raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(video)}") + if len(video) != num_frames: + raise ValueError(f"Excepted image to have length {num_frames} but got {len(video)=}") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(video, list) or not isinstance(video[0], list): + raise TypeError(f"For multiple controlnets: `image` must be type list of lists but got {type(video)=}") + if len(video[0]) != num_frames: + raise ValueError(f"Expected length of image sublist as {num_frames} but got {len(video[0])=}") + if any(len(img) != len(video[0]) for img in video): + raise ValueError("All conditioning frame batches for multicontrolnet must be same size") + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + # If FreeNoise is enabled, generate latents as described in Equation (7) of [FreeNoise](https://arxiv.org/abs/2310.15169) + if self.free_noise_enabled: + latents = self._prepare_latents_free_noise( + batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_video( + self, + video, + width, + height, + batch_size, + num_videos_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + video = self.control_video_processor.preprocess_video(video, height=height, width=width).to( + dtype=torch.float32 + ) + video = video.permute(0, 2, 1, 3, 4).flatten(0, 1) + video_batch_size = video.shape[0] + + if video_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_videos_per_prompt + + video = video.repeat_interleave(repeat_by, dim=0) + video = video.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + video = torch.cat([video] * 2) + + return video + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
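+    # Concretely, the denoising loop below combines the two predictions as
+    #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # and `do_classifier_free_guidance` (below) is only enabled when `guidance_scale > 1`.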
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + num_frames: Optional[int] = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[PipelineImageInput] = None, + conditioning_frames: Optional[List[PipelineImageInput]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + decode_chunk_size: int = 16, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + conditioning_frames (`List[PipelineImageInput]`, *optional*): + The ControlNet input condition to provide guidance to the `unet` for generation. If multiple + ControlNets are specified, images must be passed as a list such that each element of the list can be + correctly batched for input to a single ControlNet. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + height=height, + width=width, + num_frames=num_frames, + negative_prompt=negative_prompt, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + video=conditioning_frames, + controlnet_conditioning_scale=controlnet_conditioning_scale, + control_guidance_start=control_guidance_start, + control_guidance_end=control_guidance_end, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, (str, dict)): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + if self.free_noise_enabled: + prompt_embeds, negative_prompt_embeds = self._encode_prompt_free_noise( + prompt=prompt, + num_frames=num_frames, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + else: + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + if isinstance(controlnet, ControlNetModel): + conditioning_frames = self.prepare_video( + video=conditioning_frames, + width=width, + height=height, + batch_size=batch_size * num_videos_per_prompt * num_frames, + num_videos_per_prompt=num_videos_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + cond_prepared_videos = [] + for frame_ in conditioning_frames: + prepared_video = self.prepare_video( + video=frame_, + width=width, + height=height, + batch_size=batch_size * num_videos_per_prompt * num_frames, + num_videos_per_prompt=num_videos_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + cond_prepared_videos.append(prepared_video) + conditioning_frames = cond_prepared_videos + else: + assert False + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8. Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + control_model_input = torch.transpose(control_model_input, 1, 2) + control_model_input = control_model_input.reshape( + (-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4]) + ) + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=conditioning_frames, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = 
callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 10. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/animatediff/pipeline_animatediff_sdxl.py b/diffusers3/pipelines/animatediff/pipeline_animatediff_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..e531c91c168fc1dc780c7f2588d53d7666baad7c --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_animatediff_sdxl.py @@ -0,0 +1,1276 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, MotionAdapter, UNet2DConditionModel, UNetMotionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..free_init_utils import FreeInitMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers.models import MotionAdapter + >>> from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler + >>> from diffusers.utils import export_to_gif + + >>> adapter = MotionAdapter.from_pretrained( + ... "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16 + ... ) + + >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" + >>> scheduler = DDIMScheduler.from_pretrained( + ... 
model_id, + ... subfolder="scheduler", + ... clip_sample=False, + ... timestep_spacing="linspace", + ... beta_schedule="linear", + ... steps_offset=1, + ... ) + >>> pipe = AnimateDiffSDXLPipeline.from_pretrained( + ... model_id, + ... motion_adapter=adapter, + ... scheduler=scheduler, + ... torch_dtype=torch.float16, + ... variant="fp16", + ... ).to("cuda") + + >>> # enable memory savings + >>> pipe.enable_vae_slicing() + >>> pipe.enable_vae_tiling() + + >>> output = pipe( + ... prompt="a panda surfing in the ocean, realistic, high quality", + ... negative_prompt="low quality, worst quality", + ... num_inference_steps=20, + ... guidance_scale=8, + ... width=1024, + ... height=1024, + ... num_frames=16, + ... ) + + >>> frames = output.frames[0] + >>> export_to_gif(frames, "animation.gif") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AnimateDiffSDXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + FreeInitMixin, +): + r""" + Pipeline for text-to-video generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the encoded image latents. 
+ scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + motion_adapter: MotionAdapter, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + ): + super().__init__() + + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt with num_images_per_prompt->num_videos_per_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_videos_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_videos_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
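+                    # e.g. `clip_skip=1` selects `hidden_states[-3]`, one layer earlier than the
+                    # default `hidden_states[-2]` used when `clip_skip` is None.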
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_videos_per_prompt).view( + bs_embed * num_videos_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_videos_per_prompt).view( + bs_embed * num_videos_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
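+ # (schedulers whose `step()` signature lacks `eta` or `generator` simply receive an empty kwargs dict)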
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
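+ Note: the implementation below builds a standard sinusoidal embedding of `w * 1000` and zero-pads the final column when `embedding_dim` is odd.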
+ """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + num_frames: int = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + num_frames: + The number of video frames that are generated. 
Defaults to 16 frames which at 8 frames per second + amounts to 2 seconds of video. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated video. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated video. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality video at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, + usually at the expense of lower video quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the video generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the video generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt.
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. If not provided, embeddings are computed from the + `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.AnimateDiffPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified.
Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be the same + as the `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + # 0.
Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 7.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 8. Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_videos_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + + # 9. 
Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds: + added_cond_kwargs["image_embeds"] = image_embeds + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + progress_bar.update() + + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # 10. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + # 11. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/animatediff/pipeline_animatediff_sparsectrl.py b/diffusers3/pipelines/animatediff/pipeline_animatediff_sparsectrl.py new file mode 100644 index 0000000000000000000000000000000000000000..8b037cdc34fbb45282e06d65798844429f1be2c5 --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_animatediff_sparsectrl.py @@ -0,0 +1,1010 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.controlnet_sparsectrl import SparseControlNetModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ...video_processor import VideoProcessor +from ..free_init_utils import FreeInitMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> import torch + >>> from diffusers import AnimateDiffSparseControlNetPipeline + >>> from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel + >>> from diffusers.schedulers import DPMSolverMultistepScheduler + >>> from diffusers.utils import export_to_gif, load_image + + >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE" + >>> motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-3" + >>> controlnet_id = "guoyww/animatediff-sparsectrl-scribble" + >>> lora_adapter_id = "guoyww/animatediff-motion-lora-v1-5-3" + >>> vae_id = "stabilityai/sd-vae-ft-mse" + >>> device = "cuda" + + >>> motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=torch.float16).to(device) + >>> controlnet = SparseControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float16).to(device) + >>> vae = AutoencoderKL.from_pretrained(vae_id, torch_dtype=torch.float16).to(device) + >>> scheduler = DPMSolverMultistepScheduler.from_pretrained( + ... model_id, + ... subfolder="scheduler", + ... beta_schedule="linear", + ... algorithm_type="dpmsolver++", + ... use_karras_sigmas=True, + ... ) + >>> pipe = AnimateDiffSparseControlNetPipeline.from_pretrained( + ... model_id, + ... motion_adapter=motion_adapter, + ... controlnet=controlnet, + ... vae=vae, + ... scheduler=scheduler, + ... torch_dtype=torch.float16, + ... 
).to(device) + >>> pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora") + >>> pipe.fuse_lora(lora_scale=1.0) + + >>> prompt = "an aerial view of a cyberpunk city, night time, neon lights, masterpiece, high quality" + >>> negative_prompt = "low quality, worst quality, letterboxed" + + >>> image_files = [ + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-1.png", + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-2.png", + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-scribble-3.png", + ... ] + >>> condition_frame_indices = [0, 8, 15] + >>> conditioning_frames = [load_image(img_file) for img_file in image_files] + + >>> video = pipe( + ... prompt=prompt, + ... negative_prompt=negative_prompt, + ... num_inference_steps=25, + ... conditioning_frames=conditioning_frames, + ... controlnet_conditioning_scale=1.0, + ... controlnet_frame_indices=condition_frame_indices, + ... generator=torch.Generator().manual_seed(1337), + ... ).frames[0] + >>> export_to_gif(video, "output.gif") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class AnimateDiffSparseControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FreeInitMixin, +): + r""" + Pipeline for controlled text-to-video generation using the method described in [SparseCtrl: Adding Sparse Controls + to Text-to-Video Diffusion Models](https://arxiv.org/abs/2311.16933). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. 
+ scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + motion_adapter: MotionAdapter, + controlnet: SparseControlNetModel, + scheduler: KarrasDiffusionSchedulers, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
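+ Returns: + A tuple `(prompt_embeds, negative_prompt_embeds)` of `torch.Tensor`s.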
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + image=None, + controlnet_conditioning_scale: float = 1.0, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + # check `image` + if ( + isinstance(self.controlnet, SparseControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, SparseControlNetModel) + ): + if isinstance(image, list): + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + self.check_image(image, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, SparseControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, SparseControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + else: + assert False + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image(self, image, width, height, device, dtype): + image = self.control_image_processor.preprocess(image, height=height, width=width) + controlnet_images = image.unsqueeze(0).to(device, dtype) + batch_size, num_frames, channels, height, width = controlnet_images.shape + + # TODO: remove below line + assert controlnet_images.min() >= 0 and controlnet_images.max() <= 1 + + if self.controlnet.use_simplified_condition_embedding: + controlnet_images = controlnet_images.reshape(batch_size * num_frames, channels, height, width) + controlnet_images = 2 * controlnet_images - 1 + conditioning_frames = retrieve_latents(self.vae.encode(controlnet_images)) * self.vae.config.scaling_factor + conditioning_frames = conditioning_frames.reshape( + batch_size, num_frames, 4, height // self.vae_scale_factor, width // self.vae_scale_factor + ) + else: + conditioning_frames = controlnet_images + + conditioning_frames = conditioning_frames.permute(0, 2, 1, 3, 4) # [b, c, f, h, w] + return conditioning_frames + + def prepare_sparse_control_conditioning( + self, + conditioning_frames: torch.Tensor, + num_frames: int, + controlnet_frame_indices: int, + device: torch.device, + dtype: torch.dtype, + ) -> Tuple[torch.Tensor, torch.Tensor]: + assert conditioning_frames.shape[2] >= len(controlnet_frame_indices) + + batch_size, channels, _, height, width = conditioning_frames.shape + controlnet_cond = torch.zeros((batch_size, channels, num_frames, height, width), dtype=dtype, device=device) + controlnet_cond_mask = torch.zeros((batch_size, 1, num_frames, height, width), dtype=dtype, device=device) + controlnet_cond[:, :, controlnet_frame_indices] = conditioning_frames[:, :, : len(controlnet_frame_indices)] + controlnet_cond_mask[:, :, controlnet_frame_indices] = 1 + + return controlnet_cond, controlnet_cond_mask + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
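+    # In practice this means the denoising loop combines the two noise predictions as
+    # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond).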
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_frames: int = 16, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + conditioning_frames: Optional[List[PipelineImageInput]] = None, + output_type: str = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + controlnet_frame_indices: List[int] = [0], + guess_mode: bool = False, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + conditioning_frames (`List[PipelineImageInput]`, *optional*): + The SparseControlNet input to provide guidance to the `unet` for generation. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + controlnet_frame_indices (`List[int]`): + The indices where the conditioning frames must be applied for generation. Multiple frames can be + provided to guide the model to generate similar structure outputs, where the `unet` can + "fill-in-the-gaps" for interpolation videos, or a single frame could be provided for general expected + structure. Must have the same length as `conditioning_frames`. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + height=height, + width=width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ip_adapter_image=ip_adapter_image, + ip_adapter_image_embeds=ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + image=conditioning_frames, + controlnet_conditioning_scale=controlnet_conditioning_scale, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, SparseControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + # 4. Prepare IP-Adapter embeddings + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 5. Prepare controlnet conditioning + conditioning_frames = self.prepare_image(conditioning_frames, width, height, device, controlnet.dtype) + controlnet_cond, controlnet_cond_mask = self.prepare_sparse_control_conditioning( + conditioning_frames, num_frames, controlnet_frame_indices, device, controlnet.dtype + ) + + # 6. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 7. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 10. Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if guess_mode and self.do_classifier_free_guidance: + # Infer SparseControlNetModel only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=controlnet_cond, + conditioning_mask=controlnet_cond_mask, + conditioning_scale=controlnet_conditioning_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 11. 
Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 12. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/animatediff/pipeline_animatediff_video2video.py b/diffusers3/pipelines/animatediff/pipeline_animatediff_video2video.py new file mode 100644 index 0000000000000000000000000000000000000000..1ebe2b9b60ddd84efd2fb78e3369d523a73f8d4f --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_animatediff_video2video.py @@ -0,0 +1,1059 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..free_init_utils import FreeInitMixin +from ..free_noise_utils import AnimateDiffFreeNoiseMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import imageio + >>> import requests + >>> import torch + >>> from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter + >>> from diffusers.utils import export_to_gif + >>> from io import BytesIO + >>> from PIL import Image + + >>> adapter = MotionAdapter.from_pretrained( + ... "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16 + ... ) + >>> pipe = AnimateDiffVideoToVideoPipeline.from_pretrained( + ... "SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter + ... ).to("cuda") + >>> pipe.scheduler = DDIMScheduler( + ... beta_schedule="linear", steps_offset=1, clip_sample=False, timespace_spacing="linspace" + ... ) + + + >>> def load_video(file_path: str): + ... images = [] + + ... if file_path.startswith(("http://", "https://")): + ... # If the file_path is a URL + ... response = requests.get(file_path) + ... response.raise_for_status() + ... 
content = BytesIO(response.content) + ... vid = imageio.get_reader(content) + ... else: + ... # Assuming it's a local file path + ... vid = imageio.get_reader(file_path) + + ... for frame in vid: + ... pil_image = Image.fromarray(frame) + ... images.append(pil_image) + + ... return images + + + >>> video = load_video( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif" + ... ) + >>> output = pipe( + ... video=video, prompt="panda playing a guitar, on a boat, in the ocean, high quality", strength=0.5 + ... ) + >>> frames = output.frames[0] + >>> export_to_gif(frames, "animation.gif") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AnimateDiffVideoToVideoPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FreeInitMixin, + AnimateDiffFreeNoiseMixin, +): + r""" + Pipeline for video-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + motion_adapter: MotionAdapter, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, (str, dict)): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def encode_video(self, video, generator, decode_chunk_size: int = 16) -> torch.Tensor: + latents = [] + for i in range(0, len(video), decode_chunk_size): + batch_video = video[i : i + decode_chunk_size] + batch_video = retrieve_latents(self.vae.encode(batch_video), generator=generator) + latents.append(batch_video) + return torch.cat(latents) + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.decode_latents + def decode_latents(self, latents, decode_chunk_size: int = 16): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * 
num_frames, channels, height, width) + + video = [] + for i in range(0, latents.shape[0], decode_chunk_size): + batch_latents = latents[i : i + decode_chunk_size] + batch_latents = self.vae.decode(batch_latents).sample + video.append(batch_latents) + + video = torch.cat(video) + video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + height, + width, + video=None, + latents=None, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and not isinstance(prompt, (str, list, dict)): + raise ValueError(f"`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if video is not None and latents is not None: + raise ValueError("Only one of `video` or `latents` should be provided") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def get_timesteps(self, num_inference_steps, timesteps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def prepare_latents( + self, + video: Optional[torch.Tensor] = None, + height: int = 64, + width: int = 64, + num_channels_latents: int = 4, + batch_size: int = 1, + timestep: Optional[int] = None, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + decode_chunk_size: int = 16, + add_noise: bool = False, + ) -> torch.Tensor: + num_frames = video.shape[1] if latents is None else latents.shape[2] + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + video = video.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + if len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + init_latents = [ + self.encode_video(video[i], generator[i], decode_chunk_size).unsqueeze(0) + for i in range(batch_size) + ] + else: + init_latents = [self.encode_video(vid, generator, decode_chunk_size).unsqueeze(0) for vid in video] + + init_latents = torch.cat(init_latents, dim=0) + + # restore vae to original dtype + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + error_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). 
Please make sure to update your script to pass as many initial images as text prompts" + ) + raise ValueError(error_message) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4) + else: + if shape != latents.shape: + # [B, C, F, H, W] + raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}") + + latents = latents.to(device, dtype=dtype) + + if add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.add_noise(latents, noise, timestep) + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + def __call__( + self, + video: List[List[PipelineImageInput]] = None, + prompt: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + enforce_inference_steps: bool = False, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + guidance_scale: float = 7.5, + strength: float = 0.8, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + decode_chunk_size: int = 16, + ): + r""" + The call function to the pipeline for generation. + + Args: + video (`List[PipelineImageInput]`): + The input video to condition the generation on. Must be a list of images/frames of the video. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. 
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + strength (`float`, *optional*, defaults to 0.8): + Higher strength leads to more differences between original video and generated video. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`AnimateDiffPipelineOutput`] instead of a plain tuple. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + decode_chunk_size (`int`, defaults to `16`): + The number of frames to decode at a time when calling `decode_latents` method. + + Examples: + + Returns: + [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + strength=strength, + height=height, + width=width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + video=video, + latents=latents, + ip_adapter_image=ip_adapter_image, + ip_adapter_image_embeds=ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, (str, dict)): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + dtype = self.dtype + + # 3. Prepare timesteps + if not enforce_inference_steps: + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + else: + denoising_inference_steps = int(num_inference_steps / strength) + timesteps, denoising_inference_steps = retrieve_timesteps( + self.scheduler, denoising_inference_steps, device, timesteps, sigmas + ) + timesteps = timesteps[-num_inference_steps:] + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + + # 4. 
Prepare latent variables + if latents is None: + video = self.video_processor.preprocess_video(video, height=height, width=width) + # Move the number of frames before the number of channels. + video = video.permute(0, 2, 1, 3, 4) + video = video.to(device=device, dtype=dtype) + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + video=video, + height=height, + width=width, + num_channels_latents=num_channels_latents, + batch_size=batch_size * num_videos_per_prompt, + timestep=latent_timestep, + dtype=dtype, + device=device, + generator=generator, + latents=latents, + decode_chunk_size=decode_chunk_size, + add_noise=enforce_inference_steps, + ) + + # 5. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + num_frames = latents.shape[2] + if self.free_noise_enabled: + prompt_embeds, negative_prompt_embeds = self._encode_prompt_free_noise( + prompt=prompt, + num_frames=num_frames, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + else: + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + # 6. Prepare IP-Adapter embeddings + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + num_inference_steps = len(timesteps) + # make sure to readjust timesteps based on strength + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 9. 
Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 10. Post-processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 11. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py b/diffusers3/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..1d26f95a2f586aba68f55f7b9eadf6cca58eac71 --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_animatediff_video2video_controlnet.py @@ -0,0 +1,1341 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
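+# NOTE: this module appears to be a vendored copy of the upstream diffusers AnimateDiff video-to-video ControlNet pipeline, bundled under `diffusers3` presumably so the Space imports this local version rather than the installed `diffusers` package.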
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ...video_processor import VideoProcessor +from ..controlnet.multicontrolnet import MultiControlNetModel +from ..free_init_utils import FreeInitMixin +from ..free_noise_utils import AnimateDiffFreeNoiseMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import AnimateDiffPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from PIL import Image + >>> from tqdm.auto import tqdm + + >>> from diffusers import AnimateDiffVideoToVideoControlNetPipeline + >>> from diffusers.utils import export_to_gif, load_video + >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter, LCMScheduler + + >>> controlnet = ControlNetModel.from_pretrained( + ... "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16 + ... ) + >>> motion_adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM") + >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16) + + >>> pipe = AnimateDiffVideoToVideoControlNetPipeline.from_pretrained( + ... "SG161222/Realistic_Vision_V5.1_noVAE", + ... motion_adapter=motion_adapter, + ... controlnet=controlnet, + ... vae=vae, + ... ).to(device="cuda", dtype=torch.float16) + + >>> pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear") + >>> pipe.load_lora_weights( + ... "wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora" + ... ) + >>> pipe.set_adapters(["lcm-lora"], [0.8]) + + >>> video = load_video( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/dance.gif" + ... ) + >>> video = [frame.convert("RGB") for frame in video] + + >>> from controlnet_aux.processor import OpenposeDetector + + >>> open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators").to("cuda") + >>> conditioning_frames = [] + >>> for frame in tqdm(video): + ... conditioning_frames.append(open_pose(frame)) + + >>> prompt = "astronaut in space, dancing" + >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly" + + >>> strength = 0.8 + >>> with torch.inference_mode(): + ... video = pipe( + ... video=video, + ... prompt=prompt, + ... negative_prompt=negative_prompt, + ... num_inference_steps=10, + ... guidance_scale=2.0, + ... controlnet_conditioning_scale=0.75, + ... conditioning_frames=conditioning_frames, + ... strength=strength, + ... generator=torch.Generator().manual_seed(42), + ...
).frames[0] + + >>> video = [frame.resize(conditioning_frames[0].size) for frame in video] + >>> export_to_gif(video, f"animatediff_vid2vid_controlnet.gif", fps=8) + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AnimateDiffVideoToVideoControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FreeInitMixin, + AnimateDiffFreeNoiseMixin, +): + r""" + Pipeline for video-to-video generation with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]` or `Tuple[ControlNetModel]` or `MultiControlNetModel`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + motion_adapter: MotionAdapter, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) + self.control_video_processor = VideoProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, (str, dict)): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.encode_video + def encode_video(self, video, generator, decode_chunk_size: int = 16) -> torch.Tensor: + latents = [] + for i in range(0, len(video), decode_chunk_size): + batch_video = video[i : i + decode_chunk_size] + batch_video = retrieve_latents(self.vae.encode(batch_video), generator=generator) + latents.append(batch_video) + return torch.cat(latents) + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.decode_latents + def decode_latents(self, latents, decode_chunk_size: int = 16): + latents = 1 / self.vae.config.scaling_factor * latents + + 
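+        # Latents are laid out as [batch, channels, frames, height, width]; the lines below fold the frame axis into the batch axis so the VAE can decode the video frame-by-frame in chunks of `decode_chunk_size`.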
batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + video = [] + for i in range(0, latents.shape[0], decode_chunk_size): + batch_latents = latents[i : i + decode_chunk_size] + batch_latents = self.vae.decode(batch_latents).sample + video.append(batch_latents) + + video = torch.cat(video) + video = video[None, :].reshape((batch_size, num_frames, -1) + video.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + height, + width, + video=None, + conditioning_frames=None, + latents=None, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and not isinstance(prompt, (str, list, dict)): + raise ValueError(f"`prompt` has to be of type `str`, `list` or `dict` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if video is not None and latents is not None: + raise ValueError("Only one of `video` or `latents` should be provided") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + num_frames = len(video) if latents is None else latents.shape[2] + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(conditioning_frames, list): + raise TypeError( + f"For single controlnet, `image` must be of type `list` but got {type(conditioning_frames)}" + ) + if len(conditioning_frames) != num_frames: + raise ValueError(f"Expected image to have length {num_frames} but got {len(conditioning_frames)=}") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(conditioning_frames, list) or not isinstance(conditioning_frames[0], list): + raise TypeError( + f"For multiple controlnets: `image` must be a list of lists but got {type(conditioning_frames)=}" + ) + if len(conditioning_frames[0]) != num_frames: + raise ValueError( + f"Expected each image sublist to have length {num_frames} but got {len(conditioning_frames[0])=}" + ) + if any(len(img) != len(conditioning_frames[0]) for img in conditioning_frames): + raise ValueError("All conditioning frame batches for multicontrolnet must be the same size") + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be of type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("Only a single batch of multiple conditionings is supported at the moment.") + elif isinstance(controlnet_conditioning_scale,
list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, timesteps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.prepare_latents + def prepare_latents( + self, + video: Optional[torch.Tensor] = None, + height: int = 64, + width: int = 64, + num_channels_latents: int = 4, + batch_size: int = 1, + timestep: Optional[int] = None, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + decode_chunk_size: int = 16, + add_noise: bool = False, + ) -> torch.Tensor: + num_frames = video.shape[1] if latents is None else latents.shape[2] + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + video = video.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + if len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + init_latents = [ + self.encode_video(video[i], generator[i], decode_chunk_size).unsqueeze(0) + for i in range(batch_size) + ] + else: + init_latents = [self.encode_video(vid, generator, decode_chunk_size).unsqueeze(0) for vid in video] + + init_latents = torch.cat(init_latents, dim=0) + + # restore vae to original dtype + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + error_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Please make sure to update your script to pass as many initial images as text prompts" + ) + raise ValueError(error_message) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.add_noise(init_latents, noise, timestep).permute(0, 2, 1, 3, 4) + else: + if shape != latents.shape: + # [B, C, F, H, W] + raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}") + + latents = latents.to(device, dtype=dtype) + + if add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.add_noise(latents, noise, timestep) + + return latents + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_controlnet.AnimateDiffControlNetPipeline.prepare_video + def prepare_conditioning_frames( + self, + video, + width, + height, + batch_size, + num_videos_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + video = self.control_video_processor.preprocess_video(video, height=height, width=width).to( + dtype=torch.float32 + ) + video = video.permute(0, 2, 1, 3, 4).flatten(0, 1) + video_batch_size = video.shape[0] + + if video_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_videos_per_prompt + + video = video.repeat_interleave(repeat_by, dim=0) + video = video.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + video = torch.cat([video] * 2) + + return video + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + def __call__( + self, + video: List[List[PipelineImageInput]] = None, + prompt: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + enforce_inference_steps: bool = False, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + guidance_scale: float = 7.5, + strength: float = 0.8, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + conditioning_frames: Optional[List[PipelineImageInput]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + decode_chunk_size: int = 16, + ): + r""" + The call function to the pipeline for generation. + + Args: + video (`List[PipelineImageInput]`): + The input video to condition the generation on. Must be a list of images/frames of the video. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + strength (`float`, *optional*, defaults to 0.8): + Higher strength leads to more differences between original video and generated video. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. 
Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + conditioning_frames (`List[PipelineImageInput]`, *optional*): + The ControlNet input condition to provide guidance to the `unet` for generation. If multiple + ControlNets are specified, images must be passed as a list such that each element of the list can be + correctly batched for input to a single ControlNet. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`AnimateDiffPipelineOutput`] instead of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. 
A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + decode_chunk_size (`int`, defaults to `16`): + The number of frames to decode at a time when calling `decode_latents` method. + + Examples: + + Returns: + [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt=prompt, + strength=strength, + height=height, + width=width, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + video=video, + conditioning_frames=conditioning_frames, + latents=latents, + ip_adapter_image=ip_adapter_image, + ip_adapter_image_embeds=ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + controlnet_conditioning_scale=controlnet_conditioning_scale, + control_guidance_start=control_guidance_start, + control_guidance_end=control_guidance_end, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, (str, dict)): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + dtype = self.dtype + + # 3. Prepare timesteps + if not enforce_inference_steps: + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + else: + denoising_inference_steps = int(num_inference_steps / strength) + timesteps, denoising_inference_steps = retrieve_timesteps( + self.scheduler, denoising_inference_steps, device, timesteps, sigmas + ) + timesteps = timesteps[-num_inference_steps:] + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + + # 4. Prepare latent variables + if latents is None: + video = self.video_processor.preprocess_video(video, height=height, width=width) + # Move the number of frames before the number of channels. + video = video.permute(0, 2, 1, 3, 4) + video = video.to(device=device, dtype=dtype) + + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + video=video, + height=height, + width=width, + num_channels_latents=num_channels_latents, + batch_size=batch_size * num_videos_per_prompt, + timestep=latent_timestep, + dtype=dtype, + device=device, + generator=generator, + latents=latents, + decode_chunk_size=decode_chunk_size, + add_noise=enforce_inference_steps, + ) + + # 5. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + num_frames = latents.shape[2] + if self.free_noise_enabled: + prompt_embeds, negative_prompt_embeds = self._encode_prompt_free_noise( + prompt=prompt, + num_frames=num_frames, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + else: + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + # 6. Prepare IP-Adapter embeddings + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 7. Prepare ControlNet conditions + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + if isinstance(controlnet, ControlNetModel): + conditioning_frames = self.prepare_conditioning_frames( + video=conditioning_frames, + width=width, + height=height, + batch_size=batch_size * num_videos_per_prompt * num_frames, + num_videos_per_prompt=num_videos_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + cond_prepared_videos = [] + for frame_ in conditioning_frames: + prepared_video = self.prepare_conditioning_frames( + video=frame_, + width=width, + height=height, + batch_size=batch_size * num_videos_per_prompt * num_frames, + num_videos_per_prompt=num_videos_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + cond_prepared_videos.append(prepared_video) + conditioning_frames = cond_prepared_videos + else: + assert False + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + num_inference_steps = len(timesteps) + # make sure to readjust timesteps based on strength + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 10. 
Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + control_model_input = torch.transpose(control_model_input, 1, 2) + control_model_input = control_model_input.reshape( + (-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4]) + ) + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=conditioning_frames, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 11. Post-processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 12. 
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/animatediff/pipeline_output.py b/diffusers3/pipelines/animatediff/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..2417223cf95e682aa9b86f4c6013c5285c9afce4 --- /dev/null +++ b/diffusers3/pipelines/animatediff/pipeline_output.py @@ -0,0 +1,24 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image +import torch + +from ...utils import BaseOutput + + +@dataclass +class AnimateDiffPipelineOutput(BaseOutput): + r""" + Output class for AnimateDiff pipelines. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing + denoised + PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)` + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] diff --git a/diffusers3/pipelines/audioldm/__init__.py b/diffusers3/pipelines/audioldm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a002b4aa72e0a180c7042c406667d37122d6e4cc --- /dev/null +++ b/diffusers3/pipelines/audioldm/__init__.py @@ -0,0 +1,51 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AudioLDMPipeline, + ) + + _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline}) +else: + _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + AudioLDMPipeline, + ) + + else: + from .pipeline_audioldm import AudioLDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/audioldm/pipeline_audioldm.py b/diffusers3/pipelines/audioldm/pipeline_audioldm.py new file mode 100644 index 0000000000000000000000000000000000000000..105ca40f773fff966f9d95b795e1e6c69a107e00 --- /dev/null +++ b/diffusers3/pipelines/audioldm/pipeline_audioldm.py @@ -0,0 +1,546 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
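# NOTE: illustrative sketch only, not part of the vendored diffusers3 sources. The
# `audioldm/__init__.py` above registers `AudioLDMPipeline` through `_LazyModule` behind an
# optional-dependency guard, so the heavy imports happen only when the pipeline is first used.
# A rough stdlib-only sketch of the same idea; `load_audioldm_pipeline_class` and
# `MissingDependencyPipeline` are hypothetical names used here for illustration.

import importlib
import importlib.util

class MissingDependencyPipeline:
    def __init__(self, *args, **kwargs):
        raise ImportError("AudioLDMPipeline requires `torch` and `transformers` to be installed.")

def load_audioldm_pipeline_class():
    if importlib.util.find_spec("torch") and importlib.util.find_spec("transformers"):
        module = importlib.import_module("diffusers3.pipelines.audioldm.pipeline_audioldm")
        return getattr(module, "AudioLDMPipeline")
    return MissingDependencyPipeline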
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +import torch.nn.functional as F +from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan + +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AudioLDMPipeline + >>> import torch + >>> import scipy + + >>> repo_id = "cvssp/audioldm-s-full-v2" + >>> pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" + >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] + + >>> # save the audio sample as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ``` +""" + + +class AudioLDMPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-audio generation using AudioLDM. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapTextModelWithProjection`]): + Frozen text-encoder (`ClapTextModelWithProjection`, specifically the + [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. + tokenizer ([`PreTrainedTokenizer`]): + A [`~transformers.RobertaTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan`. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ClapTextModelWithProjection, + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def _encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLAP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask.to(device), + ) + prompt_embeds = prompt_embeds.text_embeds + # additional L_2 normalization over each hidden-state + prompt_embeds = F.normalize(prompt_embeds, dim=-1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + ( + bs_embed, + seq_len, + ) = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input_ids, + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds.text_embeds + # additional L_2 normalization over each hidden-state + negative_prompt_embeds = F.normalize(negative_prompt_embeds, dim=-1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + return mel_spectrogram + + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." 
+ ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(self.vocoder.config.model_in_dim) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 10, + guidance_scale: float = 2.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. + audio_length_in_s (`int`, *optional*, defaults to 5.12): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 10): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 2.5): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated image. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. + + Examples: + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. 
Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=None, + class_labels=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post-processing + mel_spectrogram = self.decode_latents(latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/diffusers3/pipelines/audioldm2/__init__.py b/diffusers3/pipelines/audioldm2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..23cd0e44f89217b8391d0ce236070271db9aaf83 --- /dev/null +++ b/diffusers3/pipelines/audioldm2/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_audioldm2"] = ["AudioLDM2ProjectionModel", "AudioLDM2UNet2DConditionModel"] + _import_structure["pipeline_audioldm2"] = ["AudioLDM2Pipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel + from .pipeline_audioldm2 import AudioLDM2Pipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/diffusers3/pipelines/audioldm2/modeling_audioldm2.py b/diffusers3/pipelines/audioldm2/modeling_audioldm2.py new file mode 100644 index 0000000000000000000000000000000000000000..2af3078f74121eaca6aee7e0512c7d361044ece3 --- /dev/null +++ b/diffusers3/pipelines/audioldm2/modeling_audioldm2.py @@ -0,0 +1,1530 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import UNet2DConditionLoadersMixin +from ...models.activations import get_activation +from ...models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ...models.embeddings import ( + TimestepEmbedding, + Timesteps, +) +from ...models.modeling_utils import ModelMixin +from ...models.resnet import Downsample2D, ResnetBlock2D, Upsample2D +from ...models.transformers.transformer_2d import Transformer2DModel +from ...models.unets.unet_2d_blocks import DownBlock2D, UpBlock2D +from ...models.unets.unet_2d_condition import UNet2DConditionOutput +from ...utils import BaseOutput, is_torch_version, logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def add_special_tokens(hidden_states, attention_mask, sos_token, eos_token): + batch_size = hidden_states.shape[0] + + if attention_mask is not None: + # Add two more steps to attn mask + new_attn_mask_step = attention_mask.new_ones((batch_size, 1)) + attention_mask = torch.concat([new_attn_mask_step, attention_mask, new_attn_mask_step], dim=-1) + + # Add the SOS / EOS tokens at the start / end of the sequence respectively + sos_token = sos_token.expand(batch_size, 1, -1) + eos_token = eos_token.expand(batch_size, 1, -1) + hidden_states = torch.concat([sos_token, hidden_states, eos_token], dim=1) + return hidden_states, attention_mask + + +@dataclass +class AudioLDM2ProjectionModelOutput(BaseOutput): + """ + Args: + Class for AudioLDM2 projection layer's outputs. + hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states obtained by linearly projecting the hidden-states for each of the text + encoders and subsequently concatenating them together. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices, formed by concatenating the attention masks + for the two text encoders together. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
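# NOTE: illustrative sketch only, not part of the vendored diffusers3 sources. It shows the
# effect of `add_special_tokens` defined above: learned SOS/EOS vectors are prepended/appended to
# every sequence and the attention mask grows by two positions. The shapes and the zero "learned"
# tokens below are placeholders for illustration.

import torch

batch, seq_len, dim = 2, 5, 8
hidden_states = torch.randn(batch, seq_len, dim)
attention_mask = torch.ones(batch, seq_len)
sos_embed = torch.zeros(dim)  # stands in for the learned `sos_embed` parameter
eos_embed = torch.zeros(dim)  # stands in for the learned `eos_embed` parameter

hidden_states = torch.cat(
    [sos_embed.expand(batch, 1, -1), hidden_states, eos_embed.expand(batch, 1, -1)], dim=1
)
attention_mask = torch.cat(
    [attention_mask.new_ones(batch, 1), attention_mask, attention_mask.new_ones(batch, 1)], dim=-1
)
print(hidden_states.shape, attention_mask.shape)  # torch.Size([2, 7, 8]) torch.Size([2, 7])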
+ """ + + hidden_states: torch.Tensor + attention_mask: Optional[torch.LongTensor] = None + + +class AudioLDM2ProjectionModel(ModelMixin, ConfigMixin): + """ + A simple linear projection model to map two text embeddings to a shared latent space. It also inserts learned + embedding vectors at the start and end of each text embedding sequence respectively. Each variable appended with + `_1` refers to that corresponding to the second text encoder. Otherwise, it is from the first. + + Args: + text_encoder_dim (`int`): + Dimensionality of the text embeddings from the first text encoder (CLAP). + text_encoder_1_dim (`int`): + Dimensionality of the text embeddings from the second text encoder (T5 or VITS). + langauge_model_dim (`int`): + Dimensionality of the text embeddings from the language model (GPT2). + """ + + @register_to_config + def __init__( + self, + text_encoder_dim, + text_encoder_1_dim, + langauge_model_dim, + use_learned_position_embedding=None, + max_seq_length=None, + ): + super().__init__() + # additional projection layers for each text encoder + self.projection = nn.Linear(text_encoder_dim, langauge_model_dim) + self.projection_1 = nn.Linear(text_encoder_1_dim, langauge_model_dim) + + # learnable SOS / EOS token embeddings for each text encoder + self.sos_embed = nn.Parameter(torch.ones(langauge_model_dim)) + self.eos_embed = nn.Parameter(torch.ones(langauge_model_dim)) + + self.sos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) + self.eos_embed_1 = nn.Parameter(torch.ones(langauge_model_dim)) + + self.use_learned_position_embedding = use_learned_position_embedding + + # learable positional embedding for vits encoder + if self.use_learned_position_embedding is not None: + self.learnable_positional_embedding = torch.nn.Parameter( + torch.zeros((1, text_encoder_1_dim, max_seq_length)) + ) + + def forward( + self, + hidden_states: Optional[torch.Tensor] = None, + hidden_states_1: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + attention_mask_1: Optional[torch.LongTensor] = None, + ): + hidden_states = self.projection(hidden_states) + hidden_states, attention_mask = add_special_tokens( + hidden_states, attention_mask, sos_token=self.sos_embed, eos_token=self.eos_embed + ) + + # Add positional embedding for Vits hidden state + if self.use_learned_position_embedding is not None: + hidden_states_1 = (hidden_states_1.permute(0, 2, 1) + self.learnable_positional_embedding).permute(0, 2, 1) + + hidden_states_1 = self.projection_1(hidden_states_1) + hidden_states_1, attention_mask_1 = add_special_tokens( + hidden_states_1, attention_mask_1, sos_token=self.sos_embed_1, eos_token=self.eos_embed_1 + ) + + # concatenate clap and t5 text encoding + hidden_states = torch.cat([hidden_states, hidden_states_1], dim=1) + + # concatenate attention masks + if attention_mask is None and attention_mask_1 is not None: + attention_mask = attention_mask_1.new_ones((hidden_states[:2])) + elif attention_mask is not None and attention_mask_1 is None: + attention_mask_1 = attention_mask.new_ones((hidden_states_1[:2])) + + if attention_mask is not None and attention_mask_1 is not None: + attention_mask = torch.cat([attention_mask, attention_mask_1], dim=-1) + else: + attention_mask = None + + return AudioLDM2ProjectionModelOutput( + hidden_states=hidden_states, + attention_mask=attention_mask, + ) + + +class AudioLDM2UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): + r""" + A conditional 2D UNet model that takes a noisy sample, 
conditional state, and a timestep and returns a sample + shaped output. Compared to the vanilla [`UNet2DConditionModel`], this variant optionally includes an additional + self-attention layer in each Transformer block, as well as multiple cross-attention layers. It also allows for up + to two cross-attention embeddings, `encoder_hidden_states` and `encoder_hidden_states_1`. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + + Parameters: + sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): + Height and width of input/output sample. + in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. + out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. + flip_sin_to_cos (`bool`, *optional*, defaults to `False`): + Whether to flip the sin to cos in the time embedding. + freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. + down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): + The tuple of downsample blocks to use. + mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): + Block type for middle of UNet, it can only be `UNetMidBlock2DCrossAttn` for AudioLDM2. + up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): + The tuple of upsample blocks to use. + only_cross_attention (`bool` or `Tuple[bool]`, *optional*, default to `False`): + Whether to include self-attention in the basic transformer blocks, see + [`~models.attention.BasicTransformerBlock`]. + block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): + The tuple of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. + downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. + mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. + act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. + norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. + If `None`, normalization and activation layers is skipped in post-processing. + norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. + cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): + The dimension of the cross attention features. + transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): + The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for + [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], + [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. + attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. + num_attention_heads (`int`, *optional*): + The number of attention heads. If not defined, defaults to `attention_head_dim` + resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config + for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. 
+ class_embed_type (`str`, *optional*, defaults to `None`): + The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, + `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. + num_class_embeds (`int`, *optional*, defaults to `None`): + Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing + class conditioning with `class_embed_type` equal to `None`. + time_embedding_type (`str`, *optional*, defaults to `positional`): + The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. + time_embedding_dim (`int`, *optional*, defaults to `None`): + An optional override for the dimension of the projected time embedding. + time_embedding_act_fn (`str`, *optional*, defaults to `None`): + Optional activation function to use only once on the time embeddings before they are passed to the rest of + the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. + timestep_post_act (`str`, *optional*, defaults to `None`): + The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. + time_cond_proj_dim (`int`, *optional*, defaults to `None`): + The dimension of `cond_proj` layer in the timestep embedding. + conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. + conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. + projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when + `class_embed_type="projection"`. Required when `class_embed_type="projection"`. + class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time + embeddings with the class embeddings. 
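# NOTE: illustrative sketch only, not part of the vendored diffusers3 sources. The constructor
# that follows accepts several per-block settings either as a single value or as one value per
# down block, and broadcasts scalars before building the blocks; `class_embeddings_concat`
# (documented above) doubles the embedding width handed to the blocks. Values here are examples.

down_block_types = ("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")

def per_block(value, num_blocks):
    """Broadcast a scalar setting to one entry per block; leave sequences untouched."""
    return [value] * num_blocks if isinstance(value, int) else list(value)

print(per_block(2, len(down_block_types)))             # layers_per_block -> [2, 2, 2, 2]
print(per_block((1, 1, 2, 2), len(down_block_types)))  # already per-block -> [1, 1, 2, 2]

time_embed_dim = 1280
class_embeddings_concat = True
blocks_time_embed_dim = time_embed_dim * 2 if class_embeddings_concat else time_embed_dim
print(blocks_time_embed_dim)                            # 2560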
+ """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "CrossAttnDownBlock2D", + "DownBlock2D", + ), + mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", + up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + layers_per_block: Union[int, Tuple[int]] = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: Union[int, Tuple[int]] = 1280, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + attention_head_dim: Union[int, Tuple[int]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + time_embedding_type: str = "positional", + time_embedding_dim: Optional[int] = None, + time_embedding_act_fn: Optional[str] = None, + timestep_post_act: Optional[str] = None, + time_cond_proj_dim: Optional[int] = None, + conv_in_kernel: int = 3, + conv_out_kernel: int = 3, + projection_class_embeddings_input_dim: Optional[int] = None, + class_embeddings_concat: bool = False, + ): + super().__init__() + + self.sample_size = sample_size + + if num_attention_heads is not None: + raise ValueError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." + ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. 
`down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." + ) + + if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." + ) + + # input + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = nn.Conv2d( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + if time_embedding_type == "positional": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + else: + raise ValueError(f"{time_embedding_type} does not exist. Please make sure to use `positional`.") + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + post_act_fn=timestep_post_act, + cond_proj_dim=time_cond_proj_dim, + ) + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
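# NOTE: illustrative sketch only, not part of the vendored diffusers3 sources; it expands on the
# "projection" class-embedding comment above (the actual assignment continues just below). A plain
# Linear -> SiLU -> Linear stack, much like `TimestepEmbedding`, maps an arbitrary conditioning
# vector straight to the time-embedding width, with no sinusoidal encoding. Dimensions are examples.

import torch
import torch.nn as nn

projection_class_embeddings_input_dim = 512   # e.g. an audio/duration conditioning vector
time_embed_dim = 1280

class_embedding = nn.Sequential(
    nn.Linear(projection_class_embeddings_input_dim, time_embed_dim),
    nn.SiLU(),
    nn.Linear(time_embed_dim, time_embed_dim),
)

class_labels = torch.randn(2, projection_class_embeddings_input_dim)  # arbitrary vectors, not indices
print(class_embedding(class_labels).shape)                            # torch.Size([2, 1280])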
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif class_embed_type == "simple_projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" + ) + self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + if time_embedding_act_fn is None: + self.time_embed_act = None + else: + self.time_embed_act = get_activation(time_embedding_act_fn) + + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) * len(down_block_types) + + if isinstance(layers_per_block, int): + layers_per_block = [layers_per_block] * len(down_block_types) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + if class_embeddings_concat: + # The time embeddings are concatenated with the class embeddings. The dimension of the + # time embeddings passed to the down, middle, and up blocks is twice the dimension of the + # regular time embeddings + blocks_time_embed_dim = time_embed_dim * 2 + else: + blocks_time_embed_dim = time_embed_dim + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block[i], + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=blocks_time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim[i], + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + self.down_blocks.append(down_block) + + # mid + if mid_block_type == "UNetMidBlock2DCrossAttn": + self.mid_block = UNetMidBlock2DCrossAttn( + transformer_layers_per_block=transformer_layers_per_block[-1], + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim[-1], + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + ) + else: + raise ValueError( + f"unknown mid_block_type : {mid_block_type}. Should be `UNetMidBlock2DCrossAttn` for AudioLDM2." 
+ ) + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_layers_per_block = list(reversed(layers_per_block)) + reversed_cross_attention_dim = list(reversed(cross_attention_dim)) + reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=reversed_layers_per_block[i] + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=blocks_time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=reversed_cross_attention_dim[i], + num_attention_heads=reversed_num_attention_heads[i], + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_num_groups is not None: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps + ) + + self.conv_act = get_activation(act_fn) + + else: + self.conv_norm_out = None + self.conv_act = None + + conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = nn.Conv2d( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor() + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. 
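# NOTE: illustrative sketch only, not part of the vendored diffusers3 sources. The
# `attn_processors` property above walks the module tree recursively and keys each processor by a
# dotted weight-style name; `set_attn_processor`, whose docstring continues below, applies
# processors with the same recursion. A minimal sketch of that traversal on a toy model:

import torch.nn as nn

def collect_named(module: nn.Module, predicate, prefix: str = ""):
    """Recursively collect {dotted_name: submodule} for submodules matching `predicate`."""
    found = {}
    for name, child in module.named_children():
        path = f"{prefix}.{name}" if prefix else name
        if predicate(child):
            found[path] = child
        found.update(collect_named(child, predicate, path))
    return found

toy = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.ReLU(), nn.Linear(4, 2)))
print(collect_named(toy, lambda m: isinstance(m, nn.Linear)).keys())  # dict_keys(['0', '1.1'])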
+ + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice + def set_attention_slice(self, slice_size): + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. 
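+
+        Example (illustrative sketch, assuming `unet` is an instance of this model):
+
+        ```py
+        >>> unet.set_attention_slice("auto")  # halve each sliceable head dim
+        >>> unet.set_attention_slice("max")  # most memory-frugal: one slice at a time
+        >>> unet.set_attention_slice(1)  # or pass an int, or a list with one value per sliceable layer
+        ```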
+ """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel._set_gradient_checkpointing + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def forward( + self, + sample: torch.Tensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + encoder_hidden_states_1: Optional[torch.Tensor] = None, + encoder_attention_mask_1: Optional[torch.Tensor] = None, + ) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`AudioLDM2UNet2DConditionModel`] forward method. + + Args: + sample (`torch.Tensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.Tensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + encoder_attention_mask (`torch.Tensor`): + A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If + `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. + encoder_hidden_states_1 (`torch.Tensor`, *optional*): + A second set of encoder hidden states with shape `(batch, sequence_length_2, feature_dim_2)`. Can be + used to condition the model on a different set of embeddings to `encoder_hidden_states`. + encoder_attention_mask_1 (`torch.Tensor`, *optional*): + A cross-attention mask of shape `(batch, sequence_length_2)` is applied to `encoder_hidden_states_1`. + If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. + + Returns: + [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, + otherwise a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): + logger.info("Forward upsample size to force interpolation output size.") + forward_upsample_size = True + + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + if encoder_attention_mask_1 is not None: + encoder_attention_mask_1 = (1 - encoder_attention_mask_1.to(sample.dtype)) * -10000.0 + encoder_attention_mask_1 = encoder_attention_mask_1.unsqueeze(1) + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # there might be better ways to encapsulate this. + class_labels = class_labels.to(dtype=sample.dtype) + + class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) + + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + # 2. pre-process + sample = self.conv_in(sample) + + # 3. down + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + encoder_hidden_states_1=encoder_hidden_states_1, + encoder_attention_mask_1=encoder_attention_mask_1, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + + down_block_res_samples += res_samples + + # 4. mid + if self.mid_block is not None: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + encoder_hidden_states_1=encoder_hidden_states_1, + encoder_attention_mask_1=encoder_attention_mask_1, + ) + + # 5. 
up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + encoder_hidden_states_1=encoder_hidden_states_1, + encoder_attention_mask_1=encoder_attention_mask_1, + ) + else: + sample = upsample_block( + hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size + ) + + # 6. post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) + + +def get_down_block( + down_block_type, + num_layers, + in_channels, + out_channels, + temb_channels, + add_downsample, + resnet_eps, + resnet_act_fn, + transformer_layers_per_block=1, + num_attention_heads=None, + resnet_groups=None, + cross_attention_dim=None, + downsample_padding=None, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", +): + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownBlock2D": + return DownBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "CrossAttnDownBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") + return CrossAttnDownBlock2D( + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{down_block_type} does not exist.") + + +def get_up_block( + up_block_type, + num_layers, + in_channels, + out_channels, + prev_output_channel, + temb_channels, + add_upsample, + resnet_eps, + resnet_act_fn, + transformer_layers_per_block=1, + num_attention_heads=None, + resnet_groups=None, + cross_attention_dim=None, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", +): + 
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type + if up_block_type == "UpBlock2D": + return UpBlock2D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "CrossAttnUpBlock2D": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") + return CrossAttnUpBlock2D( + num_layers=num_layers, + transformer_layers_per_block=transformer_layers_per_block, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{up_block_type} does not exist.") + + +class CrossAttnDownBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads=1, + cross_attention_dim=1280, + output_scale_factor=1.0, + downsample_padding=1, + add_downsample=True, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) + if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: + raise ValueError( + "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " + f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" + ) + self.cross_attention_dim = cross_attention_dim + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + for j in range(len(cross_attention_dim)): + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim[j], + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + double_self_attention=True if cross_attention_dim[j] is None else False, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + Downsample2D( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states_1: Optional[torch.Tensor] = None, + encoder_attention_mask_1: Optional[torch.Tensor] = None, + ): + output_states = () + num_layers = len(self.resnets) + num_attention_per_layer = len(self.attentions) // num_layers + + encoder_hidden_states_1 = ( + encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states + ) + encoder_attention_mask_1 = ( + encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask + ) + + for i in range(num_layers): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.resnets[i]), + hidden_states, + temb, + **ckpt_kwargs, + ) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), + hidden_states, + forward_encoder_hidden_states, + None, # timestep + None, # class_labels + cross_attention_kwargs, + 
attention_mask, + forward_encoder_attention_mask, + **ckpt_kwargs, + )[0] + else: + hidden_states = self.resnets[i](hidden_states, temb) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = self.attentions[i * num_attention_per_layer + idx]( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=forward_encoder_hidden_states, + encoder_attention_mask=forward_encoder_attention_mask, + return_dict=False, + )[0] + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class UNetMidBlock2DCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads=1, + output_scale_factor=1.0, + cross_attention_dim=1280, + use_linear_projection=False, + upcast_attention=False, + ): + super().__init__() + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) + if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: + raise ValueError( + "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " + f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" + ) + self.cross_attention_dim = cross_attention_dim + + # there is always at least one resnet + resnets = [ + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + for i in range(num_layers): + for j in range(len(cross_attention_dim)): + attentions.append( + Transformer2DModel( + num_attention_heads, + in_channels // num_attention_heads, + in_channels=in_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim[j], + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + double_self_attention=True if cross_attention_dim[j] is None else False, + ) + ) + resnets.append( + ResnetBlock2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states_1: Optional[torch.Tensor] = None, + encoder_attention_mask_1: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + hidden_states = self.resnets[0](hidden_states, temb) + num_attention_per_layer = len(self.attentions) // (len(self.resnets) - 1) + + encoder_hidden_states_1 = ( + encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states + ) + encoder_attention_mask_1 = ( + encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask + ) + + for i in range(len(self.resnets[1:])): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), + hidden_states, + forward_encoder_hidden_states, + None, # timestep + None, # class_labels + cross_attention_kwargs, + attention_mask, + 
forward_encoder_attention_mask, + **ckpt_kwargs, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.resnets[i + 1]), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = self.attentions[i * num_attention_per_layer + idx]( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=forward_encoder_hidden_states, + encoder_attention_mask=forward_encoder_attention_mask, + return_dict=False, + )[0] + + hidden_states = self.resnets[i + 1](hidden_states, temb) + + return hidden_states + + +class CrossAttnUpBlock2D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads=1, + cross_attention_dim=1280, + output_scale_factor=1.0, + add_upsample=True, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) + if isinstance(cross_attention_dim, (list, tuple)) and len(cross_attention_dim) > 4: + raise ValueError( + "Only up to 4 cross-attention layers are supported. Ensure that the length of cross-attention " + f"dims is less than or equal to 4. 
Got cross-attention dims {cross_attention_dim} of length {len(cross_attention_dim)}" + ) + self.cross_attention_dim = cross_attention_dim + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlock2D( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + for j in range(len(cross_attention_dim)): + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block, + cross_attention_dim=cross_attention_dim[j], + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + double_self_attention=True if cross_attention_dim[j] is None else False, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + res_hidden_states_tuple: Tuple[torch.Tensor, ...], + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states_1: Optional[torch.Tensor] = None, + encoder_attention_mask_1: Optional[torch.Tensor] = None, + ): + num_layers = len(self.resnets) + num_attention_per_layer = len(self.attentions) // num_layers + + encoder_hidden_states_1 = ( + encoder_hidden_states_1 if encoder_hidden_states_1 is not None else encoder_hidden_states + ) + encoder_attention_mask_1 = ( + encoder_attention_mask_1 if encoder_hidden_states_1 is not None else encoder_attention_mask + ) + + for i in range(num_layers): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.resnets[i]), + hidden_states, + temb, + **ckpt_kwargs, + ) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + 
forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.attentions[i * num_attention_per_layer + idx], return_dict=False), + hidden_states, + forward_encoder_hidden_states, + None, # timestep + None, # class_labels + cross_attention_kwargs, + attention_mask, + forward_encoder_attention_mask, + **ckpt_kwargs, + )[0] + else: + hidden_states = self.resnets[i](hidden_states, temb) + for idx, cross_attention_dim in enumerate(self.cross_attention_dim): + if cross_attention_dim is not None and idx <= 1: + forward_encoder_hidden_states = encoder_hidden_states + forward_encoder_attention_mask = encoder_attention_mask + elif cross_attention_dim is not None and idx > 1: + forward_encoder_hidden_states = encoder_hidden_states_1 + forward_encoder_attention_mask = encoder_attention_mask_1 + else: + forward_encoder_hidden_states = None + forward_encoder_attention_mask = None + hidden_states = self.attentions[i * num_attention_per_layer + idx]( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=forward_encoder_hidden_states, + encoder_attention_mask=forward_encoder_attention_mask, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states diff --git a/diffusers3/pipelines/audioldm2/pipeline_audioldm2.py b/diffusers3/pipelines/audioldm2/pipeline_audioldm2.py new file mode 100644 index 0000000000000000000000000000000000000000..b45771d7de7451b6560627deef4a7b254671a45e --- /dev/null +++ b/diffusers3/pipelines/audioldm2/pipeline_audioldm2.py @@ -0,0 +1,1065 @@ +# Copyright 2024 CVSSP, ByteDance and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
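+
+# NOTE: this module lives under the repo's `diffusers3` package and implements the AudioLDM2
+# text-to-audio pipeline: CLAP and T5/Vits text embeddings are projected, fed through a GPT-2
+# language model, and used to condition the UNet defined in modeling_audioldm2.py (see the
+# `AudioLDM2Pipeline` class docstring below for the full component list).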
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import ( + ClapFeatureExtractor, + ClapModel, + GPT2Model, + RobertaTokenizer, + RobertaTokenizerFast, + SpeechT5HifiGan, + T5EncoderModel, + T5Tokenizer, + T5TokenizerFast, + VitsModel, + VitsTokenizer, +) + +from ...models import AutoencoderKL +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + is_accelerate_available, + is_accelerate_version, + is_librosa_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from .modeling_audioldm2 import AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel + + +if is_librosa_available(): + import librosa + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import scipy + >>> import torch + >>> from diffusers import AudioLDM2Pipeline + + >>> repo_id = "cvssp/audioldm2" + >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # define the prompts + >>> prompt = "The sound of a hammer hitting a wooden surface." + >>> negative_prompt = "Low quality." + + >>> # set the seed for generator + >>> generator = torch.Generator("cuda").manual_seed(0) + + >>> # run the generation + >>> audio = pipe( + ... prompt, + ... negative_prompt=negative_prompt, + ... num_inference_steps=200, + ... audio_length_in_s=10.0, + ... num_waveforms_per_prompt=3, + ... generator=generator, + ... ).audios + + >>> # save the best audio sample (index 0) as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio[0]) + ``` + ``` + #Using AudioLDM2 for Text To Speech + >>> import scipy + >>> import torch + >>> from diffusers import AudioLDM2Pipeline + + >>> repo_id = "anhnct/audioldm2_gigaspeech" + >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # define the prompts + >>> prompt = "A female reporter is speaking" + >>> transcript = "wish you have a good day" + + >>> # set the seed for generator + >>> generator = torch.Generator("cuda").manual_seed(0) + + >>> # run the generation + >>> audio = pipe( + ... prompt, + ... transcription=transcript, + ... num_inference_steps=200, + ... audio_length_in_s=10.0, + ... num_waveforms_per_prompt=2, + ... generator=generator, + ... max_new_tokens=512, #Must set max_new_tokens equa to 512 for TTS + ... ).audios + + >>> # save the best audio sample (index 0) as a .wav file + >>> scipy.io.wavfile.write("tts.wav", rate=16000, data=audio[0]) + ``` +""" + + +def prepare_inputs_for_generation( + inputs_embeds, + attention_mask=None, + past_key_values=None, + **kwargs, +): + if past_key_values is not None: + # only last token for inputs_embeds if past is defined in kwargs + inputs_embeds = inputs_embeds[:, -1:] + + return { + "inputs_embeds": inputs_embeds, + "attention_mask": attention_mask, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + } + + +class AudioLDM2Pipeline(DiffusionPipeline): + r""" + Pipeline for text-to-audio generation using AudioLDM2. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapModel`]): + First frozen text-encoder. AudioLDM2 uses the joint audio-text embedding model + [CLAP](https://huggingface.co/docs/transformers/model_doc/clap#transformers.CLAPTextModelWithProjection), + specifically the [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. The + text branch is used to encode the text prompt to a prompt embedding. The full audio-text model is used to + rank generated waveforms against the text prompt by computing similarity scores. + text_encoder_2 ([`~transformers.T5EncoderModel`, `~transformers.VitsModel`]): + Second frozen text-encoder. AudioLDM2 uses the encoder of + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) variant. Second frozen text-encoder use + for TTS. AudioLDM2 uses the encoder of + [Vits](https://huggingface.co/docs/transformers/model_doc/vits#transformers.VitsModel). + projection_model ([`AudioLDM2ProjectionModel`]): + A trained model used to linearly project the hidden-states from the first and second text encoder models + and insert learned SOS and EOS token embeddings. The projected hidden-states from the two text encoders are + concatenated to give the input to the language model. A Learned Position Embedding for the Vits + hidden-states + language_model ([`~transformers.GPT2Model`]): + An auto-regressive language model used to generate a sequence of hidden-states conditioned on the projected + outputs from the two text encoders. + tokenizer ([`~transformers.RobertaTokenizer`]): + Tokenizer to tokenize text for the first frozen text-encoder. + tokenizer_2 ([`~transformers.T5Tokenizer`, `~transformers.VitsTokenizer`]): + Tokenizer to tokenize text for the second frozen text-encoder. + feature_extractor ([`~transformers.ClapFeatureExtractor`]): + Feature extractor to pre-process generated audio waveforms to log-mel spectrograms for automatic scoring. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan` to convert the mel-spectrogram latents to the final audio waveform. 
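+
+    A small illustrative check (assuming `pipe` is a loaded `AudioLDM2Pipeline`): whether the
+    `transcription` argument is needed depends on which second text encoder the checkpoint ships
+    with, e.g.
+
+    ```py
+    >>> from transformers import VitsModel
+    >>> needs_transcription = isinstance(pipe.text_encoder_2, VitsModel)  # True for the TTS checkpoints
+    ```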
+ """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ClapModel, + text_encoder_2: Union[T5EncoderModel, VitsModel], + projection_model: AudioLDM2ProjectionModel, + language_model: GPT2Model, + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + tokenizer_2: Union[T5Tokenizer, T5TokenizerFast, VitsTokenizer], + feature_extractor: ClapFeatureExtractor, + unet: AudioLDM2UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + projection_model=projection_model, + language_model=language_model, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + feature_extractor=feature_extractor, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + model_sequence = [ + self.text_encoder.text_model, + self.text_encoder.text_projection, + self.text_encoder_2, + self.projection_model, + self.language_model, + self.unet, + self.vae, + self.vocoder, + self.text_encoder, + ] + + hook = None + for cpu_offloaded_model in model_sequence: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + def generate_language_model( + self, + inputs_embeds: torch.Tensor = None, + max_new_tokens: int = 8, + **model_kwargs, + ): + """ + + Generates a sequence of hidden-states from the language model, conditioned on the embedding inputs. + + Parameters: + inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): + The sequence used as a prompt for the generation. + max_new_tokens (`int`): + Number of new tokens to generate. 
+ model_kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of additional model-specific kwargs that will be forwarded to the `forward` + function of the model. + + Return: + `inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): + The sequence of generated hidden-states. + """ + max_new_tokens = max_new_tokens if max_new_tokens is not None else self.language_model.config.max_new_tokens + model_kwargs = self.language_model._get_initial_cache_position(inputs_embeds, model_kwargs) + for _ in range(max_new_tokens): + # prepare model inputs + model_inputs = prepare_inputs_for_generation(inputs_embeds, **model_kwargs) + + # forward pass to get next hidden states + output = self.language_model(**model_inputs, return_dict=True) + + next_hidden_states = output.last_hidden_state + + # Update the model input + inputs_embeds = torch.cat([inputs_embeds, next_hidden_states[:, -1:, :]], dim=1) + + # Update generated hidden states, model inputs, and length for next step + model_kwargs = self.language_model._update_model_kwargs_for_generation(output, model_kwargs) + + return inputs_embeds[:, -max_new_tokens:, :] + + def encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + transcription=None, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + generated_prompt_embeds: Optional[torch.Tensor] = None, + negative_generated_prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + negative_attention_mask: Optional[torch.LongTensor] = None, + max_new_tokens: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + transcription (`str` or `List[str]`): + transcription of text to speech + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-computed text embeddings from the Flan T5 model. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, text embeddings will be computed from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-computed negative text embeddings from the Flan T5 model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from + `negative_prompt` input argument. + generated_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings from the GPT2 langauge model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input + argument. + negative_generated_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text + inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from + `negative_prompt` input argument. 
+ attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, attention mask will + be computed from `prompt` input argument. + negative_attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `negative_prompt_embeds`. If not provided, attention + mask will be computed from `negative_prompt` input argument. + max_new_tokens (`int`, *optional*, defaults to None): + The number of new tokens to generate with the GPT2 language model. + Returns: + prompt_embeds (`torch.Tensor`): + Text embeddings from the Flan T5 model. + attention_mask (`torch.LongTensor`): + Attention mask to be applied to the `prompt_embeds`. + generated_prompt_embeds (`torch.Tensor`): + Text embeddings generated from the GPT2 langauge model. + + Example: + + ```python + >>> import scipy + >>> import torch + >>> from diffusers import AudioLDM2Pipeline + + >>> repo_id = "cvssp/audioldm2" + >>> pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # Get text embedding vectors + >>> prompt_embeds, attention_mask, generated_prompt_embeds = pipe.encode_prompt( + ... prompt="Techno music with a strong, upbeat tempo and high melodic riffs", + ... device="cuda", + ... do_classifier_free_guidance=True, + ... ) + + >>> # Pass text embeddings to pipeline for text-conditional audio generation + >>> audio = pipe( + ... prompt_embeds=prompt_embeds, + ... attention_mask=attention_mask, + ... generated_prompt_embeds=generated_prompt_embeds, + ... num_inference_steps=200, + ... audio_length_in_s=10.0, + ... ).audios[0] + + >>> # save generated audio sample + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ```""" + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] + is_vits_text_encoder = isinstance(self.text_encoder_2, VitsModel) + + if is_vits_text_encoder: + text_encoders = [self.text_encoder, self.text_encoder_2.text_encoder] + else: + text_encoders = [self.text_encoder, self.text_encoder_2] + + if prompt_embeds is None: + prompt_embeds_list = [] + attention_mask_list = [] + + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + use_prompt = isinstance( + tokenizer, (RobertaTokenizer, RobertaTokenizerFast, T5Tokenizer, T5TokenizerFast) + ) + text_inputs = tokenizer( + prompt if use_prompt else transcription, + padding="max_length" + if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer)) + else True, + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + f"The following part of your input was truncated because {text_encoder.config.model_type} can " + f"only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + attention_mask = attention_mask.to(device) + + if 
text_encoder.config.model_type == "clap": + prompt_embeds = text_encoder.get_text_features( + text_input_ids, + attention_mask=attention_mask, + ) + # append the seq-len dim: (bs, hidden_size) -> (bs, seq_len, hidden_size) + prompt_embeds = prompt_embeds[:, None, :] + # make sure that we attend to this single hidden-state + attention_mask = attention_mask.new_ones((batch_size, 1)) + elif is_vits_text_encoder: + # Add end_token_id and attention mask in the end of sequence phonemes + for text_input_id, text_attention_mask in zip(text_input_ids, attention_mask): + for idx, phoneme_id in enumerate(text_input_id): + if phoneme_id == 0: + text_input_id[idx] = 182 + text_attention_mask[idx] = 1 + break + prompt_embeds = text_encoder( + text_input_ids, attention_mask=attention_mask, padding_mask=attention_mask.unsqueeze(-1) + ) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + prompt_embeds_list.append(prompt_embeds) + attention_mask_list.append(attention_mask) + + projection_output = self.projection_model( + hidden_states=prompt_embeds_list[0], + hidden_states_1=prompt_embeds_list[1], + attention_mask=attention_mask_list[0], + attention_mask_1=attention_mask_list[1], + ) + projected_prompt_embeds = projection_output.hidden_states + projected_attention_mask = projection_output.attention_mask + + generated_prompt_embeds = self.generate_language_model( + projected_prompt_embeds, + attention_mask=projected_attention_mask, + max_new_tokens=max_new_tokens, + ) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + attention_mask = ( + attention_mask.to(device=device) + if attention_mask is not None + else torch.ones(prompt_embeds.shape[:2], dtype=torch.long, device=device) + ) + generated_prompt_embeds = generated_prompt_embeds.to(dtype=self.language_model.dtype, device=device) + + bs_embed, seq_len, hidden_size = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len, hidden_size) + + # duplicate attention mask for each generation per prompt + attention_mask = attention_mask.repeat(1, num_waveforms_per_prompt) + attention_mask = attention_mask.view(bs_embed * num_waveforms_per_prompt, seq_len) + + bs_embed, seq_len, hidden_size = generated_prompt_embeds.shape + # duplicate generated embeddings for each generation per prompt, using mps friendly method + generated_prompt_embeds = generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + generated_prompt_embeds = generated_prompt_embeds.view( + bs_embed * num_waveforms_per_prompt, seq_len, hidden_size + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + negative_prompt_embeds_list = [] + negative_attention_mask_list = [] + max_length = prompt_embeds.shape[1] + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=tokenizer.model_max_length + if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast, VitsTokenizer)) + else max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + if text_encoder.config.model_type == "clap": + negative_prompt_embeds = text_encoder.get_text_features( + uncond_input_ids, + attention_mask=negative_attention_mask, + ) + # append the seq-len dim: (bs, hidden_size) -> (bs, seq_len, hidden_size) + negative_prompt_embeds = negative_prompt_embeds[:, None, :] + # make sure that we attend to this single hidden-state + negative_attention_mask = negative_attention_mask.new_ones((batch_size, 1)) + elif is_vits_text_encoder: + negative_prompt_embeds = torch.zeros( + batch_size, + tokenizer.model_max_length, + text_encoder.config.hidden_size, + ).to(dtype=self.text_encoder_2.dtype, device=device) + negative_attention_mask = torch.zeros(batch_size, tokenizer.model_max_length).to( + dtype=self.text_encoder_2.dtype, device=device + ) + else: + negative_prompt_embeds = text_encoder( + uncond_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + negative_attention_mask_list.append(negative_attention_mask) + + projection_output = self.projection_model( + hidden_states=negative_prompt_embeds_list[0], + hidden_states_1=negative_prompt_embeds_list[1], + attention_mask=negative_attention_mask_list[0], + attention_mask_1=negative_attention_mask_list[1], + ) + negative_projected_prompt_embeds = projection_output.hidden_states + negative_projected_attention_mask = projection_output.attention_mask + + negative_generated_prompt_embeds = self.generate_language_model( + negative_projected_prompt_embeds, + attention_mask=negative_projected_attention_mask, + max_new_tokens=max_new_tokens, + ) + + if do_classifier_free_guidance: + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + negative_attention_mask = ( + negative_attention_mask.to(device=device) + if negative_attention_mask is not None + else torch.ones(negative_prompt_embeds.shape[:2], dtype=torch.long, device=device) + ) + negative_generated_prompt_embeds = negative_generated_prompt_embeds.to( + dtype=self.language_model.dtype, device=device + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len, -1) + + # duplicate unconditional attention mask for each generation per prompt + negative_attention_mask = negative_attention_mask.repeat(1, num_waveforms_per_prompt) + negative_attention_mask = negative_attention_mask.view(batch_size * num_waveforms_per_prompt, seq_len) + + # duplicate unconditional generated embeddings for each generation per prompt + seq_len = 
negative_generated_prompt_embeds.shape[1] + negative_generated_prompt_embeds = negative_generated_prompt_embeds.repeat(1, num_waveforms_per_prompt, 1) + negative_generated_prompt_embeds = negative_generated_prompt_embeds.view( + batch_size * num_waveforms_per_prompt, seq_len, -1 + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + attention_mask = torch.cat([negative_attention_mask, attention_mask]) + generated_prompt_embeds = torch.cat([negative_generated_prompt_embeds, generated_prompt_embeds]) + + return prompt_embeds, attention_mask, generated_prompt_embeds + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): + if not is_librosa_available(): + logger.info( + "Automatic scoring of the generated audio waveforms against the input prompt text requires the " + "`librosa` package to resample the generated waveforms. Returning the audios in the order they were " + "generated. To enable automatic scoring, install `librosa` with: `pip install librosa`." + ) + return audio + inputs = self.tokenizer(text, return_tensors="pt", padding=True) + resampled_audio = librosa.resample( + audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate + ) + inputs["input_features"] = self.feature_extractor( + list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate + ).input_features.type(dtype) + inputs = inputs.to(device) + + # compute the audio-text similarity score using the CLAP model + logits_per_text = self.text_encoder(**inputs).logits_per_text + # sort by the highest matching generations per prompt + indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] + audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) + return audio + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
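+        # For example, `DDIMScheduler.step()` accepts both `eta` and `generator`, so the dict built below would
+        # typically be {"eta": eta, "generator": generator}; for schedulers whose `step()` accepts neither
+        # argument, the dict simply stays empty (illustrative note, no particular scheduler is assumed here).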
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + transcription=None, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + generated_prompt_embeds=None, + negative_generated_prompt_embeds=None, + attention_mask=None, + negative_attention_mask=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." + ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and (prompt_embeds is None or generated_prompt_embeds is None): + raise ValueError( + "Provide either `prompt`, or `prompt_embeds` and `generated_prompt_embeds`. Cannot leave " + "`prompt` undefined without specifying both `prompt_embeds` and `generated_prompt_embeds`." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_embeds is not None and negative_generated_prompt_embeds is None: + raise ValueError( + "Cannot forward `negative_prompt_embeds` without `negative_generated_prompt_embeds`. Ensure that" + "both arguments are specified" + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + if attention_mask is not None and attention_mask.shape != prompt_embeds.shape[:2]: + raise ValueError( + "`attention_mask should have the same batch size and sequence length as `prompt_embeds`, but got:" + f"`attention_mask: {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}" + ) + + if transcription is None: + if self.text_encoder_2.config.model_type == "vits": + raise ValueError("Cannot forward without transcription. Please make sure to" " have transcription") + elif transcription is not None and ( + not isinstance(transcription, str) and not isinstance(transcription, list) + ): + raise ValueError(f"`transcription` has to be of type `str` or `list` but is {type(transcription)}") + + if generated_prompt_embeds is not None and negative_generated_prompt_embeds is not None: + if generated_prompt_embeds.shape != negative_generated_prompt_embeds.shape: + raise ValueError( + "`generated_prompt_embeds` and `negative_generated_prompt_embeds` must have the same shape when " + f"passed directly, but got: `generated_prompt_embeds` {generated_prompt_embeds.shape} != " + f"`negative_generated_prompt_embeds` {negative_generated_prompt_embeds.shape}." + ) + if ( + negative_attention_mask is not None + and negative_attention_mask.shape != negative_prompt_embeds.shape[:2] + ): + raise ValueError( + "`attention_mask should have the same batch size and sequence length as `prompt_embeds`, but got:" + f"`attention_mask: {negative_attention_mask.shape} != `prompt_embeds` {negative_prompt_embeds.shape}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(self.vocoder.config.model_in_dim) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + transcription: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 200, + guidance_scale: float = 3.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + generated_prompt_embeds: Optional[torch.Tensor] = None, + negative_generated_prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + negative_attention_mask: Optional[torch.LongTensor] = None, + max_new_tokens: Optional[int] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. + transcription (`str` or `List[str]`, *optional*):\ + The transcript for text to speech. + audio_length_in_s (`int`, *optional*, defaults to 10.24): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 200): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 3.5): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, then automatic + scoring is performed between the generated outputs and the text prompt. This scoring ranks the + generated waveforms based on their cosine similarity with the text input in the joint text-audio + embedding space. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for spectrogram + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + generated_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings from the GPT2 language model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input + argument. + negative_generated_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings from the GPT2 language model. Can be used to easily tweak text + inputs, *e.g.* prompt weighting. If not provided, `negative_prompt_embeds` will be computed from the + `negative_prompt` input argument. + attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, the attention mask will + be computed from the `prompt` input argument. + negative_attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `negative_prompt_embeds`. If not provided, the attention + mask will be computed from the `negative_prompt` input argument. + max_new_tokens (`int`, *optional*, defaults to None): + Number of new tokens to generate with the GPT2 language model. If not provided, the number of tokens will + be taken from the config of the model. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that is called every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that, if specified, is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion + model (LDM) output. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated audio. + """ + # 0.
Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + transcription, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + generated_prompt_embeds, + negative_generated_prompt_embeds, + attention_mask, + negative_attention_mask, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, attention_mask, generated_prompt_embeds = self.encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + transcription, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + generated_prompt_embeds=generated_prompt_embeds, + negative_generated_prompt_embeds=negative_generated_prompt_embeds, + attention_mask=attention_mask, + negative_attention_mask=negative_attention_mask, + max_new_tokens=max_new_tokens, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=generated_prompt_embeds, + encoder_hidden_states_1=prompt_embeds, + encoder_attention_mask_1=attention_mask, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + # 8. Post-processing + if not output_type == "latent": + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + else: + return AudioPipelineOutput(audios=latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + # 9. Automatic scoring + if num_waveforms_per_prompt > 1 and prompt is not None: + audio = self.score_waveforms( + text=prompt, + audio=audio, + num_waveforms_per_prompt=num_waveforms_per_prompt, + device=device, + dtype=prompt_embeds.dtype, + ) + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/diffusers3/pipelines/aura_flow/__init__.py b/diffusers3/pipelines/aura_flow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e1917baa61e28abf0970080c90b5864579cbe8f9 --- /dev/null +++ b/diffusers3/pipelines/aura_flow/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_aura_flow"] = ["AuraFlowPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_aura_flow import AuraFlowPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + 
setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/aura_flow/pipeline_aura_flow.py b/diffusers3/pipelines/aura_flow/pipeline_aura_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..6a86b5ceded92d6aab61f7006fdc58edcb8aefb2 --- /dev/null +++ b/diffusers3/pipelines/aura_flow/pipeline_aura_flow.py @@ -0,0 +1,591 @@ +# Copyright 2024 AuraFlow Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +from typing import List, Optional, Tuple, Union + +import torch +from transformers import T5Tokenizer, UMT5EncoderModel + +from ...image_processor import VaeImageProcessor +from ...models import AuraFlowTransformer2DModel, AutoencoderKL +from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AuraFlowPipeline + + >>> pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + >>> prompt = "A cat holding a sign that says hello world" + >>> image = pipe(prompt).images[0] + >>> image.save("aura_flow.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. 
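
    Example:
        A minimal sketch (assuming `pipe` is an already-instantiated pipeline exposing `pipe.scheduler`):

        ```py
        >>> timesteps, num_inference_steps = retrieve_timesteps(pipe.scheduler, num_inference_steps=28, device="cuda")
        >>> # or, if the scheduler's `set_timesteps` accepts custom `sigmas`:
        >>> # timesteps, num_inference_steps = retrieve_timesteps(pipe.scheduler, sigmas=[1.0, 0.75, 0.5, 0.25], device="cuda")
        ```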
+ """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AuraFlowPipeline(DiffusionPipeline): + r""" + Args: + tokenizer (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. AuraFlow uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [EleutherAI/pile-t5-xl](https://huggingface.co/EleutherAI/pile-t5-xl) variant. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + transformer ([`AuraFlowTransformer2DModel`]): + Conditional Transformer (MMDiT and DiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + """ + + _optional_components = [] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: UMT5EncoderModel, + vae: AutoencoderKL, + transformer: AuraFlowTransformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: + raise ValueError( + "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" + f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" + f" {negative_prompt_attention_mask.shape}." + ) + + def encode_prompt( + self, + prompt: Union[str, List[str]], + negative_prompt: Union[str, List[str]] = None, + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + max_sequence_length: int = 256, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for text embeddings. 
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + max_sequence_length (`int`, defaults to 256): Maximum sequence length to use for the prompt. + """ + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + max_length = max_sequence_length + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + truncation=True, + max_length=max_length, + padding="max_length", + return_tensors="pt", + ) + text_input_ids = text_inputs["input_ids"] + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because T5 can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + text_inputs = {k: v.to(device) for k, v in text_inputs.items()} + prompt_embeds = self.text_encoder(**text_inputs)[0] + prompt_attention_mask = text_inputs["attention_mask"].unsqueeze(-1).expand(prompt_embeds.shape) + prompt_embeds = prompt_embeds * prompt_attention_mask + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + prompt_attention_mask = prompt_attention_mask.reshape(bs_embed, -1) + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + truncation=True, + max_length=max_length, + padding="max_length", + return_tensors="pt", + ) + uncond_input = {k: v.to(device) for k, v in uncond_input.items()} + negative_prompt_embeds = self.text_encoder(**uncond_input)[0] + negative_prompt_attention_mask = ( + uncond_input["attention_mask"].unsqueeze(-1).expand(negative_prompt_embeds.shape) + ) + negative_prompt_embeds = negative_prompt_embeds * negative_prompt_attention_mask + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + negative_prompt_attention_mask = 
negative_prompt_attention_mask.reshape(bs_embed, -1) + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + else: + negative_prompt_embeds = None + negative_prompt_attention_mask = None + + return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if latents is not None: + return latents.to(device=device, dtype=dtype) + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: Union[str, List[str]] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = 1024, + width: Optional[int] = 1024, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + max_sequence_length: int = 256, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for best results. 
+ width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 3.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2 of the [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from the `prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input + argument. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned + where the first element is a list with the generated images. + """ + # 1. Check inputs.
Raise error if not correct + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) + + # 2. Determine batch size. + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt=prompt, + negative_prompt=negative_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + max_sequence_length=max_sequence_length, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + + # 4. Prepare timesteps + + # sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + # aura use timestep value between 0 and 1, with t=1 as noise and t=0 as the image + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = torch.tensor([t / 1000]).expand(latent_model_input.shape[0]) + timestep = timestep.to(latents.device, dtype=latents.dtype) + + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, + encoder_hidden_states=prompt_embeds, + timestep=timestep, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if output_type == "latent": + image = latents + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/auto_pipeline.py b/diffusers3/pipelines/auto_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..39ceadb5acef5dee9d042e6abcdccfcb78d421e0 --- /dev/null +++ b/diffusers3/pipelines/auto_pipeline.py @@ -0,0 +1,1093 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
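
# This module resolves the `_class_name` recorded in a checkpoint's `model_index.json` to a concrete
# pipeline class through the task mappings defined below (text-to-image, image-to-image, inpainting).
# A minimal usage sketch (the repo id is illustrative only, not something this module depends on):
#
#     from diffusers3.pipelines.auto_pipeline import AutoPipelineForText2Image
#
#     pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
#     image = pipe("a photo of an astronaut riding a horse").images[0]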
+ +from collections import OrderedDict + +from huggingface_hub.utils import validate_hf_hub_args + +from ..configuration_utils import ConfigMixin +from ..utils import is_sentencepiece_available +from .aura_flow import AuraFlowPipeline +from .controlnet import ( + StableDiffusionControlNetImg2ImgPipeline, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPipeline, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLControlNetPipeline, +) +from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline +from .flux import FluxControlNetPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxPipeline +from .hunyuandit import HunyuanDiTPipeline +from .kandinsky import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyImg2ImgPipeline, + KandinskyInpaintCombinedPipeline, + KandinskyInpaintPipeline, + KandinskyPipeline, +) +from .kandinsky2_2 import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22Img2ImgPipeline, + KandinskyV22InpaintCombinedPipeline, + KandinskyV22InpaintPipeline, + KandinskyV22Pipeline, +) +from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline +from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline +from .lumina import LuminaText2ImgPipeline +from .pag import ( + HunyuanDiTPAGPipeline, + PixArtSigmaPAGPipeline, + StableDiffusion3PAGPipeline, + StableDiffusionControlNetPAGPipeline, + StableDiffusionPAGPipeline, + StableDiffusionXLControlNetPAGImg2ImgPipeline, + StableDiffusionXLControlNetPAGPipeline, + StableDiffusionXLPAGImg2ImgPipeline, + StableDiffusionXLPAGInpaintPipeline, + StableDiffusionXLPAGPipeline, +) +from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline +from .stable_cascade import StableCascadeCombinedPipeline, StableCascadeDecoderPipeline +from .stable_diffusion import ( + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionPipeline, +) +from .stable_diffusion_3 import ( + StableDiffusion3Img2ImgPipeline, + StableDiffusion3InpaintPipeline, + StableDiffusion3Pipeline, +) +from .stable_diffusion_xl import ( + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLPipeline, +) +from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline + + +AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict( + [ + ("stable-diffusion", StableDiffusionPipeline), + ("stable-diffusion-xl", StableDiffusionXLPipeline), + ("stable-diffusion-3", StableDiffusion3Pipeline), + ("stable-diffusion-3-pag", StableDiffusion3PAGPipeline), + ("if", IFPipeline), + ("hunyuan", HunyuanDiTPipeline), + ("hunyuan-pag", HunyuanDiTPAGPipeline), + ("kandinsky", KandinskyCombinedPipeline), + ("kandinsky22", KandinskyV22CombinedPipeline), + ("kandinsky3", Kandinsky3Pipeline), + ("stable-diffusion-controlnet", StableDiffusionControlNetPipeline), + ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetPipeline), + ("wuerstchen", WuerstchenCombinedPipeline), + ("cascade", StableCascadeCombinedPipeline), + ("lcm", LatentConsistencyModelPipeline), + ("pixart-alpha", PixArtAlphaPipeline), + ("pixart-sigma", PixArtSigmaPipeline), + ("stable-diffusion-pag", StableDiffusionPAGPipeline), + ("stable-diffusion-controlnet-pag", StableDiffusionControlNetPAGPipeline), + ("stable-diffusion-xl-pag", StableDiffusionXLPAGPipeline), + ("stable-diffusion-xl-controlnet-pag", 
StableDiffusionXLControlNetPAGPipeline), + ("pixart-sigma-pag", PixArtSigmaPAGPipeline), + ("auraflow", AuraFlowPipeline), + ("flux", FluxPipeline), + ("flux-controlnet", FluxControlNetPipeline), + ("lumina", LuminaText2ImgPipeline), + ] +) + +AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict( + [ + ("stable-diffusion", StableDiffusionImg2ImgPipeline), + ("stable-diffusion-xl", StableDiffusionXLImg2ImgPipeline), + ("stable-diffusion-3", StableDiffusion3Img2ImgPipeline), + ("if", IFImg2ImgPipeline), + ("kandinsky", KandinskyImg2ImgCombinedPipeline), + ("kandinsky22", KandinskyV22Img2ImgCombinedPipeline), + ("kandinsky3", Kandinsky3Img2ImgPipeline), + ("stable-diffusion-controlnet", StableDiffusionControlNetImg2ImgPipeline), + ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline), + ("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline), + ("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGImg2ImgPipeline), + ("lcm", LatentConsistencyModelImg2ImgPipeline), + ("flux", FluxImg2ImgPipeline), + ] +) + +AUTO_INPAINT_PIPELINES_MAPPING = OrderedDict( + [ + ("stable-diffusion", StableDiffusionInpaintPipeline), + ("stable-diffusion-xl", StableDiffusionXLInpaintPipeline), + ("stable-diffusion-3", StableDiffusion3InpaintPipeline), + ("if", IFInpaintingPipeline), + ("kandinsky", KandinskyInpaintCombinedPipeline), + ("kandinsky22", KandinskyV22InpaintCombinedPipeline), + ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline), + ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetInpaintPipeline), + ("stable-diffusion-xl-pag", StableDiffusionXLPAGInpaintPipeline), + ("flux", FluxInpaintPipeline), + ] +) + +_AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( + [ + ("kandinsky", KandinskyPipeline), + ("kandinsky22", KandinskyV22Pipeline), + ("wuerstchen", WuerstchenDecoderPipeline), + ("cascade", StableCascadeDecoderPipeline), + ] +) +_AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( + [ + ("kandinsky", KandinskyImg2ImgPipeline), + ("kandinsky22", KandinskyV22Img2ImgPipeline), + ] +) +_AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( + [ + ("kandinsky", KandinskyInpaintPipeline), + ("kandinsky22", KandinskyV22InpaintPipeline), + ] +) + +if is_sentencepiece_available(): + from .kolors import KolorsImg2ImgPipeline, KolorsPipeline + from .pag import KolorsPAGPipeline + + AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline + AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors-pag"] = KolorsPAGPipeline + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsImg2ImgPipeline + +SUPPORTED_TASKS_MAPPINGS = [ + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + AUTO_INPAINT_PIPELINES_MAPPING, + _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING, + _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING, + _AUTO_INPAINT_DECODER_PIPELINES_MAPPING, +] + + +def _get_connected_pipeline(pipeline_cls): + # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder + if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False + ) + if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): + return _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False + ) + if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): + return 
_get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) + + +def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool = True): + def get_model(pipeline_class_name): + for task_mapping in SUPPORTED_TASKS_MAPPINGS: + for model_name, pipeline in task_mapping.items(): + if pipeline.__name__ == pipeline_class_name: + return model_name + + model_name = get_model(pipeline_class_name) + + if model_name is not None: + task_class = mapping.get(model_name, None) + if task_class is not None: + return task_class + + if throw_error_if_not_exist: + raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}") + + +class AutoPipelineForText2Image(ConfigMixin): + r""" + + [`AutoPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The + specific underlying pipeline class is automatically selected from either the + [`~AutoPipelineForText2Image.from_pretrained`] or [`~AutoPipelineForText2Image.from_pipe`] methods. + + This class cannot be instantiated using `__init__()` (throws an error). + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. + + """ + + config_name = "model_index.json" + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_or_path, **kwargs): + r""" + Instantiates a text-to-image Pytorch diffusion pipeline from pretrained pipeline weight. + + The from_pretrained() method takes care of returning the correct pipeline class instance by: + 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its + config object + 2. Find the text-to-image pipeline linked to the pipeline class using pattern matching on pipeline class + name. + + If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetPipeline`] object. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. 
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a ๐Ÿค— Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if youโ€™re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesnโ€™t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have ๐Ÿค— Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. 
If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import AutoPipelineForText2Image + + >>> pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> image = pipeline(prompt).images[0] + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + + config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) + orig_class_name = config["_class_name"] + + if "controlnet" in kwargs: + orig_class_name = config["_class_name"].replace("Pipeline", "ControlNetPipeline") + if "enable_pag" in kwargs: + enable_pag = kwargs.pop("enable_pag") + if enable_pag: + orig_class_name = orig_class_name.replace("Pipeline", "PAGPipeline") + + text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, orig_class_name) + + kwargs = {**load_config_kwargs, **kwargs} + return text_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Instantiates a text-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class. + + The from_pipe() method takes care of returning the correct pipeline class instance by finding the text-to-image + pipeline linked to the pipeline class using pattern matching on pipeline class name. + + All the modules the pipeline contains will be used to initialize the new pipeline without reallocating + additional memory. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pipeline (`DiffusionPipeline`): + an instantiated `DiffusionPipeline` object + + ```py + >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image + + >>> pipe_i2i = AutoPipelineForImage2Image.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False + ... 
) + + >>> pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i) + >>> image = pipe_t2i(prompt).images[0] + ``` + """ + + original_config = dict(pipeline.config) + original_cls_name = pipeline.__class__.__name__ + + # derive the pipeline class to instantiate + text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, original_cls_name) + + if "controlnet" in kwargs: + if kwargs["controlnet"] is not None: + to_replace = "PAGPipeline" if "PAG" in text_2_image_cls.__name__ else "Pipeline" + text_2_image_cls = _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + text_2_image_cls.__name__.replace("ControlNet", "").replace(to_replace, "ControlNet" + to_replace), + ) + else: + text_2_image_cls = _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + text_2_image_cls.__name__.replace("ControlNet", ""), + ) + + if "enable_pag" in kwargs: + enable_pag = kwargs.pop("enable_pag") + if enable_pag: + text_2_image_cls = _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + text_2_image_cls.__name__.replace("PAG", "").replace("Pipeline", "PAGPipeline"), + ) + else: + text_2_image_cls = _get_task_class( + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, + text_2_image_cls.__name__.replace("PAG", ""), + ) + + # define expected module and optional kwargs given the pipeline signature + expected_modules, optional_kwargs = text_2_image_cls._get_signature_keys(text_2_image_cls) + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + original_class_obj = { + k: pipeline.components[k] + for k, v in pipeline.components.items() + if k in expected_modules and k not in passed_class_obj + } + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k, v in original_config.items() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config that were not expected by original pipeline is stored as private attribute + # we will pass them as optional arguments if they can be accepted by the pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + text_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} + + # store unused config as private attribute + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": original_config[k] + for k, v in original_config.items() + if k not in text_2_image_kwargs + } + + missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(text_2_image_kwargs.keys()) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + model = text_2_image_cls(**text_2_image_kwargs) + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + model.register_to_config(**unused_original_config) + + return model + + +class AutoPipelineForImage2Image(ConfigMixin): + r""" + + [`AutoPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image 
pipeline class. The + specific underlying pipeline class is automatically selected from either the + [`~AutoPipelineForImage2Image.from_pretrained`] or [`~AutoPipelineForImage2Image.from_pipe`] methods. + + This class cannot be instantiated using `__init__()` (throws an error). + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. + + """ + + config_name = "model_index.json" + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_or_path, **kwargs): + r""" + Instantiates a image-to-image Pytorch diffusion pipeline from pretrained pipeline weight. + + The from_pretrained() method takes care of returning the correct pipeline class instance by: + 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its + config object + 2. Find the image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class + name. + + If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetImg2ImgPipeline`] + object. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. 
If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a ๐Ÿค— Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if youโ€™re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesnโ€™t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have ๐Ÿค— Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. 
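+
+        As an illustrative sketch (the checkpoint ids below are examples, not requirements), passing a
+        `controlnet` component re-routes the resolved class to the matching ControlNet image-to-image
+        variant, and `enable_pag=True` selects the PAG variant where one exists:
+
+        ```py
+        >>> import torch
+        >>> from diffusers import AutoPipelineForImage2Image, ControlNetModel
+
+        >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+        >>> pipeline = AutoPipelineForImage2Image.from_pretrained(
+        ...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+        ... )
+        >>> pipeline.__class__.__name__
+        'StableDiffusionControlNetImg2ImgPipeline'
+        ```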
+ + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import AutoPipelineForImage2Image + + >>> pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> image = pipeline(prompt, image).images[0] + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + + config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) + orig_class_name = config["_class_name"] + + # the `orig_class_name` can be: + # `- *Pipeline` (for regular text-to-image checkpoint) + # `- *Img2ImgPipeline` (for refiner checkpoint) + to_replace = "Img2ImgPipeline" if "Img2Img" in config["_class_name"] else "Pipeline" + + if "controlnet" in kwargs: + orig_class_name = orig_class_name.replace(to_replace, "ControlNet" + to_replace) + if "enable_pag" in kwargs: + enable_pag = kwargs.pop("enable_pag") + if enable_pag: + orig_class_name = orig_class_name.replace(to_replace, "PAG" + to_replace) + + image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name) + + kwargs = {**load_config_kwargs, **kwargs} + return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Instantiates a image-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class. + + The from_pipe() method takes care of returning the correct pipeline class instance by finding the + image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name. + + All the modules the pipeline contains will be used to initialize the new pipeline without reallocating + additional memory. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pipeline (`DiffusionPipeline`): + an instantiated `DiffusionPipeline` object + + Examples: + + ```py + >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image + + >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", requires_safety_checker=False + ... 
) + + >>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i) + >>> image = pipe_i2i(prompt, image).images[0] + ``` + """ + + original_config = dict(pipeline.config) + original_cls_name = pipeline.__class__.__name__ + + # derive the pipeline class to instantiate + image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name) + + if "controlnet" in kwargs: + if kwargs["controlnet"] is not None: + to_replace = "Img2ImgPipeline" + if "PAG" in image_2_image_cls.__name__: + to_replace = "PAG" + to_replace + image_2_image_cls = _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + image_2_image_cls.__name__.replace("ControlNet", "").replace( + to_replace, "ControlNet" + to_replace + ), + ) + else: + image_2_image_cls = _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + image_2_image_cls.__name__.replace("ControlNet", ""), + ) + + if "enable_pag" in kwargs: + enable_pag = kwargs.pop("enable_pag") + if enable_pag: + image_2_image_cls = _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + image_2_image_cls.__name__.replace("PAG", "").replace("Img2ImgPipeline", "PAGImg2ImgPipeline"), + ) + else: + image_2_image_cls = _get_task_class( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + image_2_image_cls.__name__.replace("PAG", ""), + ) + + # define expected module and optional kwargs given the pipeline signature + expected_modules, optional_kwargs = image_2_image_cls._get_signature_keys(image_2_image_cls) + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + original_class_obj = { + k: pipeline.components[k] + for k, v in pipeline.components.items() + if k in expected_modules and k not in passed_class_obj + } + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k, v in original_config.items() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config attribute that were not expected by original pipeline is stored as its private attribute + # we will pass them as optional arguments if they can be accepted by the pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} + + # store unused config as private attribute + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": original_config[k] + for k, v in original_config.items() + if k not in image_2_image_kwargs + } + + missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(image_2_image_kwargs.keys()) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + model = image_2_image_cls(**image_2_image_kwargs) + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + model.register_to_config(**unused_original_config) + + return model + + +class AutoPipelineForInpainting(ConfigMixin): + r""" + + 
[`AutoPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The + specific underlying pipeline class is automatically selected from either the + [`~AutoPipelineForInpainting.from_pretrained`] or [`~AutoPipelineForInpainting.from_pipe`] methods. + + This class cannot be instantiated using `__init__()` (throws an error). + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. + + """ + + config_name = "model_index.json" + + def __init__(self, *args, **kwargs): + raise EnvironmentError( + f"{self.__class__.__name__} is designed to be instantiated " + f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " + f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_or_path, **kwargs): + r""" + Instantiates a inpainting Pytorch diffusion pipeline from pretrained pipeline weight. + + The from_pretrained() method takes care of returning the correct pipeline class instance by: + 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its + config object + 2. Find the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name. + + If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetInpaintPipeline`] + object. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. 
+ local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a ๐Ÿค— Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if youโ€™re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesnโ€™t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have ๐Ÿค— Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. 
This is ignored when + loading `from_flax`. + + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import AutoPipelineForInpainting + + >>> pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0] + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + token = kwargs.pop("token", None) + local_files_only = kwargs.pop("local_files_only", False) + revision = kwargs.pop("revision", None) + + load_config_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "token": token, + "local_files_only": local_files_only, + "revision": revision, + } + + config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) + orig_class_name = config["_class_name"] + + # The `orig_class_name`` can be: + # `- *InpaintPipeline` (for inpaint-specific checkpoint) + # - or *Pipeline (for regular text-to-image checkpoint) + to_replace = "InpaintPipeline" if "Inpaint" in config["_class_name"] else "Pipeline" + + if "controlnet" in kwargs: + orig_class_name = orig_class_name.replace(to_replace, "ControlNet" + to_replace) + if "enable_pag" in kwargs: + enable_pag = kwargs.pop("enable_pag") + if enable_pag: + orig_class_name = orig_class_name.replace(to_replace, "PAG" + to_replace) + inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, orig_class_name) + + kwargs = {**load_config_kwargs, **kwargs} + return inpainting_cls.from_pretrained(pretrained_model_or_path, **kwargs) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Instantiates a inpainting Pytorch diffusion pipeline from another instantiated diffusion pipeline class. + + The from_pipe() method takes care of returning the correct pipeline class instance by finding the inpainting + pipeline linked to the pipeline class using pattern matching on pipeline class name. + + All the modules the pipeline class contain will be used to initialize the new pipeline without reallocating + additional memory. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + Parameters: + pipeline (`DiffusionPipeline`): + an instantiated `DiffusionPipeline` object + + Examples: + + ```py + >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting + + >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", requires_safety_checker=False + ... 
) + + >>> pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_t2i) + >>> image = pipe_inpaint(prompt, image=init_image, mask_image=mask_image).images[0] + ``` + """ + original_config = dict(pipeline.config) + original_cls_name = pipeline.__class__.__name__ + + # derive the pipeline class to instantiate + inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, original_cls_name) + + if "controlnet" in kwargs: + if kwargs["controlnet"] is not None: + inpainting_cls = _get_task_class( + AUTO_INPAINT_PIPELINES_MAPPING, + inpainting_cls.__name__.replace("ControlNet", "").replace( + "InpaintPipeline", "ControlNetInpaintPipeline" + ), + ) + else: + inpainting_cls = _get_task_class( + AUTO_INPAINT_PIPELINES_MAPPING, + inpainting_cls.__name__.replace("ControlNetInpaintPipeline", "InpaintPipeline"), + ) + + if "enable_pag" in kwargs: + enable_pag = kwargs.pop("enable_pag") + if enable_pag: + inpainting_cls = _get_task_class( + AUTO_INPAINT_PIPELINES_MAPPING, + inpainting_cls.__name__.replace("PAG", "").replace("InpaintPipeline", "PAGInpaintPipeline"), + ) + else: + inpainting_cls = _get_task_class( + AUTO_INPAINT_PIPELINES_MAPPING, + inpainting_cls.__name__.replace("PAGInpaintPipeline", "InpaintPipeline"), + ) + + # define expected module and optional kwargs given the pipeline signature + expected_modules, optional_kwargs = inpainting_cls._get_signature_keys(inpainting_cls) + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + original_class_obj = { + k: pipeline.components[k] + for k, v in pipeline.components.items() + if k in expected_modules and k not in passed_class_obj + } + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k, v in original_config.items() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config that were not expected by original pipeline is stored as private attribute + # we will pass them as optional arguments if they can be accepted by the pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + inpainting_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} + + # store unused config as private attribute + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": original_config[k] + for k, v in original_config.items() + if k not in inpainting_kwargs + } + + missing_modules = set(expected_modules) - set(pipeline._optional_components) - set(inpainting_kwargs.keys()) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + model = inpainting_cls(**inpainting_kwargs) + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + model.register_to_config(**unused_original_config) + + return model diff --git a/diffusers3/pipelines/blip_diffusion/__init__.py b/diffusers3/pipelines/blip_diffusion/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..af6c879d5ce88aa8edec0691e987444ff1d3dfec --- /dev/null +++ b/diffusers3/pipelines/blip_diffusion/__init__.py @@ -0,0 +1,20 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL +from PIL import Image + +from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline +else: + from .blip_image_processing import BlipImageProcessor + from .modeling_blip2 import Blip2QFormerModel + from .modeling_ctx_clip import ContextCLIPTextModel + from .pipeline_blip_diffusion import BlipDiffusionPipeline diff --git a/diffusers3/pipelines/blip_diffusion/blip_image_processing.py b/diffusers3/pipelines/blip_diffusion/blip_image_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..d92a0766905909b8c6284d336ba6effde20dc8d1 --- /dev/null +++ b/diffusers3/pipelines/blip_diffusion/blip_image_processing.py @@ -0,0 +1,318 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for BLIP.""" + +from typing import Dict, List, Optional, Union + +import numpy as np +import torch +from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from transformers.image_transforms import convert_to_rgb, resize, to_channel_dimension_format +from transformers.image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, +) +from transformers.utils import TensorType, is_vision_available, logging + +from diffusers.utils import numpy_to_pil + + +if is_vision_available(): + import PIL.Image + + +logger = logging.get_logger(__name__) + + +# We needed some extra functions on top of the ones in transformers.image_processing_utils.BaseImageProcessor, namely center crop +# Copy-pasted from transformers.models.blip.image_processing_blip.BlipImageProcessor +class BlipImageProcessor(BaseImageProcessor): + r""" + Constructs a BLIP image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the + `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): + Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. 
Can be + overridden by the `resample` parameter in the `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Wwhether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the + `do_rescale` parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be + overridden by the `rescale_factor` parameter in the `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be + overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + do_center_crop: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 224, "width": 224} + size = get_size_dict(size, default_to_square=True) + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + self.do_center_crop = do_center_crop + + # Copy-pasted from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + `PILImageResampling` filter to use when resizing the image e.g. 
`PILImageResampling.BICUBIC`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") + output_size = (size["height"], size["width"]) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + do_center_crop: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + do_convert_rgb: bool = None, + data_format: ChannelDimension = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Controls the size of the image after `resize`. The shortest edge of the image is resized to + `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image + is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest + edge equal to `int(size["shortest_edge"] * (1333 / 800))`. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. 
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to normalize the image by if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to normalize the image by if `do_normalize` is set to `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + resample = resample if resample is not None else self.resample + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if do_resize and size is None or resample is None: + raise ValueError("Size and resample must be specified if do_resize is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # PIL RGBA images are converted to RGB + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. 
+ images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + if input_data_format is None: + # We assume that all images have the same channel dimension format. + input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + if do_center_crop: + images = [self.center_crop(image, size, input_data_format=input_data_format) for image in images] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) + return encoded_outputs + + # Follows diffusers.VaeImageProcessor.postprocess + def postprocess(self, sample: torch.Tensor, output_type: str = "pil"): + if output_type not in ["pt", "np", "pil"]: + raise ValueError( + f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" + ) + + # Equivalent to diffusers.VaeImageProcessor.denormalize + sample = (sample / 2 + 0.5).clamp(0, 1) + if output_type == "pt": + return sample + + # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy + sample = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "np": + return sample + # Output_type must be 'pil' + sample = numpy_to_pil(sample) + return sample diff --git a/diffusers3/pipelines/blip_diffusion/modeling_blip2.py b/diffusers3/pipelines/blip_diffusion/modeling_blip2.py new file mode 100644 index 0000000000000000000000000000000000000000..1be4761a99875a09cca9a30de42f2634aa36c903 --- /dev/null +++ b/diffusers3/pipelines/blip_diffusion/modeling_blip2.py @@ -0,0 +1,642 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
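+"""
+Illustrative note (not upstream documentation): the modules below re-implement the BLIP-2 Q-Former
+because the `transformers` implementation does not expose multimodal (query + text) embeddings.
+As a minimal sketch, `Blip2TextEmbeddings` prepends learned query tokens to the token embeddings;
+the shapes below assume the default `Blip2QFormerConfig` (hidden size 768) and are examples only:
+
+    >>> import torch
+    >>> from transformers import Blip2QFormerConfig
+    >>> emb = Blip2TextEmbeddings(Blip2QFormerConfig())
+    >>> input_ids = torch.randint(0, 100, (2, 5))   # (batch, seq_len)
+    >>> query_embeds = torch.randn(1, 32, 768)      # learned query tokens, broadcast over the batch
+    >>> emb(input_ids=input_ids, query_embeds=query_embeds).shape
+    torch.Size([2, 37, 768])
+"""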
+from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from transformers import BertTokenizer +from transformers.activations import QuickGELUActivation as QuickGELU +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + BaseModelOutputWithPoolingAndCrossAttentions, +) +from transformers.models.blip_2.configuration_blip_2 import Blip2Config, Blip2VisionConfig +from transformers.models.blip_2.modeling_blip_2 import ( + Blip2Encoder, + Blip2PreTrainedModel, + Blip2QFormerAttention, + Blip2QFormerIntermediate, + Blip2QFormerOutput, +) +from transformers.pytorch_utils import apply_chunking_to_forward +from transformers.utils import ( + logging, + replace_return_docstrings, +) + + +logger = logging.get_logger(__name__) + + +# There is an implementation of Blip2 in `transformers` : https://github.com/huggingface/transformers/blob/main/src/transformers/models/blip_2/modeling_blip_2.py. +# But it doesn't support getting multimodal embeddings. So, this module can be +# replaced with a future `transformers` version supports that. +class Blip2TextEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, + input_ids=None, + position_ids=None, + query_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + seq_length = input_ids.size()[1] + else: + seq_length = 0 + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone() + + if input_ids is not None: + embeddings = self.word_embeddings(input_ids) + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings = embeddings + position_embeddings + + if query_embeds is not None: + batch_size = embeddings.shape[0] + # repeat the query embeddings for batch size + query_embeds = query_embeds.repeat(batch_size, 1, 1) + embeddings = torch.cat((query_embeds, embeddings), dim=1) + else: + embeddings = query_embeds + embeddings = embeddings.to(query_embeds.dtype) + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2 +class Blip2VisionEmbeddings(nn.Module): + def __init__(self, config: Blip2VisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) + + self.patch_embedding = 
nn.Conv2d( + in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + + self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) + + def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype) + return embeddings + + +# The Qformer encoder, which takes the visual embeddings, and the text input, to get multimodal embeddings +class Blip2QFormerEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + query_length=0, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions, query_length) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + query_length, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if layer_module.has_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# The layers making up the Qformer encoder +class Blip2QFormerLayer(nn.Module): + def __init__(self, config, layer_idx): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = Blip2QFormerAttention(config) + + self.layer_idx = layer_idx + + if layer_idx % config.cross_attention_frequency == 0: + self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True) + self.has_cross_attention = True + else: + self.has_cross_attention = False + + self.intermediate = Blip2QFormerIntermediate(config) + self.intermediate_query = Blip2QFormerIntermediate(config) + self.output_query = Blip2QFormerOutput(config) + self.output = Blip2QFormerOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + query_length=0, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:-1] + + present_key_value = self_attention_outputs[-1] + + if query_length > 0: + query_attention_output = attention_output[:, :query_length, :] + + if self.has_cross_attention: + if encoder_hidden_states is None: + raise ValueError("encoder_hidden_states must be given for cross-attention layers") + cross_attention_outputs = self.crossattention( + query_attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + query_attention_output = cross_attention_outputs[0] + # add cross attentions if we output attention weights + outputs = outputs + cross_attention_outputs[1:-1] + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk_query, + 
self.chunk_size_feed_forward, + self.seq_len_dim, + query_attention_output, + ) + + if attention_output.shape[1] > query_length: + layer_output_text = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output[:, query_length:, :], + ) + layer_output = torch.cat([layer_output, layer_output_text], dim=1) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + def feed_forward_chunk_query(self, attention_output): + intermediate_output = self.intermediate_query(attention_output) + layer_output = self.output_query(intermediate_output, attention_output) + return layer_output + + +# ProjLayer used to project the multimodal Blip2 embeddings to be used in the text encoder +class ProjLayer(nn.Module): + def __init__(self, in_dim, out_dim, hidden_dim, drop_p=0.1, eps=1e-12): + super().__init__() + + # Dense1 -> Act -> Dense2 -> Drop -> Res -> Norm + self.dense1 = nn.Linear(in_dim, hidden_dim) + self.act_fn = QuickGELU() + self.dense2 = nn.Linear(hidden_dim, out_dim) + self.dropout = nn.Dropout(drop_p) + + self.LayerNorm = nn.LayerNorm(out_dim, eps=eps) + + def forward(self, x): + x_in = x + + x = self.LayerNorm(x) + x = self.dropout(self.dense2(self.act_fn(self.dense1(x)))) + x_in + + return x + + +# Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2 +class Blip2VisionModel(Blip2PreTrainedModel): + main_input_name = "pixel_values" + config_class = Blip2VisionConfig + + def __init__(self, config: Blip2VisionConfig): + super().__init__(config) + self.config = config + embed_dim = config.hidden_size + self.embeddings = Blip2VisionEmbeddings(config) + self.pre_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = Blip2Encoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + self.post_init() + + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layernorm(hidden_states) + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.post_layernorm(last_hidden_state) + + pooled_output = last_hidden_state[:, 0, :] + pooled_output = 
self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def get_input_embeddings(self): + return self.embeddings + + +# Qformer model, used to get multimodal embeddings from the text and image inputs +class Blip2QFormerModel(Blip2PreTrainedModel): + """ + Querying Transformer (Q-Former), used in BLIP-2. + """ + + def __init__(self, config: Blip2Config): + super().__init__(config) + self.config = config + self.embeddings = Blip2TextEmbeddings(config.qformer_config) + self.visual_encoder = Blip2VisionModel(config.vision_config) + self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) + if not hasattr(config, "tokenizer") or config.tokenizer is None: + self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side="right") + else: + self.tokenizer = BertTokenizer.from_pretrained(config.tokenizer, truncation_side="right") + self.tokenizer.add_special_tokens({"bos_token": "[DEC]"}) + self.proj_layer = ProjLayer( + in_dim=config.qformer_config.hidden_size, + out_dim=config.qformer_config.hidden_size, + hidden_dim=config.qformer_config.hidden_size * 4, + drop_p=0.1, + eps=1e-12, + ) + + self.encoder = Blip2QFormerEncoder(config.qformer_config) + + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: torch.Tensor, + input_shape: Tuple[int], + device: torch.device, + has_query: bool = False, + ) -> torch.Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + device (`torch.device`): + The device of the input to the model. + + Returns: + `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. 
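+        # e.g. a padding mask of [1, 1, 0] is turned into an additive mask of [0.0, 0.0, -10000.0] below.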
+ # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + text_input=None, + image_input=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + encoder_hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of: + shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and + value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are + used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key + value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape + `(batch_size, sequence_length)`. + use_cache (`bool`, `optional`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
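+            text_input (`str` or `List[str]`):
+                Raw text; it is tokenized internally with the model's `BertTokenizer`.
+            image_input (`torch.Tensor`):
+                Preprocessed pixel values; they are encoded by the internal vision encoder and used as the
+                cross-attention states for the query tokens.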
+ """ + + text = self.tokenizer(text_input, return_tensors="pt", padding=True) + text = text.to(self.device) + input_ids = text.input_ids + batch_size = input_ids.shape[0] + query_atts = torch.ones((batch_size, self.query_tokens.size()[1]), dtype=torch.long).to(self.device) + attention_mask = torch.cat([query_atts, text.attention_mask], dim=1) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 + ) + + query_length = self.query_tokens.shape[1] + + embedding_output = self.embeddings( + input_ids=input_ids, + query_embeds=self.query_tokens, + past_key_values_length=past_key_values_length, + ) + + # embedding_output = self.layernorm(query_embeds) + # embedding_output = self.dropout(embedding_output) + + input_shape = embedding_output.size()[:-1] + batch_size, seq_length = input_shape + device = embedding_output.device + + image_embeds_frozen = self.visual_encoder(image_input).last_hidden_state + # image_embeds_frozen = torch.ones_like(image_embeds_frozen) + encoder_hidden_states = image_embeds_frozen + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if isinstance(encoder_hidden_states, list): + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if isinstance(encoder_attention_mask, list): + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.qformer_config.num_hidden_layers) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + 
output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + query_length=query_length, + ) + sequence_output = encoder_outputs[0] + pooled_output = sequence_output[:, 0, :] + + if not return_dict: + return self.proj_layer(sequence_output[:, :query_length, :]) + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) diff --git a/diffusers3/pipelines/blip_diffusion/modeling_ctx_clip.py b/diffusers3/pipelines/blip_diffusion/modeling_ctx_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..d29dddf64b01e46c964814385f900da80d1746bb --- /dev/null +++ b/diffusers3/pipelines/blip_diffusion/modeling_ctx_clip.py @@ -0,0 +1,223 @@ +# Copyright 2024 Salesforce.com, inc. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple, Union + +import torch +from torch import nn +from transformers import CLIPPreTrainedModel +from transformers.modeling_outputs import BaseModelOutputWithPooling +from transformers.models.clip.configuration_clip import CLIPTextConfig +from transformers.models.clip.modeling_clip import CLIPEncoder + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
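+    Positions to attend to map to `0.0` and masked positions to `torch.finfo(dtype).min`, so the result can be
+    added directly to the attention scores.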
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# This is a modified version of the CLIPTextModel from transformers.models.clip.modeling_clip +# Which allows for an extra input of "context embeddings", which are the query embeddings used in Qformer +# They pass through the clip model, along with the text embeddings, and interact with them using self attention +class ContextCLIPTextModel(CLIPPreTrainedModel): + config_class = CLIPTextConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPTextConfig): + super().__init__(config) + self.text_model = ContextCLIPTextTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + ctx_embeddings: torch.Tensor = None, + ctx_begin_pos: list = None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + return self.text_model( + ctx_embeddings=ctx_embeddings, + ctx_begin_pos=ctx_begin_pos, + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class ContextCLIPTextTransformer(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + self.embeddings = ContextCLIPTextEmbeddings(config) + self.encoder = CLIPEncoder(config) + self.final_layer_norm = nn.LayerNorm(embed_dim) + + def forward( + self, + ctx_embeddings: torch.Tensor, + ctx_begin_pos: list, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None: + raise ValueError("You have to specify either input_ids") + + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + + hidden_states = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + ctx_embeddings=ctx_embeddings, + ctx_begin_pos=ctx_begin_pos, + ) + + bsz, seq_len = input_shape + if ctx_embeddings is not None: + seq_len += ctx_embeddings.size(1) + # CLIP's text model uses causal mask, prepare it here. 
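+        # note that seq_len already includes the prepended ctx embeddings, so the causal mask covers them as well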
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( + hidden_states.device + ) + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.final_layer_norm(last_hidden_state) + + # text_embeds.shape = [batch_size, sequence_length, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0], device=input_ids.device), + input_ids.to(torch.int).argmax(dim=-1), + ] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def _build_causal_attention_mask(self, bsz, seq_len, dtype): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) # zero out the lower diagonal + mask = mask.unsqueeze(1) # expand mask + return mask + + +class ContextCLIPTextEmbeddings(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + def forward( + self, + ctx_embeddings: torch.Tensor, + ctx_begin_pos: list, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if ctx_embeddings is None: + ctx_len = 0 + else: + ctx_len = ctx_embeddings.shape[1] + + seq_length = (input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]) + ctx_len + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + # for each input embeddings, add the ctx embeddings at the correct position + input_embeds_ctx = [] + bsz = inputs_embeds.shape[0] + + if ctx_embeddings is not None: + for i in range(bsz): + cbp = ctx_begin_pos[i] + + prefix = inputs_embeds[i, :cbp] + # remove the special token embedding + suffix = inputs_embeds[i, cbp:] + + input_embeds_ctx.append(torch.cat([prefix, ctx_embeddings[i], suffix], dim=0)) + + inputs_embeds = torch.stack(input_embeds_ctx, dim=0) + + position_embeddings = self.position_embedding(position_ids) + embeddings = 
inputs_embeds + position_embeddings + + return embeddings diff --git a/diffusers3/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/diffusers3/pipelines/blip_diffusion/pipeline_blip_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..ff23247b5f813d854369f68ac6c512c5f33ac19b --- /dev/null +++ b/diffusers3/pipelines/blip_diffusion/pipeline_blip_diffusion.py @@ -0,0 +1,348 @@ +# Copyright 2024 Salesforce.com, inc. +# Copyright 2024 The HuggingFace Team. All rights reserved.# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPTokenizer + +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import PNDMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .blip_image_processing import BlipImageProcessor +from .modeling_blip2 import Blip2QFormerModel +from .modeling_ctx_clip import ContextCLIPTextModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers.pipelines import BlipDiffusionPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> blip_diffusion_pipe = BlipDiffusionPipeline.from_pretrained( + ... "Salesforce/blipdiffusion", torch_dtype=torch.float16 + ... ).to("cuda") + + + >>> cond_subject = "dog" + >>> tgt_subject = "dog" + >>> text_prompt_input = "swimming underwater" + + >>> cond_image = load_image( + ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/dog.jpg" + ... ) + >>> guidance_scale = 7.5 + >>> num_inference_steps = 25 + >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" + + + >>> output = blip_diffusion_pipe( + ... text_prompt_input, + ... cond_image, + ... cond_subject, + ... tgt_subject, + ... guidance_scale=guidance_scale, + ... num_inference_steps=num_inference_steps, + ... neg_prompt=negative_prompt, + ... height=512, + ... width=512, + ... ).images + >>> output[0].save("image.png") + ``` +""" + + +class BlipDiffusionPipeline(DiffusionPipeline): + """ + Pipeline for Zero-Shot Subject Driven Generation using Blip Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
+ + Args: + tokenizer ([`CLIPTokenizer`]): + Tokenizer for the text encoder + text_encoder ([`ContextCLIPTextModel`]): + Text encoder to encode the text prompt + vae ([`AutoencoderKL`]): + VAE model to map the latents to the image + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + scheduler ([`PNDMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + qformer ([`Blip2QFormerModel`]): + QFormer model to get multi-modal embeddings from the text and image. + image_processor ([`BlipImageProcessor`]): + Image Processor to preprocess and postprocess the image. + ctx_begin_pos (int, `optional`, defaults to 2): + Position of the context token in the text encoder. + """ + + model_cpu_offload_seq = "qformer->text_encoder->unet->vae" + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: ContextCLIPTextModel, + vae: AutoencoderKL, + unet: UNet2DConditionModel, + scheduler: PNDMScheduler, + qformer: Blip2QFormerModel, + image_processor: BlipImageProcessor, + ctx_begin_pos: int = 2, + mean: List[float] = None, + std: List[float] = None, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + vae=vae, + unet=unet, + scheduler=scheduler, + qformer=qformer, + image_processor=image_processor, + ) + self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) + + def get_query_embeddings(self, input_image, src_subject): + return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) + + # from the original Blip Diffusion code, speciefies the target subject and augments the prompt by repeating it + def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): + rv = [] + for prompt, tgt_subject in zip(prompts, tgt_subjects): + prompt = f"a {tgt_subject} {prompt.strip()}" + # a trick to amplify the prompt + rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) + + return rv + + # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def encode_prompt(self, query_embeds, prompt, device=None): + device = device or self._execution_device + + # embeddings for prompt, with query_embeds as context + max_len = self.text_encoder.text_model.config.max_position_embeddings + max_len -= self.qformer.config.num_query_tokens + + tokenized_prompt = self.tokenizer( + prompt, + padding="max_length", + truncation=True, + max_length=max_len, + return_tensors="pt", + ).to(device) + + batch_size = query_embeds.shape[0] + ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size + + text_embeddings = self.text_encoder( + input_ids=tokenized_prompt.input_ids, + ctx_embeddings=query_embeds, + ctx_begin_pos=ctx_begin_pos, + )[0] + + return text_embeddings + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: List[str], + reference_image: PIL.Image.Image, + source_subject_category: List[str], + target_subject_category: List[str], + latents: Optional[torch.Tensor] = None, + guidance_scale: float = 7.5, + height: int = 512, + width: int = 512, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + neg_prompt: Optional[str] = "", + prompt_strength: float = 1.0, + prompt_reps: int = 20, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`List[str]`): + The prompt or prompts to guide the image generation. + reference_image (`PIL.Image.Image`): + The reference image to condition the generation on. + source_subject_category (`List[str]`): + The source subject category. + target_subject_category (`List[str]`): + The target subject category. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by random sampling. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + height (`int`, *optional*, defaults to 512): + The height of the generated image. + width (`int`, *optional*, defaults to 512): + The width of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + neg_prompt (`str`, *optional*, defaults to ""): + The prompt or prompts not to guide the image generation. 
Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_strength (`float`, *optional*, defaults to 1.0): + The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps + to amplify the prompt. + prompt_reps (`int`, *optional*, defaults to 20): + The number of times the prompt is repeated along with prompt_strength to amplify the prompt. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + reference_image = self.image_processor.preprocess( + reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" + )["pixel_values"] + reference_image = reference_image.to(device) + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(source_subject_category, str): + source_subject_category = [source_subject_category] + if isinstance(target_subject_category, str): + target_subject_category = [target_subject_category] + + batch_size = len(prompt) + + prompt = self._build_prompt( + prompts=prompt, + tgt_subjects=target_subject_category, + prompt_strength=prompt_strength, + prompt_reps=prompt_reps, + ) + query_embeds = self.get_query_embeddings(reference_image, source_subject_category) + text_embeddings = self.encode_prompt(query_embeds, prompt, device) + do_classifier_free_guidance = guidance_scale > 1.0 + if do_classifier_free_guidance: + max_length = self.text_encoder.text_model.config.max_position_embeddings + + uncond_input = self.tokenizer( + [neg_prompt] * batch_size, + padding="max_length", + max_length=max_length, + return_tensors="pt", + ) + uncond_embeddings = self.text_encoder( + input_ids=uncond_input.input_ids.to(device), + ctx_embeddings=None, + )[0] + # For classifier free guidance, we need to do two forward passes. 
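+            # (the conditional pass uses the query embeddings as context; the unconditional pass does not)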
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + + scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) + latents = self.prepare_latents( + batch_size=batch_size, + num_channels=self.unet.config.in_channels, + height=height // scale_down_factor, + width=width // scale_down_factor, + generator=generator, + latents=latents, + dtype=self.unet.dtype, + device=device, + ) + # set timesteps + extra_set_kwargs = {} + self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + do_classifier_free_guidance = guidance_scale > 1.0 + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + noise_pred = self.unet( + latent_model_input, + timestep=t, + encoder_hidden_states=text_embeddings, + down_block_additional_residuals=None, + mid_block_additional_residual=None, + )["sample"] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + t, + latents, + )["prev_sample"] + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/cogvideo/__init__.py b/diffusers3/pipelines/cogvideo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..baf0de3482c3850423da2b77a21a8cc774d0a993 --- /dev/null +++ b/diffusers3/pipelines/cogvideo/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_cogvideox"] = ["CogVideoXPipeline"] + _import_structure["pipeline_cogvideox_video2video"] = ["CogVideoXVideoToVideoPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_cogvideox import CogVideoXPipeline + from .pipeline_cogvideox_video2video import CogVideoXVideoToVideoPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/cogvideo/pipeline_cogvideox.py b/diffusers3/pipelines/cogvideo/pipeline_cogvideox.py new file mode 100644 index 
0000000000000000000000000000000000000000..3af47c1774377b2c1ceb056d8eb6143a037a2a77 --- /dev/null +++ b/diffusers3/pipelines/cogvideo/pipeline_cogvideox.py @@ -0,0 +1,730 @@ +# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel +from ...models.embeddings import get_3d_rotary_pos_embed +from ...pipelines.pipeline_utils import DiffusionPipeline +from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler +from ...utils import logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from .pipeline_output import CogVideoXPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> import torch + >>> from diffusers import CogVideoXPipeline + >>> from diffusers.utils import export_to_video + + >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b" + >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda") + >>> prompt = ( + ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " + ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " + ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " + ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " + ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " + ... "atmosphere of this unique musical performance." + ... 
) + >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0] + >>> export_to_video(video, "output.mp4", fps=8) + ``` +""" + + +# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid +def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): + tw = tgt_width + th = tgt_height + h, w = src + r = h / w + if r > (th / tw): + resize_height = th + resize_width = int(round(th / h * w)) + else: + resize_width = tw + resize_height = int(round(tw / w * h)) + + crop_top = int(round((th - resize_height) / 2.0)) + crop_left = int(round((tw - resize_width) / 2.0)) + + return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class CogVideoXPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-video generation using CogVideoX. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. CogVideoX uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the + [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. + tokenizer (`T5Tokenizer`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + transformer ([`CogVideoXTransformer3DModel`]): + A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded video latents. + """ + + _optional_components = [] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKLCogVideoX, + transformer: CogVideoXTransformer3DModel, + scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + self.vae_scale_factor_spatial = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.vae_scale_factor_temporal = ( + self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 + ) + + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_videos_per_prompt: int = 1, + max_sequence_length: int = 226, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = 
self.text_encoder(text_input_ids.to(device))[0] + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds + + def encode_prompt( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + do_classifier_free_guidance: bool = True, + num_videos_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + max_sequence_length: int = 226, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + Whether to use classifier free guidance or not. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + Number of videos that should be generated per prompt. torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + device: (`torch.device`, *optional*): + torch device + dtype: (`torch.dtype`, *optional*): + torch dtype + """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt, + num_videos_per_prompt=num_videos_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + dtype=dtype, + ) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + + negative_prompt_embeds = self._get_t5_prompt_embeds( + prompt=negative_prompt, + num_videos_per_prompt=num_videos_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + dtype=dtype, + ) + + return prompt_embeds, negative_prompt_embeds + + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + (num_frames - 1) // self.vae_scale_factor_temporal + 1, + num_channels_latents, + height // self.vae_scale_factor_spatial, + width // self.vae_scale_factor_spatial, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def decode_latents(self, latents: torch.Tensor) -> torch.Tensor: + latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width] + latents = 1 / self.vae.config.scaling_factor * latents + + frames = self.vae.decode(latents).sample + return frames + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + callback_on_step_end_tensor_inputs, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def fuse_qkv_projections(self) -> None: + r"""Enables fused QKV projections.""" + self.fusing_transformer = True + self.transformer.fuse_qkv_projections() + + def unfuse_qkv_projections(self) -> None: + r"""Disable QKV projection fusion if enabled.""" + if not self.fusing_transformer: + logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.") + else: + self.transformer.unfuse_qkv_projections() + self.fusing_transformer = False + + def _prepare_rotary_positional_embeddings( + self, + height: int, + width: int, + num_frames: int, + device: torch.device, + ) -> Tuple[torch.Tensor, torch.Tensor]: + grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + + grid_crops_coords = get_resize_crop_region_for_grid( + (grid_height, grid_width), base_size_width, base_size_height + ) + freqs_cos, freqs_sin = get_3d_rotary_pos_embed( + embed_dim=self.transformer.config.attention_head_dim, + crops_coords=grid_crops_coords, + grid_size=(grid_height, grid_width), + temporal_size=num_frames, + ) + + freqs_cos = freqs_cos.to(device=device) + freqs_sin = freqs_sin.to(device=device) + return freqs_cos, freqs_sin + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 480, + width: int = 720, + num_frames: int = 49, + num_inference_steps: int = 50, + timesteps: Optional[List[int]] = None, + guidance_scale: float = 6, + use_dynamic_cfg: bool = False, + num_videos_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: str = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + 
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 226, + ) -> Union[CogVideoXPipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_frames (`int`, defaults to `48`): + Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will + contain 1 extra frame because CogVideoX is conditioned with (num_seconds * fps + 1) frames where + num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that + needs to be satisfied is that of divisibility mentioned above. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int`, defaults to `226`): + Maximum sequence length in encoded prompt. Must be consistent with + `self.transformer.config.max_text_seq_length` otherwise may lead to poor results. + + Examples: + + Returns: + [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] or `tuple`: + [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + if num_frames > 49: + raise ValueError( + "The number of frames must be less than 49 for now due to static positional embeddings. This will be updated in the future to remove this limitation." + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial + width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + callback_on_step_end_tensor_inputs, + prompt_embeds, + negative_prompt_embeds, + ) + self._guidance_scale = guidance_scale + self._interrupt = False + + # 2. Default call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + negative_prompt, + do_classifier_free_guidance, + num_videos_per_prompt=num_videos_per_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + max_sequence_length=max_sequence_length, + device=device, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + self._num_timesteps = len(timesteps) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + latent_channels, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Create rotary embeds if required + image_rotary_emb = ( + self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) + if self.transformer.config.use_rotary_positional_embeddings + else None + ) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + # for DPM-solver++ + old_pred_original_sample = None + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + hidden_states=latent_model_input, + encoder_hidden_states=prompt_embeds, + timestep=timestep, + image_rotary_emb=image_rotary_emb, + return_dict=False, + )[0] + noise_pred = noise_pred.float() + + # perform guidance + if use_dynamic_cfg: + self._guidance_scale = 1 + guidance_scale * ( + (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2 + ) + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + if not isinstance(self.scheduler, CogVideoXDPMScheduler): + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + else: + latents, old_pred_original_sample = self.scheduler.step( + noise_pred, + old_pred_original_sample, + t, + timesteps[i - 1] if i > 0 else None, + latents, + **extra_step_kwargs, + return_dict=False, + ) + latents = latents.to(prompt_embeds.dtype) + + # call the callback, if provided + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % 
self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + video = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video, output_type=output_type) + else: + video = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return CogVideoXPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/cogvideo/pipeline_cogvideox_video2video.py b/diffusers3/pipelines/cogvideo/pipeline_cogvideox_video2video.py new file mode 100644 index 0000000000000000000000000000000000000000..16686d1ab7ac734ea1007a7358c6cdd4349cb2b9 --- /dev/null +++ b/diffusers3/pipelines/cogvideo/pipeline_cogvideox_video2video.py @@ -0,0 +1,812 @@ +# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +from PIL import Image +from transformers import T5EncoderModel, T5Tokenizer + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel +from ...models.embeddings import get_3d_rotary_pos_embed +from ...pipelines.pipeline_utils import DiffusionPipeline +from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from .pipeline_output import CogVideoXPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> import torch + >>> from diffusers import CogVideoXDPMScheduler, CogVideoXVideoToVideoPipeline + >>> from diffusers.utils import export_to_video, load_video + + >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b" + >>> pipe = CogVideoXVideoToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + >>> pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config) + + >>> input_video = load_video( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hiker.mp4" + ... ) + >>> prompt = ( + ... "An astronaut stands triumphantly at the peak of a towering mountain. Panorama of rugged peaks and " + ... "valleys. Very futuristic vibe and animated aesthetic. Highlights of purple and golden colors in " + ... "the scene. The sky is looks like an animated/cartoonish dream of galaxies, nebulae, stars, planets, " + ... "moons, but the remainder of the scene is mostly realistic." + ... ) + + >>> video = pipe( + ... video=input_video, prompt=prompt, strength=0.8, guidance_scale=6, num_inference_steps=50 + ... 
).frames[0] + >>> export_to_video(video, "output.mp4", fps=8) + ``` +""" + + +# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid +def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): + tw = tgt_width + th = tgt_height + h, w = src + r = h / w + if r > (th / tw): + resize_height = th + resize_width = int(round(th / h * w)) + else: + resize_width = tw + resize_height = int(round(tw / w * h)) + + crop_top = int(round((th - resize_height) / 2.0)) + crop_left = int(round((tw - resize_width) / 2.0)) + + return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class CogVideoXVideoToVideoPipeline(DiffusionPipeline): + r""" + Pipeline for video-to-video generation using CogVideoX. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. CogVideoX uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the + [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. + tokenizer (`T5Tokenizer`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + transformer ([`CogVideoXTransformer3DModel`]): + A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded video latents. 
+ """ + + _optional_components = [] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKLCogVideoX, + transformer: CogVideoXTransformer3DModel, + scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + self.vae_scale_factor_spatial = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.vae_scale_factor_temporal = ( + self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 + ) + + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + + # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_videos_per_prompt: int = 1, + max_sequence_length: int = 226, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + do_classifier_free_guidance: bool = True, + num_videos_per_prompt: int = 1, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + max_sequence_length: int = 226, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + Whether to use classifier free guidance or not. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + Number of videos that should be generated per prompt. torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + device: (`torch.device`, *optional*): + torch device + dtype: (`torch.dtype`, *optional*): + torch dtype + """ + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt, + num_videos_per_prompt=num_videos_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + dtype=dtype, + ) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + + negative_prompt_embeds = self._get_t5_prompt_embeds( + prompt=negative_prompt, + num_videos_per_prompt=num_videos_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + dtype=dtype, + ) + + return prompt_embeds, negative_prompt_embeds + + def prepare_latents( + self, + video: Optional[torch.Tensor] = None, + batch_size: int = 1, + num_channels_latents: int = 16, + height: int = 60, + width: int = 90, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.Tensor] = None, + timestep: Optional[torch.Tensor] = None, + ): + num_frames = (video.size(2) - 1) // self.vae_scale_factor_temporal + 1 if latents is None else latents.size(1) + + shape = ( + batch_size, + num_frames, + num_channels_latents, + height // self.vae_scale_factor_spatial, + width // self.vae_scale_factor_spatial, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + if isinstance(generator, list): + if len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + init_latents = [ + retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size) + ] + else: + init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video] + + init_latents = torch.cat(init_latents, dim=0).to(dtype).permute(0, 2, 1, 3, 4) # [B, F, C, H, W] + init_latents = self.vae.config.scaling_factor * init_latents + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.add_noise(init_latents, noise, timestep) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.decode_latents + def decode_latents(self, latents: torch.Tensor) -> torch.Tensor: + latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width] + latents = 1 / self.vae.config.scaling_factor * latents + + frames = self.vae.decode(latents).sample + return frames + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff_video2video.AnimateDiffVideoToVideoPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, timesteps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + strength, + negative_prompt, + callback_on_step_end_tensor_inputs, + video=None, + latents=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if video is not None and latents is not None: + raise ValueError("Only one of `video` or `latents` should be provided") + + # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.fuse_qkv_projections + def fuse_qkv_projections(self) -> None: + r"""Enables fused QKV projections.""" + self.fusing_transformer = True + self.transformer.fuse_qkv_projections() + + # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.unfuse_qkv_projections + def unfuse_qkv_projections(self) -> None: + r"""Disable QKV projection fusion if enabled.""" + if not self.fusing_transformer: + logger.warning("The Transformer was not initially fused for QKV projections. 
Doing nothing.") + else: + self.transformer.unfuse_qkv_projections() + self.fusing_transformer = False + + # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._prepare_rotary_positional_embeddings + def _prepare_rotary_positional_embeddings( + self, + height: int, + width: int, + num_frames: int, + device: torch.device, + ) -> Tuple[torch.Tensor, torch.Tensor]: + grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + + grid_crops_coords = get_resize_crop_region_for_grid( + (grid_height, grid_width), base_size_width, base_size_height + ) + freqs_cos, freqs_sin = get_3d_rotary_pos_embed( + embed_dim=self.transformer.config.attention_head_dim, + crops_coords=grid_crops_coords, + grid_size=(grid_height, grid_width), + temporal_size=num_frames, + ) + + freqs_cos = freqs_cos.to(device=device) + freqs_sin = freqs_sin.to(device=device) + return freqs_cos, freqs_sin + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + video: List[Image.Image] = None, + prompt: Optional[Union[str, List[str]]] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 480, + width: int = 720, + num_inference_steps: int = 50, + timesteps: Optional[List[int]] = None, + strength: float = 0.8, + guidance_scale: float = 6, + use_dynamic_cfg: bool = False, + num_videos_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: str = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 226, + ) -> Union[CogVideoXPipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + video (`List[PIL.Image.Image]`): + The input video to condition the generation on. Must be a list of images/frames of the video. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. 
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + strength (`float`, *optional*, defaults to 0.8): + Higher strength leads to more differences between original video and generated video. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int`, defaults to `226`): + Maximum sequence length in encoded prompt. 
Must be consistent with + `self.transformer.config.max_text_seq_length` otherwise may lead to poor results. + + Examples: + + Returns: + [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] or `tuple`: + [`~pipelines.cogvideo.pipeline_output.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial + width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + strength, + negative_prompt, + callback_on_step_end_tensor_inputs, + prompt_embeds, + negative_prompt_embeds, + ) + self._guidance_scale = guidance_scale + self._interrupt = False + + # 2. Default call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + negative_prompt, + do_classifier_free_guidance, + num_videos_per_prompt=num_videos_per_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + max_sequence_length=max_sequence_length, + device=device, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, timesteps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + self._num_timesteps = len(timesteps) + + # 5. Prepare latents + if latents is None: + video = self.video_processor.preprocess_video(video, height=height, width=width) + video = video.to(device=device, dtype=prompt_embeds.dtype) + + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + video, + batch_size * num_videos_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + latent_timestep, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Create rotary embeds if required + image_rotary_emb = ( + self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) + if self.transformer.config.use_rotary_positional_embeddings + else None + ) + + # 8. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + # for DPM-solver++ + old_pred_original_sample = None + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + hidden_states=latent_model_input, + encoder_hidden_states=prompt_embeds, + timestep=timestep, + image_rotary_emb=image_rotary_emb, + return_dict=False, + )[0] + noise_pred = noise_pred.float() + + # perform guidance + if use_dynamic_cfg: + self._guidance_scale = 1 + guidance_scale * ( + (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2 + ) + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + if not isinstance(self.scheduler, CogVideoXDPMScheduler): + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + else: + latents, old_pred_original_sample = self.scheduler.step( + noise_pred, + old_pred_original_sample, + t, + timesteps[i - 1] if i > 0 else None, + latents, + **extra_step_kwargs, + return_dict=False, + ) + latents = latents.to(prompt_embeds.dtype) + + # call the callback, if provided + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + video = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video, output_type=output_type) + else: + video = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return CogVideoXPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/cogvideo/pipeline_output.py b/diffusers3/pipelines/cogvideo/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..3de030dd6928db49ab0bc4d11868a93ac98dea50 --- /dev/null +++ b/diffusers3/pipelines/cogvideo/pipeline_output.py @@ -0,0 +1,20 @@ +from dataclasses import dataclass + +import torch + +from diffusers.utils import BaseOutput + + +@dataclass +class CogVideoXPipelineOutput(BaseOutput): + r""" + Output class for CogVideo pipelines. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing + denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)`. 
+ """ + + frames: torch.Tensor diff --git a/diffusers3/pipelines/consistency_models/__init__.py b/diffusers3/pipelines/consistency_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..162d91c010acf95aa2daf87c51ab1e0c68361fd5 --- /dev/null +++ b/diffusers3/pipelines/consistency_models/__init__.py @@ -0,0 +1,24 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, +) + + +_import_structure = { + "pipeline_consistency_models": ["ConsistencyModelPipeline"], +} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_consistency_models import ConsistencyModelPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/consistency_models/pipeline_consistency_models.py b/diffusers3/pipelines/consistency_models/pipeline_consistency_models.py new file mode 100644 index 0000000000000000000000000000000000000000..d2f67a6989175a5a31419fc7d8bd324f8f2099c2 --- /dev/null +++ b/diffusers3/pipelines/consistency_models/pipeline_consistency_models.py @@ -0,0 +1,275 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import torch + +from ...models import UNet2DModel +from ...schedulers import CMStochasticIterativeScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + + >>> from diffusers import ConsistencyModelPipeline + + >>> device = "cuda" + >>> # Load the cd_imagenet64_l2 checkpoint. 
+ >>> model_id_or_path = "openai/diffusers-cd_imagenet64_l2" + >>> pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe.to(device) + + >>> # Onestep Sampling + >>> image = pipe(num_inference_steps=1).images[0] + >>> image.save("cd_imagenet64_l2_onestep_sample.png") + + >>> # Onestep sampling, class-conditional image generation + >>> # ImageNet-64 class label 145 corresponds to king penguins + >>> image = pipe(num_inference_steps=1, class_labels=145).images[0] + >>> image.save("cd_imagenet64_l2_onestep_sample_penguin.png") + + >>> # Multistep sampling, class-conditional image generation + >>> # Timesteps can be explicitly specified; the particular timesteps below are from the original GitHub repo: + >>> # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L77 + >>> image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=145).images[0] + >>> image.save("cd_imagenet64_l2_multistep_sample_penguin.png") + ``` +""" + + +class ConsistencyModelPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional or class-conditional image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only + compatible with [`CMStochasticIterativeScheduler`]. + """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + ) + + self.safety_checker = None + + def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Follows diffusers.VaeImageProcessor.postprocess + def postprocess_image(self, sample: torch.Tensor, output_type: str = "pil"): + if output_type not in ["pt", "np", "pil"]: + raise ValueError( + f"output_type={output_type} is not supported. 
Make sure to choose one of ['pt', 'np', or 'pil']" + ) + + # Equivalent to diffusers.VaeImageProcessor.denormalize + sample = (sample / 2 + 0.5).clamp(0, 1) + if output_type == "pt": + return sample + + # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy + sample = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "np": + return sample + + # Output_type must be 'pil' + sample = self.numpy_to_pil(sample) + return sample + + def prepare_class_labels(self, batch_size, device, class_labels=None): + if self.unet.config.num_class_embeds is not None: + if isinstance(class_labels, list): + class_labels = torch.tensor(class_labels, dtype=torch.int) + elif isinstance(class_labels, int): + assert batch_size == 1, "Batch size must be 1 if classes is an int" + class_labels = torch.tensor([class_labels], dtype=torch.int) + elif class_labels is None: + # Randomly generate batch_size class labels + # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils + class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) + class_labels = class_labels.to(device) + else: + class_labels = None + return class_labels + + def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): + if num_inference_steps is None and timesteps is None: + raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") + + if num_inference_steps is not None and timesteps is not None: + logger.warning( + f"Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied;" + " `timesteps` will be used over `num_inference_steps`." + ) + + if latents is not None: + expected_shape = (batch_size, 3, img_size, img_size) + if latents.shape != expected_shape: + raise ValueError(f"The shape of latents is {latents.shape} but is expected to be {expected_shape}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + batch_size: int = 1, + class_labels: Optional[Union[torch.Tensor, List[int], int]] = None, + num_inference_steps: int = 1, + timesteps: List[int] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + class_labels (`torch.Tensor` or `List[int]` or `int`, *optional*): + Optional class labels for conditioning class-conditional consistency models. Not used if the model is + not class-conditional. + num_inference_steps (`int`, *optional*, defaults to 1): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. 
+ generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + # 0. Prepare call parameters + img_size = self.unet.config.sample_size + device = self._execution_device + + # 1. Check inputs + self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) + + # 2. Prepare image latents + # Sample image latents x_0 ~ N(0, sigma_0^2 * I) + sample = self.prepare_latents( + batch_size=batch_size, + num_channels=self.unet.config.in_channels, + height=img_size, + width=img_size, + dtype=self.unet.dtype, + device=device, + generator=generator, + latents=latents, + ) + + # 3. Handle class_labels for class-conditional models + class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # 5. Denoising loop + # Multistep sampling: implements Algorithm 1 in the paper + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + scaled_sample = self.scheduler.scale_model_input(sample, t) + model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] + + sample = self.scheduler.step(model_output, t, sample, generator=generator)[0] + + # call the callback, if provided + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, sample) + + # 6. 
Post-process image sample + image = self.postprocess_image(sample, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/Untitled-checkpoint.ipynb b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/Untitled-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..566a448147016e40096e443031434bc0859a4dc1 --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/Untitled-checkpoint.ipynb @@ -0,0 +1,35 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "id": "fc52916d-3847-46b8-9f70-8e4089f7e7cf", + "metadata": {}, + "outputs": [], + "source": [ + "from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/__init__-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..8fac3c5db4a8b642e703422fa017610cc71c4de0 --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1,91 @@ +from typing import TYPE_CHECKING + +# from ...utils import ( +# DIFFUSERS_SLOW_IMPORT, +# OptionalDependencyNotAvailable, +# _LazyModule, +# get_objects_from_module, +# is_flax_available, +# is_torch_available, +# is_transformers_available, +# ) + +from diffusers.utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["multicontrolnet"] = ["MultiControlNetModel"] + _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] + _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] + _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] + _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] + _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] + _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] + _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] +try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_flax_and_transformers_objects # noqa F403 + + 
_dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .multicontrolnet import MultiControlNetModel + from .pipeline_controlnet import StableDiffusionControlNetPipeline + from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline + from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline + from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline + from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline + from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline + from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 + else: + from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/callbacks-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/callbacks-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..38542407e31fa1255eb26b563632c7a9f3d2fded --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/callbacks-checkpoint.py @@ -0,0 +1,156 @@ +from typing import Any, Dict, List + +from .configuration_utils import ConfigMixin, register_to_config +from .utils import CONFIG_NAME + + +class PipelineCallback(ConfigMixin): + """ + Base class for all the official callbacks used in a pipeline. This class provides a structure for implementing + custom callbacks and ensures that all callbacks have a consistent interface. + + Please implement the following: + `tensor_inputs`: This should return a list of tensor inputs specific to your callback. You will only be able to + include + variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. + `callback_fn`: This method defines the core functionality of your callback. 
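+
+    A minimal sketch of a custom callback (illustrative only; the class name is hypothetical, and the requested
+    tensor must be listed in the host pipeline's `_callback_tensor_inputs`):
+
+    ```py
+    class LatentsLoggerCallback(PipelineCallback):
+        tensor_inputs = ["latents"]
+
+        def callback_fn(self, pipeline, step_index, timestep, callback_kwargs):
+            # Inspect (or modify) the tensors requested via `tensor_inputs`, then return callback_kwargs.
+            latents = callback_kwargs[self.tensor_inputs[0]]
+            print(f"step {step_index}: latents shape {tuple(latents.shape)}")
+            return callback_kwargs
+    ```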
+ """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): + super().__init__() + + if (cutoff_step_ratio is None and cutoff_step_index is None) or ( + cutoff_step_ratio is not None and cutoff_step_index is not None + ): + raise ValueError("Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.") + + if cutoff_step_ratio is not None and ( + not isinstance(cutoff_step_ratio, float) or not (0.0 <= cutoff_step_ratio <= 1.0) + ): + raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.") + + @property + def tensor_inputs(self) -> List[str]: + raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}") + + def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: + raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}") + + def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + return self.callback_fn(pipeline, step_index, timestep, callback_kwargs) + + +class MultiPipelineCallbacks: + """ + This class is designed to handle multiple pipeline callbacks. It accepts a list of PipelineCallback objects and + provides a unified interface for calling all of them. + """ + + def __init__(self, callbacks: List[PipelineCallback]): + self.callbacks = callbacks + + @property + def tensor_inputs(self) -> List[str]: + return [input for callback in self.callbacks for input in callback.tensor_inputs] + + def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + """ + Calls all the callbacks in order with the given arguments and returns the final callback_kwargs. + """ + for callback in self.callbacks: + callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) + + return callback_kwargs + + +class SDCFGCutoffCallback(PipelineCallback): + """ + Callback function for Stable Diffusion Pipelines. After certain number of steps (set by `cutoff_step_ratio` or + `cutoff_step_index`), this callback will disable the CFG. + + Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. + """ + + tensor_inputs = ["prompt_embeds"] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + prompt_embeds = callback_kwargs[self.tensor_inputs[0]] + prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. + + pipeline._guidance_scale = 0.0 + + callback_kwargs[self.tensor_inputs[0]] = prompt_embeds + return callback_kwargs + + +class SDXLCFGCutoffCallback(PipelineCallback): + """ + Callback function for Stable Diffusion XL Pipelines. After certain number of steps (set by `cutoff_step_ratio` or + `cutoff_step_index`), this callback will disable the CFG. + + Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. 
+ """ + + tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + prompt_embeds = callback_kwargs[self.tensor_inputs[0]] + prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. + + add_text_embeds = callback_kwargs[self.tensor_inputs[1]] + add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens + + add_time_ids = callback_kwargs[self.tensor_inputs[2]] + add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector + + pipeline._guidance_scale = 0.0 + + callback_kwargs[self.tensor_inputs[0]] = prompt_embeds + callback_kwargs[self.tensor_inputs[1]] = add_text_embeds + callback_kwargs[self.tensor_inputs[2]] = add_time_ids + return callback_kwargs + + +class IPAdapterScaleCutoffCallback(PipelineCallback): + """ + Callback function for any pipeline that inherits `IPAdapterMixin`. After certain number of steps (set by + `cutoff_step_ratio` or `cutoff_step_index`), this callback will set the IP Adapter scale to `0.0`. + + Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step. + """ + + tensor_inputs = [] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + pipeline.set_ip_adapter_scale(0.0) + return callback_kwargs diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/multicontrolnet-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/multicontrolnet-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c5ec6eed0379e7d92fd92abcabfce3c466fc45 --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/multicontrolnet-checkpoint.py @@ -0,0 +1,183 @@ +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn + +from ...models.controlnet import ControlNetModel, ControlNetOutput +from ...models.modeling_utils import ModelMixin +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class MultiControlNetModel(ModelMixin): + r""" + Multiple `ControlNetModel` wrapper class for Multi-ControlNet + + This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be + compatible with `ControlNetModel`. + + Args: + controlnets (`List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. You must set multiple + `ControlNetModel` as a list. 
+ """ + + def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]): + super().__init__() + self.nets = nn.ModuleList(controlnets) + + def forward( + self, + sample: torch.Tensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + controlnet_cond: List[torch.tensor], + conditioning_scale: List[float], + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guess_mode: bool = False, + return_dict: bool = True, + ) -> Union[ControlNetOutput, Tuple]: + for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): + down_samples, mid_sample = controlnet( + sample=sample, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + controlnet_cond=image, + conditioning_scale=scale, + class_labels=class_labels, + timestep_cond=timestep_cond, + attention_mask=attention_mask, + added_cond_kwargs=added_cond_kwargs, + cross_attention_kwargs=cross_attention_kwargs, + guess_mode=guess_mode, + return_dict=return_dict, + ) + + # merge samples + if i == 0: + down_block_res_samples, mid_block_res_sample = down_samples, mid_sample + else: + down_block_res_samples = [ + samples_prev + samples_curr + for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) + ] + mid_block_res_sample += mid_sample + + return down_block_res_samples, mid_block_res_sample + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + save_function: Callable = None, + safe_serialization: bool = True, + variant: Optional[str] = None, + ): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + `[`~pipelines.controlnet.MultiControlNetModel.from_pretrained`]` class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful when in distributed training like + TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on + the main process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful on distributed training like TPUs when one + need to replace `torch.save` by another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + variant (`str`, *optional*): + If specified, weights are saved in the format pytorch_model..bin. + """ + for idx, controlnet in enumerate(self.nets): + suffix = "" if idx == 0 else f"_{idx}" + controlnet.save_pretrained( + save_directory + suffix, + is_main_process=is_main_process, + save_function=save_function, + safe_serialization=safe_serialization, + variant=variant, + ) + + @classmethod + def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). 
To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_path (`os.PathLike`): + A path to a *directory* containing model weights saved using + [`~diffusers.pipelines.controlnet.MultiControlNetModel.save_pretrained`], e.g., + `./my_model_directory/controlnet`. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype + will be automatically derived from the model's weights. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading by not initializing the weights and only loading the pre-trained weights. This + also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the + model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, + setting this argument to `True` will raise an error. + variant (`str`, *optional*): + If specified load weights from `variant` filename, *e.g.* pytorch_model..bin. `variant` is + ignored when using `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the + `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from + `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. + """ + idx = 0 + controlnets = [] + + # load controlnet and append to list until no controlnet directory exists anymore + # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` + # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
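+        #
+        # Illustrative layout (directory names follow the comment above) and the matching call:
+        #   ./mydirectory/controlnet      <- first ControlNetModel
+        #   ./mydirectory/controlnet_1    <- second ControlNetModel
+        #   MultiControlNetModel.from_pretrained("./mydirectory/controlnet")  # wraps both nets into one module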
+ model_path_to_load = pretrained_model_path + while os.path.isdir(model_path_to_load): + controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs) + controlnets.append(controlnet) + + idx += 1 + model_path_to_load = pretrained_model_path + f"_{idx}" + + logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.") + + if len(controlnets) == 0: + raise ValueError( + f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." + ) + + return cls(controlnets) diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..88be64acdf65764415354c9216d30a66136db547 --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl-checkpoint.py @@ -0,0 +1,1889 @@ +# -*- coding: utf-8 -*- +# text_encoder_lora_scale# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) +from torchvision import transforms +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +# ์ˆ˜์ • +from diffusers import PNDMScheduler + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, 
AutoencoderKL
+        >>> from diffusers.utils import load_image
+        >>> import numpy as np
+        >>> import torch
+
+        >>> import cv2
+        >>> from PIL import Image
+
+        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
+        >>> negative_prompt = "low quality, bad quality, sketches"
+
+        >>> # download an image
+        >>> image = load_image(
+        ...     "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
+        ... )
+
+        >>> # initialize the models and pipeline
+        >>> controlnet_conditioning_scale = 0.5  # recommended for good generalization
+        >>> controlnet = ControlNetModel.from_pretrained(
+        ...     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
+        ... )
+        >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+        >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+        ...     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
+        ... )
+        >>> pipe.enable_model_cpu_offload()
+
+        >>> # get canny image
+        >>> image = np.array(image)
+        >>> image = cv2.Canny(image, 100, 200)
+        >>> image = image[:, :, None]
+        >>> image = np.concatenate([image, image, image], axis=2)
+        >>> canny_image = Image.fromarray(image)
+
+        >>> # generate image
+        >>> image = pipe(
+        ...     prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
+        ... ).images[0]
+        ```
+"""
+
+def prepare_mask(
+    mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor]
+) -> torch.Tensor:
+    """
+    Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that the input will be converted to a
+    ``torch.Tensor`` with shape ``batch x channels x height x width``, where ``channels`` is ``1`` for the ``mask``.
+
+    The ``mask`` will be binarized (``mask >= 0.5``) and cast to ``torch.float32``.
+
+    Args:
+        mask (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): The mask to apply to the image, i.e. regions to
+            inpaint. It can be a ``PIL.Image``, a ``height x width`` ``np.ndarray``, a ``1 x height x width``
+            ``torch.Tensor``, or a ``batch x 1 x height x width`` ``torch.Tensor``.
+
+    Raises:
+        ValueError: if a ``torch.Tensor`` mask is not in the ``[0, 1]`` range.
+
+    Returns:
+        torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``.
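+
+    A short illustrative example (the file path is hypothetical; a 512x512 grayscale mask image is assumed):
+
+    ```py
+    mask = prepare_mask(PIL.Image.open("mask.png"))
+    assert mask.shape == (1, 1, 512, 512) and mask.dtype == torch.float32
+    ```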
+ """ + if isinstance(mask, torch.Tensor): + print("^^^^") + print("mask_1") + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + print("^^^^") + print("mask_2") + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. 
If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + + + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = 
tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + # and isinstance(ControlNetModel._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + + # depth map + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + + + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + print("***") + print("vae_1") + init_latents = torch.cat(init_latents, dim=0) + else: + print("***") + print("vae_2") + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + + latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + print("latents is None") + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
+ """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + shape_prompt: Union[str, List[str]] = None, + num: List[int] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + shape_prompt_embeds: Optional[torch.Tensor] = None, + shape_negative_prompt_embeds: Optional[torch.Tensor] = None, + shape_pooled_prompt_embeds: Optional[torch.Tensor] = None, + shape_negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + 
callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. 
The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. 
Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + + + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + + prompt_embeds_origin = prompt_embeds + ( + shape_prompt_embeds, + shape_negative_prompt_embeds, + shape_pooled_prompt_embeds, + shape_negative_pooled_prompt_embeds, + ) = self.encode_prompt( + shape_prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=shape_prompt_embeds, + negative_prompt_embeds=shape_negative_prompt_embeds, + pooled_prompt_embeds=shape_pooled_prompt_embeds, + negative_pooled_prompt_embeds=shape_negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + + + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + # 6. Prepare latent variables + + mask = prepare_mask(mask=mask_image) + + sketch_image = self.image_processor.preprocess(sketch_image) + + init_latents, noise= self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + shape_add_text_embeds = shape_pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + print("self.do_classifier_free_guidance: ", self.do_classifier_free_guidance) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + shape_prompt_embeds = torch.cat([shape_negative_prompt_embeds, shape_prompt_embeds], dim=0) + shape_add_text_embeds = torch.cat([shape_negative_pooled_prompt_embeds, shape_add_text_embeds], dim=0) + shape_add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + prompt_embeds_origin = prompt_embeds + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + shape_prompt_embeds = shape_prompt_embeds.to(device) + shape_add_text_embeds = shape_add_text_embeds.to(device) + shape_add_time_ids = shape_add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + + if i < num: + prompt_embeds = shape_prompt_embeds + else: + prompt_embeds = prompt_embeds_origin + + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
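+ # For illustration (typical SDXL shapes): each conditional-only residual d has batch size batch_size * num_images_per_prompt, + # so torch.cat([torch.zeros_like(d), d]) doubles it to line up with the classifier-free-guidance latent batch while + # leaving the unconditional half at zero.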
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + + latents_with_noise = self.scheduler.step( + noise_pred, + t, + latents_with_noise, + **extra_step_kwargs, + return_dict=False, + )[0] + + + # masking process: re-noise the initial (sketch) latents to the current timestep and keep them + # outside the mask, so that only the masked region is re-synthesized by the denoiser + tmp = t.unsqueeze(0) + init_latents_proper = self.scheduler.add_noise( + init_latents, noise, tmp + ).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + latents_with_noise = ( + mask * latents_with_noise + (1 - mask) * init_latents_proper + ) +# latents_with_noise = ( +# mask * latents_with_noise +# ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) +
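+ # Note: the line below denormalizes with these statistics (latents * latents_std / scaling_factor + latents_mean) + # before the VAE decode; without them, only the plain scaling_factor division is applied.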
latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents_with_noise = latents_with_noise / self.vae.config.scaling_factor + + image = self.vae.decode(latents_with_noise, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents_with_noise + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl3-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl3-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..c36a7ff12a2716025c0e21f1782488cb86359837 --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl3-checkpoint.py @@ -0,0 +1,1831 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) +from torchvision import transforms +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +# ์ˆ˜์ • +from diffusers import PNDMScheduler + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. 
This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` masks should be in the ``[0, 1]`` range. + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. + """ + if isinstance(mask, torch.Tensor): + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`.
+ + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. 
If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + + + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + 
lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
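As a quick illustration of the `clip_skip` behaviour described above: with `clip_skip=None` the pipeline reads `hidden_states[-2]` (SDXL always skips the final text-encoder layer), and with an explicit value it reads `hidden_states[-(clip_skip + 2)]`. The sketch below only makes that off-by-two indexing concrete; the layer count is illustrative and not tied to any particular checkpoint.

```py
def select_hidden_state(hidden_states, clip_skip=None):
    """Pick the hidden state the SDXL prompt encoder uses.

    hidden_states[-1] is the last layer; SDXL discards it and uses the penultimate
    layer by default, hence the extra "+ 2" offset when clip_skip is set.
    """
    if clip_skip is None:
        return hidden_states[-2]
    return hidden_states[-(clip_skip + 2)]


# With 13 entries (embedding output + 12 transformer layers), indices 0..12:
layers = list(range(13))
assert select_hidden_state(layers) == 11               # penultimate layer
assert select_hidden_state(layers, clip_skip=1) == 10  # skip one additional layer
```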
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + # and isinstance(ControlNetModel._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. 
[[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + + # depth map + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + + + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + + latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
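The comment above is the rationale for the `do_classifier_free_guidance` property that follows, and for the guidance step later in the denoising loop where the two halves of the CFG-batched prediction are recombined as `uncond + w * (text - uncond)`. A minimal numeric sketch of that recombination follows; the tensor values are made up purely to show the arithmetic.

```py
import torch


def apply_cfg(noise_pred, guidance_scale):
    """Split a CFG-batched prediction into (uncond, text) halves and recombine them."""
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


noise_pred = torch.cat([torch.zeros(1, 4, 8, 8), torch.ones(1, 4, 8, 8)])  # [uncond, text]
out = apply_cfg(noise_pred, guidance_scale=5.0)
assert torch.allclose(out, torch.full((1, 4, 8, 8), 5.0))  # 0 + 5 * (1 - 0)
# guidance_scale == 1.0 reproduces the text-conditioned prediction exactly,
# which is why the property below treats w <= 1 as "no classifier-free guidance".
```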
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. 
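Relatedly, the conditioning `image` described here is handled by `prepare_image` defined earlier: it is kept in the `[0, 1]` range (the control image processor is constructed with `do_normalize=False`), repeated to the effective batch size, and duplicated once more when classifier-free guidance is active and guess mode is off. A simplified sketch of that flow, assuming the input has already been resized and converted to a float tensor (`prepare_control_image_sketch` is a hypothetical name):

```py
import torch


def prepare_control_image_sketch(image_01, batch_size, do_classifier_free_guidance=True, guess_mode=False):
    """image_01: float tensor in [0, 1] of shape (1, 3, H, W), already resized/preprocessed."""
    image = image_01.repeat_interleave(batch_size, dim=0)  # one copy per prompt in the batch
    if do_classifier_free_guidance and not guess_mode:
        image = torch.cat([image] * 2)                     # same conditioning for uncond + text halves
    return image


cond = torch.rand(1, 3, 1024, 1024)
out = prepare_control_image_sketch(cond, batch_size=2)
assert out.shape == (4, 3, 1024, 1024)  # 2 prompts x 2 CFG halves
```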
If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
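The reason `eta` can safely be "ignored in other schedulers" is the `prepare_extra_step_kwargs` method shown earlier: it inspects `scheduler.step` and only forwards `eta` (and `generator`) when the signature accepts them. Below is a standard-library-only sketch of that filtering; the toy scheduler classes are hypothetical stand-ins for DDIM-style and plain steppers.

```py
import inspect


class _DDIMLikeScheduler:
    def step(self, model_output, timestep, sample, eta=0.0, generator=None):
        ...


class _PlainScheduler:
    def step(self, model_output, timestep, sample):
        ...


def extra_step_kwargs_sketch(scheduler, generator=None, eta=0.0):
    accepted = set(inspect.signature(scheduler.step).parameters)
    kwargs = {}
    if "eta" in accepted:
        kwargs["eta"] = eta            # only DDIM-style steppers consume eta
    if "generator" in accepted:
        kwargs["generator"] = generator
    return kwargs


assert "eta" in extra_step_kwargs_sketch(_DDIMLikeScheduler(), eta=0.3)
assert extra_step_kwargs_sketch(_PlainScheduler(), eta=0.3) == {}
```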
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. 
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
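The micro-conditioning arguments documented at the top of this block (`original_size`, `crops_coords_top_left`, `target_size`) feed `_get_add_time_ids`, which concatenates the three pairs into a single six-integer vector that the UNet embeds alongside the timestep, after checking that the resulting embedding width matches what `unet.add_embedding` expects. A toy sketch of that packing and check; the default dimensions below correspond to the SDXL base configuration but are used here only for illustration.

```py
import torch


def get_add_time_ids_sketch(original_size, crops_coords_top_left, target_size,
                            addition_time_embed_dim=256, text_encoder_projection_dim=1280,
                            expected_add_embed_dim=2816):
    add_time_ids = list(original_size + crops_coords_top_left + target_size)  # 6 integers
    passed = addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
    if passed != expected_add_embed_dim:
        raise ValueError(f"expected an added embedding of length {expected_add_embed_dim}, built {passed}")
    return torch.tensor([add_time_ids], dtype=torch.float32)


ids = get_add_time_ids_sketch((1024, 1024), (0, 0), (1024, 1024))
assert ids.shape == (1, 6)  # (orig_h, orig_w, crop_top, crop_left, target_h, target_w)
```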
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + + + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + + + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + mask = prepare_mask(mask=mask_image) + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + # 6. Prepare latent variables + +# sketch_image = self.image_processor.preprocess(sketch_image) + +# init_latents, noise= self.prepare_latents( +# sketch_image, +# latent_timestep, +# batch_size, +# num_images_per_prompt, +# prompt_embeds.dtype, +# device, +# generator, +# ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise, noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. 
Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + print("์—ฌ๊ธฐ๋„ ๋ฐ”๊ฟ”์•ผ ํ•˜๋‚˜ ๋ด„") + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
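In the loop above, `controlnet_keep[i]` (built from `control_guidance_start`/`control_guidance_end` just before the loop) gates the ControlNet per step: the conditioning scale is multiplied by 1.0 inside the active window and by 0.0 outside it. A small sketch of how that per-step schedule is derived in the single-ControlNet case:

```py
def controlnet_keep_schedule(num_steps, start, end):
    """1.0 where the ControlNet is active, 0.0 where it is skipped (single-ControlNet case)."""
    keep = []
    for i in range(num_steps):
        keep.append(1.0 - float(i / num_steps < start or (i + 1) / num_steps > end))
    return keep


keep = controlnet_keep_schedule(num_steps=10, start=0.0, end=0.5)
assert keep == [1.0] * 5 + [0.0] * 5      # ControlNet only guides the first half of the steps
cond_scales = [0.8 * k for k in keep]     # conditioning_scale * keep, as computed inside the loop
```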
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + + latents_with_noise = self.scheduler.step( + noise_pred, + t, + latents_with_noise, + **extra_step_kwargs, + return_dict=False, + )[0] + + + # masking process +# tmp = t.unsqueeze(0) +# init_latents_proper = self.scheduler.add_noise( +# init_latents, noise, tmp +# ).to(device) + +# mask = (mask > 0.5).to(prompt_embeds.dtype) +# latents_with_noise = ( +# mask * latents_with_noise + (1 - mask) * init_latents_proper +# ) + + + if callback_on_step_end is not None: + print("์—ฌ๊ธฐ๋„ ๋ฐ”๊ฟ”์•ผ ํ•˜๋‚˜ ๋ด„ 2") + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float32 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + print("์—ฌ๊ธฐ ๋ฐ”๊พธ๊ธด ํ–ˆ๋Š”๋ฐ...") + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, 
+                latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean
+            else:
+                latents_with_noise = latents_with_noise / self.vae.config.scaling_factor
+
+            image = self.vae.decode(latents_with_noise, return_dict=False)[0]
+
+            # cast back to fp16 if needed
+            if needs_upcasting:
+                self.vae.to(dtype=torch.float16)
+        else:
+            image = latents_with_noise
+
+        if not output_type == "latent":
+            # apply watermark if available
+            if self.watermark is not None:
+                image = self.watermark.apply_watermark(image)
+
+            image = self.image_processor.postprocess(image, output_type=output_type)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return StableDiffusionXLPipelineOutput(images=image)
diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl_img2img-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl_img2img-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..af19f3c309f87e446e772806eccbbc23fc2fa93b
--- /dev/null
+++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl_img2img-checkpoint.py
@@ -0,0 +1,1656 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
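For reference, the un-scaling applied to `latents_with_noise` just before `self.vae.decode(...)` above inverts the normalization done in `prepare_latents`. The sketch below reproduces that arithmetic in isolation; the `scaling_factor`, `latents_mean`, and `latents_std` values are illustrative placeholders rather than values read from an actual VAE config.

```py
import torch

# Placeholder values; the real pipeline reads these from self.vae.config.
scaling_factor = 0.13025                 # typical SDXL VAE scaling factor
latents_mean = torch.zeros(1, 4, 1, 1)   # only some VAEs define latents_mean / latents_std
latents_std = torch.ones(1, 4, 1, 1)

denoised = torch.randn(1, 4, 128, 128)   # stand-in for denoised latents (1024x1024 image -> 128x128 latents)

# Inverse of the normalization applied when the image was encoded:
#   forward:  z_norm = (z - mean) * scaling_factor / std
#   inverse:  z      = z_norm * std / scaling_factor + mean
if latents_mean is not None and latents_std is not None:
    decode_input = denoised * latents_std / scaling_factor + latents_mean
else:
    decode_input = denoised / scaling_factor  # default SD/SDXL path

# decode_input is what would be handed to self.vae.decode(...)
print(decode_input.shape)  # torch.Size([1, 4, 128, 128])
```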
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # pip install accelerate transformers safetensors diffusers + + >>> import torch + >>> import numpy as np + >>> from PIL import Image + + >>> from transformers import DPTImageProcessor, DPTForDepthEstimation + >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL + >>> from diffusers.utils import load_image + + + >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") + >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-depth-sdxl-1.0-small", + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + + >>> def get_depth_map(image): + ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") + ... with torch.no_grad(), torch.autocast("cuda"): + ... depth_map = depth_estimator(image).predicted_depth + + ... depth_map = torch.nn.functional.interpolate( + ... depth_map.unsqueeze(1), + ... size=(1024, 1024), + ... mode="bicubic", + ... align_corners=False, + ... ) + ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_map = (depth_map - depth_min) / (depth_max - depth_min) + ... image = torch.cat([depth_map] * 3, dim=1) + ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0] + ... 
image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) + ... return image + + + >>> prompt = "A robot, 4k photo" + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((1024, 1024)) + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> depth_image = get_depth_map(image) + + >>> images = pipe( + ... prompt, + ... image=image, + ... control_image=depth_image, + ... strength=0.99, + ... num_inference_steps=50, + ... controlnet_conditioning_scale=controlnet_conditioning_scale, + ... ).images + >>> images[0].save(f"robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionXLControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + 
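+ # requires_aesthetics_score is only needed for refiner-style checkpoints that condition on an
+ # aesthetic score (see the class docstring above); it defaults to False here.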
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
+ ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
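+ # e.g. with the default guidance_scale = 5.0 the denoising loop computes
+ # noise_pred = noise_pred_uncond + 5.0 * (noise_pred_text - noise_pred_uncond)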
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image will be used as the starting point for the image generation process. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If + the type is specified as `torch.Tensor`, it is passed to ControlNet as is. 
`PIL.Image.Image` can also + be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in + init, images must be passed as a list such that each element of the list can be correctly batched for + input to a single controlnet. + height (`int`, *optional*, defaults to the size of control_image): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to the size of control_image): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original unet. If multiple ControlNets are specified in init, you can set the + corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the controlnet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. 
Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. 
with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple` + containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + control_image, + strength, + num_inference_steps, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image and controlnet_conditioning_image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + height, width = control_image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + True, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + add_text_embeds = pooled_prompt_embeds + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. 
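+                    # `prompt_embeds` was concatenated as [negative, positive] for classifier-free guidance
+                    # above, so `.chunk(2)[1]` below keeps only the conditional half. In guess mode the
+                    # ControlNet therefore runs on a single (non-duplicated) batch; zeros are concatenated
+                    # back onto its residuals further down so the unconditional UNet branch stays unchanged.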
+ control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's 
offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl_img2img_img-checkpoint.py b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl_img2img_img-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d69e333aabd52d55a5ed2b7a6107f715ee14df --- /dev/null +++ b/diffusers3/pipelines/controlnet/.ipynb_checkpoints/pipeline_controlnet_sd_xl_img2img_img-checkpoint.py @@ -0,0 +1,2728 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by apfplicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
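+# Illustrative call sketch for the pipeline defined in this module (not executed anywhere in this file).
+# `pipe` is assumed to be an already-constructed StableDiffusionXLControlNetImg2ImgPipeline, and
+# `person_img`, `depth_img` and `garment_img` are assumed to be prepared by the caller:
+#
+#     result = pipe(
+#         prompt="a person wearing the garment",
+#         image=person_img,                       # init image for img2img
+#         control_image=depth_img,                # depth map consumed by the ControlNet
+#         strength=0.8,
+#         num_inference_steps=30,
+#         guidance_scale=5.0,
+#         controlnet_conditioning_scale=0.5,
+#         ip_adapter_image=garment_img,           # optional IP-Adapter reference image
+#     ).images[0]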
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from torchvision.transforms.functional import pil_to_tensor + +import numpy as np +import PIL +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # pip install accelerate transformers safetensors diffusers + + >>> import torch + >>> import numpy as np + >>> from PIL import Image + + >>> from transformers import DPTImageProcessor, DPTForDepthEstimation + >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL + >>> from diffusers.utils import load_image + + + >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") + >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-depth-sdxl-1.0-small", + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + + >>> def get_depth_map(image): + ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") + ... with torch.no_grad(), torch.autocast("cuda"): + ... depth_map = depth_estimator(image).predicted_depth + + ... depth_map = torch.nn.functional.interpolate( + ... depth_map.unsqueeze(1), + ... size=(1024, 1024), + ... mode="bicubic", + ... align_corners=False, + ... ) + ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_map = (depth_map - depth_min) / (depth_max - depth_min) + ... image = torch.cat([depth_map] * 3, dim=1) + ... 
image = image.permute(0, 2, 3, 1).cpu().numpy()[0] + ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) + ... return image + + + >>> prompt = "A robot, 4k photo" + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((1024, 1024)) + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> depth_image = get_depth_map(image) + + >>> images = pipe( + ... prompt, + ... image=image, + ... control_image=depth_image, + ... strength=0.99, + ... num_inference_steps=50, + ... controlnet_conditioning_scale=controlnet_conditioning_scale, + ... ).images + >>> images[0].save(f"robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
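+    Example (illustrative; a random ``float32`` array stands in for a real mask)::
+
+        >>> import numpy as np
+        >>> m = prepare_mask(np.random.rand(64, 64).astype(np.float32))
+        >>> m.shape  # batch x channels x height x width, values binarized to 0/1
+        torch.Size([1, 1, 64, 64])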
+ """ + if isinstance(mask, torch.Tensor): + print("^^^^") + print("mask_1") + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + print("^^^^") + print("mask_2") + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + + +class StableDiffusionXLControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + 
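+        # `requires_aesthetics_score` determines whether `aesthetic_score` / `negative_aesthetic_score`
+        # are folded into the added time ids (refiner-style micro-conditioning) when `_get_add_time_ids`
+        # is called during `__call__`.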
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
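+        Example (illustrative; `pipe` is an already-constructed instance of this pipeline)::
+
+            >>> embeds, neg_embeds, pooled, neg_pooled = pipe.encode_prompt(
+            ...     prompt="a photo of a red dress",
+            ...     device=pipe._execution_device,
+            ...     num_images_per_prompt=1,
+            ...     do_classifier_free_guidance=True,
+            ... )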
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
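+                    # e.g. `clip_skip=1` -> `hidden_states[-3]`; the default (`clip_skip=None`, handled above)
+                    # uses `hidden_states[-2]`, so the penultimate layer is always the baseline.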
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def _get_vae(self): + vae = getattr(self, "vae", None) or getattr(self, "autoencoder", None) + if vae is None and hasattr(self, "components"): + try: + vae = self.components.get("vae", None) + except Exception: + vae = None + if vae is None: + raise RuntimeError( + "VAE not found on pipeline. Ensure the pipeline is fully constructed " + "and VAE is attached as `vae`, `autoencoder`, or components['vae']." 
+ ) + return vae + + def _get_vae_scaling(self): + vae = self._get_vae() + return getattr(getattr(vae, "config", None), "scaling_factor", getattr(vae, "scaling_factor", 0.18215)) + + + + def _setup_openclip(self, device="cuda"): + model, _, _ = open_clip.create_model_and_transforms( + "ViT-H-14", pretrained="laion2b_s32b_b79k" + ) + model.eval().to(device, dtype=torch.float32) + for p in model.parameters(): + p.requires_grad_(False) + + object.__setattr__(self, "clip_model", model) + object.__setattr__(self, "clip_image_encoder", lambda x: self.clip_model.encode_image(x)) + object.__setattr__(self, "clip_text_encoder", lambda t: self.clip_model.encode_text(t)) + object.__setattr__(self, "clip_tokenizer", open_clip.get_tokenizer("ViT-H-14")) + + object.__setattr__(self, "clip_image_size", 224) + object.__setattr__(self, "clip_mean", (0.48145466, 0.4578275, 0.40821073)) + object.__setattr__(self, "clip_std", (0.26862954, 0.26130258, 0.27577711)) + + object.__setattr__(self, "cav_words", ["garment", "clothes", "apparel", "outfit"]) + object.__setattr__(self, "cav_alpha", 0.3) + object.__setattr__(self, "cav_steps", 1) + object.__setattr__(self, "cav_channel_dir", True) + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def _sdxl_decode_to_01(self, z): + vae = self._get_vae() + sf = getattr(getattr(vae, "config", None), "scaling_factor", + getattr(vae, "scaling_factor", 0.18215)) + x = vae.decode(z / sf).sample # [-1, 1] + x = (x.clamp(-1, 1) + 1.0) / 2.0 + return x + + + def _ensure_openclip(self, device=None): + if getattr(self, "clip_model", None) is not None: + return + import open_clip, torch + if device is None: + device = getattr(self, "device", "cuda" if torch.cuda.is_available() else "cpu") + + model, _, _ = open_clip.create_model_and_transforms( + "ViT-H-14", pretrained="laion2b_s32b_b79k" + ) + model.eval().to(device, dtype=torch.float32) + for p in model.parameters(): + p.requires_grad_(False) + + # __setattr__ ํ›…์„ ์šฐํšŒํ•ด์„œ ์†์„ฑ ์ฃผ์ž… + object.__setattr__(self, "clip_model", model) + object.__setattr__(self, "clip_image_encoder", lambda x: self.clip_model.encode_image(x)) + object.__setattr__(self, "clip_text_encoder", lambda t: self.clip_model.encode_text(t)) + object.__setattr__(self, "clip_tokenizer", open_clip.get_tokenizer("ViT-H-14")) + + object.__setattr__(self, "clip_image_size", 224) + object.__setattr__(self, "clip_mean", (0.48145466, 0.4578275, 0.40821073)) + object.__setattr__(self, "clip_std", (0.26862954, 0.26130258, 0.27577711)) + + # ์ œ๋กœ์ƒท Grad-CAV ํ•˜์ดํผ + object.__setattr__(self, "cav_words", ["garment", "clothes", "apparel", "outfit"]) + object.__setattr__(self, "cav_alpha", 0.3) + object.__setattr__(self, "cav_steps", 1) + object.__setattr__(self, "cav_channel_dir", True) + + + def upcast_vae_safe(self): + vae = self._get_vae() + try: + dev = next(vae.parameters()).device + except StopIteration: + import torch + dev = getattr(self, "device", "cuda" if torch.cuda.is_available() else "cpu") + orig_dtype = vae.dtype + object.__setattr__(self, "_vae_orig_dtype", orig_dtype) + + vae.to(device=dev, dtype=torch.float32) + + # xformers/torch2.0 ์ตœ์ ํ™” ๊ฐ์ง€ ํ›„ ์ผ๋ถ€ ๋ธ”๋ก๋งŒ ์›๋ž˜ dtype์œผ๋กœ (device๋Š” ๋™์ผ) + try: + from 
diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor + proc = vae.decoder.mid_block.attentions[0].processor + use_opt = isinstance(proc, (AttnProcessor2_0, XFormersAttnProcessor)) + except Exception: + use_opt = False + if use_opt: + vae.post_quant_conv.to(device=dev, dtype=orig_dtype) + vae.decoder.conv_in.to(device=dev, dtype=orig_dtype) + vae.decoder.mid_block.to(device=dev, dtype=orig_dtype) + + + + + + def _clip_preprocess_tensor(self, x01): + import torch.nn.functional as F + target = getattr(self, "clip_image_size", 224) + x = F.interpolate(x01, size=(target, target), mode="bilinear", align_corners=False) + + # ๊ฐ™์€ device/dtype๋กœ mean/std ์ƒ์„ฑ + mean = x.new_tensor(getattr(self, "clip_mean", (0.48145466, 0.4578275, 0.40821073))).view(1,3,1,1) + std = x.new_tensor(getattr(self, "clip_std", (0.26862954, 0.26130258, 0.27577711))).view(1,3,1,1) + return (x - mean) / std + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + add_noise=True, + # === NEW: garment subspace removal === + garment_images=None, # torch.Tensor [Ng,3,H,W] or PIL.Image or list[Tensor|PIL] + garment_rank=None, # (ํ˜„์žฌ ์ถ• 1๊ฐœ ์‚ฌ์šฉ; ํ–ฅํ›„ ํ™•์žฅ์šฉ) int or None + garment_alpha=None, # float or None (default=0.75) + garment_mask=None, # ํ‘/๋ฐฑ ๋งˆ์Šคํฌ (PIL.Image or torch.Tensor): ํฐ=์˜๋ฅ˜(1), ๊ฒ€=๋ฐฐ๊ฒฝ(0) + ): + """ + StableDiffusion(SDXL) prepare_latents with optional garment subspace removal (orthogonal projection). + """ + import torch + import torch.nn.functional as F + from diffusers.utils.torch_utils import randn_tensor + from torchvision.transforms.functional import pil_to_tensor + import PIL + + # ---------------------- small utils ---------------------- + eps = 1e-12 + + def _ensure_bchw(x: torch.Tensor) -> torch.Tensor: + if isinstance(x, torch.Tensor): + if x.ndim == 3: # [C,H,W] + return x.unsqueeze(0) + return x + raise ValueError("Tensor expected") + + def _gaussian_blur(mask: torch.Tensor, ksize=5, sigma=2.0): + pad = ksize // 2 + ax = torch.arange(-pad, pad + 1, device=mask.device, dtype=mask.dtype) + k1 = torch.exp(-(ax ** 2) / (2 * sigma ** 2)) + k1 = k1 / k1.sum() + k2d = (k1[:, None] @ k1[None, :]) + k2d = k2d / k2d.sum() + k = k2d.unsqueeze(0).unsqueeze(0) + chans = mask.shape[1] + if chans != 1: + k = k.repeat(chans, 1, 1, 1) + groups = chans + else: + groups = 1 + return F.conv2d(F.pad(mask, (pad, pad, pad, pad), mode="reflect"), k, groups=groups) + + def _highpass_latent(t: torch.Tensor, k=5, w=0.4): + pad = k // 2 + blur = F.avg_pool2d(F.pad(t.unsqueeze(0), (pad, pad, pad, pad), mode="reflect"), + kernel_size=k, stride=1).squeeze(0) + return t * (1 - w) + (t - blur) * w + + def _prep_mask_soft(mask_src, H, W, C, dev, dt, ksize=5, sigma=2.0, gamma=1.05): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m = pil_to_tensor(mask_src.convert("L")).float() / 255.0 + m = m.unsqueeze(0) # [1,1,h,w] + elif isinstance(mask_src, torch.Tensor): + m = mask_src + if m.ndim == 2: + m = m.unsqueeze(0).unsqueeze(0) + elif m.ndim == 3: + m = m.unsqueeze(0) + if m.max() > 1.0: + m = m / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m = F.interpolate(m, size=(H, W), mode="bilinear", align_corners=False) + m = _gaussian_blur(m, ksize=ksize, sigma=sigma) + m = m.clamp(0, 1).pow(gamma) + if m.shape[1] == 1: + m = m.repeat(1, C, 1, 1) + return m.to(device=dev, dtype=dt) + + def _prep_mask_hard_out(mask_src, H, W, C, dev, dt, 
thresh=0.5): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m0 = pil_to_tensor(mask_src.convert("L")).float() / 255.0 + m0 = m0.unsqueeze(0) # [1,1,h,w] + elif isinstance(mask_src, torch.Tensor): + m0 = mask_src + if m0.ndim == 2: + m0 = m0.unsqueeze(0).unsqueeze(0) + elif m0.ndim == 3: + m0 = m0.unsqueeze(0) + if m0.max() > 1.0: + m0 = m0 / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m0 = F.interpolate(m0, size=(H, W), mode="nearest") + m_in_hard = (m0 > thresh).float() + m_out_hard = 1.0 - m_in_hard + if C != 1: + m_out_hard = m_out_hard.repeat(1, C, 1, 1) + return m_out_hard.to(device=dev, dtype=dt) + + # ---------------------- allow PIL/list โ†’ Tensor ---------------------- + if isinstance(image, list): + imgs = [] + for im in image: + if isinstance(im, PIL.Image.Image): + t = pil_to_tensor(im).float() / 255.0 + imgs.append(_ensure_bchw(t)) + elif isinstance(im, torch.Tensor): + imgs.append(_ensure_bchw(im)) + else: + raise ValueError(f"Unsupported element in image list: {type(im)}") + image = torch.cat(imgs, dim=0) + elif isinstance(image, PIL.Image.Image): + image = pil_to_tensor(image).float() / 255.0 + image = _ensure_bchw(image) + + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError(f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}") + + # ---------------------- latents_mean/std (as in original) ---------------------- + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + eff_bs = batch_size * num_images_per_prompt + + # ---------------------- encode image to latents (original logic) ---------------------- + if image.shape[1] == 4: + init_latents = image + else: + # VAE float32 for stability + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != eff_bs: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {eff_bs}. Make sure the batch size matches the length of the generators." 
+ ) + elif isinstance(generator, list): + if image.shape[0] < eff_bs and eff_bs % image.shape[0] == 0: + image = torch.cat([image] * (eff_bs // image.shape[0]), dim=0) + elif image.shape[0] < eff_bs and eff_bs % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {eff_bs} " + ) + # use original helper + init_latents = torch.cat( + [retrieve_latents(self.vae.encode(image[i:i+1]), generator=generator[i]) for i in range(eff_bs)], + dim=0, + ) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + + # original normalization branch + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if eff_bs > init_latents.shape[0] and eff_bs % init_latents.shape[0] == 0: + additional_image_per_prompt = eff_bs // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif eff_bs > init_latents.shape[0] and eff_bs % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {eff_bs} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + + if garment_images is not None: + def _encode_and_norm(img_like): + if isinstance(img_like, PIL.Image.Image): + g = pil_to_tensor(img_like.convert("RGB")).float() / 255.0 + g = _ensure_bchw(g) + elif isinstance(img_like, torch.Tensor): + g = img_like + if g.ndim == 3: + g = g.unsqueeze(0) + elif isinstance(img_like, list): + gs = [] + for it in img_like: + if isinstance(it, PIL.Image.Image): + t = pil_to_tensor(it.convert("RGB")).float() / 255.0 + gs.append(_ensure_bchw(t)) + elif isinstance(it, torch.Tensor): + gs.append(_ensure_bchw(it)) + else: + raise ValueError(f"Unsupported in garment_images list: {type(it)}") + g = torch.cat(gs, dim=0) + else: + raise ValueError(f"garment_images type error: {type(img_like)}") + + if g.shape[1] == 1: + g = g.repeat(1, 3, 1, 1) + + g = g.to(device=device, dtype=dtype) + if self.vae.config.force_upcast: + self.vae.to(dtype=torch.float32) + z = retrieve_latents(self.vae.encode(g), generator=generator).to(torch.float32) + self.vae.to(dtype) + else: + z = retrieve_latents(self.vae.encode(g), generator=generator) + + z = z.to(dtype) + if latents_mean is not None and latents_std is not None: + z = (z - latents_mean.to(device=device, dtype=dtype)) * self.vae.config.scaling_factor / latents_std.to(device=device, dtype=dtype) + else: + z = self.vae.config.scaling_factor * z + return z + + # garment latent and a white reference to form a direction + z_g = _encode_and_norm(garment_images) + + white_rgb = torch.ones_like(image[:, :3]) if image.shape[1] >= 3 else torch.ones(image.shape[0], 3, image.shape[2], image.shape[3], device=device, dtype=dtype) + z_white = _encode_and_norm(white_rgb) + + v = z_g - z_white + B, C, H, W = init_latents.shape + + # soft/hard masks at latent resolution + M_soft = _prep_mask_soft(garment_mask, H, W, C, init_latents.device, init_latents.dtype) + M_out_hard = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5) + + alpha = float(garment_alpha) if 
garment_alpha is not None else 0.8 # removal strength + beta = 0.38 # inside blend with previous + + # refine garment direction a bit (reduce DC / emphasize edges) + v0 = _highpass_latent(v[0], k=5, w=0.4) + + Z_base = init_latents.clone() + Z = init_latents + z_work = Z.clone() + + # mean inside (for DC-safe centering) + if M_soft is not None: + mean_z0 = (Z * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) + u_src = v0 * M_soft[0] + w = M_soft[0].sum().clamp_min(eps) + u_src = u_src - (u_src * M_soft[0]).sum() / w * M_soft[0] + else: + mean_z0 = Z.mean([2,3], keepdim=True) + u_src = v0 + + # unit axis + u = u_src.reshape(-1) + u_norm = u.norm() + if u_norm < 1e-6: + # axis degenerate โ†’ skip (still add noise later) + if add_noise: + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + return init_latents + + u = u / (u_norm + eps) + + # diagnostics baseline + zc_init = (Z - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_before = (zc_init.reshape(B, -1) @ u) + + # iterative masked projection + iter_n = int(getattr(self, "garment_iter", 1)) + for it in range(max(1, iter_n)): + mean_z = (z_work * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) if M_soft is not None \ + else z_work.mean([2,3], keepdim=True) + zc = (z_work - mean_z) * (M_soft if M_soft is not None else 1) + zc_flat = zc.reshape(B, -1) + + # slight decay + alpha_i = alpha * (0.90 ** it) + + # boundary boost + if M_soft is not None: + R = (M_soft[0] * (1.0 - M_soft[0])) + if R.max() > 0: + R = R / (R.max() + eps) + alpha_map = alpha_i * (0.5 + 0.5 * M_soft[0] + 0.5 * R) + alpha_map = alpha_map.clamp(max=alpha * 1.2) + else: + alpha_map = torch.full_like(z_work[0], alpha_i) + + coeff = (zc_flat @ u) + proj = coeff.unsqueeze(1) * u.unsqueeze(0) + zproj = (zc_flat - alpha_map.reshape(1, -1) * proj).reshape(B, C, H, W) + + if M_soft is None: + z_new = (1 - beta) * zproj + beta * z_work + else: + z_new = z_work * (1 - M_soft) + ((1 - beta) * zproj + beta * z_work) * M_soft + + # inside-only mean/std restore + if M_soft is not None: + wsum = M_soft.sum([1,2,3], keepdim=True).clamp_min(eps) + + def _mstats(X): + mean_in = (X * M_soft).sum([1,2,3], keepdim=True) / wsum + Xm = (X - mean_in) * M_soft + std_in = (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt() + return mean_in, std_in + + mean_in0, std_in0 = _mstats(z_work) + mean_in1, std_in1 = _mstats(z_new) + + z_new = z_new + (mean_in0 - mean_in1) * M_soft + gain = ((std_in0 + eps) / (std_in1 + eps)).clamp(0.97, 1.03) + z_new = z_new * (1 - M_soft) + ((z_new - mean_in0) * gain + mean_in0) * M_soft + + z_work = z_new + + # outside mean lock + if M_soft is not None: + wsum_out_soft = (1 - M_soft).sum([1,2,3], keepdim=True).clamp_min(eps) + mean_out_base = (Z_base * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft + mean_out_fin = (z_work * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft + delta_out = (mean_out_base - mean_out_fin) + + M_out_hard2 = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5) + if M_out_hard2 is not None: + z_work = z_work + delta_out * M_out_hard2 + else: + z_work = z_work + delta_out * (1 - M_soft) + + # outside std very tight + W_out = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.55) + if W_out is not None: + wsum = W_out.sum([1,2,3], 
keepdim=True).clamp_min(eps) + mu0 = (Z_base * W_out).sum([1,2,3], keepdim=True) / wsum + mu1 = (z_work * W_out).sum([1,2,3], keepdim=True) / wsum + + def _std(X, mu): + Xm = (X - mu) * W_out + return (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt() + + s0 = _std(Z_base, mu0) + s1 = _std(z_work, mu1) + g_out = ((s0 + eps) / (s1 + eps)).clamp(0.995, 1.005) + z_work = z_work * (1 - W_out) + ((z_work - mu1) * g_out + mu1) * W_out + + init_latents = z_work + + + if True: + with torch.no_grad(): + ZAc = (init_latents - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_after = (ZAc.reshape(B,-1) @ u) + coeff_ratio = (coeff_after.abs().mean() / (coeff_before.abs().mean() + eps)).item() + energy_ratio = (coeff_after.pow(2).mean() / (coeff_before.pow(2).mean() + eps)).item() + print(f" !!!![Garment|Projection] coeff โ†“ ratio = {coeff_ratio:.4f}, energy โ†“ ratio = {energy_ratio:.4f}") + + # ---------------------- add noise (original) ---------------------- + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # debug + # print("timestep:", timestep) + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents + + + + + + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + + + + + + # def prepare_latents_( + # self, + # image, + # timestep, + # batch_size, + # num_images_per_prompt, + # dtype, + # device, + # generator=None, + # ): + # if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + # raise ValueError( + # f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + # ) + + # image = image.to(device=device, dtype=dtype) + + # batch_size = batch_size * num_images_per_prompt + + # if image.shape[1] == 4: + # init_latents = image + + # else: + # if isinstance(generator, list) and len(generator) != batch_size: + + + # raise ValueError( + # f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + # f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ # ) + + # elif isinstance(generator, list): + + # init_latents = [ + # self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + # for i in range(batch_size) + # ] + # print("***") + # print("vae_1") + # init_latents = torch.cat(init_latents, dim=0) + # else: + # print("***") + # print("vae_2") + # init_latents = self.vae.encode(image).latent_dist.sample(generator) + + # init_latents = self.vae.config.scaling_factor * init_latents + + # if ( + # batch_size > init_latents.shape[0] + # and batch_size % init_latents.shape[0] == 0 + # ): + # # expand init_latents for batch_size + # deprecation_message = ( + # f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + # " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + # " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + # " your script to pass as many initial images as text prompts to suppress this warning." + # ) + # deprecate( + # "len(prompt) != len(image)", + # "1.0.0", + # deprecation_message, + # standard_warn=False, + # ) + # additional_image_per_prompt = batch_size // init_latents.shape[0] + # init_latents = torch.cat( + # [init_latents] * additional_image_per_prompt, dim=0 + # ) + # elif ( + # batch_size > init_latents.shape[0] + # and batch_size % init_latents.shape[0] != 0 + # ): + # raise ValueError( + # f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + # ) + # else: + # init_latents = torch.cat([init_latents], dim=0) + + # shape = init_latents.shape + # noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # # get latents + + # return init_latents, noise + +# ์ตœ์ข… + def prepare_latents_( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + garment_images=None, + garment_rank=None, + garment_alpha=None, + garment_mask=None, + ): + import torch + import torch.nn.functional as F + from diffusers.utils.torch_utils import randn_tensor + from torchvision.transforms.functional import pil_to_tensor + import PIL + + eps = 1e-12 + + # ---------------------- util ---------------------- + def _ensure_bchw(x): + if isinstance(x, torch.Tensor): + if x.ndim == 3: # [C,H,W] + return x.unsqueeze(0) + return x + raise ValueError("Tensor expected") + + # Gaussian blur for feather (soft mask) + def _gaussian_blur(mask, ksize=5, sigma=2.0): + pad = ksize // 2 + ax = torch.arange(-pad, pad + 1, device=mask.device, dtype=mask.dtype) + k1 = torch.exp(-(ax ** 2) / (2 * sigma ** 2)) + k1 = k1 / k1.sum() + k2d = (k1[:, None] @ k1[None, :]) + k2d = k2d / k2d.sum() + k = k2d.unsqueeze(0).unsqueeze(0) + chans = mask.shape[1] + if chans != 1: + k = k.repeat(chans, 1, 1, 1) + groups = chans + else: + groups = 1 + return F.conv2d(F.pad(mask, (pad, pad, pad, pad), mode="reflect"), k, groups=groups) + + # high-pass for garment vector + def _highpass_latent(t, k=5, w=0.4): + pad = k // 2 + blur = F.avg_pool2d(F.pad(t.unsqueeze(0), (pad, pad, pad, pad), mode="reflect"), + kernel_size=k, stride=1).squeeze(0) + return t * (1 - w) + (t - blur) * w + + # prepare soft mask (feathered, used for blending/projection weights) + def _prep_mask_soft(mask_src, H, W, C, dev, dt, ksize=5, sigma=2.0, gamma=1.05): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m = pil_to_tensor(mask_src.convert("L")).float() / 255.0 + m = m.unsqueeze(0) + elif isinstance(mask_src, 
torch.Tensor): + m = mask_src + if m.ndim == 2: + m = m.unsqueeze(0).unsqueeze(0) + elif m.ndim == 3: + m = m.unsqueeze(0) + if m.max() > 1.0: + m = m / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m = F.interpolate(m, size=(H, W), mode="bilinear", align_corners=False) + m = _gaussian_blur(m, ksize=ksize, sigma=sigma) + m = m.clamp(0, 1).pow(gamma) + if m.shape[1] == 1: + m = m.repeat(1, C, 1, 1) + return m.to(device=dev, dtype=dt) + + # prepare hard OUTSIDE mask (strict outside = 1, inside = 0) from the ORIGINAL mask (no feather) + def _prep_mask_hard_out(mask_src, H, W, C, dev, dt, thresh=0.5): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m0 = pil_to_tensor(mask_src.convert("L")).float() / 255.0 # [1,h,w] + m0 = m0.unsqueeze(0) # [1,1,h,w] + elif isinstance(mask_src, torch.Tensor): + m0 = mask_src + if m0.ndim == 2: + m0 = m0.unsqueeze(0).unsqueeze(0) + elif m0.ndim == 3: + m0 = m0.unsqueeze(0) + if m0.max() > 1.0: + m0 = m0 / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m0 = F.interpolate(m0, size=(H, W), mode="nearest") + m_in_hard = (m0 > thresh).float() # inside=1 / outside=0 + m_out_hard = 1.0 - m_in_hard # outside=1 / inside=0 + if C != 1: + m_out_hard = m_out_hard.repeat(1, C, 1, 1) + return m_out_hard.to(device=dev, dtype=dt) + + # ---------------------- image preprocessing ---------------------- + if isinstance(image, list): + imgs = [] + for im in image: + if isinstance(im, PIL.Image.Image): + t = pil_to_tensor(im).float() / 255.0 + imgs.append(_ensure_bchw(t)) + else: + imgs.append(_ensure_bchw(im)) + image = torch.cat(imgs, dim=0) + elif isinstance(image, PIL.Image.Image): + image = pil_to_tensor(image).float() / 255.0 + image = _ensure_bchw(image) + image = image.to(device=device, dtype=dtype) + + eff_bs = batch_size * num_images_per_prompt + + # ---------------------- VAE encode ---------------------- + if image.shape[1] == 4: + init_latents = image + else: + if isinstance(generator, list) and len(generator) != eff_bs: + raise ValueError("generator length mismatch") + elif isinstance(generator, list): + init_latents = torch.cat( + [self.vae.encode(image[i:i+1]).latent_dist.sample(generator[i]) for i in range(eff_bs)], dim=0) + else: + init_latents = self.vae.encode(image).latent_dist.sample(generator) + init_latents = self.vae.config.scaling_factor * init_latents + + # ---------------------- batch align ---------------------- + if (eff_bs > init_latents.shape[0]) and (eff_bs % init_latents.shape[0] == 0): + additional = eff_bs // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional, dim=0) + else: + init_latents = torch.cat([init_latents], dim=0) + + # ====================================================== + # Garment subspace removal + # ====================================================== + if garment_images is not None: + # garment_images -> [Ng,3,H,W] + if isinstance(garment_images, PIL.Image.Image): + g_img = pil_to_tensor(garment_images).float() / 255.0 + if g_img.shape[0] == 1: + g_img = g_img.repeat(3,1,1) + g_img = g_img.unsqueeze(0) + elif isinstance(garment_images, torch.Tensor): + g_img = garment_images + if g_img.ndim == 3: + if g_img.shape[0] == 1: + g_img = g_img.repeat(3,1,1) + g_img = g_img.unsqueeze(0) + elif g_img.ndim == 4 and g_img.shape[1] == 1: + g_img = g_img.repeat(1,3,1,1) + else: + raise ValueError("garment_images type error") + g_img = g_img.to(device=device, dtype=dtype) + + with torch.no_grad(): + z_g = 
self.vae.encode(g_img).latent_dist.sample(generator) + z_g = self.vae.config.scaling_factor * z_g + + B, C, H, W = init_latents.shape + # soft/hard-out masks + M_soft = _prep_mask_soft(garment_mask, H, W, C, init_latents.device, init_latents.dtype) + M_out_hard = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5) + + alpha = float(garment_alpha) if garment_alpha is not None else 0.8 + beta = 0 + + # --- Garment vector refinement --- + white = torch.ones_like(g_img) + z_white = self.vae.encode(white).latent_dist.sample(generator) + z_white = self.vae.config.scaling_factor * z_white + v = z_g - z_white + v0 = _highpass_latent(v[0], k=5, w=0.4) + + # snapshot for outside mean lock + Z_base = init_latents.clone() + + Z = init_latents + z_work = Z.clone() + + # --- DC-safe u (remove mean component in mask) + if M_soft is not None: + mean_z0 = (Z * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) + u_src = v0 * M_soft[0] + w = M_soft[0].sum().clamp_min(eps) + u_src = u_src - (u_src * M_soft[0]).sum() / w * M_soft[0] + else: + mean_z0 = Z.mean([2,3], keepdim=True) + u_src = v0 + u = u_src.reshape(-1) + u_norm = u.norm() + if u_norm < 1e-6: + print("[Guard] projection axis ~0; skip") + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + return init_latents, noise + u = u / (u_norm + eps) + + # baseline for verification + zc_init = (Z - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_before = (zc_init.reshape(B, -1) @ u) + + # --- iterative projection (outside๋Š” ์†๋Œ€์ง€ ์•Š๋˜, feather ์˜ํ–ฅ ์ตœ์†Œํ™”) --- + iter_n = 1 + for it in range(max(1, iter_n)): + mean_z = (z_work * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) if M_soft is not None \ + else z_work.mean([2,3], keepdim=True) + zc = (z_work - mean_z) * (M_soft if M_soft is not None else 1) + zc_flat = zc.reshape(B, -1) + + # per-iter alpha with mild decay + alpha_i = alpha * (0.90 ** it) + # alpha_map = ((0.5 + 0.5 * M_soft[0]) * alpha_i) if M_soft is not None else torch.full_like(z_work[0], alpha_i) + R = (M_soft[0] * (1.0 - M_soft[0])) + if R.max() > 0: + R = R / (R.max() + eps) + alpha_map = alpha * (0.5 + 0.5 * M_soft[0] + 0.5 * R) # ๊ฒฝ๊ณ„์—์„œ +0.5*alpha ์ถ”๊ฐ€ + alpha_map = alpha_map.clamp(max=alpha * 1.2) # ๊ณผ๋„ ์ƒ์Šน ๋ฐฉ์ง€ + + coeff = (zc_flat @ u) + proj = coeff.unsqueeze(1) * u.unsqueeze(0) + zproj = (zc_flat - alpha_map.reshape(1, -1) * proj).reshape(B, C, H, W) + + # blend only inside (outside path keeps original) + if M_soft is None: + z_new = (1 - beta) * zproj + beta * z_work + else: + z_new = z_work * (1 - M_soft) + ((1 - beta) * zproj + beta * z_work) * M_soft + + # inside-only mean/std restore + if M_soft is not None: + wsum = M_soft.sum([1,2,3], keepdim=True).clamp_min(eps) + def mstats(X): + mean_in = (X * M_soft).sum([1,2,3], keepdim=True) / wsum + Xm = (X - mean_in) * M_soft + std_in = (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt() + return mean_in, std_in + mean_in0, std_in0 = mstats(z_work) + mean_in1, std_in1 = mstats(z_new) + + # mean restore (inside drift = 0 relative to previous iter) + z_new = z_new + (mean_in0 - mean_in1) * M_soft + + # std restore (tight clamp) + gain = ((std_in0 + eps) / (std_in1 + eps)).clamp(0.97, 1.03) + z_new = z_new * (1 - M_soft) + ((z_new - mean_in0) * gain + mean_in0) * M_soft + + z_work = z_new + + # ---------------------- Outside mean lock (keep outside brightness) ---------------------- + if 
M_soft is not None: + wsum_out_soft = (1 - M_soft).sum([1,2,3], keepdim=True).clamp_min(eps) + mean_out_base = (Z_base * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft + mean_out_fin = (z_work * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft + delta_out = (mean_out_base - mean_out_fin) # 100% correction + + # apply purely on strict outside to avoid boundary tint + M_out_hard = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5) + if M_out_hard is not None: + z_work = z_work + delta_out * M_out_hard + else: + z_work = z_work + delta_out * (1 - M_soft) + + + # ---- (์„ ํƒ) Outside std lock: ์•„์ฃผ ํƒ€์ดํŠธํ•˜๊ฒŒ ---- + W = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.55) + if W is not None: + wsum = W.sum([1,2,3], keepdim=True).clamp_min(eps) + mu0 = (Z_base * W).sum([1,2,3], keepdim=True) / wsum + mu1 = (z_work * W).sum([1,2,3], keepdim=True) / wsum + + def _std(X, mu): + Xm = (X - mu) * W + return (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt() + + s0 = _std(Z_base, mu0) # ๊ธฐ์ค€ std + s1 = _std(z_work, mu1) # ํ˜„์žฌ std + g_out = ((s0 + eps) / (s1 + eps)).clamp(0.995, 1.005) # ์•„์ฃผ ๋ฏธ์„ธํ•˜๊ฒŒ๋งŒ ๋ณด์ • + + z_work = z_work * (1 - W) + ((z_work - mu1) * g_out + mu1) * W + + + init_latents = z_work + + + # ====================== EXTRA DIAGNOSTICS: is it projection or blur? ====================== + with torch.no_grad(): + # (A) ฮ”z-alignment in latent (inside mask) + W_in = M_soft if M_soft is not None else torch.ones_like(init_latents) + dZ = ((init_latents - Z) * W_in).reshape(B, -1) + num = (dZ @ u).pow(2).mean() + den = (dZ.pow(2).sum(dim=1).mean() + eps) + align_idx = (num / den).item() + + + def _rand_perp_energy(trials=4): + vals = [] + for _ in range(trials): + r = torch.randn_like(u) + r = r - (r @ u) * u + r = r / (r.norm() + eps) + vals.append((dZ @ r).pow(2).mean()) + return torch.stack(vals).mean() + perp_energy = _rand_perp_energy(trials=6).item() + u_energy = num.item() + + perp_u_ratio = (perp_energy + eps) / (u_energy + eps) + + # (C) Pixel-domain high-frequency check via Laplacian + sf = float(self.vae.config.scaling_factor) + x0 = self.vae.decode(Z / sf).sample # [B,3,Hx,Wx] + x1 = self.vae.decode(init_latents / sf).sample + + def _lap_energy(x, M_like): + + k = torch.tensor([[0.,1.,0.], + [1.,-4.,1.], + [0.,1.,0.]], device=x.device, dtype=x.dtype).view(1,1,3,3) + Cx = x.shape[1] + K = k.repeat(Cx,1,1,1) + y = F.conv2d(x, K, padding=1, groups=Cx).abs() # |โˆ‡ยฒx| + + if M_like is None: + return y.mean() + + M_pix = M_like + if M_pix.shape[2:] != x.shape[2:]: + M_pix = F.interpolate(M_pix, size=x.shape[2:], mode="nearest") + if M_pix.shape[1] != x.shape[1]: + M_pix = M_pix[:, :1].repeat(1, x.shape[1], 1, 1) + + w = M_pix.sum([1,2,3], keepdim=True).clamp_min(eps) + return ((y * M_pix).sum([1,2,3], keepdim=True) / w).mean() + + hf_in0 = _lap_energy(x0, M_soft).item() + hf_in1 = _lap_energy(x1, M_soft).item() + hf_out0 = _lap_energy(x0, 1 - M_soft if M_soft is not None else None).item() + hf_out1 = _lap_energy(x1, 1 - M_soft if M_soft is not None else None).item() + + hf_in_ratio = (hf_in1 / (hf_in0 + eps)) + hf_out_ratio = (hf_out1 / (hf_out0 + eps)) + + print(f"[Diag|Align] A=ฮ”zยทu energy fraction (inside) = {align_idx:.3f} (โ†‘๋ฉด ํˆฌ์˜)") + print(f"[Diag|Perp] perp/u energy ratio (inside) = {perp_u_ratio:.3f} (โ†“๋ฉด ํˆฌ์˜)") + print(f"[Diag|HF] inside HF ratio={hf_in_ratio:.3f}, outside HF ratio={hf_out_ratio:.3f} (๋ธ”๋Ÿฌ๋ฉด HFโ‰ช1)") + + + + # 
---------------------- verification ---------------------- + with torch.no_grad(): + ZAc = (init_latents - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_after = (ZAc.reshape(B,-1) @ u) + coeff_ratio = (coeff_after.abs().mean() / (coeff_before.abs().mean() + eps)).item() + energy_ratio = (coeff_after.pow(2).mean() / (coeff_before.pow(2).mean() + eps)).item() + + if M_soft is not None: + def _mstats(z): + w_in = M_soft.sum([1,2,3], keepdim=True).clamp_min(eps) + w_out = (1 - M_soft).sum([1,2,3], keepdim=True).clamp_min(eps) + mean_in = (z*M_soft).sum([1,2,3], keepdim=True) / w_in + mean_out = (z*(1-M_soft)).sum([1,2,3], keepdim=True) / w_out + std_in = (((z-mean_in)*M_soft)**2).sum([1,2,3], keepdim=True).div(w_in).sqrt() + std_out = (((z-mean_out)*(1-M_soft))**2).sum([1,2,3], keepdim=True).div(w_out).sqrt() + return mean_in.mean(), std_in.mean(), mean_out.mean(), std_out.mean() + + mean_in0, std_in0, mean_out0, std_out0 = _mstats(Z) + mean_in1, std_in1, mean_out1, std_out1 = _mstats(init_latents) + + print(f"[Garment|Projection] coeff โ†“ ratio = {coeff_ratio:.4f}, energy โ†“ ratio = {energy_ratio:.4f}") + print(f"[Garment|Leak] inside ฮ”mean={float(mean_in1-mean_in0):+.5f}, ฮ”std={float(std_in1-std_in0):+.5f} " + f"| outside ฮ”mean={float(mean_out1-mean_out0):+.5f}, ฮ”std={float(std_out1-std_out0):+.5f}") + else: + print(f"[Garment|Projection] coeff โ†“ ratio = {coeff_ratio:.4f}, energy โ†“ ratio = {energy_ratio:.4f}") + + # ---------------------- noise ---------------------- + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + return init_latents, noise + + + + + + + + + + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
+ ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
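+    # Illustrative sketch (comment only, not executed here): in the denoising loop further below, the two
+    # halves of the batched noise prediction are recombined as
+    #
+    #     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    #
+    # which is why the property below only enables classifier-free guidance for `guidance_scale > 1`.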
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + garment_images: Union[torch.FloatTensor, List[torch.FloatTensor], PIL.Image.Image, List[PIL.Image.Image]] = None, + garment_mask: PIL.Image.Image = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image will be used as the starting point for the image generation process. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. 
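+            mask_image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*):
+                Black-and-white mask used for the inpainting-style blend at every denoising step: regions above 0.5
+                are re-generated, while the remaining regions are restored from the (re-noised) initial latents.
+            sketch_image (`torch.FloatTensor` or `PIL.Image.Image`, *optional*):
+                Image that is VAE-encoded to form the initial latents of the img2img process in this pipeline.
+            garment_images (`torch.FloatTensor`, `PIL.Image.Image`, or lists thereof, *optional*):
+                Reference garment image(s) whose latent direction (relative to a white reference) is projected out
+                of the initial latents inside the garment region ("garment subspace removal").
+            garment_mask (`PIL.Image.Image`, *optional*):
+                Black-and-white mask of the garment region (white = garment, black = background) that restricts the
+                projection above and keeps latent statistics outside the region unchanged.
+                (mask_image, sketch_image, garment_images and garment_mask are specific to this pipeline; the
+                descriptions above are inferred from how the arguments are used in `__call__` below.)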
+            control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+                `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+                The ControlNet input condition. ControlNet uses this input condition to generate guidance to the UNet. If
+                the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
+                be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
+                and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
+                init, images must be passed as a list such that each element of the list can be correctly batched for
+                input to a single controlnet.
+            height (`int`, *optional*, defaults to the size of control_image):
+                The height in pixels of the generated image. Anything below 512 pixels won't work well for
+                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
+                and checkpoints that are not specifically fine-tuned on low resolutions.
+            width (`int`, *optional*, defaults to the size of control_image):
+                The width in pixels of the generated image. Anything below 512 pixels won't work well for
+                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
+                and checkpoints that are not specifically fine-tuned on low resolutions.
+            strength (`float`, *optional*, defaults to 0.8):
+                Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+                essentially ignores `image`.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 5.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                [`schedulers.DDIMScheduler`], will be ignored for others.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from the `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from the `negative_prompt`
+                input argument.
+            ip_adapter_image (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
+            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number
+                of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
+                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
+                provided, embeddings are computed from the `ip_adapter_image` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
+                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
+                to the residual in the original UNet. If multiple ControlNets are specified in init, you can set the
+                corresponding scale as a list.
+            guess_mode (`bool`, *optional*, defaults to `False`):
+                In this mode, the ControlNet encoder will try its best to recognize the content of the input image even
+                if you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
+            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+                The percentage of total steps at which the ControlNet starts applying.
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple` + containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + print("-------") + print("&&& image: ", type(image)) + + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + control_image, + strength, + num_inference_steps, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + + + # 3.1. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image and controlnet_conditioning_image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + height, width = control_image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + + + # 6. 
Prepare latent variables + + mask = prepare_mask(mask=mask_image) + + sketch_image = self.image_processor.preprocess(sketch_image) + + if latents is None: + latents_with_noise = self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + True, + garment_images = garment_images, + garment_mask = garment_mask, + ) + + + + init_latents, noise= self.prepare_latents_( + image = sketch_image, + timestep = latent_timestep, + batch_size = batch_size, + num_images_per_prompt = num_images_per_prompt, + dtype = prompt_embeds.dtype, + device = device, + generator = generator, + garment_images = garment_images, + garment_mask = garment_mask, + + ) + + # 6.1. Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + add_text_embeds = pooled_prompt_embeds + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 8. 
Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}

                # controlnet(s) inference
                if guess_mode and self.do_classifier_free_guidance:
                    # Infer ControlNet only for the conditional batch.
                    # ControlNet sees only the current working latents (kept in `latents_with_noise` in this pipeline).
                    control_model_input = latents_with_noise
                    control_model_input = self.scheduler.scale_model_input(control_model_input, t)
                    controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
                    controlnet_added_cond_kwargs = {
                        "text_embeds": add_text_embeds.chunk(2)[1],
                        "time_ids": add_time_ids.chunk(2)[1],
                    }
                else:
                    control_model_input = latent_model_input
                    controlnet_prompt_embeds = prompt_embeds
                    controlnet_added_cond_kwargs = added_cond_kwargs

                if isinstance(controlnet_keep[i], list):
                    cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
                else:
                    controlnet_cond_scale = controlnet_conditioning_scale
                    if isinstance(controlnet_cond_scale, list):
                        controlnet_cond_scale = controlnet_cond_scale[0]
                    cond_scale = controlnet_cond_scale * controlnet_keep[i]

                down_block_res_samples, mid_block_res_sample = self.controlnet(
                    control_model_input,
                    t,
                    encoder_hidden_states=controlnet_prompt_embeds,
                    controlnet_cond=control_image,
                    conditioning_scale=cond_scale,
                    guess_mode=guess_mode,
                    added_cond_kwargs=controlnet_added_cond_kwargs,
                    return_dict=False,
                )

                if guess_mode and self.do_classifier_free_guidance:
                    # Inferred ControlNet only for the conditional batch.
                    # To apply the output of ControlNet to both the unconditional and conditional batches,
                    # add 0 to the unconditional batch to keep it unchanged.
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_with_noise = self.scheduler.step(noise_pred, t, latents_with_noise, **extra_step_kwargs, return_dict=False)[0] + + # masking process + tmp = t.unsqueeze(0) + init_latents_proper = self.scheduler.add_noise( + init_latents, noise, tmp + ).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + latents_with_noise = ( + mask * latents_with_noise + (1 - mask) * init_latents_proper + ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + 
) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) + latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents_with_noise = latents_with_noise / self.vae.config.scaling_factor + +# sf = self.vae.config.scaling_factor # ์˜ˆ: 0.18215 + image = self.vae.decode(latents_with_noise, return_dict=False)[0] +# image = self.vae.decode(init_latents/sf, return_dict=False)[0] + + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents_with_noise + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/Untitled.ipynb b/diffusers3/pipelines/controlnet/Untitled.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..ef8dfb8b97cca490c0a2b88e005cc8904579b49d --- /dev/null +++ b/diffusers3/pipelines/controlnet/Untitled.ipynb @@ -0,0 +1,74 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "fc52916d-3847-46b8-9f70-8e4089f7e7cf", + "metadata": {}, + "outputs": [ + { + "ename": "ImportError", + "evalue": "attempted relative import with no known parent package", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mpipeline_controlnet_sd_xl\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m StableDiffusionXLControlNetPipeline\n", + "File \u001b[0;32m~/data/diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py:33\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtransformers\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m (\n\u001b[1;32m 24\u001b[0m CLIPImageProcessor,\n\u001b[1;32m 25\u001b[0m CLIPTextModel,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 28\u001b[0m CLIPVisionModelWithProjection,\n\u001b[1;32m 29\u001b[0m )\n\u001b[1;32m 31\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdiffusers\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mimport_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m is_invisible_watermark_available\n\u001b[0;32m---> 33\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mcallbacks\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m MultiPipelineCallbacks, PipelineCallback\n\u001b[1;32m 34\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mimage_processor\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m PipelineImageInput, VaeImageProcessor\n\u001b[1;32m 35\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mloaders\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m 
(\n\u001b[1;32m 36\u001b[0m FromSingleFileMixin,\n\u001b[1;32m 37\u001b[0m IPAdapterMixin,\n\u001b[1;32m 38\u001b[0m StableDiffusionXLLoraLoaderMixin,\n\u001b[1;32m 39\u001b[0m TextualInversionLoaderMixin,\n\u001b[1;32m 40\u001b[0m )\n", + "File \u001b[0;32m~/data/diffusers/src/diffusers/pipelines/controlnet/callbacks.py:3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Any, Dict, List\n\u001b[0;32m----> 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mconfiguration_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ConfigMixin, register_to_config\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m CONFIG_NAME\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mclass\u001b[39;00m \u001b[38;5;21;01mPipelineCallback\u001b[39;00m(ConfigMixin):\n", + "\u001b[0;31mImportError\u001b[0m: attempted relative import with no known parent package" + ] + } + ], + "source": [ + "from pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e123e4e2-3a99-4cd4-a6d0-951c3828ba80", + "metadata": {}, + "outputs": [], + "source": [ + "---> 33 from callbacks import MultiPipelineCallbacks, PipelineCallback\n", + " 34 from ...image_processor import PipelineImageInput, VaeImageProcessor\n", + " 35 from ...loaders import (\n", + " 36 FromSingleFileMixin,\n", + " 37 IPAdapterMixin,\n", + " 38 StableDiffusionXLLoraLoaderMixin,\n", + " 39 TextualInversionLoaderMixin,\n", + " 40 )\n", + "\n", + "File ~/data/diffusers/src/diffusers/pipelines/controlnet/callbacks.py:3\n", + " 1 from typing import Any, Dict, List\n", + "----> 3 from .configuration_utils import ConfigMixin, register_to_config\n", + " 4 from .utils import CONFIG_NAME\n", + " 7 class PipelineCallback(ConfigMixin):\n", + "\n", + "ImportError: attempted relative import with no known parent package" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/controlnet/__init__.py b/diffusers3/pipelines/controlnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8fac3c5db4a8b642e703422fa017610cc71c4de0 --- /dev/null +++ b/diffusers3/pipelines/controlnet/__init__.py @@ -0,0 +1,91 @@ +from typing import TYPE_CHECKING + +# from ...utils import ( +# DIFFUSERS_SLOW_IMPORT, +# OptionalDependencyNotAvailable, +# _LazyModule, +# get_objects_from_module, +# is_flax_available, +# is_torch_available, +# is_transformers_available, +# ) + +from diffusers.utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import 
dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["multicontrolnet"] = ["MultiControlNetModel"] + _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] + _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] + _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] + _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] + _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] + _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] + _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] +try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .multicontrolnet import MultiControlNetModel + from .pipeline_controlnet import StableDiffusionControlNetPipeline + from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline + from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline + from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline + from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline + from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline + from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 + else: + from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/controlnet/__pycache__/__init__.cpython-310.pyc b/diffusers3/pipelines/controlnet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2be5e40f7c5f26c1f4ee89acd26f7a79285ca85a Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/__init__.cpython-310.pyc differ diff --git a/diffusers3/pipelines/controlnet/__pycache__/__init__.cpython-38.pyc b/diffusers3/pipelines/controlnet/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a862e872d37ca483a02d712e56d31287a62adc40 Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/__init__.cpython-38.pyc differ diff --git 
a/diffusers3/pipelines/controlnet/__pycache__/multicontrolnet.cpython-310.pyc b/diffusers3/pipelines/controlnet/__pycache__/multicontrolnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47c9f8cb56d1d460c5ac31d7e075c1fff6aa00b9 Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/multicontrolnet.cpython-310.pyc differ diff --git a/diffusers3/pipelines/controlnet/__pycache__/multicontrolnet.cpython-38.pyc b/diffusers3/pipelines/controlnet/__pycache__/multicontrolnet.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ced6d91eff0bf78a0bba10d655a076a56501298 Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/multicontrolnet.cpython-38.pyc differ diff --git a/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-310.pyc b/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b98fd8d5d42b174a85ce96420795ed4d1a7738b Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-310.pyc differ diff --git a/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-38.pyc b/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f17988f47dbd549497f7e5e954fa4aee73dac3bd Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl.cpython-38.pyc differ diff --git a/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl_img2img_img.cpython-38.pyc b/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl_img2img_img.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4b6d71de3fab97eb06a3dadd48c79d1ce75f39c Binary files /dev/null and b/diffusers3/pipelines/controlnet/__pycache__/pipeline_controlnet_sd_xl_img2img_img.cpython-38.pyc differ diff --git a/diffusers3/pipelines/controlnet/callbacks.py b/diffusers3/pipelines/controlnet/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..38542407e31fa1255eb26b563632c7a9f3d2fded --- /dev/null +++ b/diffusers3/pipelines/controlnet/callbacks.py @@ -0,0 +1,156 @@ +from typing import Any, Dict, List + +from .configuration_utils import ConfigMixin, register_to_config +from .utils import CONFIG_NAME + + +class PipelineCallback(ConfigMixin): + """ + Base class for all the official callbacks used in a pipeline. This class provides a structure for implementing + custom callbacks and ensures that all callbacks have a consistent interface. + + Please implement the following: + `tensor_inputs`: This should return a list of tensor inputs specific to your callback. You will only be able to + include + variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. + `callback_fn`: This method defines the core functionality of your callback. 
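
    Example (an illustrative sketch of a custom subclass; `LatentLoggingCallback` is hypothetical and not part of this module):

        class LatentLoggingCallback(PipelineCallback):
            tensor_inputs = ["latents"]

            def callback_fn(self, pipeline, step_index, timestep, callback_kwargs):
                # Read (or modify) any tensor listed in `tensor_inputs`, then return the kwargs.
                latents = callback_kwargs["latents"]
                print(f"step {step_index}: latents shape {tuple(latents.shape)}")
                return callback_kwargs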
+ """ + + config_name = CONFIG_NAME + + @register_to_config + def __init__(self, cutoff_step_ratio=1.0, cutoff_step_index=None): + super().__init__() + + if (cutoff_step_ratio is None and cutoff_step_index is None) or ( + cutoff_step_ratio is not None and cutoff_step_index is not None + ): + raise ValueError("Either cutoff_step_ratio or cutoff_step_index should be provided, not both or none.") + + if cutoff_step_ratio is not None and ( + not isinstance(cutoff_step_ratio, float) or not (0.0 <= cutoff_step_ratio <= 1.0) + ): + raise ValueError("cutoff_step_ratio must be a float between 0.0 and 1.0.") + + @property + def tensor_inputs(self) -> List[str]: + raise NotImplementedError(f"You need to set the attribute `tensor_inputs` for {self.__class__}") + + def callback_fn(self, pipeline, step_index, timesteps, callback_kwargs) -> Dict[str, Any]: + raise NotImplementedError(f"You need to implement the method `callback_fn` for {self.__class__}") + + def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + return self.callback_fn(pipeline, step_index, timestep, callback_kwargs) + + +class MultiPipelineCallbacks: + """ + This class is designed to handle multiple pipeline callbacks. It accepts a list of PipelineCallback objects and + provides a unified interface for calling all of them. + """ + + def __init__(self, callbacks: List[PipelineCallback]): + self.callbacks = callbacks + + @property + def tensor_inputs(self) -> List[str]: + return [input for callback in self.callbacks for input in callback.tensor_inputs] + + def __call__(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + """ + Calls all the callbacks in order with the given arguments and returns the final callback_kwargs. + """ + for callback in self.callbacks: + callback_kwargs = callback(pipeline, step_index, timestep, callback_kwargs) + + return callback_kwargs + + +class SDCFGCutoffCallback(PipelineCallback): + """ + Callback function for Stable Diffusion Pipelines. After certain number of steps (set by `cutoff_step_ratio` or + `cutoff_step_index`), this callback will disable the CFG. + + Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. + """ + + tensor_inputs = ["prompt_embeds"] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + prompt_embeds = callback_kwargs[self.tensor_inputs[0]] + prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. + + pipeline._guidance_scale = 0.0 + + callback_kwargs[self.tensor_inputs[0]] = prompt_embeds + return callback_kwargs + + +class SDXLCFGCutoffCallback(PipelineCallback): + """ + Callback function for Stable Diffusion XL Pipelines. After certain number of steps (set by `cutoff_step_ratio` or + `cutoff_step_index`), this callback will disable the CFG. + + Note: This callback mutates the pipeline by changing the `_guidance_scale` attribute to 0.0 after the cutoff step. 
+ """ + + tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + prompt_embeds = callback_kwargs[self.tensor_inputs[0]] + prompt_embeds = prompt_embeds[-1:] # "-1" denotes the embeddings for conditional text tokens. + + add_text_embeds = callback_kwargs[self.tensor_inputs[1]] + add_text_embeds = add_text_embeds[-1:] # "-1" denotes the embeddings for conditional pooled text tokens + + add_time_ids = callback_kwargs[self.tensor_inputs[2]] + add_time_ids = add_time_ids[-1:] # "-1" denotes the embeddings for conditional added time vector + + pipeline._guidance_scale = 0.0 + + callback_kwargs[self.tensor_inputs[0]] = prompt_embeds + callback_kwargs[self.tensor_inputs[1]] = add_text_embeds + callback_kwargs[self.tensor_inputs[2]] = add_time_ids + return callback_kwargs + + +class IPAdapterScaleCutoffCallback(PipelineCallback): + """ + Callback function for any pipeline that inherits `IPAdapterMixin`. After certain number of steps (set by + `cutoff_step_ratio` or `cutoff_step_index`), this callback will set the IP Adapter scale to `0.0`. + + Note: This callback mutates the IP Adapter attention processors by setting the scale to 0.0 after the cutoff step. + """ + + tensor_inputs = [] + + def callback_fn(self, pipeline, step_index, timestep, callback_kwargs) -> Dict[str, Any]: + cutoff_step_ratio = self.config.cutoff_step_ratio + cutoff_step_index = self.config.cutoff_step_index + + # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio + cutoff_step = ( + cutoff_step_index if cutoff_step_index is not None else int(pipeline.num_timesteps * cutoff_step_ratio) + ) + + if step_index == cutoff_step: + pipeline.set_ip_adapter_scale(0.0) + return callback_kwargs diff --git a/diffusers3/pipelines/controlnet/multicontrolnet.py b/diffusers3/pipelines/controlnet/multicontrolnet.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c5ec6eed0379e7d92fd92abcabfce3c466fc45 --- /dev/null +++ b/diffusers3/pipelines/controlnet/multicontrolnet.py @@ -0,0 +1,183 @@ +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn + +from ...models.controlnet import ControlNetModel, ControlNetOutput +from ...models.modeling_utils import ModelMixin +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class MultiControlNetModel(ModelMixin): + r""" + Multiple `ControlNetModel` wrapper class for Multi-ControlNet + + This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be + compatible with `ControlNetModel`. + + Args: + controlnets (`List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. You must set multiple + `ControlNetModel` as a list. 
+ """ + + def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]): + super().__init__() + self.nets = nn.ModuleList(controlnets) + + def forward( + self, + sample: torch.Tensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + controlnet_cond: List[torch.tensor], + conditioning_scale: List[float], + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guess_mode: bool = False, + return_dict: bool = True, + ) -> Union[ControlNetOutput, Tuple]: + for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)): + down_samples, mid_sample = controlnet( + sample=sample, + timestep=timestep, + encoder_hidden_states=encoder_hidden_states, + controlnet_cond=image, + conditioning_scale=scale, + class_labels=class_labels, + timestep_cond=timestep_cond, + attention_mask=attention_mask, + added_cond_kwargs=added_cond_kwargs, + cross_attention_kwargs=cross_attention_kwargs, + guess_mode=guess_mode, + return_dict=return_dict, + ) + + # merge samples + if i == 0: + down_block_res_samples, mid_block_res_sample = down_samples, mid_sample + else: + down_block_res_samples = [ + samples_prev + samples_curr + for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) + ] + mid_block_res_sample += mid_sample + + return down_block_res_samples, mid_block_res_sample + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + is_main_process: bool = True, + save_function: Callable = None, + safe_serialization: bool = True, + variant: Optional[str] = None, + ): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + `[`~pipelines.controlnet.MultiControlNetModel.from_pretrained`]` class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + is_main_process (`bool`, *optional*, defaults to `True`): + Whether the process calling this is the main process or not. Useful when in distributed training like + TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on + the main process to avoid race conditions. + save_function (`Callable`): + The function to use to save the state dictionary. Useful on distributed training like TPUs when one + need to replace `torch.save` by another method. Can be configured with the environment variable + `DIFFUSERS_SAVE_MODE`. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + variant (`str`, *optional*): + If specified, weights are saved in the format pytorch_model..bin. + """ + for idx, controlnet in enumerate(self.nets): + suffix = "" if idx == 0 else f"_{idx}" + controlnet.save_pretrained( + save_directory + suffix, + is_main_process=is_main_process, + save_function=save_function, + safe_serialization=safe_serialization, + variant=variant, + ) + + @classmethod + def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a pretrained MultiControlNet model from multiple pre-trained controlnet models. + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). 
To train + the model, you should first set it back in training mode with `model.train()`. + + The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come + pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning + task. + + The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those + weights are discarded. + + Parameters: + pretrained_model_path (`os.PathLike`): + A path to a *directory* containing model weights saved using + [`~diffusers.pipelines.controlnet.MultiControlNetModel.save_pretrained`], e.g., + `./my_model_directory/controlnet`. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype + will be automatically derived from the model's weights. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the + same device. + + To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier to maximum memory. Will default to the maximum memory available for each + GPU and the available CPU RAM if unset. + low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading by not initializing the weights and only loading the pre-trained weights. This + also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the + model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, + setting this argument to `True` will raise an error. + variant (`str`, *optional*): + If specified load weights from `variant` filename, *e.g.* pytorch_model..bin. `variant` is + ignored when using `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the + `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from + `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. + """ + idx = 0 + controlnets = [] + + # load controlnet and append to list until no controlnet directory exists anymore + # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` + # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
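        # An illustrative layout (paths are hypothetical) matching the naming scheme above:
        #   ./mydirectory/controlnet      <- first ControlNet
        #   ./mydirectory/controlnet_1    <- second ControlNet
        #   ./mydirectory/controlnet_2    <- third ControlNet
        # which would then be loaded with:
        #   MultiControlNetModel.from_pretrained("./mydirectory/controlnet")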
+ model_path_to_load = pretrained_model_path + while os.path.isdir(model_path_to_load): + controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs) + controlnets.append(controlnet) + + idx += 1 + model_path_to_load = pretrained_model_path + f"_{idx}" + + logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.") + + if len(controlnets) == 0: + raise ValueError( + f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." + ) + + return cls(controlnets) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet.py b/diffusers3/pipelines/controlnet/pipeline_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..9b2fefe7b0a43fc55cde25504066861fb4e61be2 --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet.py @@ -0,0 +1,1348 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" + ... 
) + >>> image = np.array(image) + + >>> # get canny image + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # load control net and stable diffusion v1-5 + >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) + >>> pipe = StableDiffusionControlNetPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> # speed up diffusion process with faster scheduler and memory optimization + >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + >>> # remove following line if xformers is not installed + >>> pipe.enable_xformers_memory_efficient_attention() + + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> generator = torch.manual_seed(0) + >>> image = pipe( + ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. 
Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
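
        Example (an illustrative sketch; `pipe` is assumed to be an already-loaded pipeline instance and is not defined here):

            prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
                prompt="a futuristic cityscape",
                device=pipe._execution_device,
                num_images_per_prompt=1,
                do_classifier_free_guidance=True,
                negative_prompt="low quality",
            )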
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + transposed_image = [list(t) for t in zip(*image)] + if len(transposed_image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: if you pass`image` as a list of list, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets." + ) + for image_ in transposed_image: + self.check_image(image_, prompt, prompt_embeds) + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + else: + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError( + "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. " + "The conditioning scale must be fixed across the batch." + ) + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. 
Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
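+    # When guidance is active, the denoising loop below combines the two noise
+    # predictions as: noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond).
+    # Guidance is skipped when the UNet was configured with `time_cond_proj_dim`,
+    # in which case the scale is injected via `get_guidance_scale_embedding` instead.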
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single + ControlNet, each will be paired with each prompt in the `prompt` list. This also applies to multiple + ControlNets, where a list of image lists can be passed to batch for each prompt and each ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. 
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + image, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + # Nested lists as ControlNet condition + if isinstance(image[0], list): + # Transpose the nested image list + image = [list(t) for t in zip(*image)] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
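+                    # The residuals above were computed only for the conditional half of the
+                    # batch (guess mode), while `latent_model_input` holds [uncond, cond].
+                    # Prepending zero tensors keeps the unconditional half unchanged when the
+                    # residuals are added inside the UNet.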
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..86e0ddef663e959969607ff235ef7efab877780b --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -0,0 +1,413 @@ +# Copyright 2024 Salesforce.com, inc. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPTokenizer + +from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel +from ...schedulers import PNDMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..blip_diffusion.blip_image_processing import BlipImageProcessor +from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel +from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers.pipelines import BlipDiffusionControlNetPipeline + >>> from diffusers.utils import load_image + >>> from controlnet_aux import CannyDetector + >>> import torch + + >>> blip_diffusion_pipe = BlipDiffusionControlNetPipeline.from_pretrained( + ... "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> style_subject = "flower" + >>> tgt_subject = "teapot" + >>> text_prompt = "on a marble table" + + >>> cldm_cond_image = load_image( + ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg" + ... ).resize((512, 512)) + >>> canny = CannyDetector() + >>> cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type="pil") + >>> style_image = load_image( + ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg" + ... ) + >>> guidance_scale = 7.5 + >>> num_inference_steps = 50 + >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" + + + >>> output = blip_diffusion_pipe( + ... text_prompt, + ... style_image, + ... cldm_cond_image, + ... style_subject, + ... tgt_subject, + ... guidance_scale=guidance_scale, + ... num_inference_steps=num_inference_steps, + ... neg_prompt=negative_prompt, + ... height=512, + ... width=512, + ... ).images + >>> output[0].save("image.png") + ``` +""" + + +class BlipDiffusionControlNetPipeline(DiffusionPipeline): + """ + Pipeline for Canny Edge based Controlled subject-driven generation using Blip Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer ([`CLIPTokenizer`]): + Tokenizer for the text encoder + text_encoder ([`ContextCLIPTextModel`]): + Text encoder to encode the text prompt + vae ([`AutoencoderKL`]): + VAE model to map the latents to the image + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + scheduler ([`PNDMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. 
+ qformer ([`Blip2QFormerModel`]): + QFormer model to get multi-modal embeddings from the text and image. + controlnet ([`ControlNetModel`]): + ControlNet model to get the conditioning image embedding. + image_processor ([`BlipImageProcessor`]): + Image Processor to preprocess and postprocess the image. + ctx_begin_pos (int, `optional`, defaults to 2): + Position of the context token in the text encoder. + """ + + model_cpu_offload_seq = "qformer->text_encoder->unet->vae" + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: ContextCLIPTextModel, + vae: AutoencoderKL, + unet: UNet2DConditionModel, + scheduler: PNDMScheduler, + qformer: Blip2QFormerModel, + controlnet: ControlNetModel, + image_processor: BlipImageProcessor, + ctx_begin_pos: int = 2, + mean: List[float] = None, + std: List[float] = None, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + vae=vae, + unet=unet, + scheduler=scheduler, + qformer=qformer, + controlnet=controlnet, + image_processor=image_processor, + ) + self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) + + def get_query_embeddings(self, input_image, src_subject): + return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) + + # from the original Blip Diffusion code, speciefies the target subject and augments the prompt by repeating it + def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): + rv = [] + for prompt, tgt_subject in zip(prompts, tgt_subjects): + prompt = f"a {tgt_subject} {prompt.strip()}" + # a trick to amplify the prompt + rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) + + return rv + + # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def encode_prompt(self, query_embeds, prompt, device=None): + device = device or self._execution_device + + # embeddings for prompt, with query_embeds as context + max_len = self.text_encoder.text_model.config.max_position_embeddings + max_len -= self.qformer.config.num_query_tokens + + tokenized_prompt = self.tokenizer( + prompt, + padding="max_length", + truncation=True, + max_length=max_len, + return_tensors="pt", + ).to(device) + + batch_size = query_embeds.shape[0] + ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size + + text_embeddings = self.text_encoder( + input_ids=tokenized_prompt.input_ids, + ctx_embeddings=query_embeds, + ctx_begin_pos=ctx_begin_pos, + )[0] + + return text_embeddings + + # Adapted from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + ): + image = self.image_processor.preprocess( + image, + size={"width": width, "height": height}, + do_rescale=True, + do_center_crop=False, + do_normalize=False, + return_tensors="pt", + )["pixel_values"].to(device) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance: + image = torch.cat([image] * 2) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: List[str], + reference_image: PIL.Image.Image, + condtioning_image: PIL.Image.Image, + source_subject_category: List[str], + target_subject_category: List[str], + latents: Optional[torch.Tensor] = None, + guidance_scale: float = 7.5, + height: int = 512, + width: int = 512, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + neg_prompt: Optional[str] = "", + prompt_strength: float = 1.0, + prompt_reps: int = 20, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`List[str]`): + The prompt or prompts to guide the image generation. + reference_image (`PIL.Image.Image`): + The reference image to condition the generation on. + condtioning_image (`PIL.Image.Image`): + The conditioning canny edge image to condition the generation on. + source_subject_category (`List[str]`): + The source subject category. + target_subject_category (`List[str]`): + The target subject category. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by random sampling. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + height (`int`, *optional*, defaults to 512): + The height of the generated image. + width (`int`, *optional*, defaults to 512): + The width of the generated image. + seed (`int`, *optional*, defaults to 42): + The seed to use for random generation. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + neg_prompt (`str`, *optional*, defaults to ""): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_strength (`float`, *optional*, defaults to 1.0): + The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps + to amplify the prompt. + prompt_reps (`int`, *optional*, defaults to 20): + The number of times the prompt is repeated along with prompt_strength to amplify the prompt. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + reference_image = self.image_processor.preprocess( + reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" + )["pixel_values"] + reference_image = reference_image.to(device) + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(source_subject_category, str): + source_subject_category = [source_subject_category] + if isinstance(target_subject_category, str): + target_subject_category = [target_subject_category] + + batch_size = len(prompt) + + prompt = self._build_prompt( + prompts=prompt, + tgt_subjects=target_subject_category, + prompt_strength=prompt_strength, + prompt_reps=prompt_reps, + ) + query_embeds = self.get_query_embeddings(reference_image, source_subject_category) + text_embeddings = self.encode_prompt(query_embeds, prompt, device) + # 3. unconditional embedding + do_classifier_free_guidance = guidance_scale > 1.0 + if do_classifier_free_guidance: + max_length = self.text_encoder.text_model.config.max_position_embeddings + + uncond_input = self.tokenizer( + [neg_prompt] * batch_size, + padding="max_length", + max_length=max_length, + return_tensors="pt", + ) + uncond_embeddings = self.text_encoder( + input_ids=uncond_input.input_ids.to(device), + ctx_embeddings=None, + )[0] + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) + latents = self.prepare_latents( + batch_size=batch_size, + num_channels=self.unet.config.in_channels, + height=height // scale_down_factor, + width=width // scale_down_factor, + generator=generator, + latents=latents, + dtype=self.unet.dtype, + device=device, + ) + # set timesteps + extra_set_kwargs = {} + self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) + + cond_image = self.prepare_control_image( + image=condtioning_image, + width=width, + height=height, + batch_size=batch_size, + num_images_per_prompt=1, + device=device, + dtype=self.controlnet.dtype, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + do_classifier_free_guidance = guidance_scale > 1.0 + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + down_block_res_samples, mid_block_res_sample = self.controlnet( + latent_model_input, + t, + encoder_hidden_states=text_embeddings, + controlnet_cond=cond_image, + return_dict=False, + ) + + noise_pred = self.unet( + latent_model_input, + timestep=t, + encoder_hidden_states=text_embeddings, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + )["sample"] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + t, + latents, + )["prev_sample"] + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_img2img.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4f46d61990e03064f782e1818b2e01ab3c8368 --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_img2img.py @@ -0,0 +1,1319 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" + ... ) + >>> np_image = np.array(image) + + >>> # get canny image + >>> np_image = cv2.Canny(np_image, 100, 200) + >>> np_image = np_image[:, :, None] + >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2) + >>> canny_image = Image.fromarray(np_image) + + >>> # load control net and stable diffusion v1-5 + >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) + >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> # speed up diffusion process with faster scheduler and memory optimization + >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> generator = torch.manual_seed(0) + >>> image = pipe( + ... "futuristic-looking woman", + ... num_inference_steps=20, + ... generator=generator, + ... image=image, + ... control_image=canny_image, + ... 
).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def prepare_image(image): + if isinstance(image, torch.Tensor): + # Batch single image + if image.ndim == 3: + image = image.unsqueeze(0) + + image = image.to(dtype=torch.float32) + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + return image + + +class StableDiffusionControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
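+        # Emit the deprecation warning, then delegate to `encode_prompt()` and rebuild
+        # the legacy return format: a single tensor with the negative embeddings stacked
+        # in front of the positive ones.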
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. 
Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. 
Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image to be used as the starting point for the image generation process. Can also accept + image latents as `image`, and if passing latents directly they are not encoded again. 
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + control_image, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + # 5. Prepare controlnet_conditioning_image + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and 
self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_inpaint.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..9f7d464f9a911b750992b1111d86d053697eb58a --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_inpaint.py @@ -0,0 +1,1504 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
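+ +# Note: this module is a vendored copy of the upstream diffusers `StableDiffusionControlNetInpaintPipeline` +# (pipeline_controlnet_inpaint.py); the `# Copied from diffusers...` markers below point at the matching +# upstream implementations, and a runnable usage example is kept in `EXAMPLE_DOC_STRING` further down.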
+ +# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install transformers accelerate + >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + >>> import cv2 + >>> from PIL import Image + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" + ... ) + >>> init_image = init_image.resize((512, 512)) + + >>> generator = torch.Generator(device="cpu").manual_seed(1) + + >>> mask_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" + ... ) + >>> mask_image = mask_image.resize((512, 512)) + + + >>> def make_canny_condition(image): + ... image = np.array(image) + ... image = cv2.Canny(image, 100, 200) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... image = Image.fromarray(image) + ... return image + + + >>> control_image = make_canny_condition(init_image) + + >>> controlnet = ControlNetModel.from_pretrained( + ... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> image = pipe( + ... "a handsome man with ray-ban sunglasses", + ... num_inference_steps=20, + ... generator=generator, + ... eta=1.0, + ... image=init_image, + ... mask_image=mask_image, + ... control_image=control_image, + ... 
).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionControlNetInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for image inpainting using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + + + This pipeline can be used with checkpoints that have been specifically fine-tuned for inpainting + ([runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)) as well as + default text-to-image Stable Diffusion checkpoints + ([runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)). Default text-to-image + Stable Diffusion checkpoints might be preferable for ControlNets that have been fine-tuned on those, such as + [lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint). + + + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. 
+ feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt, + image, + mask_image, + height, + width, + callback_steps, + output_type, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if height is not None and height % 8 != 0 or width is not None and width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + crops_coords, + resize_mode, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 1.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.5, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`,
+ `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, NumPy array or tensor representing an image batch to be used as the starting point. For both
+ NumPy array and PyTorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
+ list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a NumPy array or
+ a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image
+ latents as `image`, but if passing latents directly they are not encoded again.
+ mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`,
+ `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, NumPy array or tensor representing an image batch to mask `image`. White pixels in the mask
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
+ single channel (luminance) before use. If it's a NumPy array or PyTorch tensor, it should contain one
+ color channel (L) instead of 3, so the expected shape for a PyTorch tensor would be `(B, 1, H, W)`, `(B,
+ H, W)`, `(1, H, W)`, or `(H, W)`. For a NumPy array, it would be `(B, H, W, 1)`, `(B, H, W)`, `(H,
+ W, 1)`, or `(H, W)`.
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`,
+ `List[List[torch.Tensor]]`, or `List[List[PIL.Image.Image]]`):
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
+ specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
+ as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or
+ width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
+ images must be passed as a list such that each element of the list can be correctly batched for input
+ to a single ControlNet.
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+ The height in pixels of the generated image.
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+ The width in pixels of the generated image.
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
+ The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to
+ image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
+ with the same aspect ratio as the image that contains all masked areas, and then expand that area based
+ on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
+ resizing to the original image size for inpainting. This is useful when the masked area is small while
+ the image is large and contains information irrelevant to inpainting, such as background.
+ strength (`float`, *optional*, defaults to 1.0):
+ Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+ essentially ignores `image`.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps.
More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.5): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. 
+ guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + control_image, + mask_image, + height, + width, + callback_steps, + output_type, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if padding_mask_crop is not None: + height, width = self.image_processor.get_default_height_width(image, height, width) + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + # 4.1 Preprocess mask and image - resizes image and mask w.r.t height and width + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + masked_image = init_image * (mask < 0.5) + _, _, height, width = init_image.shape + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
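+ # Each residual at this point has batch size B (the conditional half only); prepending a
+ # zero tensor doubles it to 2B so it lines up with the CFG latent batch, and the zeroed half
+ # leaves the unconditional U-Net pass without any ControlNet contribution.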
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py new file mode 
100644 index 0000000000000000000000000000000000000000..17fd2cb6c81d1182d57a776804c7a6c0bbc6f5a3 --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py @@ -0,0 +1,1845 @@ +# Copyright 2024 Harutatsu Akiyama, Jinbin Bai, and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .multicontrolnet import MultiControlNetModel + + +if is_invisible_watermark_available(): + from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install transformers accelerate + >>> from diffusers import StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, DDIMScheduler + >>> from diffusers.utils import load_image + >>> from PIL import Image + >>> import numpy as np + >>> import torch + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" + ... 
) + >>> init_image = init_image.resize((1024, 1024)) + + >>> generator = torch.Generator(device="cpu").manual_seed(1) + + >>> mask_image = load_image( + ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" + ... ) + >>> mask_image = mask_image.resize((1024, 1024)) + + + >>> def make_canny_condition(image): + ... image = np.array(image) + ... image = cv2.Canny(image, 100, 200) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... image = Image.fromarray(image) + ... return image + + + >>> control_image = make_canny_condition(init_image) + + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> image = pipe( + ... "a handsome man with ray-ban sunglasses", + ... num_inference_steps=20, + ... generator=generator, + ... eta=1.0, + ... image=init_image, + ... mask_image=mask_image, + ... control_image=control_image, + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class StableDiffusionXLControlNetInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, + TextualInversionLoaderMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + "mask", + "masked_image_latents", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: 
+ self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as
prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def check_inputs( + self, + prompt, + prompt_2, + image, + mask_image, + strength, + num_inference_steps, + callback_steps, + output_type, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + crops_coords, + resize_mode, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + add_noise=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None and add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + elif add_noise: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + else: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = image_latents.to(device) + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + dtype = image.dtype + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + image_latents = image_latents.to(dtype) + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + + masked_image_latents = None + if masked_image is not None: + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + else: + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + t_start = len(self.scheduler.timesteps) - num_inference_steps + timesteps = self.scheduler.timesteps[t_start:] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start) + return timesteps, num_inference_steps + + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,)) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + control_image: Union[ + PipelineImageInput, + List[PipelineImageInput], + ] = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.9999, + num_inference_steps: int = 50, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. 
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + padding_mask_crop (`int`, *optional*, defaults to `None`): + The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to + image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region + with the same aspect ratio as the image that contains all of the masked area, and then expand that area based + on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before + resizing to the original image size for inpainting. This is useful when the masked area is small while + the image is large and contains information irrelevant for inpainting, such as background. + strength (`float`, *optional*, defaults to 0.9999): + Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be + between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the + `strength`. The number of denoising steps depends on the amount of noise initially added. When + `strength` is 1, added noise will be maximum and the denoising process will run for the full number of + iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked + portion of the reference `image`. Note that in the case of `denoising_start` being declared as an + integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`.
Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple.
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during inference, with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images.
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # # 0.0 Default height and width to unet + # height = height or self.unet.config.sample_size * self.vae_scale_factor + # width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 0.1 align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs + self.check_inputs( + prompt, + prompt_2, + control_image, + mask_image, + strength, + num_inference_steps, + callback_steps, + output_type, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.1 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. set timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=denoising_start if denoising_value_valid(denoising_start) else None, + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + self._num_timesteps = len(timesteps) + + # 5. 
Preprocess mask and image - resizes image and mask w.r.t height and width + # 5.1 Prepare init image + if padding_mask_crop is not None: + height, width = self.image_processor.get_default_height_width(image, height, width) + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + # 5.2 Prepare control images + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + crops_coords=crops_coords, + resize_mode=resize_mode, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + else: + raise ValueError(f"{controlnet.__class__} is not supported.") + + # 5.3 Prepare mask + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + masked_image = init_image * (mask < 0.5) + _, _, height, width = init_image.shape + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + add_noise = True if denoising_start is None else False + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + add_noise=add_noise, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + # 8.1 Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps if isinstance(controlnet, MultiControlNetModel) else keeps[0]) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 10. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + if ( + denoising_end is not None + and denoising_start is not None + and denoising_value_valid(denoising_end) + and denoising_value_valid(denoising_start) + and denoising_start >= denoising_end + ): + raise ValueError( + f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {denoising_end} when using type float." 
+ ) + elif denoising_end is not None and denoising_value_valid(denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + # # Resize control_image to match the size of the input to the controlnet + # if control_image.shape[-2:] != control_model_input.shape[-2:]: + # control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False) + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
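+                    # The classifier-free-guidance batch is ordered [unconditional, conditional] (the negative
+                    # prompt embeds are concatenated first above), so the zero tensors below pad the
+                    # unconditional half while the ControlNet residuals go to the conditional half.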
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, 
original_image, i, crops_coords) for i in image]
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return StableDiffusionXLPipelineOutput(images=image)
diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..88be64acdf65764415354c9216d30a66136db547
--- /dev/null
+++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl.py
@@ -0,0 +1,1889 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.nn.functional as F
+from transformers import (
+    CLIPImageProcessor,
+    CLIPTextModel,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionModelWithProjection,
+)
+from torchvision import transforms
+from diffusers.utils.import_utils import is_invisible_watermark_available
+
+from ...callbacks import MultiPipelineCallbacks, PipelineCallback
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import (
+    FromSingleFileMixin,
+    IPAdapterMixin,
+    StableDiffusionXLLoraLoaderMixin,
+    TextualInversionLoaderMixin,
+)
+from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
+from ...models.attention_processor import (
+    AttnProcessor2_0,
+    XFormersAttnProcessor,
+)
+from ...models.lora import adjust_lora_scale_text_encoder
+from ...schedulers import KarrasDiffusionSchedulers
+from ...utils import (
+    USE_PEFT_BACKEND,
+    deprecate,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
+from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
+from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
+
+
+if is_invisible_watermark_available():
+    from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
+
+from .multicontrolnet import MultiControlNetModel
+
+
+# modified
+from diffusers import PNDMScheduler
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> # !pip install opencv-python transformers accelerate
+        >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
+        >>> from diffusers.utils import load_image
+        >>> import numpy as np
+        >>> import torch
+
+        >>> import cv2
+        >>> from PIL import Image
+
+        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
+        >>> negative_prompt = "low quality, bad quality, sketches"
+
+        >>> # download an image
+        >>> image = load_image(
"https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
+ """ + if isinstance(mask, torch.Tensor): + print("^^^^") + print("mask_1") + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + print("^^^^") + print("mask_2") + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. 
If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + + + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = 
tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + # and isinstance(ControlNetModel._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + + # depth map + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + + + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + print("***") + print("vae_1") + init_latents = torch.cat(init_latents, dim=0) + else: + print("***") + print("vae_2") + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + + latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + print("latents is None") + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
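`_get_add_time_ids` above packs SDXL's micro-conditioning (original size, crop offset, target size) into a single six-value `time_ids` row and checks that its embedded width matches what the UNet's `add_embedding` expects. A small worked check with typical SDXL-base config values (256 and 1280 are stated here as assumptions, not read from a checkpoint):

```python
import torch

original_size = (1024, 1024)
crops_coords_top_left = (0, 0)
target_size = (1024, 1024)

add_time_ids = torch.tensor(
    [list(original_size + crops_coords_top_left + target_size)], dtype=torch.float16
)
print(add_time_ids)  # tensor([[1024., 1024., 0., 0., 1024., 1024.]], dtype=torch.float16)

# Consistency check performed by the pipeline, with assumed SDXL-base values:
addition_time_embed_dim = 256       # unet.config.addition_time_embed_dim (assumed)
text_encoder_projection_dim = 1280  # text_encoder_2.config.projection_dim (assumed)
passed_add_embed_dim = addition_time_embed_dim * add_time_ids.shape[-1] + text_encoder_projection_dim
print(passed_add_embed_dim)         # 2816, must equal unet.add_embedding.linear_1.in_features
```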
+ """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + shape_prompt: Union[str, List[str]] = None, + num: List[int] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + shape_prompt_embeds: Optional[torch.Tensor] = None, + shape_negative_prompt_embeds: Optional[torch.Tensor] = None, + shape_pooled_prompt_embeds: Optional[torch.Tensor] = None, + shape_negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + 
callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. 
The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. 
Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. 
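Putting the signature and docstring above together, a call to this pipeline supplies a depth map as the ControlNet condition plus three custom inputs: `shape_prompt` (used for the first `num` denoising steps), `mask_image` (the region to regenerate), and `sketch_image` (the init image that seeds the latents). A hedged usage sketch; the file paths, prompts, and step counts are placeholders, and `pipe` is assumed to be an already loaded instance of this pipeline on a CUDA device:

```python
import torch
from PIL import Image

depth_map = Image.open("depth.png").convert("RGB")   # ControlNet condition (placeholder path)
mask = Image.open("mask.png").convert("L")           # region to regenerate (placeholder path)
sketch = Image.open("sketch.png").convert("RGB")     # init image for the latents (placeholder path)

generator = torch.Generator(device="cuda").manual_seed(0)

result = pipe(
    prompt="a knitted cardigan, studio lighting",
    shape_prompt="a cardigan",   # coarse prompt used for the early steps
    num=10,                      # annotated as List[int], but the loop compares `i < num`,
                                 # so a plain step index is what the comparison expects
    image=depth_map,             # ControlNet conditioning image
    mask_image=mask,
    sketch_image=sketch,
    num_inference_steps=30,
    guidance_scale=5.0,
    controlnet_conditioning_scale=0.8,
    generator=generator,
)
result.images[0].save("out.png")
```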
Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + + + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + + prompt_embeds_origin = prompt_embeds + ( + shape_prompt_embeds, + shape_negative_prompt_embeds, + shape_pooled_prompt_embeds, + shape_negative_pooled_prompt_embeds, + ) = self.encode_prompt( + shape_prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=shape_prompt_embeds, + negative_prompt_embeds=shape_negative_prompt_embeds, + pooled_prompt_embeds=shape_pooled_prompt_embeds, + negative_pooled_prompt_embeds=shape_negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + + + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + # 6. Prepare latent variables + + mask = prepare_mask(mask=mask_image) + + sketch_image = self.image_processor.preprocess(sketch_image) + + init_latents, noise= self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. 
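Step 6.1 above resizes the binarized mask down to the latent grid (1/8 of pixel resolution for SDXL) so it can gate the 4-channel latents directly inside the denoising loop. A standalone sketch of that resizing; the mask path is a placeholder:

```python
import numpy as np
import torch
from PIL import Image

height = width = 1024
vae_scale_factor = 8  # 2 ** (len(vae.config.block_out_channels) - 1) for SDXL

# Load a grayscale mask and binarize it, as prepare_mask() does.
mask_np = np.array(Image.open("mask.png").convert("L"), dtype=np.float32) / 255.0
mask = torch.from_numpy(mask_np)[None, None]   # 1 x 1 x H x W
mask = (mask >= 0.5).float()

# Resize to the latent grid so it broadcasts against the 4-channel latents.
mask_latent = torch.nn.functional.interpolate(
    mask, size=(height // vae_scale_factor, width // vae_scale_factor)
)
print(mask_latent.shape)  # torch.Size([1, 1, 128, 128])
```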
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + shape_add_text_embeds = shape_pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + print("self.do_classifier_free_guidance: ", self.do_classifier_free_guidance) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + shape_prompt_embeds = torch.cat([shape_negative_prompt_embeds, shape_prompt_embeds], dim=0) + shape_add_text_embeds = torch.cat([shape_negative_pooled_prompt_embeds, shape_add_text_embeds], dim=0) + shape_add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + prompt_embeds_origin = prompt_embeds + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + shape_prompt_embeds = shape_prompt_embeds.to(device) + shape_add_text_embeds = shape_add_text_embeds.to(device) + shape_add_time_ids = shape_add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
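The `controlnet_keep` list built in step 7.1 above converts the `control_guidance_start`/`control_guidance_end` fractions into a per-step multiplier: 1.0 while a step falls inside the active window and 0.0 outside it, which is then folded into `controlnet_conditioning_scale`. A small standalone reproduction of that schedule:

```python
def controlnet_keep_schedule(num_steps, start, end):
    """1.0 for steps inside the [start, end] fraction of the schedule, else 0.0."""
    return [
        1.0 - float(i / num_steps < start or (i + 1) / num_steps > end)
        for i in range(num_steps)
    ]

print(controlnet_keep_schedule(10, 0.0, 1.0))  # ControlNet active on every step
print(controlnet_keep_schedule(10, 0.2, 0.8))  # 0.0 for the first two and last two steps, 1.0 in between
```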
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + + if i < num: + prompt_embeds = shape_prompt_embeds + else: + prompt_embeds = prompt_embeds_origin + + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + + latents_with_noise = self.scheduler.step( + noise_pred, + t, + latents_with_noise, + **extra_step_kwargs, + return_dict=False, + )[0] + + + # masking process + tmp = t.unsqueeze(0) + init_latents_proper = self.scheduler.add_noise( + init_latents, noise, tmp + ).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + latents_with_noise = ( + mask * latents_with_noise + (1 - mask) * init_latents_proper + ) +# latents_with_noise = ( +# mask * latents_with_noise +# ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float32 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) + 
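The loop above departs from the stock ControlNet pipeline in two ways: the text embeddings are swapped from `shape_prompt_embeds` to the original prompt embeddings once the step index reaches `num`, and after every `scheduler.step` the latents outside the mask are overwritten with a freshly re-noised copy of the sketch latents, the usual latent-inpainting blend. A self-contained toy version of that blend with dummy tensors; the scheduler and sizes are stand-ins, not the pipeline's own:

```python
import torch
from diffusers import DDPMScheduler

torch.manual_seed(0)
scheduler = DDPMScheduler()                           # stand-in for the pipeline's scheduler
init_latents = torch.randn(1, 4, 128, 128)            # VAE-encoded sketch latents
noise = torch.randn_like(init_latents)
latents_with_noise = torch.randn_like(init_latents)   # stand-in for the post-step latents
mask = torch.zeros(1, 1, 128, 128)
mask[..., 32:96, 32:96] = 1.0                         # regenerate only the central region

t = torch.tensor([500])                               # current timestep
init_latents_proper = scheduler.add_noise(init_latents, noise, t)

mask_bin = (mask > 0.5).to(latents_with_noise.dtype)
blended = mask_bin * latents_with_noise + (1 - mask_bin) * init_latents_proper

# Outside the mask the result is exactly the re-noised sketch latents.
print(torch.allclose(blended[..., :32, :32], init_latents_proper[..., :32, :32]))  # True
```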
latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents_with_noise = latents_with_noise / self.vae.config.scaling_factor + + image = self.vae.decode(latents_with_noise, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float32) + else: + image = latents_with_noise + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl2.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl2.py new file mode 100644 index 0000000000000000000000000000000000000000..1bf001c0f8051f26dfa63aa12e36103f48df96ac --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl2.py @@ -0,0 +1,1711 @@ +# text_encoder_lora_scale# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +# ์ˆ˜์ • +from diffusers import PNDMScheduler + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import 
StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
+ """ + if isinstance(mask, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. 
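`retrieve_timesteps` above is a thin wrapper around `scheduler.set_timesteps` that also accepts explicit `timesteps` or `sigmas` lists and rejects passing both. A short usage sketch of the default path, assuming the function is imported from this module; the scheduler settings are illustrative rather than the pipeline's own:

```python
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
)

# Default path: the scheduler builds a 30-step schedule and we read it back.
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
print(num_inference_steps)  # 30
print(timesteps.shape)      # torch.Size([30]), in descending order
```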
If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + + + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
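In `__init__` above, the latent-to-pixel scale factor is derived from the VAE config: with the four `block_out_channels` entries of the usual SDXL VAE this gives 2**3 = 8, so a 1024x1024 image maps onto a 128x128 latent grid. A quick check of that arithmetic (the config tuple is assumed, not read from a checkpoint):

```python
block_out_channels = (128, 256, 512, 512)   # typical SDXL VAE config (assumed)
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
print(vae_scale_factor)          # 8
print(1024 // vae_scale_factor)  # 128 -> latent grid is 128 x 128
```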
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = 
tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + # and isinstance(ControlNetModel._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents, noise + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
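+
+ Illustrative example (a sketch, not part of the original docstring; assumes an instantiated pipeline ``pipe`` and an even ``embedding_dim``):
+
+ >>> w = torch.tensor([7.5])
+ >>> emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
+ >>> tuple(emb.shape)
+ (1, 256)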
+ """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+ prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. 
If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + + + if mask_image is None: + raise ValueError("`mask_image` input cannot be undefined.") + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + mask = prepare_mask(mask=mask_image) + + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + mask = prepare_mask(mask=mask_image) + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + # 6. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents, noise = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + init_latents = latents + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + # print() + # print(self.scheduler.compatibles) + # print() + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # masking process + + scheduler2 = PNDMScheduler.from_config(self.scheduler.config) + + tmp = t.to(torch.long) + init_latents_proper = scheduler2.add_noise( + init_latents, noise, tmp).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + + + latents = ( + mask * latents + (1 - mask) * init_latents_proper + ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + 
latents_mean
+ else:
+ latents = latents / self.vae.config.scaling_factor
+
+ image = self.vae.decode(latents, return_dict=False)[0]
+
+ # cast back to fp16 if needed
+ if needs_upcasting:
+ self.vae.to(dtype=torch.float16)
+ else:
+ image = latents
+
+ if not output_type == "latent":
+ # apply watermark if available
+ if self.watermark is not None:
+ image = self.watermark.apply_watermark(image)
+
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return StableDiffusionXLPipelineOutput(images=image)
diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl3.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl3.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36a7ff12a2716025c0e21f1782488cb86359837
--- /dev/null
+++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl3.py
@@ -0,0 +1,1831 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.nn.functional as F
+from transformers import (
+ CLIPImageProcessor,
+ CLIPTextModel,
+ CLIPTextModelWithProjection,
+ CLIPTokenizer,
+ CLIPVisionModelWithProjection,
+)
+from torchvision import transforms
+from diffusers.utils.import_utils import is_invisible_watermark_available
+
+from ...callbacks import MultiPipelineCallbacks, PipelineCallback
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import (
+ FromSingleFileMixin,
+ IPAdapterMixin,
+ StableDiffusionXLLoraLoaderMixin,
+ TextualInversionLoaderMixin,
+)
+from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
+from ...models.attention_processor import (
+ AttnProcessor2_0,
+ XFormersAttnProcessor,
+)
+from ...models.lora import adjust_lora_scale_text_encoder
+from ...schedulers import KarrasDiffusionSchedulers
+from ...utils import (
+ USE_PEFT_BACKEND,
+ deprecate,
+ logging,
+ replace_example_docstring,
+ scale_lora_layers,
+ unscale_lora_layers,
+)
+from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
+from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
+from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
+
+
+if is_invisible_watermark_available():
+ from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
+
+from .multicontrolnet import MultiControlNetModel
+
+
+# modified
+from diffusers import PNDMScheduler
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ >>> # !pip install opencv-python transformers accelerate
+ >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
+ >>> from diffusers.utils import
load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
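+
+ Illustrative example (a sketch, not part of the original docstring; the 64 x 64 single-channel ``np.ndarray`` mask and its size are arbitrary):
+
+ >>> import numpy as np
+ >>> m = prepare_mask(np.ones((64, 64), dtype=np.float32))
+ >>> tuple(m.shape)
+ (1, 1, 64, 64)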
+ """ + if isinstance(mask, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. 
If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + + + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = 
tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + # and isinstance(ControlNetModel._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." 
+ ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + + # depth map + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + + + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + + latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
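+    # For reference, a guidance scale w > 1 is applied in the denoising loop below as:
+    #     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # so `guidance_scale <= 1` leaves the unconditional prediction unchanged, which is what the
+    # `do_classifier_free_guidance` property below checks (together with `time_cond_proj_dim`,
+    # since guidance-distilled UNets receive the scale through `timestep_cond` instead).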
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. 
If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. 
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + + + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + + + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + mask = prepare_mask(mask=mask_image) + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + # 6. Prepare latent variables + +# sketch_image = self.image_processor.preprocess(sketch_image) + +# init_latents, noise= self.prepare_latents( +# sketch_image, +# latent_timestep, +# batch_size, +# num_images_per_prompt, +# prompt_embeds.dtype, +# device, +# generator, +# ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise, noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. 
Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop
+        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+
+        # 8.1 Apply denoising_end
+        if (
+            self.denoising_end is not None
+            and isinstance(self.denoising_end, float)
+            and self.denoising_end > 0
+            and self.denoising_end < 1
+        ):
+            discrete_timestep_cutoff = int(
+                round(
+                    self.scheduler.config.num_train_timesteps
+                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
+                )
+            )
+            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
+            timesteps = timesteps[:num_inference_steps]
+
+        is_unet_compiled = is_compiled_module(self.unet)
+        is_controlnet_compiled = is_compiled_module(self.controlnet)
+        is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                # Relevant thread:
+                # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
+                if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
+                    torch._inductor.cudagraph_mark_step_begin()
+
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+
+                # controlnet(s) inference
+                if guess_mode and self.do_classifier_free_guidance:
+                    # Infer ControlNet only for the conditional batch.
+                    control_model_input = latents_with_noise
+                    control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+                    controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+                    controlnet_added_cond_kwargs = {
+                        "text_embeds": add_text_embeds.chunk(2)[1],
+                        "time_ids": add_time_ids.chunk(2)[1],
+                    }
+                else:
+                    control_model_input = latent_model_input
+                    controlnet_prompt_embeds = prompt_embeds
+                    controlnet_added_cond_kwargs = added_cond_kwargs
+
+                if isinstance(controlnet_keep[i], list):
+                    cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+                else:
+                    controlnet_cond_scale = controlnet_conditioning_scale
+                    if isinstance(controlnet_cond_scale, list):
+                        controlnet_cond_scale = controlnet_cond_scale[0]
+                    cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+                down_block_res_samples, mid_block_res_sample = self.controlnet(
+                    control_model_input,
+                    t,
+                    encoder_hidden_states=controlnet_prompt_embeds,
+                    controlnet_cond=image,
+                    conditioning_scale=cond_scale,
+                    guess_mode=guess_mode,
+                    added_cond_kwargs=controlnet_added_cond_kwargs,
+                    return_dict=False,
+                )
+
+                if guess_mode and self.do_classifier_free_guidance:
+                    # Inferred ControlNet only for the conditional batch.
+                    # To apply the output of ControlNet to both the unconditional and conditional batches,
+                    # add 0 to the unconditional batch to keep it unchanged.
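+                    # (each residual r becomes torch.cat([torch.zeros_like(r), r], dim=0), so the
+                    # unconditional half of the CFG batch receives no ControlNet contribution)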
+                if guess_mode and self.do_classifier_free_guidance:
+                    # Inferred ControlNet only for the conditional batch.
+                    # To apply the output of ControlNet to both the unconditional and conditional batches,
+                    # add 0 to the unconditional batch to keep it unchanged.
+                    down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+                    mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
+                if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+                    added_cond_kwargs["image_embeds"] = image_embeds
+
+                # predict the noise residual
+                noise_pred = self.unet(
+                    latent_model_input,
+                    t,
+                    encoder_hidden_states=prompt_embeds,
+                    timestep_cond=timestep_cond,
+                    cross_attention_kwargs=self.cross_attention_kwargs,
+                    down_block_additional_residuals=down_block_res_samples,
+                    mid_block_additional_residual=mid_block_res_sample,
+                    added_cond_kwargs=added_cond_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # perform guidance
+                if self.do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents_with_noise = self.scheduler.step(
+                    noise_pred,
+                    t,
+                    latents_with_noise,
+                    **extra_step_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # masking process
+#                 tmp = t.unsqueeze(0)
+#                 init_latents_proper = self.scheduler.add_noise(
+#                     init_latents, noise, tmp
+#                 ).to(device)
+
+#                 mask = (mask > 0.5).to(prompt_embeds.dtype)
+#                 latents_with_noise = (
+#                     mask * latents_with_noise + (1 - mask) * init_latents_proper
+#                 )
+
+                if callback_on_step_end is not None:
+                    print("check: does this branch need to be changed as well? (2)")
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
+                    negative_pooled_prompt_embeds = callback_outputs.pop(
+                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+                    )
+                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
+                    negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+                    if callback is not None and i % callback_steps == 0:
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents_with_noise)
+
+        if not output_type == "latent":
+            # make sure the VAE is in float32 mode, as it overflows in float16
+            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+
+            if needs_upcasting:
+                self.upcast_vae()
+                latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+
+            # unscale/denormalize the latents
+            # denormalize with the mean and std if available and not None
+            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
+            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
+            if has_latents_mean and has_latents_std:
+                print("note: this part was changed, but...")
+                latents_mean = (
+                    torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype)
+                )
+                latents_std = (
+                    torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype)
+                )
+                latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean
+            else:
+                latents_with_noise = latents_with_noise / self.vae.config.scaling_factor
+
+            image = self.vae.decode(latents_with_noise, return_dict=False)[0]
+
+            # cast back to fp16 if needed
+            if needs_upcasting:
+                self.vae.to(dtype=torch.float16)
+        else:
+            image = latents_with_noise
+
+        if not output_type == "latent":
+            # apply watermark if available
+            if self.watermark is not None:
+                image = self.watermark.apply_watermark(image)
+
+            image = self.image_processor.postprocess(image, output_type=output_type)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return StableDiffusionXLPipelineOutput(images=image)
diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_.py
new file mode 100644
index 0000000000000000000000000000000000000000..92b390e732337d31da66e3dbe229ee89ab743d66
--- /dev/null
+++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_.py
@@ -0,0 +1,1832 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
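+
+# --- Editor's note: illustrative sketch, not part of this pipeline file. ---
+# Before `vae.decode`, the img2img pipeline above denormalizes latents: when the VAE
+# config provides `latents_mean` / `latents_std`, latents are rescaled per channel,
+# otherwise they are only divided by `scaling_factor`. The helper below restates that
+# logic in isolation; the function name is made up and the default `scaling_factor`
+# assumes the SDXL VAE (0.13025).
+import torch
+
+
+def _denormalize_latents_sketch(latents, scaling_factor=0.13025, latents_mean=None, latents_std=None):
+    if latents_mean is not None and latents_std is not None:
+        mean = torch.tensor(latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
+        std = torch.tensor(latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
+        return latents * std / scaling_factor + mean
+    return latents / scaling_factor
+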
+
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import PIL.Image
+import torch
+import torch.nn.functional as F
+from transformers import (
+    CLIPImageProcessor,
+    CLIPTextModel,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionModelWithProjection,
+)
+from torchvision import transforms
+from diffusers.utils.import_utils import is_invisible_watermark_available
+
+from ...callbacks import MultiPipelineCallbacks, PipelineCallback
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import (
+    FromSingleFileMixin,
+    IPAdapterMixin,
+    StableDiffusionXLLoraLoaderMixin,
+    TextualInversionLoaderMixin,
+)
+from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
+from ...models.attention_processor import (
+    AttnProcessor2_0,
+    XFormersAttnProcessor,
+)
+from ...models.lora import adjust_lora_scale_text_encoder
+from ...schedulers import KarrasDiffusionSchedulers
+from ...utils import (
+    USE_PEFT_BACKEND,
+    deprecate,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
+from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin
+from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
+
+
+if is_invisible_watermark_available():
+    from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
+
+from .multicontrolnet import MultiControlNetModel
+
+
+# modified
+from diffusers import PNDMScheduler
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> # !pip install opencv-python transformers accelerate
+        >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
+        >>> from diffusers.utils import load_image
+        >>> import numpy as np
+        >>> import torch
+
+        >>> import cv2
+        >>> from PIL import Image
+
+        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
+        >>> negative_prompt = "low quality, bad quality, sketches"
+
+        >>> # download an image
+        >>> image = load_image(
+        ...     "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
+        ... )
+
+        >>> # initialize the models and pipeline
+        >>> controlnet_conditioning_scale = 0.5  # recommended for good generalization
+        >>> controlnet = ControlNetModel.from_pretrained(
+        ...     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
+        ... )
+        >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+        >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+        ...     "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
+        ... )
+        >>> pipe.enable_model_cpu_offload()
+
+        >>> # get canny image
+        >>> image = np.array(image)
+        >>> image = cv2.Canny(image, 100, 200)
+        >>> image = image[:, :, None]
+        >>> image = np.concatenate([image, image, image], axis=2)
+        >>> canny_image = Image.fromarray(image)
+
+        >>> # generate image
+        >>> image = pipe(
+        ...     prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
+        ... ).images[0]
+        ```
+"""
+
+def prepare_mask(
+    mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor]
+) -> torch.Tensor:
+    """
+    Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be
+    converted to ``torch.Tensor`` with shape ``batch x channels x height x width`` where ``channels`` is ``1`` for
+    the ``mask``.
+
+    The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
+
+    Args:
+        mask (`PIL.Image.Image`, `np.ndarray`, or `torch.Tensor`): The mask to apply to the image, i.e. regions to inpaint.
+            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
+            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
+
+    Raises:
+        ValueError: ``torch.Tensor`` masks should be in the ``[0, 1]`` range.
+
+    Returns:
+        torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``.
+    """
+    if isinstance(mask, torch.Tensor):
+        # Batch and add channel dim for single mask
+        if mask.ndim == 2:
+            mask = mask.unsqueeze(0).unsqueeze(0)
+
+        # Batch single mask or add channel dim
+        if mask.ndim == 3:
+            # Single batched mask, no channel dim or single mask not batched but channel dim
+            if mask.shape[0] == 1:
+                mask = mask.unsqueeze(0)
+
+            # Batched masks no channel dim
+            else:
+                mask = mask.unsqueeze(1)
+
+        # Check mask is in [0, 1]
+        if mask.min() < 0 or mask.max() > 1:
+            raise ValueError("Mask should be in [0, 1] range")
+
+        # Binarize mask
+        mask[mask < 0.5] = 0
+        mask[mask >= 0.5] = 1
+    else:
+        # preprocess mask
+        if isinstance(mask, (PIL.Image.Image, np.ndarray)):
+            mask = [mask]
+
+        if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
+            mask = np.concatenate(
+                [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0
+            )
+            mask = mask.astype(np.float32) / 255.0
+        elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
+            mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
+
+        mask[mask < 0.5] = 0
+        mask[mask >= 0.5] = 1
+        mask = torch.from_numpy(mask)
+
+    return mask
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    """
+    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+    Args:
+        scheduler (`SchedulerMixin`):
+            The scheduler to get timesteps from.
+        num_inference_steps (`int`):
+            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+            must be `None`.
+        device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+        timesteps (`List[int]`, *optional*):
+            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+            `num_inference_steps` and `sigmas` must be `None`.
+        sigmas (`List[float]`, *optional*):
+            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+            `num_inference_steps` and `timesteps` must be `None`.
+ + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. 
If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. + """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + + + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + 
lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
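+                    # Editor's note (illustrative): with this indexing, clip_skip=0
+                    # reproduces the default hidden_states[-2] (penultimate layer),
+                    # while e.g. clip_skip=2 selects hidden_states[-4].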
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + + + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + # and isinstance(ControlNetModel._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. 
[[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + + # depth map + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + + + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + + latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + print("latents is None") + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. 
Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
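+    # Editor's note (illustrative, not pipeline code): with classifier-free guidance the
+    # two halves of the batched prediction are recombined in the denoising loop as
+    #
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    #
+    # so `guidance_scale == 1` reduces to the plain conditional prediction; that is why
+    # `do_classifier_free_guidance` below also requires `guidance_scale > 1` (and no
+    # guidance-distilled `time_cond_proj_dim` on the UNet).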
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. 
If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. 
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + + + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + + + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + mask = prepare_mask(mask=mask_image) + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + # 6. Prepare latent variables + + sketch_image = self.image_processor.preprocess(sketch_image) + + init_latents, noise= self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. 
Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop
+        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+
+        # 8.1 Apply denoising_end
+        if (
+            self.denoising_end is not None
+            and isinstance(self.denoising_end, float)
+            and self.denoising_end > 0
+            and self.denoising_end < 1
+        ):
+            discrete_timestep_cutoff = int(
+                round(
+                    self.scheduler.config.num_train_timesteps
+                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
+                )
+            )
+            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
+            timesteps = timesteps[:num_inference_steps]
+
+        is_unet_compiled = is_compiled_module(self.unet)
+        is_controlnet_compiled = is_compiled_module(self.controlnet)
+        is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                # Relevant thread:
+                # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
+                if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
+                    torch._inductor.cudagraph_mark_step_begin()
+
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
+
+                # controlnet(s) inference
+                if guess_mode and self.do_classifier_free_guidance:
+                    # Infer ControlNet only for the conditional batch.
+                    # TODO: check whether this branch also needs to be changed
+                    control_model_input = latents
+                    control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+                    controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+                    controlnet_added_cond_kwargs = {
+                        "text_embeds": add_text_embeds.chunk(2)[1],
+                        "time_ids": add_time_ids.chunk(2)[1],
+                    }
+                else:
+                    control_model_input = latent_model_input
+                    controlnet_prompt_embeds = prompt_embeds
+                    controlnet_added_cond_kwargs = added_cond_kwargs
+
+                if isinstance(controlnet_keep[i], list):
+                    cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+                else:
+                    controlnet_cond_scale = controlnet_conditioning_scale
+                    if isinstance(controlnet_cond_scale, list):
+                        controlnet_cond_scale = controlnet_cond_scale[0]
+                    cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+                down_block_res_samples, mid_block_res_sample = self.controlnet(
+                    control_model_input,
+                    t,
+                    encoder_hidden_states=controlnet_prompt_embeds,
+                    controlnet_cond=image,
+                    conditioning_scale=cond_scale,
+                    guess_mode=guess_mode,
+                    added_cond_kwargs=controlnet_added_cond_kwargs,
+                    return_dict=False,
+                )
+
+                if guess_mode and self.do_classifier_free_guidance:
+                    # Inferred ControlNet only for the conditional batch.
+                    # To apply the output of ControlNet to both the unconditional and conditional batches,
+                    # add 0 to the unconditional batch to keep it unchanged.
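+                    # A minimal sketch of what the concatenation below does (shapes are illustrative
+                    # assumptions): if a conditional residual `d` has shape (batch, c, h, w), then
+                    # torch.cat([torch.zeros_like(d), d]) has shape (2 * batch, c, h, w), so the
+                    # unconditional half of the classifier-free-guidance batch receives a zero residual
+                    # and its UNet prediction is left unaffected by the ControlNet.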
+                    down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+                    mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
+                if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+                    added_cond_kwargs["image_embeds"] = image_embeds
+
+                # predict the noise residual
+                noise_pred = self.unet(
+                    latent_model_input,
+                    t,
+                    encoder_hidden_states=prompt_embeds,
+                    timestep_cond=timestep_cond,
+                    cross_attention_kwargs=self.cross_attention_kwargs,
+                    down_block_additional_residuals=down_block_res_samples,
+                    mid_block_additional_residual=mid_block_res_sample,
+                    added_cond_kwargs=added_cond_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # perform guidance
+                if self.do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents_with_noise = self.scheduler.step(
+                    noise_pred,
+                    t,
+                    latents_with_noise,
+                    **extra_step_kwargs,
+                    return_dict=False,
+                )[0]
+
+                # masking process
+                tmp = t.unsqueeze(0)
+                init_latents_proper = self.scheduler.add_noise(
+                    init_latents, noise, tmp
+                ).to(device)
+
+                mask = (mask > 0.5).to(prompt_embeds.dtype)
+                latents_with_noise = (
+                    mask * latents_with_noise + (1 - mask) * init_latents_proper
+                )
+
+                if callback_on_step_end is not None:
+                    # TODO: check whether this branch also needs to be changed (2)
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
+                    negative_pooled_prompt_embeds = callback_outputs.pop(
+                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+                    )
+                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
+                    negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+                    if callback is not None and i % callback_steps == 0:
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents_with_noise)
+
+        if not output_type == "latent":
+            # make sure the VAE is in float32 mode, as it overflows in float16
+            needs_upcasting = self.vae.dtype == torch.float32 and self.vae.config.force_upcast
+
+            if needs_upcasting:
+                self.upcast_vae()
+                latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+
+            # unscale/denormalize the latents
+            # denormalize with the mean and std if available and not None
+            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
+            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
+            if has_latents_mean and has_latents_std:
+                # NOTE: this part has already been changed
+                latents_mean = (
+                    torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype)
+                )
+                latents_std = (
+                    torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device,
latents_with_noise.dtype) + ) + latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents_with_noise = latents_with_noise / self.vae.config.scaling_factor + + image = self.vae.decode(latents_with_noise, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float32) + else: + image = latents_with_noise + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..af19f3c309f87e446e772806eccbbc23fc2fa93b --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py @@ -0,0 +1,1656 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
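+
+# NOTE: a minimal usage sketch for the masked img2img variant shipped alongside this file in
+# pipeline_controlnet_sd_xl_img2img_img.py. Model ids, file paths and argument values below are
+# illustrative assumptions, not part of this module:
+#
+#   import torch
+#   from PIL import Image
+#   from diffusers3.models.controlnet import ControlNetModel
+#   from diffusers3.pipelines.controlnet.pipeline_controlnet_sd_xl_img2img_img import (
+#       StableDiffusionXLControlNetImg2ImgPipeline,
+#   )
+#
+#   controlnet = ControlNetModel.from_pretrained(
+#       "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, use_safetensors=True
+#   )
+#   pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+#       "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
+#   ).to("cuda")
+#
+#   result = pipe(
+#       prompt="a person wearing the garment, 4k photo",
+#       image=Image.open("depth.png"),          # ControlNet depth condition
+#       mask_image=Image.open("mask.png"),      # edit-region mask (exact convention handled by prepare_mask)
+#       sketch_image=Image.open("person.png"),  # source image encoded into the init latents
+#       num_inference_steps=30,
+#       guidance_scale=5.0,
+#   ).images[0]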
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # pip install accelerate transformers safetensors diffusers + + >>> import torch + >>> import numpy as np + >>> from PIL import Image + + >>> from transformers import DPTImageProcessor, DPTForDepthEstimation + >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL + >>> from diffusers.utils import load_image + + + >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") + >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-depth-sdxl-1.0-small", + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + + >>> def get_depth_map(image): + ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") + ... with torch.no_grad(), torch.autocast("cuda"): + ... depth_map = depth_estimator(image).predicted_depth + + ... depth_map = torch.nn.functional.interpolate( + ... depth_map.unsqueeze(1), + ... size=(1024, 1024), + ... mode="bicubic", + ... align_corners=False, + ... ) + ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_map = (depth_map - depth_min) / (depth_max - depth_min) + ... image = torch.cat([depth_map] * 3, dim=1) + ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0] + ... 
image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) + ... return image + + + >>> prompt = "A robot, 4k photo" + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((1024, 1024)) + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> depth_image = get_depth_map(image) + + >>> images = pipe( + ... prompt, + ... image=image, + ... control_image=depth_image, + ... strength=0.99, + ... num_inference_steps=50, + ... controlnet_conditioning_scale=controlnet_conditioning_scale, + ... ).images + >>> images[0].save(f"robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionXLControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + 
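+        # Both flags are persisted on the pipeline config: `force_zeros_for_empty_prompt` is read back in
+        # `encode_prompt` (to zero out the negative embeddings when no negative prompt is given) and
+        # `requires_aesthetics_score` is read back in `_get_add_time_ids` (to decide whether the added time
+        # ids carry an aesthetic score instead of a target size).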
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
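+
+            Example (an illustrative sketch; `pipe` is assumed to be an already loaded instance of this pipeline):
+
+            ```py
+            prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
+                prompt="a robot, 4k photo",
+                num_images_per_prompt=1,
+                do_classifier_free_guidance=True,
+                negative_prompt="low quality",
+            )
+            ```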
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
+ ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
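+ # In the denoising loop below the two halves of the batch are recombined as
+ # `noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`,
+ # so guidance only takes effect when `guidance_scale > 1` (see the property below).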
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image will be used as the starting point for the image generation process. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If + the type is specified as `torch.Tensor`, it is passed to ControlNet as is. 
`PIL.Image.Image` can also + be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in + init, images must be passed as a list such that each element of the list can be correctly batched for + input to a single controlnet. + height (`int`, *optional*, defaults to the size of control_image): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to the size of control_image): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original unet. If multiple ControlNets are specified in init, you can set the + corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the controlnet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. 
Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. 
with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple` + containing the output images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + control_image, + strength, + num_inference_steps, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3.1. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image and controlnet_conditioning_image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + height, width = control_image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + True, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + add_text_embeds = pooled_prompt_embeds + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. 
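+ # `prompt_embeds` was assembled above as [negative, positive], so the
+ # `chunk(2)[1]` calls below select the conditional half; the unconditional
+ # half later receives zero-filled ControlNet residuals and passes through unchanged.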
+ control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. + down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # If we do sequential model offloading, let's 
offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img_img.py b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img_img.py new file mode 100644 index 0000000000000000000000000000000000000000..f3d69e333aabd52d55a5ed2b7a6107f715ee14df --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img_img.py @@ -0,0 +1,2728 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by apfplicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
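+#
+# NOTE: This module appears to be a local variant of diffusers'
+# `StableDiffusionXLControlNetImg2ImgPipeline` (see the "Copied from ..." markers
+# throughout). In addition to the upstream pipeline it defines a `prepare_mask`
+# helper and an OpenCLIP-based `_setup_openclip` method.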
+ + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from torchvision.transforms.functional import pil_to_tensor + +import numpy as np +import PIL +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from .multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # pip install accelerate transformers safetensors diffusers + + >>> import torch + >>> import numpy as np + >>> from PIL import Image + + >>> from transformers import DPTImageProcessor, DPTForDepthEstimation + >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL + >>> from diffusers.utils import load_image + + + >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") + >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas") + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-depth-sdxl-1.0-small", + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + + >>> def get_depth_map(image): + ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") + ... with torch.no_grad(), torch.autocast("cuda"): + ... depth_map = depth_estimator(image).predicted_depth + + ... depth_map = torch.nn.functional.interpolate( + ... depth_map.unsqueeze(1), + ... size=(1024, 1024), + ... mode="bicubic", + ... align_corners=False, + ... ) + ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_map = (depth_map - depth_min) / (depth_max - depth_min) + ... image = torch.cat([depth_map] * 3, dim=1) + ... 
image = image.permute(0, 2, 3, 1).cpu().numpy()[0] + ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) + ... return image + + + >>> prompt = "A robot, 4k photo" + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((1024, 1024)) + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> depth_image = get_depth_map(image) + + >>> images = pipe( + ... prompt, + ... image=image, + ... control_image=depth_image, + ... strength=0.99, + ... num_inference_steps=50, + ... controlnet_conditioning_scale=controlnet_conditioning_scale, + ... ).images + >>> images[0].save(f"robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
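+
+ Example (illustrative; a 64x64 float mask with a square region set to 1):
+
+ >>> m = torch.zeros(64, 64)
+ >>> m[16:48, 16:48] = 1.0
+ >>> prepare_mask(m).shape
+ torch.Size([1, 1, 64, 64])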
+ """ + if isinstance(mask, torch.Tensor): + print("^^^^") + print("mask_1") + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + print("^^^^") + print("mask_2") + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + + +class StableDiffusionXLControlNetImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + 
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
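+
+ Returns:
+ A 4-tuple `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)`:
+ the per-token hidden states of both text encoders concatenated along the feature dimension and the
+ pooled embeddings of the second text encoder, each duplicated `num_images_per_prompt` times. The
+ negative entries may be `None` when classifier-free guidance is disabled and no negative inputs are given.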
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def _get_vae(self): + vae = getattr(self, "vae", None) or getattr(self, "autoencoder", None) + if vae is None and hasattr(self, "components"): + try: + vae = self.components.get("vae", None) + except Exception: + vae = None + if vae is None: + raise RuntimeError( + "VAE not found on pipeline. Ensure the pipeline is fully constructed " + "and VAE is attached as `vae`, `autoencoder`, or components['vae']." 
+ ) + return vae + + def _get_vae_scaling(self): + vae = self._get_vae() + return getattr(getattr(vae, "config", None), "scaling_factor", getattr(vae, "scaling_factor", 0.18215)) + + + + def _setup_openclip(self, device="cuda"): + model, _, _ = open_clip.create_model_and_transforms( + "ViT-H-14", pretrained="laion2b_s32b_b79k" + ) + model.eval().to(device, dtype=torch.float32) + for p in model.parameters(): + p.requires_grad_(False) + + object.__setattr__(self, "clip_model", model) + object.__setattr__(self, "clip_image_encoder", lambda x: self.clip_model.encode_image(x)) + object.__setattr__(self, "clip_text_encoder", lambda t: self.clip_model.encode_text(t)) + object.__setattr__(self, "clip_tokenizer", open_clip.get_tokenizer("ViT-H-14")) + + object.__setattr__(self, "clip_image_size", 224) + object.__setattr__(self, "clip_mean", (0.48145466, 0.4578275, 0.40821073)) + object.__setattr__(self, "clip_std", (0.26862954, 0.26130258, 0.27577711)) + + object.__setattr__(self, "cav_words", ["garment", "clothes", "apparel", "outfit"]) + object.__setattr__(self, "cav_alpha", 0.3) + object.__setattr__(self, "cav_steps", 1) + object.__setattr__(self, "cav_channel_dir", True) + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def _sdxl_decode_to_01(self, z): + vae = self._get_vae() + sf = getattr(getattr(vae, "config", None), "scaling_factor", + getattr(vae, "scaling_factor", 0.18215)) + x = vae.decode(z / sf).sample # [-1, 1] + x = (x.clamp(-1, 1) + 1.0) / 2.0 + return x + + + def _ensure_openclip(self, device=None): + if getattr(self, "clip_model", None) is not None: + return + import open_clip, torch + if device is None: + device = getattr(self, "device", "cuda" if torch.cuda.is_available() else "cpu") + + model, _, _ = open_clip.create_model_and_transforms( + "ViT-H-14", pretrained="laion2b_s32b_b79k" + ) + model.eval().to(device, dtype=torch.float32) + for p in model.parameters(): + p.requires_grad_(False) + + # __setattr__ ํ›…์„ ์šฐํšŒํ•ด์„œ ์†์„ฑ ์ฃผ์ž… + object.__setattr__(self, "clip_model", model) + object.__setattr__(self, "clip_image_encoder", lambda x: self.clip_model.encode_image(x)) + object.__setattr__(self, "clip_text_encoder", lambda t: self.clip_model.encode_text(t)) + object.__setattr__(self, "clip_tokenizer", open_clip.get_tokenizer("ViT-H-14")) + + object.__setattr__(self, "clip_image_size", 224) + object.__setattr__(self, "clip_mean", (0.48145466, 0.4578275, 0.40821073)) + object.__setattr__(self, "clip_std", (0.26862954, 0.26130258, 0.27577711)) + + # ์ œ๋กœ์ƒท Grad-CAV ํ•˜์ดํผ + object.__setattr__(self, "cav_words", ["garment", "clothes", "apparel", "outfit"]) + object.__setattr__(self, "cav_alpha", 0.3) + object.__setattr__(self, "cav_steps", 1) + object.__setattr__(self, "cav_channel_dir", True) + + + def upcast_vae_safe(self): + vae = self._get_vae() + try: + dev = next(vae.parameters()).device + except StopIteration: + import torch + dev = getattr(self, "device", "cuda" if torch.cuda.is_available() else "cpu") + orig_dtype = vae.dtype + object.__setattr__(self, "_vae_orig_dtype", orig_dtype) + + vae.to(device=dev, dtype=torch.float32) + + # xformers/torch2.0 ์ตœ์ ํ™” ๊ฐ์ง€ ํ›„ ์ผ๋ถ€ ๋ธ”๋ก๋งŒ ์›๋ž˜ dtype์œผ๋กœ (device๋Š” ๋™์ผ) + try: + from 
diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor
+ proc = vae.decoder.mid_block.attentions[0].processor
+ use_opt = isinstance(proc, (AttnProcessor2_0, XFormersAttnProcessor))
+ except Exception:
+ use_opt = False
+ if use_opt:
+ vae.post_quant_conv.to(device=dev, dtype=orig_dtype)
+ vae.decoder.conv_in.to(device=dev, dtype=orig_dtype)
+ vae.decoder.mid_block.to(device=dev, dtype=orig_dtype)
+
+
+
+
+
+ def _clip_preprocess_tensor(self, x01):
+ import torch.nn.functional as F
+ target = getattr(self, "clip_image_size", 224)
+ x = F.interpolate(x01, size=(target, target), mode="bilinear", align_corners=False)
+
+ # build mean/std on the same device/dtype as x
+ mean = x.new_tensor(getattr(self, "clip_mean", (0.48145466, 0.4578275, 0.40821073))).view(1,3,1,1)
+ std = x.new_tensor(getattr(self, "clip_std", (0.26862954, 0.26130258, 0.27577711))).view(1,3,1,1)
+ return (x - mean) / std
+
+ def prepare_latents(
+ self,
+ image,
+ timestep,
+ batch_size,
+ num_images_per_prompt,
+ dtype,
+ device,
+ generator=None,
+ add_noise=True,
+ # === NEW: garment subspace removal ===
+ garment_images=None, # torch.Tensor [Ng,3,H,W] or PIL.Image or list[Tensor|PIL]
+ garment_rank=None, # int or None (currently a single axis is used; reserved for future extension)
+ garment_alpha=None, # float or None (default=0.8)
+ garment_mask=None, # black/white mask (PIL.Image or torch.Tensor): white=garment(1), black=background(0)
+ ):
+ """
+ StableDiffusion(SDXL) prepare_latents with optional garment subspace removal (orthogonal projection).
+ """
+ import torch
+ import torch.nn.functional as F
+ from diffusers.utils.torch_utils import randn_tensor
+ from torchvision.transforms.functional import pil_to_tensor
+ import PIL
+
+ # ---------------------- small utils ----------------------
+ eps = 1e-12
+
+ def _ensure_bchw(x: torch.Tensor) -> torch.Tensor:
+ if isinstance(x, torch.Tensor):
+ if x.ndim == 3: # [C,H,W]
+ return x.unsqueeze(0)
+ return x
+ raise ValueError("Tensor expected")
+
+ def _gaussian_blur(mask: torch.Tensor, ksize=5, sigma=2.0):
+ pad = ksize // 2
+ ax = torch.arange(-pad, pad + 1, device=mask.device, dtype=mask.dtype)
+ k1 = torch.exp(-(ax ** 2) / (2 * sigma ** 2))
+ k1 = k1 / k1.sum()
+ k2d = (k1[:, None] @ k1[None, :])
+ k2d = k2d / k2d.sum()
+ k = k2d.unsqueeze(0).unsqueeze(0)
+ chans = mask.shape[1]
+ if chans != 1:
+ k = k.repeat(chans, 1, 1, 1)
+ groups = chans
+ else:
+ groups = 1
+ return F.conv2d(F.pad(mask, (pad, pad, pad, pad), mode="reflect"), k, groups=groups)
+
+ def _highpass_latent(t: torch.Tensor, k=5, w=0.4):
+ pad = k // 2
+ blur = F.avg_pool2d(F.pad(t.unsqueeze(0), (pad, pad, pad, pad), mode="reflect"),
+ kernel_size=k, stride=1).squeeze(0)
+ return t * (1 - w) + (t - blur) * w
+
+ def _prep_mask_soft(mask_src, H, W, C, dev, dt, ksize=5, sigma=2.0, gamma=1.05):
+ if mask_src is None:
+ return None
+ if isinstance(mask_src, PIL.Image.Image):
+ m = pil_to_tensor(mask_src.convert("L")).float() / 255.0
+ m = m.unsqueeze(0) # [1,1,h,w]
+ elif isinstance(mask_src, torch.Tensor):
+ m = mask_src
+ if m.ndim == 2:
+ m = m.unsqueeze(0).unsqueeze(0)
+ elif m.ndim == 3:
+ m = m.unsqueeze(0)
+ if m.max() > 1.0:
+ m = m / 255.0
+ else:
+ raise ValueError("garment_mask must be PIL/Tensor")
+ m = F.interpolate(m, size=(H, W), mode="bilinear", align_corners=False)
+ m = _gaussian_blur(m, ksize=ksize, sigma=sigma)
+ m = m.clamp(0, 1).pow(gamma)
+ if m.shape[1] == 1:
+ m = m.repeat(1, C, 1, 1)
+ return m.to(device=dev, dtype=dt)
+
+ def _prep_mask_hard_out(mask_src, H, W, C, dev, dt,
thresh=0.5): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m0 = pil_to_tensor(mask_src.convert("L")).float() / 255.0 + m0 = m0.unsqueeze(0) # [1,1,h,w] + elif isinstance(mask_src, torch.Tensor): + m0 = mask_src + if m0.ndim == 2: + m0 = m0.unsqueeze(0).unsqueeze(0) + elif m0.ndim == 3: + m0 = m0.unsqueeze(0) + if m0.max() > 1.0: + m0 = m0 / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m0 = F.interpolate(m0, size=(H, W), mode="nearest") + m_in_hard = (m0 > thresh).float() + m_out_hard = 1.0 - m_in_hard + if C != 1: + m_out_hard = m_out_hard.repeat(1, C, 1, 1) + return m_out_hard.to(device=dev, dtype=dt) + + # ---------------------- allow PIL/list โ†’ Tensor ---------------------- + if isinstance(image, list): + imgs = [] + for im in image: + if isinstance(im, PIL.Image.Image): + t = pil_to_tensor(im).float() / 255.0 + imgs.append(_ensure_bchw(t)) + elif isinstance(im, torch.Tensor): + imgs.append(_ensure_bchw(im)) + else: + raise ValueError(f"Unsupported element in image list: {type(im)}") + image = torch.cat(imgs, dim=0) + elif isinstance(image, PIL.Image.Image): + image = pil_to_tensor(image).float() / 255.0 + image = _ensure_bchw(image) + + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError(f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}") + + # ---------------------- latents_mean/std (as in original) ---------------------- + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + eff_bs = batch_size * num_images_per_prompt + + # ---------------------- encode image to latents (original logic) ---------------------- + if image.shape[1] == 4: + init_latents = image + else: + # VAE float32 for stability + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != eff_bs: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {eff_bs}. Make sure the batch size matches the length of the generators." 
+ ) + elif isinstance(generator, list): + if image.shape[0] < eff_bs and eff_bs % image.shape[0] == 0: + image = torch.cat([image] * (eff_bs // image.shape[0]), dim=0) + elif image.shape[0] < eff_bs and eff_bs % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {eff_bs} " + ) + # use original helper + init_latents = torch.cat( + [retrieve_latents(self.vae.encode(image[i:i+1]), generator=generator[i]) for i in range(eff_bs)], + dim=0, + ) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + + # original normalization branch + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if eff_bs > init_latents.shape[0] and eff_bs % init_latents.shape[0] == 0: + additional_image_per_prompt = eff_bs // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif eff_bs > init_latents.shape[0] and eff_bs % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {eff_bs} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + + if garment_images is not None: + def _encode_and_norm(img_like): + if isinstance(img_like, PIL.Image.Image): + g = pil_to_tensor(img_like.convert("RGB")).float() / 255.0 + g = _ensure_bchw(g) + elif isinstance(img_like, torch.Tensor): + g = img_like + if g.ndim == 3: + g = g.unsqueeze(0) + elif isinstance(img_like, list): + gs = [] + for it in img_like: + if isinstance(it, PIL.Image.Image): + t = pil_to_tensor(it.convert("RGB")).float() / 255.0 + gs.append(_ensure_bchw(t)) + elif isinstance(it, torch.Tensor): + gs.append(_ensure_bchw(it)) + else: + raise ValueError(f"Unsupported in garment_images list: {type(it)}") + g = torch.cat(gs, dim=0) + else: + raise ValueError(f"garment_images type error: {type(img_like)}") + + if g.shape[1] == 1: + g = g.repeat(1, 3, 1, 1) + + g = g.to(device=device, dtype=dtype) + if self.vae.config.force_upcast: + self.vae.to(dtype=torch.float32) + z = retrieve_latents(self.vae.encode(g), generator=generator).to(torch.float32) + self.vae.to(dtype) + else: + z = retrieve_latents(self.vae.encode(g), generator=generator) + + z = z.to(dtype) + if latents_mean is not None and latents_std is not None: + z = (z - latents_mean.to(device=device, dtype=dtype)) * self.vae.config.scaling_factor / latents_std.to(device=device, dtype=dtype) + else: + z = self.vae.config.scaling_factor * z + return z + + # garment latent and a white reference to form a direction + z_g = _encode_and_norm(garment_images) + + white_rgb = torch.ones_like(image[:, :3]) if image.shape[1] >= 3 else torch.ones(image.shape[0], 3, image.shape[2], image.shape[3], device=device, dtype=dtype) + z_white = _encode_and_norm(white_rgb) + + v = z_g - z_white + B, C, H, W = init_latents.shape + + # soft/hard masks at latent resolution + M_soft = _prep_mask_soft(garment_mask, H, W, C, init_latents.device, init_latents.dtype) + M_out_hard = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5) + + alpha = float(garment_alpha) if 
garment_alpha is not None else 0.8 # removal strength + beta = 0.38 # inside blend with previous + + # refine garment direction a bit (reduce DC / emphasize edges) + v0 = _highpass_latent(v[0], k=5, w=0.4) + + Z_base = init_latents.clone() + Z = init_latents + z_work = Z.clone() + + # mean inside (for DC-safe centering) + if M_soft is not None: + mean_z0 = (Z * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) + u_src = v0 * M_soft[0] + w = M_soft[0].sum().clamp_min(eps) + u_src = u_src - (u_src * M_soft[0]).sum() / w * M_soft[0] + else: + mean_z0 = Z.mean([2,3], keepdim=True) + u_src = v0 + + # unit axis + u = u_src.reshape(-1) + u_norm = u.norm() + if u_norm < 1e-6: + # axis degenerate โ†’ skip (still add noise later) + if add_noise: + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + return init_latents + + u = u / (u_norm + eps) + + # diagnostics baseline + zc_init = (Z - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_before = (zc_init.reshape(B, -1) @ u) + + # iterative masked projection + iter_n = int(getattr(self, "garment_iter", 1)) + for it in range(max(1, iter_n)): + mean_z = (z_work * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) if M_soft is not None \ + else z_work.mean([2,3], keepdim=True) + zc = (z_work - mean_z) * (M_soft if M_soft is not None else 1) + zc_flat = zc.reshape(B, -1) + + # slight decay + alpha_i = alpha * (0.90 ** it) + + # boundary boost + if M_soft is not None: + R = (M_soft[0] * (1.0 - M_soft[0])) + if R.max() > 0: + R = R / (R.max() + eps) + alpha_map = alpha_i * (0.5 + 0.5 * M_soft[0] + 0.5 * R) + alpha_map = alpha_map.clamp(max=alpha * 1.2) + else: + alpha_map = torch.full_like(z_work[0], alpha_i) + + coeff = (zc_flat @ u) + proj = coeff.unsqueeze(1) * u.unsqueeze(0) + zproj = (zc_flat - alpha_map.reshape(1, -1) * proj).reshape(B, C, H, W) + + if M_soft is None: + z_new = (1 - beta) * zproj + beta * z_work + else: + z_new = z_work * (1 - M_soft) + ((1 - beta) * zproj + beta * z_work) * M_soft + + # inside-only mean/std restore + if M_soft is not None: + wsum = M_soft.sum([1,2,3], keepdim=True).clamp_min(eps) + + def _mstats(X): + mean_in = (X * M_soft).sum([1,2,3], keepdim=True) / wsum + Xm = (X - mean_in) * M_soft + std_in = (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt() + return mean_in, std_in + + mean_in0, std_in0 = _mstats(z_work) + mean_in1, std_in1 = _mstats(z_new) + + z_new = z_new + (mean_in0 - mean_in1) * M_soft + gain = ((std_in0 + eps) / (std_in1 + eps)).clamp(0.97, 1.03) + z_new = z_new * (1 - M_soft) + ((z_new - mean_in0) * gain + mean_in0) * M_soft + + z_work = z_new + + # outside mean lock + if M_soft is not None: + wsum_out_soft = (1 - M_soft).sum([1,2,3], keepdim=True).clamp_min(eps) + mean_out_base = (Z_base * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft + mean_out_fin = (z_work * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft + delta_out = (mean_out_base - mean_out_fin) + + M_out_hard2 = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5) + if M_out_hard2 is not None: + z_work = z_work + delta_out * M_out_hard2 + else: + z_work = z_work + delta_out * (1 - M_soft) + + # outside std very tight + W_out = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.55) + if W_out is not None: + wsum = W_out.sum([1,2,3], 
keepdim=True).clamp_min(eps) + mu0 = (Z_base * W_out).sum([1,2,3], keepdim=True) / wsum + mu1 = (z_work * W_out).sum([1,2,3], keepdim=True) / wsum + + def _std(X, mu): + Xm = (X - mu) * W_out + return (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt() + + s0 = _std(Z_base, mu0) + s1 = _std(z_work, mu1) + g_out = ((s0 + eps) / (s1 + eps)).clamp(0.995, 1.005) + z_work = z_work * (1 - W_out) + ((z_work - mu1) * g_out + mu1) * W_out + + init_latents = z_work + + + if True: + with torch.no_grad(): + ZAc = (init_latents - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_after = (ZAc.reshape(B,-1) @ u) + coeff_ratio = (coeff_after.abs().mean() / (coeff_before.abs().mean() + eps)).item() + energy_ratio = (coeff_after.pow(2).mean() / (coeff_before.pow(2).mean() + eps)).item() + print(f" !!!![Garment|Projection] coeff โ†“ ratio = {coeff_ratio:.4f}, energy โ†“ ratio = {energy_ratio:.4f}") + + # ---------------------- add noise (original) ---------------------- + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # debug + # print("timestep:", timestep) + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents + + + + + + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + + + + + + # def prepare_latents_( + # self, + # image, + # timestep, + # batch_size, + # num_images_per_prompt, + # dtype, + # device, + # generator=None, + # ): + # if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + # raise ValueError( + # f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + # ) + + # image = image.to(device=device, dtype=dtype) + + # batch_size = batch_size * num_images_per_prompt + + # if image.shape[1] == 4: + # init_latents = image + + # else: + # if isinstance(generator, list) and len(generator) != batch_size: + + + # raise ValueError( + # f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + # f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ # ) + + # elif isinstance(generator, list): + + # init_latents = [ + # self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + # for i in range(batch_size) + # ] + # print("***") + # print("vae_1") + # init_latents = torch.cat(init_latents, dim=0) + # else: + # print("***") + # print("vae_2") + # init_latents = self.vae.encode(image).latent_dist.sample(generator) + + # init_latents = self.vae.config.scaling_factor * init_latents + + # if ( + # batch_size > init_latents.shape[0] + # and batch_size % init_latents.shape[0] == 0 + # ): + # # expand init_latents for batch_size + # deprecation_message = ( + # f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + # " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + # " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + # " your script to pass as many initial images as text prompts to suppress this warning." + # ) + # deprecate( + # "len(prompt) != len(image)", + # "1.0.0", + # deprecation_message, + # standard_warn=False, + # ) + # additional_image_per_prompt = batch_size // init_latents.shape[0] + # init_latents = torch.cat( + # [init_latents] * additional_image_per_prompt, dim=0 + # ) + # elif ( + # batch_size > init_latents.shape[0] + # and batch_size % init_latents.shape[0] != 0 + # ): + # raise ValueError( + # f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + # ) + # else: + # init_latents = torch.cat([init_latents], dim=0) + + # shape = init_latents.shape + # noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # # get latents + + # return init_latents, noise + +# ์ตœ์ข… + def prepare_latents_( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + garment_images=None, + garment_rank=None, + garment_alpha=None, + garment_mask=None, + ): + import torch + import torch.nn.functional as F + from diffusers.utils.torch_utils import randn_tensor + from torchvision.transforms.functional import pil_to_tensor + import PIL + + eps = 1e-12 + + # ---------------------- util ---------------------- + def _ensure_bchw(x): + if isinstance(x, torch.Tensor): + if x.ndim == 3: # [C,H,W] + return x.unsqueeze(0) + return x + raise ValueError("Tensor expected") + + # Gaussian blur for feather (soft mask) + def _gaussian_blur(mask, ksize=5, sigma=2.0): + pad = ksize // 2 + ax = torch.arange(-pad, pad + 1, device=mask.device, dtype=mask.dtype) + k1 = torch.exp(-(ax ** 2) / (2 * sigma ** 2)) + k1 = k1 / k1.sum() + k2d = (k1[:, None] @ k1[None, :]) + k2d = k2d / k2d.sum() + k = k2d.unsqueeze(0).unsqueeze(0) + chans = mask.shape[1] + if chans != 1: + k = k.repeat(chans, 1, 1, 1) + groups = chans + else: + groups = 1 + return F.conv2d(F.pad(mask, (pad, pad, pad, pad), mode="reflect"), k, groups=groups) + + # high-pass for garment vector + def _highpass_latent(t, k=5, w=0.4): + pad = k // 2 + blur = F.avg_pool2d(F.pad(t.unsqueeze(0), (pad, pad, pad, pad), mode="reflect"), + kernel_size=k, stride=1).squeeze(0) + return t * (1 - w) + (t - blur) * w + + # prepare soft mask (feathered, used for blending/projection weights) + def _prep_mask_soft(mask_src, H, W, C, dev, dt, ksize=5, sigma=2.0, gamma=1.05): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m = pil_to_tensor(mask_src.convert("L")).float() / 255.0 + m = m.unsqueeze(0) + elif isinstance(mask_src, 
torch.Tensor): + m = mask_src + if m.ndim == 2: + m = m.unsqueeze(0).unsqueeze(0) + elif m.ndim == 3: + m = m.unsqueeze(0) + if m.max() > 1.0: + m = m / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m = F.interpolate(m, size=(H, W), mode="bilinear", align_corners=False) + m = _gaussian_blur(m, ksize=ksize, sigma=sigma) + m = m.clamp(0, 1).pow(gamma) + if m.shape[1] == 1: + m = m.repeat(1, C, 1, 1) + return m.to(device=dev, dtype=dt) + + # prepare hard OUTSIDE mask (strict outside = 1, inside = 0) from the ORIGINAL mask (no feather) + def _prep_mask_hard_out(mask_src, H, W, C, dev, dt, thresh=0.5): + if mask_src is None: + return None + if isinstance(mask_src, PIL.Image.Image): + m0 = pil_to_tensor(mask_src.convert("L")).float() / 255.0 # [1,h,w] + m0 = m0.unsqueeze(0) # [1,1,h,w] + elif isinstance(mask_src, torch.Tensor): + m0 = mask_src + if m0.ndim == 2: + m0 = m0.unsqueeze(0).unsqueeze(0) + elif m0.ndim == 3: + m0 = m0.unsqueeze(0) + if m0.max() > 1.0: + m0 = m0 / 255.0 + else: + raise ValueError("garment_mask must be PIL/Tensor") + m0 = F.interpolate(m0, size=(H, W), mode="nearest") + m_in_hard = (m0 > thresh).float() # inside=1 / outside=0 + m_out_hard = 1.0 - m_in_hard # outside=1 / inside=0 + if C != 1: + m_out_hard = m_out_hard.repeat(1, C, 1, 1) + return m_out_hard.to(device=dev, dtype=dt) + + # ---------------------- image preprocessing ---------------------- + if isinstance(image, list): + imgs = [] + for im in image: + if isinstance(im, PIL.Image.Image): + t = pil_to_tensor(im).float() / 255.0 + imgs.append(_ensure_bchw(t)) + else: + imgs.append(_ensure_bchw(im)) + image = torch.cat(imgs, dim=0) + elif isinstance(image, PIL.Image.Image): + image = pil_to_tensor(image).float() / 255.0 + image = _ensure_bchw(image) + image = image.to(device=device, dtype=dtype) + + eff_bs = batch_size * num_images_per_prompt + + # ---------------------- VAE encode ---------------------- + if image.shape[1] == 4: + init_latents = image + else: + if isinstance(generator, list) and len(generator) != eff_bs: + raise ValueError("generator length mismatch") + elif isinstance(generator, list): + init_latents = torch.cat( + [self.vae.encode(image[i:i+1]).latent_dist.sample(generator[i]) for i in range(eff_bs)], dim=0) + else: + init_latents = self.vae.encode(image).latent_dist.sample(generator) + init_latents = self.vae.config.scaling_factor * init_latents + + # ---------------------- batch align ---------------------- + if (eff_bs > init_latents.shape[0]) and (eff_bs % init_latents.shape[0] == 0): + additional = eff_bs // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional, dim=0) + else: + init_latents = torch.cat([init_latents], dim=0) + + # ====================================================== + # Garment subspace removal + # ====================================================== + if garment_images is not None: + # garment_images -> [Ng,3,H,W] + if isinstance(garment_images, PIL.Image.Image): + g_img = pil_to_tensor(garment_images).float() / 255.0 + if g_img.shape[0] == 1: + g_img = g_img.repeat(3,1,1) + g_img = g_img.unsqueeze(0) + elif isinstance(garment_images, torch.Tensor): + g_img = garment_images + if g_img.ndim == 3: + if g_img.shape[0] == 1: + g_img = g_img.repeat(3,1,1) + g_img = g_img.unsqueeze(0) + elif g_img.ndim == 4 and g_img.shape[1] == 1: + g_img = g_img.repeat(1,3,1,1) + else: + raise ValueError("garment_images type error") + g_img = g_img.to(device=device, dtype=dtype) + + with torch.no_grad(): + z_g = 
self.vae.encode(g_img).latent_dist.sample(generator)
+ z_g = self.vae.config.scaling_factor * z_g
+
+ B, C, H, W = init_latents.shape
+ # soft/hard-out masks
+ M_soft = _prep_mask_soft(garment_mask, H, W, C, init_latents.device, init_latents.dtype)
+ M_out_hard = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5)
+
+ alpha = float(garment_alpha) if garment_alpha is not None else 0.8
+ beta = 0
+
+ # --- Garment vector refinement ---
+ white = torch.ones_like(g_img)
+ z_white = self.vae.encode(white).latent_dist.sample(generator)
+ z_white = self.vae.config.scaling_factor * z_white
+ v = z_g - z_white
+ v0 = _highpass_latent(v[0], k=5, w=0.4)
+
+ # snapshot for outside mean lock
+ Z_base = init_latents.clone()
+
+ Z = init_latents
+ z_work = Z.clone()
+
+ # --- DC-safe u (remove mean component in mask)
+ if M_soft is not None:
+ mean_z0 = (Z * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps))
+ u_src = v0 * M_soft[0]
+ w = M_soft[0].sum().clamp_min(eps)
+ u_src = u_src - (u_src * M_soft[0]).sum() / w * M_soft[0]
+ else:
+ mean_z0 = Z.mean([2,3], keepdim=True)
+ u_src = v0
+ u = u_src.reshape(-1)
+ u_norm = u.norm()
+ if u_norm < 1e-6:
+ print("[Guard] projection axis ~0; skip")
+ noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
+ return init_latents, noise
+ u = u / (u_norm + eps)
+
+ # baseline for verification
+ zc_init = (Z - mean_z0) * (M_soft if M_soft is not None else 1)
+ coeff_before = (zc_init.reshape(B, -1) @ u)
+
+ # --- iterative projection (leave the outside untouched; minimize feathering influence) ---
+ iter_n = 1
+ for it in range(max(1, iter_n)):
+ mean_z = (z_work * M_soft).sum([1,2,3], keepdim=True) / (M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)) if M_soft is not None \
+ else z_work.mean([2,3], keepdim=True)
+ zc = (z_work - mean_z) * (M_soft if M_soft is not None else 1)
+ zc_flat = zc.reshape(B, -1)
+
+ # per-iter alpha with mild decay
+ alpha_i = alpha * (0.90 ** it)
+ # alpha_map = ((0.5 + 0.5 * M_soft[0]) * alpha_i) if M_soft is not None else torch.full_like(z_work[0], alpha_i)
+ R = (M_soft[0] * (1.0 - M_soft[0]))
+ if R.max() > 0:
+ R = R / (R.max() + eps)
+ alpha_map = alpha * (0.5 + 0.5 * M_soft[0] + 0.5 * R) # extra +0.5*alpha near the mask boundary
+ alpha_map = alpha_map.clamp(max=alpha * 1.2) # prevent excessive boost
+
+ coeff = (zc_flat @ u)
+ proj = coeff.unsqueeze(1) * u.unsqueeze(0)
+ zproj = (zc_flat - alpha_map.reshape(1, -1) * proj).reshape(B, C, H, W)
+
+ # blend only inside (outside path keeps original)
+ if M_soft is None:
+ z_new = (1 - beta) * zproj + beta * z_work
+ else:
+ z_new = z_work * (1 - M_soft) + ((1 - beta) * zproj + beta * z_work) * M_soft
+
+ # inside-only mean/std restore
+ if M_soft is not None:
+ wsum = M_soft.sum([1,2,3], keepdim=True).clamp_min(eps)
+ def mstats(X):
+ mean_in = (X * M_soft).sum([1,2,3], keepdim=True) / wsum
+ Xm = (X - mean_in) * M_soft
+ std_in = (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt()
+ return mean_in, std_in
+ mean_in0, std_in0 = mstats(z_work)
+ mean_in1, std_in1 = mstats(z_new)
+
+ # mean restore (inside drift = 0 relative to previous iter)
+ z_new = z_new + (mean_in0 - mean_in1) * M_soft
+
+ # std restore (tight clamp)
+ gain = ((std_in0 + eps) / (std_in1 + eps)).clamp(0.97, 1.03)
+ z_new = z_new * (1 - M_soft) + ((z_new - mean_in0) * gain + mean_in0) * M_soft
+
+ z_work = z_new
+
+ # ---------------------- Outside mean lock (keep outside brightness) ----------------------
+ if
M_soft is not None:
+ wsum_out_soft = (1 - M_soft).sum([1,2,3], keepdim=True).clamp_min(eps)
+ mean_out_base = (Z_base * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft
+ mean_out_fin = (z_work * (1 - M_soft)).sum([1,2,3], keepdim=True) / wsum_out_soft
+ delta_out = (mean_out_base - mean_out_fin) # 100% correction
+
+ # apply purely on strict outside to avoid boundary tint
+ M_out_hard = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.5)
+ if M_out_hard is not None:
+ z_work = z_work + delta_out * M_out_hard
+ else:
+ z_work = z_work + delta_out * (1 - M_soft)
+
+
+ # ---- (optional) Outside std lock: keep it very tight ----
+ W_out = _prep_mask_hard_out(garment_mask, H, W, C, init_latents.device, init_latents.dtype, thresh=0.55)
+ if W_out is not None:
+ wsum = W_out.sum([1,2,3], keepdim=True).clamp_min(eps)
+ mu0 = (Z_base * W_out).sum([1,2,3], keepdim=True) / wsum
+ mu1 = (z_work * W_out).sum([1,2,3], keepdim=True) / wsum
+
+ def _std(X, mu):
+ Xm = (X - mu) * W_out
+ return (Xm.pow(2).sum([1,2,3], keepdim=True) / wsum).sqrt()
+
+ s0 = _std(Z_base, mu0) # reference std
+ s1 = _std(z_work, mu1) # current std
+ g_out = ((s0 + eps) / (s1 + eps)).clamp(0.995, 1.005) # only a very slight correction
+
+ z_work = z_work * (1 - W_out) + ((z_work - mu1) * g_out + mu1) * W_out
+
+
+ init_latents = z_work
+
+
+ # ====================== EXTRA DIAGNOSTICS: is it projection or blur? ======================
+ with torch.no_grad():
+ # (A) delta-z alignment in latent (inside mask)
+ W_in = M_soft if M_soft is not None else torch.ones_like(init_latents)
+ dZ = ((init_latents - Z) * W_in).reshape(B, -1)
+ num = (dZ @ u).pow(2).mean()
+ den = (dZ.pow(2).sum(dim=1).mean() + eps)
+ align_idx = (num / den).item()
+
+
+ def _rand_perp_energy(trials=4):
+ vals = []
+ for _ in range(trials):
+ r = torch.randn_like(u)
+ r = r - (r @ u) * u
+ r = r / (r.norm() + eps)
+ vals.append((dZ @ r).pow(2).mean())
+ return torch.stack(vals).mean()
+ perp_energy = _rand_perp_energy(trials=6).item()
+ u_energy = num.item()
+
+ perp_u_ratio = (perp_energy + eps) / (u_energy + eps)
+
+ # (C) Pixel-domain high-frequency check via Laplacian
+ sf = float(self.vae.config.scaling_factor)
+ x0 = self.vae.decode(Z / sf).sample # [B,3,Hx,Wx]
+ x1 = self.vae.decode(init_latents / sf).sample
+
+ def _lap_energy(x, M_like):
+
+ k = torch.tensor([[0.,1.,0.],
+ [1.,-4.,1.],
+ [0.,1.,0.]], device=x.device, dtype=x.dtype).view(1,1,3,3)
+ Cx = x.shape[1]
+ K = k.repeat(Cx,1,1,1)
+ y = F.conv2d(x, K, padding=1, groups=Cx).abs() # |Laplacian(x)|
+
+ if M_like is None:
+ return y.mean()
+
+ M_pix = M_like
+ if M_pix.shape[2:] != x.shape[2:]:
+ M_pix = F.interpolate(M_pix, size=x.shape[2:], mode="nearest")
+ if M_pix.shape[1] != x.shape[1]:
+ M_pix = M_pix[:, :1].repeat(1, x.shape[1], 1, 1)
+
+ w = M_pix.sum([1,2,3], keepdim=True).clamp_min(eps)
+ return ((y * M_pix).sum([1,2,3], keepdim=True) / w).mean()
+
+ hf_in0 = _lap_energy(x0, M_soft).item()
+ hf_in1 = _lap_energy(x1, M_soft).item()
+ hf_out0 = _lap_energy(x0, 1 - M_soft if M_soft is not None else None).item()
+ hf_out1 = _lap_energy(x1, 1 - M_soft if M_soft is not None else None).item()
+
+ hf_in_ratio = (hf_in1 / (hf_in0 + eps))
+ hf_out_ratio = (hf_out1 / (hf_out0 + eps))
+
+ print(f"[Diag|Align] A = dZ.u energy fraction (inside) = {align_idx:.3f} (higher means projection)")
+ print(f"[Diag|Perp] perp/u energy ratio (inside) = {perp_u_ratio:.3f} (lower means projection)")
+ print(f"[Diag|HF] inside HF ratio={hf_in_ratio:.3f}, outside HF ratio={hf_out_ratio:.3f} (blur would give HF << 1)")
+
+
+
+ # 
---------------------- verification ---------------------- + with torch.no_grad(): + ZAc = (init_latents - mean_z0) * (M_soft if M_soft is not None else 1) + coeff_after = (ZAc.reshape(B,-1) @ u) + coeff_ratio = (coeff_after.abs().mean() / (coeff_before.abs().mean() + eps)).item() + energy_ratio = (coeff_after.pow(2).mean() / (coeff_before.pow(2).mean() + eps)).item() + + if M_soft is not None: + def _mstats(z): + w_in = M_soft.sum([1,2,3], keepdim=True).clamp_min(eps) + w_out = (1 - M_soft).sum([1,2,3], keepdim=True).clamp_min(eps) + mean_in = (z*M_soft).sum([1,2,3], keepdim=True) / w_in + mean_out = (z*(1-M_soft)).sum([1,2,3], keepdim=True) / w_out + std_in = (((z-mean_in)*M_soft)**2).sum([1,2,3], keepdim=True).div(w_in).sqrt() + std_out = (((z-mean_out)*(1-M_soft))**2).sum([1,2,3], keepdim=True).div(w_out).sqrt() + return mean_in.mean(), std_in.mean(), mean_out.mean(), std_out.mean() + + mean_in0, std_in0, mean_out0, std_out0 = _mstats(Z) + mean_in1, std_in1, mean_out1, std_out1 = _mstats(init_latents) + + print(f"[Garment|Projection] coeff โ†“ ratio = {coeff_ratio:.4f}, energy โ†“ ratio = {energy_ratio:.4f}") + print(f"[Garment|Leak] inside ฮ”mean={float(mean_in1-mean_in0):+.5f}, ฮ”std={float(std_in1-std_in0):+.5f} " + f"| outside ฮ”mean={float(mean_out1-mean_out0):+.5f}, ฮ”std={float(std_out1-std_out0):+.5f}") + else: + print(f"[Garment|Projection] coeff โ†“ ratio = {coeff_ratio:.4f}, energy โ†“ ratio = {energy_ratio:.4f}") + + # ---------------------- noise ---------------------- + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + return init_latents, noise + + + + + + + + + + + + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
+ ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
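+ # Illustrative sketch (assumed usage, mirroring the standard diffusers denoising loop;
+ # `noise_pred_text` / `noise_pred_uncond` are hypothetical local names, not attributes of this class):
+ # if self.do_classifier_free_guidance:
+ # noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)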
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + garment_images: Union[torch.FloatTensor, List[torch.FloatTensor], PIL.Image.Image, List[PIL.Image.Image]] = None, + garment_mask: PIL.Image.Image = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image will be used as the starting point for the image generation process. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. 
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If + the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also + be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in + init, images must be passed as a list such that each element of the list can be correctly batched for + input to a single controlnet. + height (`int`, *optional*, defaults to the size of control_image): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to the size of control_image): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original unet. If multiple ControlNets are specified in init, you can set the + corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the controlnet starts applying. 
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during inference with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
+            containing the output images.
+        """
+
+        callback = kwargs.pop("callback", None)
+        callback_steps = kwargs.pop("callback_steps", None)
+
+        if callback is not None:
+            deprecate(
+                "callback",
+                "1.0.0",
+                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+        if callback_steps is not None:
+            deprecate(
+                "callback_steps",
+                "1.0.0",
+                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+        controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+        # align format for control guidance
+        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+        elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+            mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+            control_guidance_start, control_guidance_end = (
+                mult * [control_guidance_start],
+                mult * [control_guidance_end],
+            )
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            control_image,
+            strength,
+            num_inference_steps,
+            callback_steps,
+            negative_prompt,
+            negative_prompt_2,
+            prompt_embeds,
+            negative_prompt_embeds,
+            pooled_prompt_embeds,
+            negative_pooled_prompt_embeds,
+            ip_adapter_image,
+            ip_adapter_image_embeds,
+            controlnet_conditioning_scale,
+            control_guidance_start,
+            control_guidance_end,
+            callback_on_step_end_tensor_inputs,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._clip_skip = clip_skip
+        self._cross_attention_kwargs = cross_attention_kwargs
+
+        # 2.
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + + + # 3.1. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image and controlnet_conditioning_image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + control_images.append(control_image_) + + control_image = control_images + height, width = control_image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + + + # 6. 
Prepare latent variables + + mask = prepare_mask(mask=mask_image) + + sketch_image = self.image_processor.preprocess(sketch_image) + + if latents is None: + latents_with_noise = self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + True, + garment_images = garment_images, + garment_mask = garment_mask, + ) + + + + init_latents, noise= self.prepare_latents_( + image = sketch_image, + timestep = latent_timestep, + batch_size = batch_size, + num_images_per_prompt = num_images_per_prompt, + dtype = prompt_embeds.dtype, + device = device, + generator = generator, + garment_images = garment_images, + garment_mask = garment_mask, + + ) + + # 6.1. Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + add_text_embeds = pooled_prompt_embeds + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): +# print("#####") +# print("timesteps: ", t) +# print("#####") + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents_with_noise + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # controlnet(s) inference + if guess_mode and self.do_classifier_free_guidance: + # Infer ControlNet only for the conditional batch. + control_model_input = latents + control_model_input = self.scheduler.scale_model_input(control_model_input, t) + controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] + controlnet_added_cond_kwargs = { + "text_embeds": add_text_embeds.chunk(2)[1], + "time_ids": add_time_ids.chunk(2)[1], + } + else: + control_model_input = latent_model_input + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
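+                    # Shape sketch (assuming a conditional batch of size B): each residual `d` of shape
+                    # (B, C, H, W) becomes (2B, C, H, W), with zeros occupying the unconditional half.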
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_with_noise = self.scheduler.step(noise_pred, t, latents_with_noise, **extra_step_kwargs, return_dict=False)[0] + + # masking process + tmp = t.unsqueeze(0) + init_latents_proper = self.scheduler.add_noise( + init_latents, noise, tmp + ).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + latents_with_noise = ( + mask * latents_with_noise + (1 - mask) * init_latents_proper + ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + 
) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype) + ) + latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents_with_noise = latents_with_noise / self.vae.config.scaling_factor + +# sf = self.vae.config.scaling_factor # ์˜ˆ: 0.18215 + image = self.vae.decode(latents_with_noise, return_dict=False)[0] +# image = self.vae.decode(init_latents/sf, return_dict=False)[0] + + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents_with_noise + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet/pipeline_flax_controlnet.py b/diffusers3/pipelines/controlnet/pipeline_flax_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..8a2cc08dbb2bf18c96f5594c629c54d042c1c751 --- /dev/null +++ b/diffusers3/pipelines/controlnet/pipeline_flax_controlnet.py @@ -0,0 +1,532 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxControlNetModel, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from ..stable_diffusion import FlaxStableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> import jax.numpy as jnp + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + >>> from diffusers.utils import load_image, make_image_grid + >>> from PIL import Image + >>> from diffusers import FlaxStableDiffusionControlNetPipeline, FlaxControlNetModel + + + >>> def create_key(seed=0): + ... 
return jax.random.PRNGKey(seed) + + + >>> rng = create_key(0) + + >>> # get canny image + >>> canny_image = load_image( + ... "https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/blog_post_cell_10_output_0.jpeg" + ... ) + + >>> prompts = "best quality, extremely detailed" + >>> negative_prompts = "monochrome, lowres, bad anatomy, worst quality, low quality" + + >>> # load control net and stable diffusion v1-5 + >>> controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( + ... "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32 + ... ) + >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32 + ... ) + >>> params["controlnet"] = controlnet_params + + >>> num_samples = jax.device_count() + >>> rng = jax.random.split(rng, jax.device_count()) + + >>> prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) + >>> negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples) + >>> processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) + + >>> p_params = replicate(params) + >>> prompt_ids = shard(prompt_ids) + >>> negative_prompt_ids = shard(negative_prompt_ids) + >>> processed_image = shard(processed_image) + + >>> output = pipe( + ... prompt_ids=prompt_ids, + ... image=processed_image, + ... params=p_params, + ... prng_seed=rng, + ... num_inference_steps=50, + ... neg_prompt_ids=negative_prompt_ids, + ... jit=True, + ... ).images + + >>> output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) + >>> output_images = make_image_grid(output_images, num_samples // 4, 4) + >>> output_images.save("generated_image.png") + ``` +""" + + +class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-to-image generation using Stable Diffusion with ControlNet Guidance. + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`FlaxControlNetModel`]: + Provides additional conditioning to the `unet` during the denoising process. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + controlnet: FlaxControlNetModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_text_inputs(self, prompt: Union[str, List[str]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + + return text_input.input_ids + + def prepare_image_inputs(self, image: Union[Image.Image, List[Image.Image]]): + if not isinstance(image, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(image, Image.Image): + image = [image] + + processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) + + return processed_images + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) + has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." 
+ ) + + return images, has_nsfw_concepts + + def _generate( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + guidance_scale: float, + latents: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + controlnet_conditioning_scale: float = 1.0, + ): + height, width = image.shape[-2:] + if height % 64 != 0 or width % 64 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 64 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + image = jnp.concatenate([image] * 2) + + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + down_block_res_samples, mid_block_res_sample = self.controlnet.apply( + {"params": params["controlnet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + controlnet_cond=image, + conditioning_scale=controlnet_conditioning_scale, + return_dict=False, + ) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + ).sample + + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + latents: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + controlnet_conditioning_scale: Union[float, jnp.ndarray] = 1.0, + return_dict: bool = True, + jit: bool = False, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt_ids (`jnp.ndarray`): + The prompt or prompts to guide the image generation. + image (`jnp.ndarray`): + Array representing the ControlNet input condition to provide guidance to the `unet` for generation. + params (`Dict` or `FrozenDict`): + Dictionary containing the model parameters/weights. + prng_seed (`jax.Array`): + Array containing random number generator key. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. 
Guidance scale is enabled when `guidance_scale > 1`. + latents (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + array is generated by sampling using the supplied random `generator`. + controlnet_conditioning_scale (`float` or `jnp.ndarray`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. + + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + + height, width = image.shape[-2:] + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. + guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + if isinstance(controlnet_conditioning_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. 
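+        # Illustrative note (not part of the original code): with e.g. 2 prompts and a scalar 0.8 this
+        # yields jnp.array([0.8, 0.8]); when inputs are sharded (`jit=True`) the leading axis maps to
+        # devices, and the added `[:, None]` axis keeps each per-device entry as a 1-element array.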
+ controlnet_conditioning_scale = jnp.array([controlnet_conditioning_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + controlnet_conditioning_scale = controlnet_conditioning_scale[:, None] + + if jit: + images = _p_generate( + self, + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, + ) + else: + images = self._generate( + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.array(images) + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, num_inference_steps. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, 0, None, 0, 0, 0, 0), + static_broadcasted_argnums=(0, 5), +) +def _p_generate( + pipe, + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, +): + return pipe._generate( + prompt_ids, + image, + params, + prng_seed, + num_inference_steps, + guidance_scale, + latents, + neg_prompt_ids, + controlnet_conditioning_scale, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... 
-> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) + + +def preprocess(image, dtype): + image = image.convert("RGB") + w, h = image.size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = jnp.array(image).astype(dtype) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return image diff --git a/diffusers3/pipelines/controlnet_hunyuandit/__init__.py b/diffusers3/pipelines/controlnet_hunyuandit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34c59795de328f2fa2cbe610625d866e4710d4a2 --- /dev/null +++ b/diffusers3/pipelines/controlnet_hunyuandit/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_hunyuandit_controlnet"] = ["HunyuanDiTControlNetPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_hunyuandit_controlnet import HunyuanDiTControlNetPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py b/diffusers3/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..10c521e8499707ba5b38054251b0bbd3c15004cd --- /dev/null +++ b/diffusers3/pipelines/controlnet_hunyuandit/pipeline_hunyuandit_controlnet.py @@ -0,0 +1,1042 @@ +# Copyright 2024 HunyuanDiT Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel + +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import AutoencoderKL, HunyuanDiT2DControlNetModel, HunyuanDiT2DModel, HunyuanDiT2DMultiControlNetModel +from ...models.embeddings import get_2d_rotary_pos_embed +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import DDPMScheduler +from ...utils import ( + is_torch_xla_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import HunyuanDiT2DControlNetModel, HunyuanDiTControlNetPipeline + import torch + + controlnet = HunyuanDiT2DControlNetModel.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny", torch_dtype=torch.float16 + ) + + pipe = HunyuanDiTControlNetPipeline.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.to("cuda") + + from diffusers.utils import load_image + + cond_image = load_image( + "https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny/resolve/main/canny.jpg?download=true" + ) + + ## You may also use English prompt as HunyuanDiT supports both English and Chinese + prompt = "ๅœจๅคœๆ™š็š„้…’ๅบ—้—จๅ‰๏ผŒไธ€ๅบงๅค่€็š„ไธญๅ›ฝ้ฃŽๆ ผ็š„็‹ฎๅญ้›•ๅƒ็Ÿ—็ซ‹็€๏ผŒๅฎƒ็š„็œผ็›้—ช็ƒ็€ๅ…‰่Š’๏ผŒไปฟไฝ›ๅœจๅฎˆๆŠค็€่ฟ™ๅบงๅปบ็ญ‘ใ€‚่ƒŒๆ™ฏๆ˜ฏๅคœๆ™š็š„้…’ๅบ—ๅ‰๏ผŒๆž„ๅ›พๆ–นๅผๆ˜ฏ็‰นๅ†™๏ผŒๅนณ่ง†๏ผŒๅฑ…ไธญๆž„ๅ›พใ€‚่ฟ™ๅผ ็…ง็‰‡ๅ‘ˆ็Žฐไบ†็œŸๅฎžๆ‘„ๅฝฑ้ฃŽๆ ผ๏ผŒ่•ดๅซไบ†ไธญๅ›ฝ้›•ๅก‘ๆ–‡ๅŒ–๏ผŒๅŒๆ—ถๅฑ•็Žฐไบ†็ฅž็ง˜ๆฐ›ๅ›ด" + # prompt="At night, an ancient Chinese-style lion statue stands in front of the hotel, its eyes gleaming as if guarding the building. The background is the hotel entrance at night, with a close-up, eye-level, and centered composition. This photo presents a realistic photographic style, embodies Chinese sculpture culture, and reveals a mysterious atmosphere." 
+ image = pipe( + prompt, + height=1024, + width=1024, + control_image=cond_image, + num_inference_steps=50, + ).images[0] + ``` +""" + +STANDARD_RATIO = np.array( + [ + 1.0, # 1:1 + 4.0 / 3.0, # 4:3 + 3.0 / 4.0, # 3:4 + 16.0 / 9.0, # 16:9 + 9.0 / 16.0, # 9:16 + ] +) +STANDARD_SHAPE = [ + [(1024, 1024), (1280, 1280)], # 1:1 + [(1024, 768), (1152, 864), (1280, 960)], # 4:3 + [(768, 1024), (864, 1152), (960, 1280)], # 3:4 + [(1280, 768)], # 16:9 + [(768, 1280)], # 9:16 +] +STANDARD_AREA = [np.array([w * h for w, h in shapes]) for shapes in STANDARD_SHAPE] +SUPPORTED_SHAPE = [ + (1024, 1024), + (1280, 1280), # 1:1 + (1024, 768), + (1152, 864), + (1280, 960), # 4:3 + (768, 1024), + (864, 1152), + (960, 1280), # 3:4 + (1280, 768), # 16:9 + (768, 1280), # 9:16 +] + + +def map_to_standard_shapes(target_width, target_height): + target_ratio = target_width / target_height + closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) + closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) + width, height = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] + return width, height + + +def get_resize_crop_region_for_grid(src, tgt_size): + th = tw = tgt_size + h, w = src + + r = h / w + + # resize + if r > 1: + resize_height = th + resize_width = int(round(th / h * w)) + else: + resize_width = tw + resize_height = int(round(tw / w * h)) + + crop_top = int(round((th - resize_height) / 2.0)) + crop_left = int(round((tw - resize_width) / 2.0)) + + return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class HunyuanDiTControlNetPipeline(DiffusionPipeline): + r""" + Pipeline for English/Chinese-to-image generation using HunyuanDiT. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + HunyuanDiT uses two text encoders: [mT5](https://huggingface.co/google/mt5-base) and [bilingual CLIP](fine-tuned by + ourselves) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. We use + `sdxl-vae-fp16-fix`. + text_encoder (Optional[`~transformers.BertModel`, `~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + HunyuanDiT uses a fine-tuned [bilingual CLIP]. + tokenizer (Optional[`~transformers.BertTokenizer`, `~transformers.CLIPTokenizer`]): + A `BertTokenizer` or `CLIPTokenizer` to tokenize text. 
+ transformer ([`HunyuanDiT2DModel`]): + The HunyuanDiT model designed by Tencent Hunyuan. + text_encoder_2 (`T5EncoderModel`): + The mT5 embedder. Specifically, it is 't5-v1_1-xxl'. + tokenizer_2 (`MT5Tokenizer`): + The tokenizer for the mT5 embedder. + scheduler ([`DDPMScheduler`]): + A scheduler to be used in combination with HunyuanDiT to denoise the encoded image latents. + controlnet ([`HunyuanDiT2DControlNetModel`] or `List[HunyuanDiT2DControlNetModel]` or [`HunyuanDiT2DControlNetModel`]): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [ + "safety_checker", + "feature_extractor", + "text_encoder_2", + "tokenizer_2", + "text_encoder", + "tokenizer", + ] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "prompt_embeds_2", + "negative_prompt_embeds_2", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: BertModel, + tokenizer: BertTokenizer, + transformer: HunyuanDiT2DModel, + scheduler: DDPMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + controlnet: Union[ + HunyuanDiT2DControlNetModel, + List[HunyuanDiT2DControlNetModel], + Tuple[HunyuanDiT2DControlNetModel], + HunyuanDiT2DMultiControlNetModel, + ], + text_encoder_2=T5EncoderModel, + tokenizer_2=MT5Tokenizer, + requires_safety_checker: bool = True, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + text_encoder_2=text_encoder_2, + controlnet=controlnet, + ) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + # Copied from diffusers.pipelines.hunyuandit.pipeline_hunyuandit.HunyuanDiTPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + device: torch.device = None, + dtype: torch.dtype = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + max_sequence_length: Optional[int] = None, + text_encoder_index: int = 0, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + dtype (`torch.dtype`): + torch dtype + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds` is passed directly. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. + max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. + text_encoder_index (`int`, *optional*): + Index of the text encoder to use. `0` for clip and `1` for T5. 
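+
+        Example (illustrative sketch added for clarity; `pipe` is an already-loaded pipeline and the
+        prompt is arbitrary, not upstream code):
+
+        ```py
+        >>> # index 0 -> bilingual CLIP tokenizer/encoder (max 77 tokens), index 1 -> mT5 (max 256 tokens)
+        >>> embeds, neg_embeds, mask, neg_mask = pipe.encode_prompt(
+        ...     "an astronaut riding a horse", device=pipe._execution_device, text_encoder_index=0
+        ... )
+        ```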
+ """ + if dtype is None: + if self.text_encoder_2 is not None: + dtype = self.text_encoder_2.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + if device is None: + device = self._execution_device + + tokenizers = [self.tokenizer, self.tokenizer_2] + text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = tokenizers[text_encoder_index] + text_encoder = text_encoders[text_encoder_index] + + if max_sequence_length is None: + if text_encoder_index == 0: + max_length = 77 + if text_encoder_index == 1: + max_length = 256 + else: + max_length = max_sequence_length + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = text_encoder( + text_input_ids.to(device), + attention_mask=prompt_attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_attention_mask = uncond_input.attention_mask.to(device) + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + attention_mask=negative_prompt_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
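+        # (Illustrative note added for clarity, not upstream code: this pipeline registers a
+        # DDPMScheduler, whose `step()` accepts `generator` but not `eta`, so in the default setup the
+        # signature inspection below keeps `generator` and silently drops `eta`.)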
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.hunyuandit.pipeline_hunyuandit.HunyuanDiTPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + prompt_embeds_2=None, + negative_prompt_embeds_2=None, + prompt_attention_mask_2=None, + negative_prompt_attention_mask_2=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is None and prompt_embeds_2 is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: + raise ValueError("Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: + raise ValueError( + "Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`." 
+ ) + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: + if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: + raise ValueError( + "`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but" + f" got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2`" + f" {negative_prompt_embeds_2.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
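+    # Illustrative note (added comment, not upstream code): in the denoising loop the two halves of the
+    # batched prediction are combined as
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # so a value of exactly 1.0 collapses to the text-conditioned prediction alone, which is why the
+    # property below only enables classifier-free guidance for guidance_scale > 1.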
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 5.0, + control_image: PipelineImageInput = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_2: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_2: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + prompt_attention_mask_2: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask_2: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = (1024, 1024), + target_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + use_resolution_binning: bool = True, + ): + r""" + The call function to the pipeline for generation with HunyuanDiT. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`): + The height in pixels of the generated image. + width (`int`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. 
If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + prompt_embeds_2 (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_prompt_embeds_2 (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds` is passed directly. + prompt_attention_mask_2 (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds_2` is passed directly. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. + negative_prompt_attention_mask_2 (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds_2` is passed directly. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A callback function or a list of callback functions to be called at the end of each denoising step. + callback_on_step_end_tensor_inputs (`List[str]`, *optional*): + A list of tensor inputs that should be passed to the callback function. 
If not defined, all tensor + inputs will be passed. + guidance_rescale (`float`, *optional*, defaults to 0.0): + Rescale the noise_cfg according to `guidance_rescale`. Based on findings of [Common Diffusion Noise + Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + original_size (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): + The original size of the image. Used to calculate the time ids. + target_size (`Tuple[int, int]`, *optional*): + The target size of the image. Used to calculate the time ids. + crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`): + The top left coordinates of the crop. Used to calculate the time ids. + use_resolution_binning (`bool`, *optional*, defaults to `True`): + Whether to use resolution binning or not. If `True`, the input resolution will be mapped to the closest + standard resolution. Supported resolutions are 1024x1024, 1280x1280, 1024x768, 1152x864, 1280x960, + 768x1024, 864x1152, 960x1280, 1280x768, and 768x1280. It is recommended to set this to `True`. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. default height and width + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + height = int((height // 16) * 16) + width = int((width // 16) * 16) + + if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: + width, height = map_to_standard_shapes(width, height) + height = int(height) + width = int(width) + logger.warning(f"Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}") + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. 
Encode input prompt + + ( + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt=prompt, + device=device, + dtype=self.transformer.dtype, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + max_sequence_length=77, + text_encoder_index=0, + ) + ( + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + ) = self.encode_prompt( + prompt=prompt, + device=device, + dtype=self.transformer.dtype, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds_2, + negative_prompt_embeds=negative_prompt_embeds_2, + prompt_attention_mask=prompt_attention_mask_2, + negative_prompt_attention_mask=negative_prompt_attention_mask_2, + max_sequence_length=256, + text_encoder_index=1, + ) + + # 4. Prepare control image + if isinstance(self.controlnet, HunyuanDiT2DControlNetModel): + control_image = self.prepare_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=self.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + height, width = control_image.shape[-2:] + + control_image = self.vae.encode(control_image).latent_dist.sample() + control_image = control_image * self.vae.config.scaling_factor + + elif isinstance(self.controlnet, HunyuanDiT2DMultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=self.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + + control_image_ = self.vae.encode(control_image_).latent_dist.sample() + control_image_ = control_image_ * self.vae.config.scaling_factor + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. 
create image_rotary_emb, style embedding & time ids + grid_height = height // 8 // self.transformer.config.patch_size + grid_width = width // 8 // self.transformer.config.patch_size + base_size = 512 // 8 // self.transformer.config.patch_size + grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) + image_rotary_emb = get_2d_rotary_pos_embed( + self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width) + ) + + style = torch.tensor([0], device=device) + + target_size = target_size or (height, width) + add_time_ids = list(original_size + target_size + crops_coords_top_left) + add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) + prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) + prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) + add_time_ids = torch.cat([add_time_ids] * 2, dim=0) + style = torch.cat([style] * 2, dim=0) + + prompt_embeds = prompt_embeds.to(device=device) + prompt_attention_mask = prompt_attention_mask.to(device=device) + prompt_embeds_2 = prompt_embeds_2.to(device=device) + prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) + add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat( + batch_size * num_images_per_prompt, 1 + ) + style = style.to(device=device).repeat(batch_size * num_images_per_prompt) + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input + t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( + dtype=latent_model_input.dtype + ) + + # controlnet(s) inference + control_block_samples = self.controlnet( + latent_model_input, + t_expand, + encoder_hidden_states=prompt_embeds, + text_embedding_mask=prompt_attention_mask, + encoder_hidden_states_t5=prompt_embeds_2, + text_embedding_mask_t5=prompt_attention_mask_2, + image_meta_size=add_time_ids, + style=style, + image_rotary_emb=image_rotary_emb, + return_dict=False, + controlnet_cond=control_image, + conditioning_scale=controlnet_conditioning_scale, + )[0] + + # predict the noise residual + noise_pred = self.transformer( + latent_model_input, + t_expand, + encoder_hidden_states=prompt_embeds, + text_embedding_mask=prompt_attention_mask, + encoder_hidden_states_t5=prompt_embeds_2, + text_embedding_mask_t5=prompt_attention_mask_2, + image_meta_size=add_time_ids, + style=style, + image_rotary_emb=image_rotary_emb, + return_dict=False, + controlnet_block_samples=control_block_samples, + )[0] + + noise_pred, _ = noise_pred.chunk(2, dim=1) + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if 
self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + prompt_embeds_2 = callback_outputs.pop("prompt_embeds_2", prompt_embeds_2) + negative_prompt_embeds_2 = callback_outputs.pop( + "negative_prompt_embeds_2", negative_prompt_embeds_2 + ) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/controlnet_sd3/__init__.py b/diffusers3/pipelines/controlnet_sd3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aeb61dc8e247555e654d2f84c035dff53328ea0d --- /dev/null +++ b/diffusers3/pipelines/controlnet_sd3/__init__.py @@ -0,0 +1,57 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_3_controlnet"] = ["StableDiffusion3ControlNetPipeline"] + _import_structure["pipeline_stable_diffusion_3_controlnet_inpainting"] = [ + "StableDiffusion3ControlNetInpaintingPipeline" + ] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_3_controlnet import StableDiffusion3ControlNetPipeline + from .pipeline_stable_diffusion_3_controlnet_inpainting import StableDiffusion3ControlNetInpaintingPipeline + + try: + if not 
(is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py b/diffusers3/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f5246027a49d23bd0e3eaa34bca674a9617f2cff --- /dev/null +++ b/diffusers3/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py @@ -0,0 +1,1097 @@ +# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import ( + CLIPTextModelWithProjection, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel +from ...models.transformers import SD3Transformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusion3ControlNetPipeline + >>> from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel + >>> from diffusers.utils import load_image + + >>> controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16) + + >>> pipe = StableDiffusion3ControlNetPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ... 
) + >>> pipe.to("cuda") + >>> control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg") + >>> prompt = "A girl holding a sign that says InstantX" + >>> image = pipe(prompt, control_image=control_image, controlnet_conditioning_scale=0.7).images[0] + >>> image.save("sd3.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusion3ControlNetPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): + r""" + Args: + transformer ([`SD3Transformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
+ vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, + with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` + as its dimension. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + text_encoder_3 ([`T5EncoderModel`]): + Frozen text-encoder. Stable Diffusion 3 uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_3 (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + controlnet ([`SD3ControlNetModel`] or `List[SD3ControlNetModel]` or [`SD3MultiControlNetModel`]): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. 
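+
+    Example (illustrative multi-ControlNet sketch; the Pose repository id is an assumption, only the
+    Canny checkpoint is referenced elsewhere in this file):
+
+    ```py
+    >>> from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel
+    >>> controlnet = SD3MultiControlNetModel(
+    ...     [
+    ...         SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny"),
+    ...         SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Pose"),
+    ...     ]
+    ... )
+    >>> # pass one control image per ControlNet; their conditioning outputs are summed during denoising
+    ```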
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] + + def __init__( + self, + transformer: SD3Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer_2: CLIPTokenizer, + text_encoder_3: T5EncoderModel, + tokenizer_3: T5TokenizerFast, + controlnet: Union[ + SD3ControlNetModel, List[SD3ControlNetModel], Tuple[SD3ControlNetModel], SD3MultiControlNetModel + ], + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + text_encoder_3=text_encoder_3, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + tokenizer_3=tokenizer_3, + transformer=transformer, + scheduler=scheduler, + controlnet=controlnet, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 256, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + if self.text_encoder_3 is None: + return torch.zeros( + ( + batch_size * num_images_per_prompt, + self.tokenizer_max_length, + self.transformer.config.joint_attention_dim, + ), + device=device, + dtype=dtype, + ) + + text_inputs = self.tokenizer_3( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] + + dtype = self.text_encoder_3.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from 
diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clip_skip: Optional[int] = None, + clip_model_index: int = 0, + ): + device = device or self._execution_device + + clip_tokenizers = [self.tokenizer, self.tokenizer_2] + clip_text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = clip_tokenizers[clip_model_index] + text_encoder = clip_text_encoders[clip_model_index] + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + pooled_prompt_embeds = prompt_embeds[0] + + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) + pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + prompt_3: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + max_sequence_length: int = 256, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. 
If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders. + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and + `text_encoder_3`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
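+
+        Note (illustrative, added for clarity): when embeddings are computed from text, the two CLIP
+        embeddings are concatenated along the feature dimension, zero-padded to the T5 feature width,
+        and then concatenated with the T5 embeddings along the sequence dimension. For the standard
+        SD3-medium text encoders this works out to roughly `(77, 768 + 1280)` padded to `(77, 4096)`,
+        then joined with `(256, 4096)` from T5 for a final `(333, 4096)` prompt embedding per image
+        (assuming the default `max_sequence_length=256`).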
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + prompt_3 = prompt_3 or prompt + prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 + + prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=0, + ) + prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( + prompt=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=1, + ) + clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) + + t5_prompt_embed = self._get_t5_prompt_embeds( + prompt=prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + clip_prompt_embeds = torch.nn.functional.pad( + clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) + ) + + prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) + pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + negative_prompt_3 = negative_prompt_3 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + negative_prompt_3 = ( + batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + + negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds( + negative_prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=0, + ) + negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds( + negative_prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=1, + ) + negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) + + t5_negative_prompt_embed = self._get_t5_prompt_embeds( + prompt=negative_prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + negative_clip_prompt_embeds = torch.nn.functional.pad( + negative_clip_prompt_embeds, + (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]), + ) + + negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) + negative_pooled_prompt_embeds = torch.cat( + [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1 + ) + + if self.text_encoder is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + def check_inputs( + self, + prompt, + prompt_2, + prompt_3, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + negative_prompt_3=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_3 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): + raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_3 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if latents is not None: + return latents.to(device=device, dtype=dtype) + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + return latents + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + prompt_3: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + control_image: PipelineImageInput = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + controlnet_pooled_projections: Optional[torch.FloatTensor] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` + will be used instead + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` + will be used instead + height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 28): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `transformer` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `transformer`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + controlnet_pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): + Embeddings projected from the embeddings of controlnet input conditions. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used instead + negative_prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and + `text_encoder_3`. If not defined, `negative_prompt` is used instead + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead + of a plain tuple. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(self.controlnet.nets) if isinstance(self.controlnet, SD3MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + prompt_3, + height, + width, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + dtype = self.transformer.dtype + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_3=prompt_3, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + clip_skip=self.clip_skip, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) + + # 3. 
Prepare control image + if isinstance(self.controlnet, SD3ControlNetModel): + control_image = self.prepare_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + height, width = control_image.shape[-2:] + + control_image = self.vae.encode(control_image).latent_dist.sample() + control_image = control_image * self.vae.config.scaling_factor + + elif isinstance(self.controlnet, SD3MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + + control_image_ = self.vae.encode(control_image_).latent_dist.sample() + control_image_ = control_image_ * self.vae.config.scaling_factor + + control_images.append(control_image_) + + control_image = control_images + else: + assert False + + if controlnet_pooled_projections is None: + controlnet_pooled_projections = torch.zeros_like(pooled_prompt_embeds) + else: + controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(self.controlnet, SD3ControlNetModel) else keeps) + + # 7. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + # controlnet(s) inference + control_block_samples = self.controlnet( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=controlnet_pooled_projections, + joint_attention_kwargs=self.joint_attention_kwargs, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + return_dict=False, + )[0] + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=pooled_prompt_embeds, + block_controlnet_hidden_states=control_block_samples, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusion3PipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py b/diffusers3/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py new file mode 100644 index 0000000000000000000000000000000000000000..47fc6d6daf158d9576ec8308d6e441c109dbe247 --- /dev/null +++ b/diffusers3/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet_inpainting.py @@ -0,0 +1,1144 @@ +# Copyright 2024 Stability AI, The HuggingFace Team and The AlimamaCreative Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
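+ +# The ControlNet conditioning used by this pipeline is assembled in `prepare_image_with_mask` below: the +# input image is masked (masked pixels set to -1), encoded with the VAE, and concatenated along the channel +# dimension with the inverted mask resized to latent resolution. That extra mask channel is why the +# checkpoint in the usage example below is loaded with `extra_conditioning_channels=1`.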
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import ( + CLIPTextModelWithProjection, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel +from ...models.transformers import SD3Transformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers.utils import load_image, check_min_version + >>> from diffusers.pipelines import StableDiffusion3ControlNetInpaintingPipeline + >>> from diffusers.models.controlnet_sd3 import SD3ControlNetModel + + >>> controlnet = SD3ControlNetModel.from_pretrained( + ... "alimama-creative/SD3-Controlnet-Inpainting", use_safetensors=True, extra_conditioning_channels=1 + ... ) + >>> pipe = StableDiffusion3ControlNetInpaintingPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-3-medium-diffusers", + ... controlnet=controlnet, + ... torch_dtype=torch.float16, + ... ) + >>> pipe.text_encoder.to(torch.float16) + >>> pipe.controlnet.to(torch.float16) + >>> pipe.to("cuda") + + >>> image = load_image( + ... "https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting/resolve/main/images/dog.png" + ... ) + >>> mask = load_image( + ... "https://huggingface.co/alimama-creative/SD3-Controlnet-Inpainting/resolve/main/images/dog_mask.png" + ... ) + >>> width = 1024 + >>> height = 1024 + >>> prompt = "A cat is sitting next to a puppy." + >>> generator = torch.Generator(device="cuda").manual_seed(24) + >>> res_image = pipe( + ... negative_prompt="deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW", + ... prompt=prompt, + ... height=height, + ... width=width, + ... control_image=image, + ... control_mask=mask, + ... num_inference_steps=28, + ... generator=generator, + ... controlnet_conditioning_scale=0.95, + ... guidance_scale=7, + ... ).images[0] + >>> res_image.save(f"sd3.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. 
+ num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusion3ControlNetInpaintingPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): + r""" + Args: + transformer ([`SD3Transformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, + with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` + as its dimension. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + text_encoder_3 ([`T5EncoderModel`]): + Frozen text-encoder. 
Stable Diffusion 3 uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_3 (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + controlnet ([`SD3ControlNetModel`] or `List[SD3ControlNetModel]` or [`SD3MultiControlNetModel`]): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] + + def __init__( + self, + transformer: SD3Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer_2: CLIPTokenizer, + text_encoder_3: T5EncoderModel, + tokenizer_3: T5TokenizerFast, + controlnet: Union[ + SD3ControlNetModel, List[SD3ControlNetModel], Tuple[SD3ControlNetModel], SD3MultiControlNetModel + ], + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + text_encoder_3=text_encoder_3, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + tokenizer_3=tokenizer_3, + transformer=transformer, + scheduler=scheduler, + controlnet=controlnet, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_resize=True, do_convert_rgb=True, do_normalize=True + ) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, + do_resize=True, + do_convert_grayscale=True, + do_normalize=False, + do_binarize=True, + ) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 256, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + if self.text_encoder_3 is None: + return torch.zeros( + ( + batch_size * num_images_per_prompt, + self.tokenizer_max_length, + self.transformer.config.joint_attention_dim, + ), + device=device, + dtype=dtype, 
+ ) + + text_inputs = self.tokenizer_3( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] + + dtype = self.text_encoder_3.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clip_skip: Optional[int] = None, + clip_model_index: int = 0, + ): + device = device or self._execution_device + + clip_tokenizers = [self.tokenizer, self.tokenizer_2] + clip_text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = clip_tokenizers[clip_model_index] + text_encoder = clip_text_encoders[clip_model_index] + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + pooled_prompt_embeds = prompt_embeds[0] + + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) + pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, pooled_prompt_embeds + + # Copied from 
diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + prompt_3: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + max_sequence_length: int = 256, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders. + negative_prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and + `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings.
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + prompt_3 = prompt_3 or prompt + prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 + + prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=0, + ) + prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( + prompt=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=1, + ) + clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) + + t5_prompt_embed = self._get_t5_prompt_embeds( + prompt=prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + clip_prompt_embeds = torch.nn.functional.pad( + clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) + ) + + prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) + pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + negative_prompt_3 = negative_prompt_3 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + negative_prompt_3 = ( + batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + + negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds( + negative_prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=0, + ) + negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds( + negative_prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=1, + ) + negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) + + t5_negative_prompt_embed = self._get_t5_prompt_embeds( + prompt=negative_prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + negative_clip_prompt_embeds = torch.nn.functional.pad( + negative_clip_prompt_embeds, + (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]), + ) + + negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) + negative_pooled_prompt_embeds = torch.cat( + [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1 + ) + + if self.text_encoder is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + prompt_3, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + negative_prompt_3=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_3 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): + raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_3 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if latents is not None: + return latents.to(device=device, dtype=dtype) + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + return latents + + def prepare_image_with_mask( + self, + image, + mask, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + # Prepare image + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + # Prepare mask + if isinstance(mask, torch.Tensor): + pass + else: + mask = self.mask_processor.preprocess(mask, height=height, width=width) + mask = mask.repeat_interleave(repeat_by, dim=0) + mask = mask.to(device=device, dtype=dtype) + + # Get masked image + masked_image = image.clone() + masked_image[(mask > 0.5).repeat(1, 3, 1, 1)] = -1 + + # Encode to latents + image_latents = self.vae.encode(masked_image).latent_dist.sample() + image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + image_latents = image_latents.to(dtype) + + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = 1 - mask + control_image = torch.cat([image_latents, mask], dim=1) + + if do_classifier_free_guidance and not guess_mode: + control_image = torch.cat([control_image] * 2) + + return control_image + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
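+ # For reference, the combination applied in the denoising loop (identical to the sibling SD3 ControlNet + # pipeline earlier in this diff) is + # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond), + # so for `guidance_scale <= 1` the property below reports CFG as disabled and only the conditional + # prediction is used.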
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + prompt_3: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + control_image: PipelineImageInput = None, + control_mask: PipelineImageInput = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + controlnet_pooled_projections: Optional[torch.FloatTensor] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` + will be used instead + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` + will be used instead + height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 28): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order.
+ guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be inpainted (which parts of the image to + be masked out with `control_mask` and repainted according to `prompt`). For both numpy array and + pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a list of tensors, the + expected shape should be `(B, C, H, W)`. If it is a numpy array or a list of arrays, the expected shape + should be `(B, H, W, C)` or `(H, W, C)`. + control_mask (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask + are repainted while black pixels are preserved. If `control_mask` is a PIL image, it is converted to a + single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one + color channel (L) instead of 3, so the expected shape for a pytorch tensor would be `(B, 1, H, W)`, and + for a numpy array `(B, H, W, 1)`, `(B, H, W)`, `(H, W, 1)`, or `(H, W)`. + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `transformer`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + controlnet_pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): + Embeddings projected from the embeddings of controlnet input conditions. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used instead + negative_prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and + `text_encoder_3`. If not defined, `negative_prompt` is used instead + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
+                of a plain tuple.
+            joint_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, *optional*, defaults to 256): Maximum sequence length to use with the `prompt`.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`:
+                [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a
+                `tuple`. When returning a tuple, the first element is a list with the generated images.
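+
+        A minimal call sketch (illustrative only; it assumes the pipeline has already been instantiated
+        as `pipe` and that `init_image` / `mask_image` are `PIL.Image.Image` objects of matching size):
+
+        ```py
+        # repaint the white region of `mask_image` inside `init_image`
+        result = pipe(
+            prompt="a modern glass building",
+            control_image=init_image,
+            control_mask=mask_image,
+            num_inference_steps=28,
+            guidance_scale=7.0,
+        ).images[0]
+        ```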
+ """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(self.controlnet.nets) if isinstance(self.controlnet, SD3MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + prompt_3, + height, + width, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + dtype = self.transformer.dtype + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_3=prompt_3, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + clip_skip=self.clip_skip, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) + + # 3. 
Prepare control image + if isinstance(self.controlnet, SD3ControlNetModel): + control_image = self.prepare_image_with_mask( + image=control_image, + mask=control_mask, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + latent_height, latent_width = control_image.shape[-2:] + + height = latent_height * self.vae_scale_factor + width = latent_width * self.vae_scale_factor + + elif isinstance(self.controlnet, SD3MultiControlNetModel): + raise NotImplementedError("MultiControlNetModel is not supported for SD3ControlNetInpaintingPipeline.") + else: + assert False + + if controlnet_pooled_projections is None: + controlnet_pooled_projections = torch.zeros_like(pooled_prompt_embeds) + else: + controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(self.controlnet, SD3ControlNetModel) else keeps) + + # 7. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + # controlnet(s) inference + control_block_samples = self.controlnet( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=controlnet_pooled_projections, + joint_attention_kwargs=self.joint_attention_kwargs, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + return_dict=False, + )[0] + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=pooled_prompt_embeds, + block_controlnet_hidden_states=control_block_samples, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + latents = latents.to(dtype=self.vae.dtype) + + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusion3PipelineOutput(images=image) diff --git a/diffusers3/pipelines/controlnet_xs/__init__.py b/diffusers3/pipelines/controlnet_xs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..978278b184f985a452f9d518a1d0eb4f271c74fd --- /dev/null +++ b/diffusers3/pipelines/controlnet_xs/__init__.py @@ -0,0 +1,68 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_controlnet_xs"] = ["StableDiffusionControlNetXSPipeline"] + _import_structure["pipeline_controlnet_xs_sd_xl"] = ["StableDiffusionXLControlNetXSPipeline"] +try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + pass # _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline + from .pipeline_controlnet_xs_sd_xl import StableDiffusionXLControlNetXSPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 + else: + 
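+        # Flax ControlNet-XS pipelines are not implemented here; this placeholder mirrors the layout
+        # of the other pipeline packages.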
pass # from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/controlnet_xs/pipeline_controlnet_xs.py b/diffusers3/pipelines/controlnet_xs/pipeline_controlnet_xs.py new file mode 100644 index 0000000000000000000000000000000000000000..ca10e65de8a421eb67391d5d900f3eb33777dbb6 --- /dev/null +++ b/diffusers3/pipelines/controlnet_xs/pipeline_controlnet_xs.py @@ -0,0 +1,916 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionControlNetXSPipeline, ControlNetXSAdapter + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 + + >>> controlnet = ControlNetXSAdapter.from_pretrained( + ... "UmerHA/Testing-ConrolNetXS-SD2.1-canny", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionControlNetXSPipeline.from_pretrained( + ... 
"stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + + +class StableDiffusionControlNetXSPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with ControlNet-XS guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetControlNetXSModel to denoise the encoded image latents. + controlnet ([`ControlNetXSAdapter`]): + A [`ControlNetXSAdapter`] to be used in combination with `unet` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetControlNetXSModel], + controlnet: ControlNetXSAdapter, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if isinstance(unet, UNet2DConditionModel): + unet = UNetControlNetXSModel.from_unet(unet, controlnet) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
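+            # The extracted CLIP pixel values are scored by the safety checker; any image flagged as NSFW
+            # is replaced with a black image and marked `True` in the returned `has_nsfw_concept` list.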
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Check `image` and `controlnet_conditioning_scale` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.unet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.unet, UNetControlNetXSModel) + or is_compiled + and isinstance(self.unet._orig_mod, UNetControlNetXSModel) + ): + self.check_image(image, prompt, prompt_embeds) + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + else: + assert False + + start, end = control_guidance_start, control_guidance_end + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale + def guidance_scale(self): + return self._guidance_scale + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip + def clip_skip(self): + return self._clip_skip + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + 
control_guidance_start: float = 0.0, + control_guidance_end: float = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + unet = self.unet._orig_mod if is_compiled_module(self.unet) else self.unet + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + image, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare image + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=unet.dtype, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + height, width = image.shape[-2:] + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare latent variables + num_channels_latents = self.unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + is_controlnet_compiled = is_compiled_module(self.unet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if is_controlnet_compiled and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + apply_control = ( + i / len(timesteps) >= control_guidance_start and (i + 1) / len(timesteps) <= control_guidance_end + ) + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=prompt_embeds, + controlnet_cond=image, + conditioning_scale=controlnet_conditioning_scale, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=True, + apply_control=apply_control, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py b/diffusers3/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..326cfdab7be784a504b45a7a5fde2a06cb7f5c36 --- /dev/null +++ b/diffusers3/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py @@ -0,0 +1,1111 @@ +# Copyright 2024 The HuggingFace 
Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import StableDiffusionXLControlNetXSPipeline, ControlNetXSAdapter, AutoencoderKL + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> controlnet = ControlNetXSAdapter.from_pretrained( + ... "UmerHA/Testing-ConrolNetXS-SDXL-canny", torch_dtype=torch.float16 + ... ) + >>> pipe = StableDiffusionXLControlNetXSPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... 
prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image + ... ).images[0] + ``` +""" + + +class StableDiffusionXLControlNetXSPipeline( + DiffusionPipeline, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet-XS guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetControlNetXSModel to denoise the encoded image latents. + controlnet ([`ControlNetXSAdapter`]): + A [`ControlNetXSAdapter`] to be used in combination with `unet` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetControlNetXSModel], + controlnet: ControlNetXSAdapter, + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + ): + super().__init__() + + if isinstance(unet, UNet2DConditionModel): + unet = UNetControlNetXSModel.from_unet(unet, controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following 
part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
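+        # (For example, `EulerDiscreteScheduler` and `UniPCMultistepScheduler` accept no `eta` argument at all,
+        #  which is why the scheduler's `step()` signature is inspected below instead of passing eta unconditionally.)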
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Check `image` and ``controlnet_conditioning_scale`` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.unet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.unet, UNetControlNetXSModel) + or is_compiled + and isinstance(self.unet._orig_mod, UNetControlNetXSModel) + ): + self.check_image(image, prompt, prompt_embeds) + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + else: + assert False + + start, end = control_guidance_start, control_guidance_end + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.base_add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale + def guidance_scale(self): + return self._guidance_scale + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip + def clip_skip(self): + return self._clip_skip + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + control_guidance_start: float = 0.0, + control_guidance_end: float = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + The call function to the pipeline for generation. 
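+
+        Prompts are encoded with both CLIP text encoders, the conditioning `image` is resized to the target
+        resolution (and duplicated when classifier-free guidance is active), and the latents are then denoised by
+        the combined UNet / ControlNet-XS model, with the control signal applied only between
+        `control_guidance_start` and `control_guidance_end`.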
+ + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. + control_guidance_start (`float`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeine class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] is + returned, otherwise a `tuple` is returned containing the output images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + unet = self.unet._orig_mod if is_compiled_module(self.unet) else self.unet + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + + # 4. Prepare image + if isinstance(unet, UNetControlNetXSModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=unet.dtype, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + height, width = image.shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare latent variables + num_channels_latents = self.unet.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + is_controlnet_compiled = is_compiled_module(self.unet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if is_controlnet_compiled and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # predict the noise residual + apply_control = ( + i / len(timesteps) >= control_guidance_start and (i + 1) / len(timesteps) <= control_guidance_end + ) + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=prompt_embeds, + controlnet_cond=image, + conditioning_scale=controlnet_conditioning_scale, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=True, + apply_control=apply_control, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = 
callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # manually for max memory savings + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/dance_diffusion/__init__.py b/diffusers3/pipelines/dance_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0d3e466dfa65b2e9890451607959ed45d092cae7 --- /dev/null +++ b/diffusers3/pipelines/dance_diffusion/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_dance_diffusion": ["DanceDiffusionPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_dance_diffusion import DanceDiffusionPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/dance_diffusion/pipeline_dance_diffusion.py b/diffusers3/pipelines/dance_diffusion/pipeline_dance_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..bcd36c412b54ceb47df088a032161aa88d83950f --- /dev/null +++ b/diffusers3/pipelines/dance_diffusion/pipeline_dance_diffusion.py @@ -0,0 +1,156 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
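+
+# Note on audio length handling (summarising `__call__` below): the requested `audio_length_in_s` is converted to
+# a raw sample count and, if it is not already a multiple of 2 ** len(unet.up_blocks), rounded *up* to the next
+# such multiple so the 1-D UNet can downsample it; the audio is trimmed back to the originally requested number
+# of samples before being returned. Illustrative (not model-specific) arithmetic: 4.0 s at a hypothetical
+# 44_100 Hz sample rate with 5 up-blocks (factor 32) gives 176_400 samples, which is rounded up to 176_416 for
+# generation and cut back to 176_400 afterwards.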
+ + +from typing import List, Optional, Tuple, Union + +import torch + +from ...utils import logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class DanceDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for audio generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet1DModel`]): + A `UNet1DModel` to denoise the encoded audio. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`IPNDMScheduler`]. + """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 100, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + audio_length_in_s: Optional[float] = None, + return_dict: bool = True, + ) -> Union[AudioPipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of audio samples to generate. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at + the expense of slower inference. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): + The length of the generated audio sample in seconds. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + + Example: + + ```py + from diffusers import DiffusionPipeline + from scipy.io.wavfile import write + + model_id = "harmonai/maestro-150k" + pipe = DiffusionPipeline.from_pretrained(model_id) + pipe = pipe.to("cuda") + + audios = pipe(audio_length_in_s=4.0).audios + + # To save locally + for i, audio in enumerate(audios): + write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) + + # To dislay in google colab + import IPython.display as ipd + + for audio in audios: + display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) + ``` + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate + + sample_size = audio_length_in_s * self.unet.config.sample_rate + + down_scale_factor = 2 ** len(self.unet.up_blocks) + if sample_size < 3 * down_scale_factor: + raise ValueError( + f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" + f" {3 * down_scale_factor / self.unet.config.sample_rate}." 
+ ) + + original_sample_size = int(sample_size) + if sample_size % down_scale_factor != 0: + sample_size = ( + (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 + ) * down_scale_factor + logger.info( + f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" + f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" + " process." + ) + sample_size = int(sample_size) + + dtype = next(self.unet.parameters()).dtype + shape = (batch_size, self.unet.config.in_channels, sample_size) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype) + + # set step values + self.scheduler.set_timesteps(num_inference_steps, device=audio.device) + self.scheduler.timesteps = self.scheduler.timesteps.to(dtype) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(audio, t).sample + + # 2. compute previous audio sample: x_t -> t_t-1 + audio = self.scheduler.step(model_output, t, audio).prev_sample + + audio = audio.clamp(-1, 1).float().cpu().numpy() + + audio = audio[:, :, :original_sample_size] + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/diffusers3/pipelines/ddim/__init__.py b/diffusers3/pipelines/ddim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d9eede47c897370a23c47c05291690881c987025 --- /dev/null +++ b/diffusers3/pipelines/ddim/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_ddim": ["DDIMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_ddim import DDIMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/ddim/pipeline_ddim.py b/diffusers3/pipelines/ddim/pipeline_ddim.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b967ed369bd3e7287aa6dc628ecad6484a6359 --- /dev/null +++ b/diffusers3/pipelines/ddim/pipeline_ddim.py @@ -0,0 +1,154 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union + +import torch + +from ...schedulers import DDIMScheduler +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class DDIMPipeline(DiffusionPipeline): + r""" + Pipeline for image generation. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of + [`DDPMScheduler`], or [`DDIMScheduler`]. + """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + + # make sure scheduler can always be converted to DDIM + scheduler = DDIMScheduler.from_config(scheduler.config) + + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + eta: float = 0.0, + num_inference_steps: int = 50, + use_clipped_model_output: Optional[bool] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. A value of `0` corresponds to + DDIM and `1` corresponds to DDPM. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + use_clipped_model_output (`bool`, *optional*, defaults to `None`): + If `True` or `False`, see documentation for [`DDIMScheduler.step`]. If `None`, nothing is passed + downstream to the scheduler (use `None` for schedulers which don't support this argument). + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
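+
+        With the default `eta=0.0` the DDIM update adds no extra noise during denoising, so the output is fully
+        determined by the initial latents drawn from `generator`; increasing `eta` re-introduces per-step noise,
+        with `eta=1.0` matching DDPM-style sampling.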
+ + Example: + + ```py + >>> from diffusers import DDIMPipeline + >>> import PIL.Image + >>> import numpy as np + + >>> # load model and scheduler + >>> pipe = DDIMPipeline.from_pretrained("fusing/ddim-lsun-bedroom") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pipe(eta=0.0, num_inference_steps=50) + + >>> # process image to PIL + >>> image_processed = image.cpu().permute(0, 2, 3, 1) + >>> image_processed = (image_processed + 1.0) * 127.5 + >>> image_processed = image_processed.numpy().astype(np.uint8) + >>> image_pil = PIL.Image.fromarray(image_processed[0]) + + >>> # save image + >>> image_pil.save("test.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + + # Sample gaussian noise to begin loop + if isinstance(self.unet.config.sample_size, int): + image_shape = ( + batch_size, + self.unet.config.in_channels, + self.unet.config.sample_size, + self.unet.config.sample_size, + ) + else: + image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(image, t).sample + + # 2. predict previous mean of image x_t-1 and add variance depending on eta + # eta corresponds to ฮท in paper and should be between [0, 1] + # do x_t -> x_t-1 + image = self.scheduler.step( + model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator + ).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/ddpm/__init__.py b/diffusers3/pipelines/ddpm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb41dd1dcf642c791f3d7b0d985efcaf3e4a2c22 --- /dev/null +++ b/diffusers3/pipelines/ddpm/__init__.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, +) + + +_import_structure = {"pipeline_ddpm": ["DDPMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_ddpm import DDPMPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/ddpm/pipeline_ddpm.py b/diffusers3/pipelines/ddpm/pipeline_ddpm.py new file mode 100644 index 0000000000000000000000000000000000000000..093a3cdfe512a7fdda0529bc2c6d47721508620e --- /dev/null +++ b/diffusers3/pipelines/ddpm/pipeline_ddpm.py @@ -0,0 +1,127 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import torch + +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class DDPMPipeline(DiffusionPipeline): + r""" + Pipeline for image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of + [`DDPMScheduler`], or [`DDIMScheduler`]. + """ + + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + num_inference_steps: int = 1000, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 1000): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
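+
+        Ancestral (DDPM) sampling injects fresh noise during denoising, so results are reproducible only when the
+        same `generator` is supplied; it seeds both the initial latents and the noise added inside
+        `DDPMScheduler.step`.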
+ + Example: + + ```py + >>> from diffusers import DDPMPipeline + + >>> # load model and scheduler + >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pipe().images[0] + + >>> # save image + >>> image.save("ddpm_generated_image.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + # Sample gaussian noise to begin loop + if isinstance(self.unet.config.sample_size, int): + image_shape = ( + batch_size, + self.unet.config.in_channels, + self.unet.config.sample_size, + self.unet.config.sample_size, + ) + else: + image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) + + if self.device.type == "mps": + # randn does not work reproducibly on mps + image = randn_tensor(image_shape, generator=generator) + image = image.to(self.device) + else: + image = randn_tensor(image_shape, generator=generator, device=self.device) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(image, t).sample + + # 2. compute previous image: x_t -> x_t-1 + image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deepfloyd_if/__init__.py b/diffusers3/pipelines/deepfloyd_if/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79aab1fb186a857dd0a3353c4b5905b4595b5b7b --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/__init__.py @@ -0,0 +1,85 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = { + "timesteps": [ + "fast27_timesteps", + "smart100_timesteps", + "smart185_timesteps", + "smart27_timesteps", + "smart50_timesteps", + "super100_timesteps", + "super27_timesteps", + "super40_timesteps", + ] +} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_if"] = ["IFPipeline"] + _import_structure["pipeline_if_img2img"] = ["IFImg2ImgPipeline"] + _import_structure["pipeline_if_img2img_superresolution"] = ["IFImg2ImgSuperResolutionPipeline"] + _import_structure["pipeline_if_inpainting"] = ["IFInpaintingPipeline"] + _import_structure["pipeline_if_inpainting_superresolution"] = ["IFInpaintingSuperResolutionPipeline"] + _import_structure["pipeline_if_superresolution"] = ["IFSuperResolutionPipeline"] + _import_structure["pipeline_output"] = ["IFPipelineOutput"] + _import_structure["safety_checker"] = ["IFSafetyChecker"] + _import_structure["watermark"] = ["IFWatermarker"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not 
(is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_if import IFPipeline + from .pipeline_if_img2img import IFImg2ImgPipeline + from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline + from .pipeline_if_inpainting import IFInpaintingPipeline + from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline + from .pipeline_if_superresolution import IFSuperResolutionPipeline + from .pipeline_output import IFPipelineOutput + from .safety_checker import IFSafetyChecker + from .timesteps import ( + fast27_timesteps, + smart27_timesteps, + smart50_timesteps, + smart100_timesteps, + smart185_timesteps, + super27_timesteps, + super40_timesteps, + super100_timesteps, + ) + from .watermark import IFWatermarker + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_if.py b/diffusers3/pipelines/deepfloyd_if/pipeline_if.py new file mode 100644 index 0000000000000000000000000000000000000000..f545b24bec5c17c1f75673bb7d35b3753bcc41aa --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_if.py @@ -0,0 +1,774 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + + >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"' + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt" + ... 
).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> safety_modules = { + ... "feature_extractor": pipe.feature_extractor, + ... "safety_checker": pipe.safety_checker, + ... "watermarker": pipe.watermarker, + ... } + >>> super_res_2_pipe = DiffusionPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-x4-upscaler", **safety_modules, torch_dtype=torch.float16 + ... ) + >>> super_res_2_pipe.enable_model_cpu_offload() + + >>> image = super_res_2_pipe( + ... prompt=prompt, + ... image=image, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + _exclude_from_cpu_offload = ["watermarker"] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @torch.no_grad() + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. 
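+
+        A minimal, illustrative usage sketch (the checkpoint id is the one already used in this file's example
+        docstring; the prompt is arbitrary):
+
+        ```py
+        >>> import torch
+        >>> from diffusers import IFPipeline
+
+        >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
+        >>> pipe.enable_model_cpu_offload()
+
+        >>> # returns (prompt_embeds, negative_prompt_embeds), each of shape
+        >>> # (batch_size * num_images_per_prompt, seq_len, dim)
+        >>> prompt_embeds, negative_embeds = pipe.encode_prompt("a photo of a red panda", num_images_per_prompt=2)
+        ```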
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance 
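+        # The negative/unconditional branch below mirrors the prompt branch: the tokens are padded to the same
+        # sequence length as `prompt_embeds` (`max_length = prompt_embeds.shape[1]`) so that, in `__call__`,
+        # both halves can be concatenated along the batch dimension for classifier-free guidance.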
+ if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
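+        # (e.g. `DDPMScheduler.step` exposes a `generator` argument but no `eta`, which is why the step
+        # signature is inspected below instead of passing `eta` unconditionally)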
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + intermediate_images = intermediate_images * self.scheduler.init_noise_sigma + return intermediate_images + + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." 
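+        # (standalone runs of six or more digits, e.g. phone numbers or numeric ids)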
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 100, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = None, + width: Optional[int] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + height = height or self.unet.config.sample_size + width = width or self.unet.config.sample_size + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(0) + + # 5. Prepare intermediate images + intermediate_images = self.prepare_intermediate_images( + batch_size * num_images_per_prompt, + self.unet.config.in_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = ( + torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images + ) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + image = self.numpy_to_pil(image) + + # 11. Apply watermark + if self.watermarker is not None: + image = self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_if_img2img.py b/diffusers3/pipelines/deepfloyd_if/pipeline_if_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..07017912575dbae3338a845b82b83720601e689d --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_if_img2img.py @@ -0,0 +1,895 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image.resize((768, 512)) + + >>> pipe = IFImg2ImgPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", + ... variant="fp16", + ... torch_dtype=torch.float16, + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A fantasy landscape in style minecraft" + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe( + ... image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", + ... text_encoder=None, + ... variant="fp16", + ... torch_dtype=torch.float16, + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... original_image=original_image, + ... 
prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFImg2ImgPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + _exclude_from_cpu_offload = ["watermarker"] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @torch.no_grad() + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. 
If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified 
Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... 
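+        # (the pattern below additionally strips resolution-like tokens such as "1280x720" or "640ั…480")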
+ + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.config.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None + ): + _, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + image = self.scheduler.add_noise(image, noise, timestep) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 0.7, + num_inference_steps: int = 80, + timesteps: List[int] = None, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + strength (`float`, *optional*, defaults to 0.7): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 80): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 10.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. 
Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, image, batch_size, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. Prepare intermediate images + image = self.preprocess_image(image) + image = image.to(device=device, dtype=dtype) + + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, generator + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = ( + torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images + ) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. 
Convert to PIL + image = self.numpy_to_pil(image) + + # 11. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py b/diffusers3/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..6685ba6d774ae575118b88c3cc3809afad251434 --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py @@ -0,0 +1,1011 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image.resize((768, 512)) + + >>> pipe = IFImg2ImgPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", + ... variant="fp16", + ... torch_dtype=torch.float16, + ... 
) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A fantasy landscape in style minecraft" + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe( + ... image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", + ... text_encoder=None, + ... variant="fp16", + ... torch_dtype=torch.float16, + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + image_noising_scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"] + model_cpu_offload_seq = "text_encoder->unet" + _exclude_from_cpu_offload = ["watermarker"] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + image_noising_scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if unet.config.in_channels != 6: + logger.warning( + "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." 
+ ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + image_noising_scheduler=image_noising_scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." 
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + original_image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # image + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # original_image + + if isinstance(original_image, list): + check_image_type = original_image[0] + else: + check_image_type = original_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`original_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is" + f" {type(check_image_type)}" + ) + + if isinstance(original_image, list): + image_batch_size = len(original_image) + elif isinstance(original_image, torch.Tensor): + image_batch_size = original_image.shape[0] + elif isinstance(original_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(original_image, np.ndarray): + image_batch_size = original_image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError( + f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}" + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image + def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.config.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image + def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: + if not isinstance(image, torch.Tensor) and not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] + + image = np.stack(image, axis=0) # to np + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image[0], np.ndarray): + image = np.stack(image, axis=0) # to np + if image.ndim == 5: + image = image[0] + + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image, list) and isinstance(image[0], torch.Tensor): + dims = image[0].ndim + + if dims == 3: + image = torch.stack(image, dim=0) + elif dims == 4: + image = torch.concat(image, dim=0) + else: + raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") + + image = image.to(device=device, dtype=self.unet.dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from 
diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.prepare_intermediate_images + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None + ): + _, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + image = self.scheduler.add_noise(image, noise, timestep) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], + original_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 0.8, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 250, + clean_caption: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image (`torch.Tensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + original_image (`torch.Tensor` or `PIL.Image.Image`): + The original image that `image` was varied from. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). 
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to 250): + The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + original_image, + batch_size, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + device = self._execution_device + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. prepare original image + original_image = self.preprocess_original_image(original_image) + original_image = original_image.to(device=device, dtype=dtype) + + # 6. Prepare intermediate images + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + original_image, + noise_timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator, + ) + + # 7. Prepare upscaled image and noise level + _, _, height, width = original_image.shape + + image = self.preprocess_image(image, num_images_per_prompt, device) + + upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) + + noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) + noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) + upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) + + if do_classifier_free_guidance: + noise_level = torch.cat([noise_level] * 2) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = torch.cat([intermediate_images, upscaled], dim=1) + + model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 12. Convert to PIL + image = self.numpy_to_pil(image) + + # 13. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + else: + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_if_inpainting.py b/diffusers3/pipelines/deepfloyd_if/pipeline_if_inpainting.py new file mode 100644 index 0000000000000000000000000000000000000000..7fca0bc0443cb3d9aa4cf366c6ab2058d085ec1d --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_if_inpainting.py @@ -0,0 +1,1014 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png" + >>> response = requests.get(url) + >>> mask_image = Image.open(BytesIO(response.content)) + >>> mask_image = mask_image + + >>> pipe = IFInpaintingPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "blue sunglasses" + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe( + ... image=original_image, + ... mask_image=mask_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... ).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( + ... 
"DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... mask_image=mask_image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` +""" + + +class IFInpaintingPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + model_cpu_offload_seq = "text_encoder->unet" + _exclude_from_cpu_offload = ["watermarker"] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + watermarker=watermarker, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance 
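+ # The unconditional branch encodes `negative_prompt` (or empty strings when none is
+ # given) with the same tokenizer settings and a max_length equal to the positive
+ # embeddings' sequence length, so both halves of the classifier-free-guidance batch
+ # line up when the caller concatenates them.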
+ if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + mask_image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # image + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # mask_image + + if isinstance(mask_image, list): + check_image_type = mask_image[0] + else: + check_image_type = mask_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`mask_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is"
+                f" {type(check_image_type)}"
+            )
+
+        if isinstance(mask_image, list):
+            image_batch_size = len(mask_image)
+        elif isinstance(mask_image, torch.Tensor):
+            image_batch_size = mask_image.shape[0]
+        elif isinstance(mask_image, PIL.Image.Image):
+            image_batch_size = 1
+        elif isinstance(mask_image, np.ndarray):
+            image_batch_size = mask_image.shape[0]
+        else:
+            assert False
+
+        if image_batch_size != 1 and batch_size != image_batch_size:
+            raise ValueError(
+                f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}"
+            )
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
+    def _text_preprocessing(self, text, clean_caption=False):
+        if clean_caption and not is_bs4_available():
+            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if clean_caption and not is_ftfy_available():
+            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if not isinstance(text, (tuple, list)):
+            text = [text]
+
+        def process(text: str):
+            if clean_caption:
+                text = self._clean_caption(text)
+                text = self._clean_caption(text)
+            else:
+                text = text.lower().strip()
+            return text
+
+        return [process(t) for t in text]
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
+    def _clean_caption(self, caption):
+        caption = str(caption)
+        caption = ul.unquote_plus(caption)
+        caption = caption.strip().lower()
+        caption = re.sub("<person>", "person", caption)
+        # urls:
+        caption = re.sub(
+            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        caption = re.sub(
+            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        # html:
+        caption = BeautifulSoup(caption, features="html.parser").text
+
+        # @
+        caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+        # 31C0—31EF CJK Strokes
+        # 31F0—31FF Katakana Phonetic Extensions
+        # 3200—32FF Enclosed CJK Letters and Months
+        # 3300—33FF CJK Compatibility
+        # 3400—4DBF CJK Unified Ideographs Extension A
+        # 4DC0—4DFF Yijing Hexagram Symbols
+        # 4E00—9FFF CJK Unified Ideographs
+        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+        #######################################################
+
+        # все виды тире / all types of dash --> "-"
+        caption = re.sub(
+            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
+            "-",
+            caption,
+        )
+
+        # кавычки к одному стандарту / normalize quotation marks to one standard
+        caption = re.sub(r"[`´«»“”¨]", '"', caption)
+        caption = re.sub(r"[‘’]", "'", caption)
+
+        # &quot;
+        caption = re.sub(r"&quot;?", "", caption)
+        # &amp
+        caption = re.sub(r"&amp", "", caption)
+
+        # ip addresses:
+        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+        # article ids:
+        caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+        # \n
+ caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image + def preprocess_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.config.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + def preprocess_mask_image(self, mask_image) -> torch.Tensor: + if not isinstance(mask_image, list): + mask_image = [mask_image] + + if isinstance(mask_image[0], torch.Tensor): + mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0) + + if mask_image.ndim == 2: + # Batch and add channel dim for single mask + mask_image = mask_image.unsqueeze(0).unsqueeze(0) + elif mask_image.ndim == 3 and 
mask_image.shape[0] == 1: + # Single mask, the 0'th dimension is considered to be + # the existing batch size of 1 + mask_image = mask_image.unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] != 1: + # Batch of mask, the 0'th dimension is considered to be + # the batching dimension + mask_image = mask_image.unsqueeze(1) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + + elif isinstance(mask_image[0], PIL.Image.Image): + new_mask_image = [] + + for mask_image_ in mask_image: + mask_image_ = mask_image_.convert("L") + mask_image_ = resize(mask_image_, self.unet.config.sample_size) + mask_image_ = np.array(mask_image_) + mask_image_ = mask_image_[None, None, :] + new_mask_image.append(mask_image_) + + mask_image = new_mask_image + + mask_image = np.concatenate(mask_image, axis=0) + mask_image = mask_image.astype(np.float32) / 255.0 + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + elif isinstance(mask_image[0], np.ndarray): + mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + return mask_image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None + ): + image_batch_size, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + noised_image = self.scheduler.add_noise(image, noise, timestep) + + image = (1 - mask_image) * image + mask_image * noised_image + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + mask_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 1.0, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + strength (`float`, *optional*, defaults to 1.0): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. 
Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + mask_image, + batch_size, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. Prepare intermediate images + image = self.preprocess_image(image) + image = image.to(device=device, dtype=dtype) + + mask_image = self.preprocess_mask_image(mask_image) + mask_image = mask_image.to(device=device, dtype=dtype) + + if mask_image.shape[0] == 1: + mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0) + else: + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + image, noise_timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = ( + torch.cat([intermediate_images] * 2) if do_classifier_free_guidance else intermediate_images + ) + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(model_input.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + prev_intermediate_images = intermediate_images + + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + image = self.numpy_to_pil(image) + + # 11. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 8. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 9. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py b/diffusers3/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..4f04a1de2a6edec03c341cfbd1d696eae8754a1c --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_if_inpainting_superresolution.py @@ -0,0 +1,1121 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + PIL_INTERPOLATION, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize +def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image: + w, h = images.size + + coef = w / h + + w, h = img_size, img_size + + if coef >= 1: + w = int(round(img_size / 8 * coef) * 8) + else: + h = int(round(img_size / 8 / coef) * 8) + + images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None) + + return images + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from io import BytesIO + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/person.png" + >>> response = requests.get(url) + >>> original_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> original_image = original_image + + >>> url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/if/glasses_mask.png" + >>> response = requests.get(url) + >>> mask_image = Image.open(BytesIO(response.content)) + >>> mask_image = mask_image + + >>> pipe = IFInpaintingPipeline.from_pretrained( + ... "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "blue sunglasses" + + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + >>> image = pipe( + ... image=original_image, + ... mask_image=mask_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... output_type="pt", + ... 
).images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, + ... mask_image=mask_image, + ... original_image=original_image, + ... prompt_embeds=prompt_embeds, + ... negative_prompt_embeds=negative_embeds, + ... ).images + >>> image[0].save("./if_stage_II.png") + ``` + """ + + +class IFInpaintingSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + tokenizer: T5Tokenizer + text_encoder: T5EncoderModel + + unet: UNet2DConditionModel + scheduler: DDPMScheduler + image_noising_scheduler: DDPMScheduler + + feature_extractor: Optional[CLIPImageProcessor] + safety_checker: Optional[IFSafetyChecker] + + watermarker: Optional[IFWatermarker] + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + model_cpu_offload_seq = "text_encoder->unet" + _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"] + _exclude_from_cpu_offload = ["watermarker"] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + image_noising_scheduler: DDPMScheduler, + safety_checker: Optional[IFSafetyChecker], + feature_extractor: Optional[CLIPImageProcessor], + watermarker: Optional[IFWatermarker], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the IF license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if unet.config.in_channels != 6: + logger.warning( + "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`." 
+            )
+
+        self.register_modules(
+            tokenizer=tokenizer,
+            text_encoder=text_encoder,
+            unet=unet,
+            scheduler=scheduler,
+            image_noising_scheduler=image_noising_scheduler,
+            safety_checker=safety_checker,
+            feature_extractor=feature_extractor,
+            watermarker=watermarker,
+        )
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
+    def _text_preprocessing(self, text, clean_caption=False):
+        if clean_caption and not is_bs4_available():
+            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if clean_caption and not is_ftfy_available():
+            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if not isinstance(text, (tuple, list)):
+            text = [text]
+
+        def process(text: str):
+            if clean_caption:
+                text = self._clean_caption(text)
+                text = self._clean_caption(text)
+            else:
+                text = text.lower().strip()
+            return text
+
+        return [process(t) for t in text]
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
+    def _clean_caption(self, caption):
+        caption = str(caption)
+        caption = ul.unquote_plus(caption)
+        caption = caption.strip().lower()
+        caption = re.sub("<person>", "person", caption)
+        # urls:
+        caption = re.sub(
+            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        caption = re.sub(
+            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        # html:
+        caption = BeautifulSoup(caption, features="html.parser").text
+
+        # @
+        caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+        # 31C0—31EF CJK Strokes
+        # 31F0—31FF Katakana Phonetic Extensions
+        # 3200—32FF Enclosed CJK Letters and Months
+        # 3300—33FF CJK Compatibility
+        # 3400—4DBF CJK Unified Ideographs Extension A
+        # 4DC0—4DFF Yijing Hexagram Symbols
+        # 4E00—9FFF CJK Unified Ideographs
+        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+        #######################################################
+
+        # все виды тире / all types of dash --> "-"
+        caption = re.sub(
+            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
+            "-",
+            caption,
+        )
+
+        # кавычки к одному стандарту / normalize quotation marks to one standard
+        caption = re.sub(r"[`´«»“”¨]", '"', caption)
+        caption = re.sub(r"[‘’]", "'", caption)
+
+        # &quot;
+        caption = re.sub(r"&quot;?", "", caption)
+        # &amp
+        caption = re.sub(r"&amp", "", caption)
+
+        # ip addresses:
+        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+        # article ids:
+        caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+        # \n
+        caption = re.sub(r"\\n", " ", caption)
+
+        # "#123"
+        caption = re.sub(r"#\d{1,3}\b", "", caption)
+        # "#12345.."
+        caption = re.sub(r"#\d{5,}\b", "", caption)
+        # "123456.."
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + original_image, + mask_image, + batch_size, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # image + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # original_image + + if isinstance(original_image, list): + check_image_type = original_image[0] + else: + check_image_type = original_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`original_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(original_image, list): + image_batch_size = len(original_image) + elif isinstance(original_image, torch.Tensor): + image_batch_size = original_image.shape[0] + elif isinstance(original_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(original_image, np.ndarray): + image_batch_size = original_image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError( + f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}" + ) + + # mask_image + + if isinstance(mask_image, list): + check_image_type = mask_image[0] + else: + check_image_type = mask_image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`mask_image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] 
but is" + f" {type(check_image_type)}" + ) + + if isinstance(mask_image, list): + image_batch_size = len(mask_image) + elif isinstance(mask_image, torch.Tensor): + image_batch_size = mask_image.shape[0] + elif isinstance(mask_image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(mask_image, np.ndarray): + image_batch_size = mask_image.shape[0] + else: + assert False + + if image_batch_size != 1 and batch_size != image_batch_size: + raise ValueError( + f"mask_image batch size: {image_batch_size} must be `1` or the same as prompt batch size {batch_size}" + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image + def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor: + if not isinstance(image, list): + image = [image] + + def numpy_to_pt(images): + if images.ndim == 3: + images = images[..., None] + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + if isinstance(image[0], PIL.Image.Image): + new_image = [] + + for image_ in image: + image_ = image_.convert("RGB") + image_ = resize(image_, self.unet.config.sample_size) + image_ = np.array(image_) + image_ = image_.astype(np.float32) + image_ = image_ / 127.5 - 1 + new_image.append(image_) + + image = new_image + + image = np.stack(image, axis=0) # to np + image = numpy_to_pt(image) # to pt + + elif isinstance(image[0], np.ndarray): + image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0) + image = numpy_to_pt(image) + + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image + def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor: + if not isinstance(image, torch.Tensor) and not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] + + image = np.stack(image, axis=0) # to np + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image[0], np.ndarray): + image = np.stack(image, axis=0) # to np + if image.ndim == 5: + image = image[0] + + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image, list) and isinstance(image[0], torch.Tensor): + dims = image[0].ndim + + if dims == 3: + image = torch.stack(image, dim=0) + elif dims == 4: + image = torch.concat(image, dim=0) + else: + raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") + + image = image.to(device=device, dtype=self.unet.dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + + return image + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.preprocess_mask_image + def preprocess_mask_image(self, mask_image) -> torch.Tensor: + if not isinstance(mask_image, list): + mask_image = [mask_image] + + if isinstance(mask_image[0], torch.Tensor): + mask_image = torch.cat(mask_image, axis=0) if mask_image[0].ndim == 4 else torch.stack(mask_image, axis=0) + + if mask_image.ndim == 2: + # Batch and add channel dim for single mask + mask_image = mask_image.unsqueeze(0).unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] == 1: + # Single mask, the 0'th dimension is considered to be + # the existing batch size 
of 1 + mask_image = mask_image.unsqueeze(0) + elif mask_image.ndim == 3 and mask_image.shape[0] != 1: + # Batch of mask, the 0'th dimension is considered to be + # the batching dimension + mask_image = mask_image.unsqueeze(1) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + + elif isinstance(mask_image[0], PIL.Image.Image): + new_mask_image = [] + + for mask_image_ in mask_image: + mask_image_ = mask_image_.convert("L") + mask_image_ = resize(mask_image_, self.unet.config.sample_size) + mask_image_ = np.array(mask_image_) + mask_image_ = mask_image_[None, None, :] + new_mask_image.append(mask_image_) + + mask_image = new_mask_image + + mask_image = np.concatenate(mask_image, axis=0) + mask_image = mask_image.astype(np.float32) / 255.0 + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + elif isinstance(mask_image[0], np.ndarray): + mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) + + mask_image[mask_image < 0.5] = 0 + mask_image[mask_image >= 0.5] = 1 + mask_image = torch.from_numpy(mask_image) + + return mask_image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_inpainting.IFInpaintingPipeline.prepare_intermediate_images + def prepare_intermediate_images( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, mask_image, generator=None + ): + image_batch_size, channels, height, width = image.shape + + batch_size = batch_size * num_images_per_prompt + + shape = (batch_size, channels, height, width) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + noised_image = self.scheduler.add_noise(image, noise, timestep) + + image = (1 - mask_image) * image + mask_image * noised_image + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor], + original_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + mask_image: Union[ + PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray] + ] = None, + strength: float = 0.8, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 100, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 0, + clean_caption: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image (`torch.Tensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + original_image (`torch.Tensor` or `PIL.Image.Image`): + The original image that `image` was varied from. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to 0): + The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. 
Raise error if not correct + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + original_image, + mask_image, + batch_size, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + device = self._execution_device + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + dtype = prompt_embeds.dtype + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) + + # 5. prepare original image + original_image = self.preprocess_original_image(original_image) + original_image = original_image.to(device=device, dtype=dtype) + + # 6. prepare mask image + mask_image = self.preprocess_mask_image(mask_image) + mask_image = mask_image.to(device=device, dtype=dtype) + + if mask_image.shape[0] == 1: + mask_image = mask_image.repeat_interleave(batch_size * num_images_per_prompt, dim=0) + else: + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + + # 6. Prepare intermediate images + noise_timestep = timesteps[0:1] + noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt) + + intermediate_images = self.prepare_intermediate_images( + original_image, + noise_timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + mask_image, + generator, + ) + + # 7. Prepare upscaled image and noise level + _, _, height, width = original_image.shape + + image = self.preprocess_image(image, num_images_per_prompt, device) + + upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) + + noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) + noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) + upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) + + if do_classifier_free_guidance: + noise_level = torch.cat([noise_level] * 2) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = torch.cat([intermediate_images, upscaled], dim=1) + + model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + prev_intermediate_images = intermediate_images + + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + intermediate_images = (1 - mask_image) * prev_intermediate_images + mask_image * intermediate_images + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 12. Convert to PIL + image = self.numpy_to_pil(image) + + # 13. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + else: + # 10. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 11. 
Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_if_superresolution.py b/diffusers3/pipelines/deepfloyd_if/pipeline_if_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..891963f2a904fb9d219ab1987df150d90156ea4a --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_if_superresolution.py @@ -0,0 +1,870 @@ +import html +import inspect +import re +import urllib.parse as ul +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import UNet2DConditionModel +from ...schedulers import DDPMScheduler +from ...utils import ( + BACKENDS_MAPPING, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import IFPipelineOutput +from .safety_checker import IFSafetyChecker +from .watermark import IFWatermarker + + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import IFPipeline, IFSuperResolutionPipeline, DiffusionPipeline + >>> from diffusers.utils import pt_to_pil + >>> import torch + + >>> pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = 'a photo of a kangaroo wearing an orange hoodie and blue sunglasses standing in front of the eiffel tower holding a sign that says "very deep learning"' + >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) + + >>> image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images + + >>> # save intermediate image + >>> pil_image = pt_to_pil(image) + >>> pil_image[0].save("./if_stage_I.png") + + >>> super_res_1_pipe = IFSuperResolutionPipeline.from_pretrained( + ... "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> super_res_1_pipe.enable_model_cpu_offload() + + >>> image = super_res_1_pipe( + ... image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds + ... 
).images
+    >>> image[0].save("./if_stage_II.png")
+    ```
+"""
+
+
+class IFSuperResolutionPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin):
+    tokenizer: T5Tokenizer
+    text_encoder: T5EncoderModel
+
+    unet: UNet2DConditionModel
+    scheduler: DDPMScheduler
+    image_noising_scheduler: DDPMScheduler
+
+    feature_extractor: Optional[CLIPImageProcessor]
+    safety_checker: Optional[IFSafetyChecker]
+
+    watermarker: Optional[IFWatermarker]
+
+    bad_punct_regex = re.compile(
+        r"["
+        + "#®•©™&@·º½¾¿¡§~"
+        + r"\)"
+        + r"\("
+        + r"\]"
+        + r"\["
+        + r"\}"
+        + r"\{"
+        + r"\|"
+        + "\\"
+        + r"\/"
+        + r"\*"
+        + r"]{1,}"
+    )  # noqa
+
+    _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor", "watermarker"]
+    model_cpu_offload_seq = "text_encoder->unet"
+    _exclude_from_cpu_offload = ["watermarker"]
+
+    def __init__(
+        self,
+        tokenizer: T5Tokenizer,
+        text_encoder: T5EncoderModel,
+        unet: UNet2DConditionModel,
+        scheduler: DDPMScheduler,
+        image_noising_scheduler: DDPMScheduler,
+        safety_checker: Optional[IFSafetyChecker],
+        feature_extractor: Optional[CLIPImageProcessor],
+        watermarker: Optional[IFWatermarker],
+        requires_safety_checker: bool = True,
+    ):
+        super().__init__()
+
+        if safety_checker is None and requires_safety_checker:
+            logger.warning(
+                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                " that you abide to the conditions of the IF license and do not expose unfiltered"
+                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+            )
+
+        if safety_checker is not None and feature_extractor is None:
+            raise ValueError(
+                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+            )
+
+        if unet.config.in_channels != 6:
+            logger.warning(
+                f"It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
+            )
+
+        self.register_modules(
+            tokenizer=tokenizer,
+            text_encoder=text_encoder,
+            unet=unet,
+            scheduler=scheduler,
+            image_noising_scheduler=image_noising_scheduler,
+            safety_checker=safety_checker,
+            feature_extractor=feature_extractor,
+            watermarker=watermarker,
+        )
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
+    def _text_preprocessing(self, text, clean_caption=False):
+        if clean_caption and not is_bs4_available():
+            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if clean_caption and not is_ftfy_available():
+            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
+            logger.warning("Setting `clean_caption` to False...")
+            clean_caption = False
+
+        if not isinstance(text, (tuple, list)):
+            text = [text]
+
+        def process(text: str):
+            if clean_caption:
+                text = self._clean_caption(text)
+                text = self._clean_caption(text)
+            else:
+                text = text.lower().strip()
+            return text
+
+        return [process(t) for t in text]
+
+    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
+    def _clean_caption(self, caption):
+        caption = str(caption)
+        caption = ul.unquote_plus(caption)
+        caption = caption.strip().lower()
+        caption = re.sub("<person>", "person", caption)
+        # urls:
+        caption = re.sub(
+            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        caption = re.sub(
+            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        # html:
+        caption = BeautifulSoup(caption, features="html.parser").text
+
+        # @<nickname>
+        caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+        # 31C0-31EF CJK Strokes
+        # 31F0-31FF Katakana Phonetic Extensions
+        # 3200-32FF Enclosed CJK Letters and Months
+        # 3300-33FF CJK Compatibility
+        # 3400-4DBF CJK Unified Ideographs Extension A
+        # 4DC0-4DFF Yijing Hexagram Symbols
+        # 4E00-9FFF CJK Unified Ideographs
+        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+        #######################################################
+
+        # all types of dash --> "-"
+        caption = re.sub(
+            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
+            "-",
+            caption,
+        )
+
+        # normalize all quotation marks to one standard
+        caption = re.sub(r"[`´«»“”¨]", '"', caption)
+        caption = re.sub(r"[‘’]", "'", caption)
+
+        # &quot;
+        caption = re.sub(r"&quot;?", "", caption)
+        # &amp
+        caption = re.sub(r"&amp", "", caption)
+
+        # ip addresses:
+        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+        # article ids:
+        caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+        # \n
+        caption = re.sub(r"\\n", " ", caption)
+
+        # "#123"
+        caption = re.sub(r"#\d{1,3}\b", "", caption)
+        # "#12345.."
+        caption = re.sub(r"#\d{5,}\b", "", caption)
+        # "123456.."
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + @torch.no_grad() + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clean_caption: bool = False, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF + max_length = 77 + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.unet is not None: + dtype = self.unet.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + batch_size, + noise_level, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: + raise ValueError( + f"`noise_level`: {noise_level} must be a valid timestep in `self.noising_scheduler`, [0, {self.image_noising_scheduler.config.num_train_timesteps})" + ) + + if isinstance(image, list): + check_image_type = image[0] + else: + check_image_type = image + + if ( + not isinstance(check_image_type, torch.Tensor) + and not isinstance(check_image_type, PIL.Image.Image) + and not isinstance(check_image_type, np.ndarray) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is" + f" {type(check_image_type)}" + ) + + if isinstance(image, list): + image_batch_size = len(image) + elif isinstance(image, torch.Tensor): + image_batch_size = image.shape[0] + elif isinstance(image, PIL.Image.Image): + image_batch_size = 1 + elif isinstance(image, np.ndarray): + image_batch_size = image.shape[0] + else: + assert False + + if batch_size != image_batch_size: + raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}") + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_intermediate_images + def prepare_intermediate_images(self, batch_size, num_channels, height, width, dtype, device, generator): + shape = (batch_size, num_channels, height, width) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + intermediate_images = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + intermediate_images = intermediate_images * self.scheduler.init_noise_sigma + return intermediate_images + + def preprocess_image(self, image, num_images_per_prompt, device): + if not isinstance(image, torch.Tensor) and not isinstance(image, list): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image] + + image = np.stack(image, axis=0) # to np + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image[0], np.ndarray): + image = np.stack(image, axis=0) # to np + if image.ndim == 5: + image = image[0] + + image = torch.from_numpy(image.transpose(0, 3, 1, 2)) + elif isinstance(image, list) and isinstance(image[0], torch.Tensor): + dims = image[0].ndim + + if dims == 3: + image = torch.stack(image, dim=0) + elif dims == 4: + image = torch.concat(image, dim=0) + else: + raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}") + + image = image.to(device=device, dtype=self.unet.dtype) + + image = image.repeat_interleave(num_images_per_prompt, dim=0) + + return image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: int = None, + width: int = None, + image: Union[PIL.Image.Image, np.ndarray, torch.Tensor] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 250, + clean_caption: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to None): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to None): + The width in pixels of the generated image. + image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`): + The image to be upscaled. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*, defaults to None): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to 250): + The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)` + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images, and the second element is a list + of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) + or watermarked content, according to the `safety_checker`. + """ + # 1. Check inputs. 
Raise error if not correct + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + self.check_inputs( + prompt, + image, + batch_size, + noise_level, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + + height = height or self.unet.config.sample_size + width = width or self.unet.config.sample_size + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(0) + + # 5. Prepare intermediate images + num_channels = self.unet.config.in_channels // 2 + intermediate_images = self.prepare_intermediate_images( + batch_size * num_images_per_prompt, + num_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare upscaled image and noise level + image = self.preprocess_image(image, num_images_per_prompt, device) + upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True) + + noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device) + noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype) + upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level) + + if do_classifier_free_guidance: + noise_level = torch.cat([noise_level] * 2) + + # HACK: see comment in `enable_model_cpu_offload` + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + model_input = torch.cat([intermediate_images, upscaled], dim=1) + + model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input + model_input = self.scheduler.scale_model_input(model_input, t) + + # predict the noise residual + noise_pred = self.unet( + model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if self.scheduler.config.variance_type not in ["learned", "learned_range"]: + noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + intermediate_images = self.scheduler.step( + noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False + )[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, intermediate_images) + + image = intermediate_images + + if output_type == "pil": + # 9. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 10. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 11. Convert to PIL + image = self.numpy_to_pil(image) + + # 12. Apply watermark + if self.watermarker is not None: + self.watermarker.apply_watermark(image, self.unet.config.sample_size) + elif output_type == "pt": + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + else: + # 9. Post-processing + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + # 10. Run safety checker + image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, nsfw_detected, watermark_detected) + + return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected) diff --git a/diffusers3/pipelines/deepfloyd_if/pipeline_output.py b/diffusers3/pipelines/deepfloyd_if/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..7f39ab5ba70ccbcaa1ca10438fe829d243277e06 --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/pipeline_output.py @@ -0,0 +1,28 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class IFPipelineOutput(BaseOutput): + """ + Args: + Output class for Stable Diffusion pipelines. 
+ images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + nsfw_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content or a watermark. `None` if safety checking could not be performed. + watermark_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely has a watermark. `None` if safety + checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_detected: Optional[List[bool]] + watermark_detected: Optional[List[bool]] diff --git a/diffusers3/pipelines/deepfloyd_if/safety_checker.py b/diffusers3/pipelines/deepfloyd_if/safety_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffeed580bbea1514b11bf7a168a952328d8f424 --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/safety_checker.py @@ -0,0 +1,59 @@ +import numpy as np +import torch +import torch.nn as nn +from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class IFSafetyChecker(PreTrainedModel): + config_class = CLIPConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + self.vision_model = CLIPVisionModelWithProjection(config.vision_config) + + self.p_head = nn.Linear(config.vision_config.projection_dim, 1) + self.w_head = nn.Linear(config.vision_config.projection_dim, 1) + + @torch.no_grad() + def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5): + image_embeds = self.vision_model(clip_input)[0] + + nsfw_detected = self.p_head(image_embeds) + nsfw_detected = nsfw_detected.flatten() + nsfw_detected = nsfw_detected > p_threshold + nsfw_detected = nsfw_detected.tolist() + + if any(nsfw_detected): + logger.warning( + "Potential NSFW content was detected in one or more images. A black image will be returned instead." + " Try again with a different prompt and/or seed." + ) + + for idx, nsfw_detected_ in enumerate(nsfw_detected): + if nsfw_detected_: + images[idx] = np.zeros(images[idx].shape) + + watermark_detected = self.w_head(image_embeds) + watermark_detected = watermark_detected.flatten() + watermark_detected = watermark_detected > w_threshold + watermark_detected = watermark_detected.tolist() + + if any(watermark_detected): + logger.warning( + "Potential watermarked content was detected in one or more images. A black image will be returned instead." + " Try again with a different prompt and/or seed." 
+ ) + + for idx, watermark_detected_ in enumerate(watermark_detected): + if watermark_detected_: + images[idx] = np.zeros(images[idx].shape) + + return images, nsfw_detected, watermark_detected diff --git a/diffusers3/pipelines/deepfloyd_if/timesteps.py b/diffusers3/pipelines/deepfloyd_if/timesteps.py new file mode 100644 index 0000000000000000000000000000000000000000..d44285c017bbb2ccffa4ae86dd77792a048625d9 --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/timesteps.py @@ -0,0 +1,579 @@ +fast27_timesteps = [ + 999, + 800, + 799, + 600, + 599, + 500, + 400, + 399, + 377, + 355, + 333, + 311, + 288, + 266, + 244, + 222, + 200, + 199, + 177, + 155, + 133, + 111, + 88, + 66, + 44, + 22, + 0, +] + +smart27_timesteps = [ + 999, + 976, + 952, + 928, + 905, + 882, + 858, + 857, + 810, + 762, + 715, + 714, + 572, + 429, + 428, + 286, + 285, + 238, + 190, + 143, + 142, + 118, + 95, + 71, + 47, + 24, + 0, +] + +smart50_timesteps = [ + 999, + 988, + 977, + 966, + 955, + 944, + 933, + 922, + 911, + 900, + 899, + 879, + 859, + 840, + 820, + 800, + 799, + 766, + 733, + 700, + 699, + 650, + 600, + 599, + 500, + 499, + 400, + 399, + 350, + 300, + 299, + 266, + 233, + 200, + 199, + 179, + 159, + 140, + 120, + 100, + 99, + 88, + 77, + 66, + 55, + 44, + 33, + 22, + 11, + 0, +] + +smart100_timesteps = [ + 999, + 995, + 992, + 989, + 985, + 981, + 978, + 975, + 971, + 967, + 964, + 961, + 957, + 956, + 951, + 947, + 942, + 937, + 933, + 928, + 923, + 919, + 914, + 913, + 908, + 903, + 897, + 892, + 887, + 881, + 876, + 871, + 870, + 864, + 858, + 852, + 846, + 840, + 834, + 828, + 827, + 820, + 813, + 806, + 799, + 792, + 785, + 784, + 777, + 770, + 763, + 756, + 749, + 742, + 741, + 733, + 724, + 716, + 707, + 699, + 698, + 688, + 677, + 666, + 656, + 655, + 645, + 634, + 623, + 613, + 612, + 598, + 584, + 570, + 569, + 555, + 541, + 527, + 526, + 505, + 484, + 483, + 462, + 440, + 439, + 396, + 395, + 352, + 351, + 308, + 307, + 264, + 263, + 220, + 219, + 176, + 132, + 88, + 44, + 0, +] + +smart185_timesteps = [ + 999, + 997, + 995, + 992, + 990, + 988, + 986, + 984, + 981, + 979, + 977, + 975, + 972, + 970, + 968, + 966, + 964, + 961, + 959, + 957, + 956, + 954, + 951, + 949, + 946, + 944, + 941, + 939, + 936, + 934, + 931, + 929, + 926, + 924, + 921, + 919, + 916, + 914, + 913, + 910, + 907, + 905, + 902, + 899, + 896, + 893, + 891, + 888, + 885, + 882, + 879, + 877, + 874, + 871, + 870, + 867, + 864, + 861, + 858, + 855, + 852, + 849, + 846, + 843, + 840, + 837, + 834, + 831, + 828, + 827, + 824, + 821, + 817, + 814, + 811, + 808, + 804, + 801, + 798, + 795, + 791, + 788, + 785, + 784, + 780, + 777, + 774, + 770, + 766, + 763, + 760, + 756, + 752, + 749, + 746, + 742, + 741, + 737, + 733, + 730, + 726, + 722, + 718, + 714, + 710, + 707, + 703, + 699, + 698, + 694, + 690, + 685, + 681, + 677, + 673, + 669, + 664, + 660, + 656, + 655, + 650, + 646, + 641, + 636, + 632, + 627, + 622, + 618, + 613, + 612, + 607, + 602, + 596, + 591, + 586, + 580, + 575, + 570, + 569, + 563, + 557, + 551, + 545, + 539, + 533, + 527, + 526, + 519, + 512, + 505, + 498, + 491, + 484, + 483, + 474, + 466, + 457, + 449, + 440, + 439, + 428, + 418, + 407, + 396, + 395, + 381, + 366, + 352, + 351, + 330, + 308, + 307, + 286, + 264, + 263, + 242, + 220, + 219, + 176, + 175, + 132, + 131, + 88, + 44, + 0, +] + +super27_timesteps = [ + 999, + 991, + 982, + 974, + 966, + 958, + 950, + 941, + 933, + 925, + 916, + 908, + 900, + 899, + 874, + 850, + 825, + 800, + 799, + 700, + 600, + 500, + 400, + 300, + 200, + 100, + 0, +] + 
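The hand-tuned schedules in this module are meant to be passed straight to the `timesteps` argument of the pipelines above, whose `__call__` implementations forward them to `scheduler.set_timesteps(timesteps=...)` and re-derive `num_inference_steps` from the list length. Below is a minimal, self-contained sanity check over the schedules defined so far; the `diffusers3.pipelines.deepfloyd_if.timesteps` import path is an assumption based on this repo's vendored package layout.

```py
# Illustrative sketch, not part of the upstream module: the pipelines above assume
# custom timesteps are strictly descending and lie in the trained range [0, 999].
from diffusers3.pipelines.deepfloyd_if.timesteps import (  # assumed vendored path
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
)

schedules = {
    "fast27": fast27_timesteps,
    "smart27": smart27_timesteps,
    "smart50": smart50_timesteps,
    "smart100": smart100_timesteps,
    "smart185": smart185_timesteps,
    "super27": super27_timesteps,
}
for name, schedule in schedules.items():
    # Each schedule must be strictly descending and end at timestep 0.
    assert all(a > b for a, b in zip(schedule, schedule[1:])), f"{name} is not strictly descending"
    assert schedule[0] <= 999 and schedule[-1] == 0, f"{name} falls outside the expected [0, 999] range"
```

When one of these lists is passed as `timesteps=...`, `num_inference_steps` is ignored and replaced by `len(timesteps)`, as in step 4 of both `__call__` methods earlier in this diff.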
+super40_timesteps = [ + 999, + 992, + 985, + 978, + 971, + 964, + 957, + 949, + 942, + 935, + 928, + 921, + 914, + 907, + 900, + 899, + 879, + 859, + 840, + 820, + 800, + 799, + 766, + 733, + 700, + 699, + 650, + 600, + 599, + 500, + 499, + 400, + 399, + 300, + 299, + 200, + 199, + 100, + 99, + 0, +] + +super100_timesteps = [ + 999, + 996, + 992, + 989, + 985, + 982, + 979, + 975, + 972, + 968, + 965, + 961, + 958, + 955, + 951, + 948, + 944, + 941, + 938, + 934, + 931, + 927, + 924, + 920, + 917, + 914, + 910, + 907, + 903, + 900, + 899, + 891, + 884, + 876, + 869, + 861, + 853, + 846, + 838, + 830, + 823, + 815, + 808, + 800, + 799, + 788, + 777, + 766, + 755, + 744, + 733, + 722, + 711, + 700, + 699, + 688, + 677, + 666, + 655, + 644, + 633, + 622, + 611, + 600, + 599, + 585, + 571, + 557, + 542, + 528, + 514, + 500, + 499, + 485, + 471, + 457, + 442, + 428, + 414, + 400, + 399, + 379, + 359, + 340, + 320, + 300, + 299, + 279, + 259, + 240, + 220, + 200, + 199, + 166, + 133, + 100, + 99, + 66, + 33, + 0, +] diff --git a/diffusers3/pipelines/deepfloyd_if/watermark.py b/diffusers3/pipelines/deepfloyd_if/watermark.py new file mode 100644 index 0000000000000000000000000000000000000000..e03e3fab026a5702eb9c45a4d19fd3d4a0d3da6d --- /dev/null +++ b/diffusers3/pipelines/deepfloyd_if/watermark.py @@ -0,0 +1,46 @@ +from typing import List + +import PIL.Image +import torch +from PIL import Image + +from ...configuration_utils import ConfigMixin +from ...models.modeling_utils import ModelMixin +from ...utils import PIL_INTERPOLATION + + +class IFWatermarker(ModelMixin, ConfigMixin): + def __init__(self): + super().__init__() + + self.register_buffer("watermark_image", torch.zeros((62, 62, 4))) + self.watermark_image_as_pil = None + + def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): + # Copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287 + + h = images[0].height + w = images[0].width + + sample_size = sample_size or h + + coef = min(h / sample_size, w / sample_size) + img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) + + S1, S2 = 1024**2, img_w * img_h + K = (S2 / S1) ** 0.5 + wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K) + + if self.watermark_image_as_pil is None: + watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() + watermark_image = Image.fromarray(watermark_image, mode="RGBA") + self.watermark_image_as_pil = watermark_image + + wm_img = self.watermark_image_as_pil.resize( + (wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None + ) + + for pil_img in images: + pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1]) + + return images diff --git a/diffusers3/pipelines/deprecated/README.md b/diffusers3/pipelines/deprecated/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1e21dbbbd96ca532a4ba286a84f244f12f177fb5 --- /dev/null +++ b/diffusers3/pipelines/deprecated/README.md @@ -0,0 +1,3 @@ +# Deprecated Pipelines + +This folder contains pipelines that have very low usage as measured by model downloads, issues and PRs. While you can still use the pipelines just as before, we will stop testing the pipelines and will not accept any changes to existing files. 
\ No newline at end of file diff --git a/diffusers3/pipelines/deprecated/__init__.py b/diffusers3/pipelines/deprecated/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9936323170adbceac2c5c25e3881ea731d8602e1 --- /dev/null +++ b/diffusers3/pipelines/deprecated/__init__.py @@ -0,0 +1,153 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_librosa_available, + is_note_seq_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_pt_objects + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] + _import_structure["pndm"] = ["PNDMPipeline"] + _import_structure["repaint"] = ["RePaintPipeline"] + _import_structure["score_sde_ve"] = ["ScoreSdeVePipeline"] + _import_structure["stochastic_karras_ve"] = ["KarrasVePipeline"] + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["alt_diffusion"] = [ + "AltDiffusionImg2ImgPipeline", + "AltDiffusionPipeline", + "AltDiffusionPipelineOutput", + ] + _import_structure["versatile_diffusion"] = [ + "VersatileDiffusionDualGuidedPipeline", + "VersatileDiffusionImageVariationPipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionTextToImagePipeline", + ] + _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] + _import_structure["stable_diffusion_variants"] = [ + "CycleDiffusionPipeline", + "StableDiffusionInpaintPipelineLegacy", + "StableDiffusionPix2PixZeroPipeline", + "StableDiffusionParadigmsPipeline", + "StableDiffusionModelEditingPipeline", + ] + +try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_librosa_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_librosa_objects)) + +else: + _import_structure["audio_diffusion"] = ["AudioDiffusionPipeline", "Mel"] + +try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) + +else: + _import_structure["spectrogram_diffusion"] = ["MidiProcessor", "SpectrogramDiffusionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_pt_objects import * + + else: + from .latent_diffusion_uncond import LDMPipeline + from .pndm import PNDMPipeline + from .repaint import RePaintPipeline + from .score_sde_ve import ScoreSdeVePipeline + from .stochastic_karras_ve import KarrasVePipeline + + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + 
except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput + from .audio_diffusion import AudioDiffusionPipeline, Mel + from .spectrogram_diffusion import SpectrogramDiffusionPipeline + from .stable_diffusion_variants import ( + CycleDiffusionPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionModelEditingPipeline, + StableDiffusionParadigmsPipeline, + StableDiffusionPix2PixZeroPipeline, + ) + from .stochastic_karras_ve import KarrasVePipeline + from .versatile_diffusion import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + from .vq_diffusion import VQDiffusionPipeline + + try: + if not (is_torch_available() and is_librosa_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_librosa_objects import * + else: + from .audio_diffusion import AudioDiffusionPipeline, Mel + + try: + if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 + else: + from .spectrogram_diffusion import ( + MidiProcessor, + SpectrogramDiffusionPipeline, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/deprecated/alt_diffusion/__init__.py b/diffusers3/pipelines/deprecated/alt_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71fa15b3feff08dc4008d1fa02ba61ad1300efed --- /dev/null +++ b/diffusers3/pipelines/deprecated/alt_diffusion/__init__.py @@ -0,0 +1,53 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_roberta_series"] = ["RobertaSeriesModelWithTransformation"] + _import_structure["pipeline_alt_diffusion"] = ["AltDiffusionPipeline"] + _import_structure["pipeline_alt_diffusion_img2img"] = ["AltDiffusionImg2ImgPipeline"] + + _import_structure["pipeline_output"] = ["AltDiffusionPipelineOutput"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import * + + else: + from .modeling_roberta_series import RobertaSeriesModelWithTransformation + from .pipeline_alt_diffusion import AltDiffusionPipeline + from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline + from .pipeline_output import AltDiffusionPipelineOutput + +else: + import sys + 
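+    # At import time the module is replaced by a _LazyModule proxy, so the classes listed in
+    # _import_structure are only imported when an attribute is first accessed; dummy objects for
+    # missing optional dependencies are attached right after.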
+ sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py b/diffusers3/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py new file mode 100644 index 0000000000000000000000000000000000000000..f69f905b56c528a292340fcf33ee3b016096d1f4 --- /dev/null +++ b/diffusers3/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py @@ -0,0 +1,124 @@ +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import nn +from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel +from transformers.utils import ModelOutput + + +@dataclass +class TransformationModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. + + Args: + text_embeds (`torch.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The text embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one + for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + projection_state: Optional[torch.Tensor] = None + last_hidden_state: torch.Tensor = None + hidden_states: Optional[Tuple[torch.Tensor]] = None + attentions: Optional[Tuple[torch.Tensor]] = None + + +class RobertaSeriesConfig(XLMRobertaConfig): + def __init__( + self, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + project_dim=512, + pooler_fn="cls", + learn_encoder=False, + use_attention_mask=True, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + self.use_attention_mask = use_attention_mask + + +class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + base_model_prefix = "roberta" + config_class = RobertaSeriesConfig + + def __init__(self, config): + super().__init__(config) + self.roberta = XLMRobertaModel(config) + self.transformation = nn.Linear(config.hidden_size, config.project_dim) + self.has_pre_transformation = getattr(config, "has_pre_transformation", False) + if self.has_pre_transformation: + self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim) + self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.post_init() + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ): + r""" """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.base_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=True if self.has_pre_transformation else output_hidden_states, + return_dict=return_dict, + ) + + if self.has_pre_transformation: + sequence_output2 = outputs["hidden_states"][-2] + sequence_output2 = self.pre_LN(sequence_output2) + projection_state2 = self.transformation_pre(sequence_output2) + + return TransformationModelOutput( + projection_state=projection_state2, + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + else: + projection_state = self.transformation(outputs.last_hidden_state) + return TransformationModelOutput( + projection_state=projection_state, + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py b/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..d6730ee610c9b52812797b3097209efae77dc3f8 --- /dev/null +++ 
b/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion.py @@ -0,0 +1,974 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .modeling_roberta_series import RobertaSeriesModelWithTransformation +from .pipeline_output import AltDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AltDiffusionPipeline + + >>> pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion-m9", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # "dark elf princess, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko and hoang lap" + >>> prompt = "้ป‘ๆš—็ฒพ็ตๅ…ฌไธป๏ผŒ้žๅธธ่ฏฆ็ป†๏ผŒๅนปๆƒณ๏ผŒ้žๅธธ่ฏฆ็ป†๏ผŒๆ•ฐๅญ—็ป˜็”ป๏ผŒๆฆ‚ๅฟต่‰บๆœฏ๏ผŒๆ•้”็š„็„ฆ็‚น๏ผŒๆ’ๅ›พ" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AltDiffusionPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Alt Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.RobertaSeriesModelWithTransformation`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.XLMRobertaTokenizer`]): + A `XLMRobertaTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: RobertaSeriesModelWithTransformation, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. 
Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
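+                # Note: this final LayerNorm call is carried over from the CLIP-based Stable Diffusion
+                # pipeline and assumes the text encoder exposes `text_model.final_layer_norm`; the
+                # Roberta-series encoder used by this pipeline does not, so passing `clip_skip` with the
+                # default text encoder would likely fail here with an AttributeError.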
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = 
self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. 
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py b/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..6fbf5ccb274d92d5b70dc2e892a08440ab361c0d --- /dev/null +++ b/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -0,0 +1,1041 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
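+
+# Deprecated Alt Diffusion image-to-image pipeline. It lives under `pipelines/deprecated/` and, per
+# that folder's README, stays importable for backwards compatibility but is no longer actively tested.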
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .modeling_roberta_series import RobertaSeriesModelWithTransformation +from .pipeline_output import AltDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import AltDiffusionImg2ImgPipeline + + >>> device = "cuda" + >>> model_id_or_path = "BAAI/AltDiffusion-m9" + >>> pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> # "A fantasy landscape, trending on artstation" + >>> prompt = "ๅนปๆƒณ้ฃŽๆ™ฏ, artstation" + + >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images + >>> images[0].save("ๅนปๆƒณ้ฃŽๆ™ฏ.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class AltDiffusionImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image-to-image generation using Alt Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.RobertaSeriesModelWithTransformation`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.XLMRobertaTokenizer`]): + A `XLMRobertaTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: RobertaSeriesModelWithTransformation, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. 
Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Alt Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = 
self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + timesteps (`torch.Tensor`): + generate embedding vectors at these timesteps + embedding_dim (`int`, *optional*, defaults to 512): + dimension of the embeddings to generate + dtype: + data type of the generated embeddings + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. 
For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] instead of a + plain tuple. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + Returns: + [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.AltDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. set timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 7.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return AltDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_output.py b/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..dd174ae3c21fe4110babd503f0418366472059ff --- /dev/null +++ b/diffusers3/pipelines/deprecated/alt_diffusion/pipeline_output.py @@ -0,0 +1,28 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ....utils import ( + BaseOutput, +) + + +@dataclass +# Copied from diffusers.pipelines.stable_diffusion.pipeline_output.StableDiffusionPipelineOutput with Stable->Alt +class AltDiffusionPipelineOutput(BaseOutput): + """ + Output class for Alt Diffusion pipelines. 
+ + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] diff --git a/diffusers3/pipelines/deprecated/audio_diffusion/__init__.py b/diffusers3/pipelines/deprecated/audio_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3127951863a7db3f9dd8e42ac5ab64fa9ac3ec0c --- /dev/null +++ b/diffusers3/pipelines/deprecated/audio_diffusion/__init__.py @@ -0,0 +1,23 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = { + "mel": ["Mel"], + "pipeline_audio_diffusion": ["AudioDiffusionPipeline"], +} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .mel import Mel + from .pipeline_audio_diffusion import AudioDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/deprecated/audio_diffusion/mel.py b/diffusers3/pipelines/deprecated/audio_diffusion/mel.py new file mode 100644 index 0000000000000000000000000000000000000000..3426c3ad0428efc69f2f8dee7a620df3c78b1405 --- /dev/null +++ b/diffusers3/pipelines/deprecated/audio_diffusion/mel.py @@ -0,0 +1,179 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import numpy as np # noqa: E402 + +from ....configuration_utils import ConfigMixin, register_to_config +from ....schedulers.scheduling_utils import SchedulerMixin + + +try: + import librosa # noqa: E402 + + _librosa_can_be_imported = True + _import_error = "" +except Exception as e: + _librosa_can_be_imported = False + _import_error = ( + f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it." + ) + + +from PIL import Image # noqa: E402 + + +class Mel(ConfigMixin, SchedulerMixin): + """ + Parameters: + x_res (`int`): + x resolution of spectrogram (time). + y_res (`int`): + y resolution of spectrogram (frequency bins). + sample_rate (`int`): + Sample rate of audio. + n_fft (`int`): + Number of Fast Fourier Transforms. + hop_length (`int`): + Hop length (a higher number is recommended if `y_res` < 256). + top_db (`int`): + Loudest decibel value. + n_iter (`int`): + Number of iterations for Griffin-Lim Mel inversion. 
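+
+    Example (an illustrative sketch; `"example.wav"` is a placeholder path and librosa must be installed):
+
+    ```py
+    mel = Mel()  # defaults: 256 x 256 spectrogram, sample_rate=22050, hop_length=512
+    mel.load_audio(audio_file="example.wav")  # or pass raw_audio=<np.ndarray> instead
+    image = mel.audio_slice_to_image(0)       # grayscale PIL image of the first audio slice
+    audio = mel.image_to_audio(image)         # approximate waveform via Griffin-Lim inversion
+    ```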
+ """ + + config_name = "mel_config.json" + + @register_to_config + def __init__( + self, + x_res: int = 256, + y_res: int = 256, + sample_rate: int = 22050, + n_fft: int = 2048, + hop_length: int = 512, + top_db: int = 80, + n_iter: int = 32, + ): + self.hop_length = hop_length + self.sr = sample_rate + self.n_fft = n_fft + self.top_db = top_db + self.n_iter = n_iter + self.set_resolution(x_res, y_res) + self.audio = None + + if not _librosa_can_be_imported: + raise ValueError(_import_error) + + def set_resolution(self, x_res: int, y_res: int): + """Set resolution. + + Args: + x_res (`int`): + x resolution of spectrogram (time). + y_res (`int`): + y resolution of spectrogram (frequency bins). + """ + self.x_res = x_res + self.y_res = y_res + self.n_mels = self.y_res + self.slice_size = self.x_res * self.hop_length - 1 + + def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None): + """Load audio. + + Args: + audio_file (`str`): + An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. + raw_audio (`np.ndarray`): + The raw audio file as a NumPy array. + """ + if audio_file is not None: + self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr) + else: + self.audio = raw_audio + + # Pad with silence if necessary. + if len(self.audio) < self.x_res * self.hop_length: + self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))]) + + def get_number_of_slices(self) -> int: + """Get number of slices in audio. + + Returns: + `int`: + Number of spectograms audio can be sliced into. + """ + return len(self.audio) // self.slice_size + + def get_audio_slice(self, slice: int = 0) -> np.ndarray: + """Get slice of audio. + + Args: + slice (`int`): + Slice number of audio (out of `get_number_of_slices()`). + + Returns: + `np.ndarray`: + The audio slice as a NumPy array. + """ + return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)] + + def get_sample_rate(self) -> int: + """Get sample rate. + + Returns: + `int`: + Sample rate of audio. + """ + return self.sr + + def audio_slice_to_image(self, slice: int) -> Image.Image: + """Convert slice of audio to spectrogram. + + Args: + slice (`int`): + Slice number of audio to convert (out of `get_number_of_slices()`). + + Returns: + `PIL Image`: + A grayscale image of `x_res x y_res`. + """ + S = librosa.feature.melspectrogram( + y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels + ) + log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db) + bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8) + image = Image.fromarray(bytedata) + return image + + def image_to_audio(self, image: Image.Image) -> np.ndarray: + """Converts spectrogram to audio. + + Args: + image (`PIL Image`): + An grayscale image of `x_res x y_res`. + + Returns: + audio (`np.ndarray`): + The audio as a NumPy array. 
+ """ + bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width)) + log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db + S = librosa.db_to_power(log_S) + audio = librosa.feature.inverse.mel_to_audio( + S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter + ) + return audio diff --git a/diffusers3/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py b/diffusers3/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..47044e050acf7221e3da74d78defa042d5d583f2 --- /dev/null +++ b/diffusers3/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py @@ -0,0 +1,329 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from math import acos, sin +from typing import List, Tuple, Union + +import numpy as np +import torch +from PIL import Image + +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import DDIMScheduler, DDPMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput +from .mel import Mel + + +class AudioDiffusionPipeline(DiffusionPipeline): + """ + Pipeline for audio diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + mel ([`Mel`]): + Transform audio into a spectrogram. + scheduler ([`DDIMScheduler`] or [`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`] or [`DDPMScheduler`]. + """ + + _optional_components = ["vqvae"] + + def __init__( + self, + vqvae: AutoencoderKL, + unet: UNet2DConditionModel, + mel: Mel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + ): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae) + + def get_default_steps(self) -> int: + """Returns default number of steps recommended for inference. + + Returns: + `int`: + The number of steps. 
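+                (50 when the scheduler is a [`DDIMScheduler`], otherwise 1000).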
+ """ + return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000 + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + audio_file: str = None, + raw_audio: np.ndarray = None, + slice: int = 0, + start_step: int = 0, + steps: int = None, + generator: torch.Generator = None, + mask_start_secs: float = 0, + mask_end_secs: float = 0, + step_generator: torch.Generator = None, + eta: float = 0, + noise: torch.Tensor = None, + encoding: torch.Tensor = None, + return_dict=True, + ) -> Union[ + Union[AudioPipelineOutput, ImagePipelineOutput], + Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], + ]: + """ + The call function to the pipeline for generation. + + Args: + batch_size (`int`): + Number of samples to generate. + audio_file (`str`): + An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. + raw_audio (`np.ndarray`): + The raw audio file as a NumPy array. + slice (`int`): + Slice number of audio to convert. + start_step (int): + Step to start diffusion from. + steps (`int`): + Number of denoising steps (defaults to `50` for DDIM and `1000` for DDPM). + generator (`torch.Generator`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + mask_start_secs (`float`): + Number of seconds of audio to mask (not generate) at start. + mask_end_secs (`float`): + Number of seconds of audio to mask (not generate) at end. + step_generator (`torch.Generator`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) used to denoise. + None + eta (`float`): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + noise (`torch.Tensor`): + A noise tensor of shape `(batch_size, 1, height, width)` or `None`. + encoding (`torch.Tensor`): + A tensor for [`UNet2DConditionModel`] of shape `(batch_size, seq_length, cross_attention_dim)`. + return_dict (`bool`): + Whether or not to return a [`AudioPipelineOutput`], [`ImagePipelineOutput`] or a plain tuple. + + Examples: + + For audio diffusion: + + ```py + import torch + from IPython.display import Audio + from diffusers import DiffusionPipeline + + device = "cuda" if torch.cuda.is_available() else "cpu" + pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device) + + output = pipe() + display(output.images[0]) + display(Audio(output.audios[0], rate=mel.get_sample_rate())) + ``` + + For latent audio diffusion: + + ```py + import torch + from IPython.display import Audio + from diffusers import DiffusionPipeline + + device = "cuda" if torch.cuda.is_available() else "cpu" + pipe = DiffusionPipeline.from_pretrained("teticio/latent-audio-diffusion-256").to(device) + + output = pipe() + display(output.images[0]) + display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) + ``` + + For other tasks like variation, inpainting, outpainting, etc: + + ```py + output = pipe( + raw_audio=output.audios[0, 0], + start_step=int(pipe.get_default_steps() / 2), + mask_start_secs=1, + mask_end_secs=1, + ) + display(output.images[0]) + display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) + ``` + + Returns: + `List[PIL Image]`: + A list of Mel spectrograms (`float`, `List[np.ndarray]`) with the sample rate and raw audio. 
+ """ + + steps = steps or self.get_default_steps() + self.scheduler.set_timesteps(steps) + step_generator = step_generator or generator + # For backwards compatibility + if isinstance(self.unet.config.sample_size, int): + self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size) + if noise is None: + noise = randn_tensor( + ( + batch_size, + self.unet.config.in_channels, + self.unet.config.sample_size[0], + self.unet.config.sample_size[1], + ), + generator=generator, + device=self.device, + ) + images = noise + mask = None + + if audio_file is not None or raw_audio is not None: + self.mel.load_audio(audio_file, raw_audio) + input_image = self.mel.audio_slice_to_image(slice) + input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape( + (input_image.height, input_image.width) + ) + input_image = (input_image / 255) * 2 - 1 + input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device) + + if self.vqvae is not None: + input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample( + generator=generator + )[0] + input_images = self.vqvae.config.scaling_factor * input_images + + if start_step > 0: + images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1]) + + pixels_per_second = ( + self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length + ) + mask_start = int(mask_start_secs * pixels_per_second) + mask_end = int(mask_end_secs * pixels_per_second) + mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])) + + for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): + if isinstance(self.unet, UNet2DConditionModel): + model_output = self.unet(images, t, encoding)["sample"] + else: + model_output = self.unet(images, t)["sample"] + + if isinstance(self.scheduler, DDIMScheduler): + images = self.scheduler.step( + model_output=model_output, + timestep=t, + sample=images, + eta=eta, + generator=step_generator, + )["prev_sample"] + else: + images = self.scheduler.step( + model_output=model_output, + timestep=t, + sample=images, + generator=step_generator, + )["prev_sample"] + + if mask is not None: + if mask_start > 0: + images[:, :, :, :mask_start] = mask[:, step, :, :mask_start] + if mask_end > 0: + images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:] + + if self.vqvae is not None: + # 0.18215 was scaling factor used in training to ensure unit variance + images = 1 / self.vqvae.config.scaling_factor * images + images = self.vqvae.decode(images)["sample"] + + images = (images / 2 + 0.5).clamp(0, 1) + images = images.cpu().permute(0, 2, 3, 1).numpy() + images = (images * 255).round().astype("uint8") + images = list( + (Image.fromarray(_[:, :, 0]) for _ in images) + if images.shape[3] == 1 + else (Image.fromarray(_, mode="RGB").convert("L") for _ in images) + ) + + audios = [self.mel.image_to_audio(_) for _ in images] + if not return_dict: + return images, (self.mel.get_sample_rate(), audios) + + return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images)) + + @torch.no_grad() + def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray: + """ + Reverse the denoising step process to recover a noisy image from the generated image. + + Args: + images (`List[PIL Image]`): + List of images to encode. 
+ steps (`int`): + Number of encoding steps to perform (defaults to `50`). + + Returns: + `np.ndarray`: + A noise tensor of shape `(batch_size, 1, height, width)`. + """ + + # Only works with DDIM as this method is deterministic + assert isinstance(self.scheduler, DDIMScheduler) + self.scheduler.set_timesteps(steps) + sample = np.array( + [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images] + ) + sample = (sample / 255) * 2 - 1 + sample = torch.Tensor(sample).to(self.device) + + for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))): + prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps + alpha_prod_t = self.scheduler.alphas_cumprod[t] + alpha_prod_t_prev = ( + self.scheduler.alphas_cumprod[prev_timestep] + if prev_timestep >= 0 + else self.scheduler.final_alpha_cumprod + ) + beta_prod_t = 1 - alpha_prod_t + model_output = self.unet(sample, t)["sample"] + pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output + sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) + sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output + + return sample + + @staticmethod + def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor: + """Spherical Linear intERPolation. + + Args: + x0 (`torch.Tensor`): + The first tensor to interpolate between. + x1 (`torch.Tensor`): + Second tensor to interpolate between. + alpha (`float`): + Interpolation between 0 and 1 + + Returns: + `torch.Tensor`: + The interpolated tensor. + """ + + theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1)) + return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta) diff --git a/diffusers3/pipelines/deprecated/latent_diffusion_uncond/__init__.py b/diffusers3/pipelines/deprecated/latent_diffusion_uncond/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..214f5bbca969f9ae0629578c72aaf339f86ded88 --- /dev/null +++ b/diffusers3/pipelines/deprecated/latent_diffusion_uncond/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_latent_diffusion_uncond": ["LDMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_latent_diffusion_uncond import LDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py b/diffusers3/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe5d59f771da48ea3c7fb549dbb9762cac4620e --- /dev/null +++ b/diffusers3/pipelines/deprecated/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py @@ -0,0 +1,130 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel, VQModel +from ....schedulers import DDIMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class LDMPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation using latent diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents. + """ + + def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler): + super().__init__() + self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + eta: float = 0.0, + num_inference_steps: int = 50, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + Number of images to generate. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
+ + Example: + + ```py + >>> from diffusers import LDMPipeline + + >>> # load model and scheduler + >>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pipe().images[0] + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + + latents = randn_tensor( + (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + generator=generator, + ) + latents = latents.to(self.device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + self.scheduler.set_timesteps(num_inference_steps) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + + extra_kwargs = {} + if accepts_eta: + extra_kwargs["eta"] = eta + + for t in self.progress_bar(self.scheduler.timesteps): + latent_model_input = self.scheduler.scale_model_input(latents, t) + # predict the noise residual + noise_prediction = self.unet(latent_model_input, t).sample + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample + + # adjust latents with inverse of vae scale + latents = latents / self.vqvae.config.scaling_factor + # decode the image latents with the VAE + image = self.vqvae.decode(latents).sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/pndm/__init__.py b/diffusers3/pipelines/deprecated/pndm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5e3bdba74079d77576655e22b43014a0438a9c2e --- /dev/null +++ b/diffusers3/pipelines/deprecated/pndm/__init__.py @@ -0,0 +1,18 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_pndm": ["PNDMPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_pndm import PNDMPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/deprecated/pndm/pipeline_pndm.py b/diffusers3/pipelines/deprecated/pndm/pipeline_pndm.py new file mode 100644 index 0000000000000000000000000000000000000000..ef78af1940cedd07718a9e05d99da50e38a220d3 --- /dev/null +++ b/diffusers3/pipelines/deprecated/pndm/pipeline_pndm.py @@ -0,0 +1,121 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel +from ....schedulers import PNDMScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class PNDMPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`PNDMScheduler`]): + A `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image. + """ + + unet: UNet2DModel + scheduler: PNDMScheduler + + def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler): + super().__init__() + + scheduler = PNDMScheduler.from_config(scheduler.config) + + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, `optional`, defaults to 1): + The number of images to generate. + num_inference_steps (`int`, `optional`, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator`, `optional`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, `optional`, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from diffusers import PNDMPipeline + + >>> # load model and scheduler + >>> pndm = PNDMPipeline.from_pretrained("google/ddpm-cifar10-32") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> image = pndm().images[0] + + >>> # save image + >>> image.save("pndm_generated_image.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. 
+ """ + # For more information on the sampling method you can take a look at Algorithm 2 of + # the official paper: https://arxiv.org/pdf/2202.09778.pdf + + # Sample gaussian noise to begin loop + image = randn_tensor( + (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + generator=generator, + device=self.device, + ) + + self.scheduler.set_timesteps(num_inference_steps) + for t in self.progress_bar(self.scheduler.timesteps): + model_output = self.unet(image, t).sample + + image = self.scheduler.step(model_output, t, image).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/repaint/__init__.py b/diffusers3/pipelines/deprecated/repaint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c6b04af52d40e8a2bfa2aa5812b9fb8b1da06f5 --- /dev/null +++ b/diffusers3/pipelines/deprecated/repaint/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_repaint": ["RePaintPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_repaint import RePaintPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/deprecated/repaint/pipeline_repaint.py b/diffusers3/pipelines/deprecated/repaint/pipeline_repaint.py new file mode 100644 index 0000000000000000000000000000000000000000..101d315dfe59d3a298625cc79c9ca1754d9d181a --- /dev/null +++ b/diffusers3/pipelines/deprecated/repaint/pipeline_repaint.py @@ -0,0 +1,230 @@ +# Copyright 2024 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch + +from ....models import UNet2DModel +from ....schedulers import RePaintScheduler +from ....utils import PIL_INTERPOLATION, deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]): + if isinstance(mask, torch.Tensor): + return mask + elif isinstance(mask, PIL.Image.Image): + mask = [mask] + + if isinstance(mask[0], PIL.Image.Image): + w, h = mask[0].size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask] + mask = np.concatenate(mask, axis=0) + mask = mask.astype(np.float32) / 255.0 + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + elif isinstance(mask[0], torch.Tensor): + mask = torch.cat(mask, dim=0) + return mask + + +class RePaintPipeline(DiffusionPipeline): + r""" + Pipeline for image inpainting using RePaint. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image latents. + scheduler ([`RePaintScheduler`]): + A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image. + """ + + unet: UNet2DModel + scheduler: RePaintScheduler + model_cpu_offload_seq = "unet" + + def __init__(self, unet, scheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + image: Union[torch.Tensor, PIL.Image.Image], + mask_image: Union[torch.Tensor, PIL.Image.Image], + num_inference_steps: int = 250, + eta: float = 0.0, + jump_length: int = 10, + jump_n_sample: int = 10, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + image (`torch.Tensor` or `PIL.Image.Image`): + The original image to inpaint on. + mask_image (`torch.Tensor` or `PIL.Image.Image`): + The mask_image where 0.0 define which part of the original image to inpaint. + num_inference_steps (`int`, *optional*, defaults to 1000): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + eta (`float`): + The weight of the added noise in a diffusion step. Its value is between 0.0 and 1.0; 0.0 corresponds to + DDIM and 1.0 is the DDPM scheduler. + jump_length (`int`, *optional*, defaults to 10): + The number of steps taken forward in time before going backward in time for a single jump ("j" in + RePaint paper). Take a look at Figure 9 and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf). 
+ jump_n_sample (`int`, *optional*, defaults to 10): + The number of times to make a forward time jump for a given chosen time sample. Take a look at Figure 9 + and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf). + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, `optional`, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from io import BytesIO + >>> import torch + >>> import PIL + >>> import requests + >>> from diffusers import RePaintPipeline, RePaintScheduler + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png" + >>> mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png" + + >>> # Load the original image and the mask as PIL images + >>> original_image = download_image(img_url).resize((256, 256)) + >>> mask_image = download_image(mask_url).resize((256, 256)) + + >>> # Load the RePaint scheduler and pipeline based on a pretrained DDPM model + >>> scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256") + >>> pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> output = pipe( + ... image=original_image, + ... mask_image=mask_image, + ... num_inference_steps=250, + ... eta=0.0, + ... jump_length=10, + ... jump_n_sample=10, + ... generator=generator, + ... ) + >>> inpainted_image = output.images[0] + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + original_image = image + + original_image = _preprocess_image(original_image) + original_image = original_image.to(device=self._execution_device, dtype=self.unet.dtype) + mask_image = _preprocess_mask(mask_image) + mask_image = mask_image.to(device=self._execution_device, dtype=self.unet.dtype) + + batch_size = original_image.shape[0] + + # sample gaussian noise to begin the loop + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + image_shape = original_image.shape + image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) + + # set step values + self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self._execution_device) + self.scheduler.eta = eta + + t_last = self.scheduler.timesteps[0] + 1 + generator = generator[0] if isinstance(generator, list) else generator + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + if t < t_last: + # predict the noise residual + model_output = self.unet(image, t).sample + # compute previous image: x_t -> x_t-1 + image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample + + else: + # compute the reverse: x_t-1 -> x_t + image = self.scheduler.undo_step(image, t_last, generator) + t_last = t + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/score_sde_ve/__init__.py b/diffusers3/pipelines/deprecated/score_sde_ve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..87c167c3dbd26e0408a41ef197a42dc5eb7038d7 --- /dev/null +++ b/diffusers3/pipelines/deprecated/score_sde_ve/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_score_sde_ve import ScoreSdeVePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py b/diffusers3/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..b0bb114a81b75a979b49c0884e921a33beedff4f --- /dev/null +++ b/diffusers3/pipelines/deprecated/score_sde_ve/pipeline_score_sde_ve.py @@ -0,0 +1,109 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel +from ....schedulers import ScoreSdeVeScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class ScoreSdeVePipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image. 
+ scheduler ([`ScoreSdeVeScheduler`]): + A `ScoreSdeVeScheduler` to be used in combination with `unet` to denoise the encoded image. + """ + + unet: UNet2DModel + scheduler: ScoreSdeVeScheduler + + def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 2000, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, `optional`): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, `optional`, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + img_size = self.unet.config.sample_size + shape = (batch_size, 3, img_size, img_size) + + model = self.unet + + sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma + sample = sample.to(self.device) + + self.scheduler.set_timesteps(num_inference_steps) + self.scheduler.set_sigmas(num_inference_steps) + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device) + + # correction step + for _ in range(self.scheduler.config.correct_steps): + model_output = self.unet(sample, sigma_t).sample + sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample + + # prediction step + model_output = model(sample, sigma_t).sample + output = self.scheduler.step_pred(model_output, t, sample, generator=generator) + + sample, sample_mean = output.prev_sample, output.prev_sample_mean + + sample = sample_mean.clamp(0, 1) + sample = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + sample = self.numpy_to_pil(sample) + + if not return_dict: + return (sample,) + + return ImagePipelineOutput(images=sample) diff --git a/diffusers3/pipelines/deprecated/spectrogram_diffusion/__init__.py b/diffusers3/pipelines/deprecated/spectrogram_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..150954baa0eb8f8a7216b4891effc14a71e21b1b --- /dev/null +++ b/diffusers3/pipelines/deprecated/spectrogram_diffusion/__init__.py @@ -0,0 +1,75 @@ +# flake8: noqa +from typing import TYPE_CHECKING +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + _LazyModule, + is_note_seq_available, + OptionalDependencyNotAvailable, + is_torch_available, + is_transformers_available, + get_objects_from_module, +) + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects # noqa F403 + + 
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["continuous_encoder"] = ["SpectrogramContEncoder"]
+    _import_structure["notes_encoder"] = ["SpectrogramNotesEncoder"]
+    _import_structure["pipeline_spectrogram_diffusion"] = [
+        "SpectrogramContEncoder",
+        "SpectrogramDiffusionPipeline",
+        "T5FilmDecoder",
+    ]
+try:
+    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from ....utils import dummy_transformers_and_torch_and_note_seq_objects
+
+    _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects))
+else:
+    _import_structure["midi_utils"] = ["MidiProcessor"]
+
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from ....utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline
+        from .pipeline_spectrogram_diffusion import SpectrogramContEncoder
+        from .pipeline_spectrogram_diffusion import SpectrogramNotesEncoder
+        from .pipeline_spectrogram_diffusion import T5FilmDecoder
+
+    try:
+        if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        from ....utils.dummy_transformers_and_torch_and_note_seq_objects import *
+
+    else:
+        from .midi_utils import MidiProcessor
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
diff --git a/diffusers3/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py b/diffusers3/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..8664c2fb67113972976a24aa980d3aa7778b807e
--- /dev/null
+++ b/diffusers3/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py
@@ -0,0 +1,92 @@
+# Copyright 2022 The Music Spectrogram Diffusion Authors.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
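+
+# A small T5-style encoder for continuous (spectrogram) context frames. Unlike the notes
+# encoder, which embeds discrete note tokens, this module projects real-valued frames into
+# the model dimension with a linear layer before running the stack of T5 blocks.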
+ +import torch +import torch.nn as nn +from transformers.modeling_utils import ModuleUtilsMixin +from transformers.models.t5.modeling_t5 import ( + T5Block, + T5Config, + T5LayerNorm, +) + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin + + +class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): + @register_to_config + def __init__( + self, + input_dims: int, + targets_context_length: int, + d_model: int, + dropout_rate: float, + num_layers: int, + num_heads: int, + d_kv: int, + d_ff: int, + feed_forward_proj: str, + is_decoder: bool = False, + ): + super().__init__() + + self.input_proj = nn.Linear(input_dims, d_model, bias=False) + + self.position_encoding = nn.Embedding(targets_context_length, d_model) + self.position_encoding.weight.requires_grad = False + + self.dropout_pre = nn.Dropout(p=dropout_rate) + + t5config = T5Config( + d_model=d_model, + num_heads=num_heads, + d_kv=d_kv, + d_ff=d_ff, + feed_forward_proj=feed_forward_proj, + dropout_rate=dropout_rate, + is_decoder=is_decoder, + is_encoder_decoder=False, + ) + self.encoders = nn.ModuleList() + for lyr_num in range(num_layers): + lyr = T5Block(t5config) + self.encoders.append(lyr) + + self.layer_norm = T5LayerNorm(d_model) + self.dropout_post = nn.Dropout(p=dropout_rate) + + def forward(self, encoder_inputs, encoder_inputs_mask): + x = self.input_proj(encoder_inputs) + + # terminal relative positional encodings + max_positions = encoder_inputs.shape[1] + input_positions = torch.arange(max_positions, device=encoder_inputs.device) + + seq_lens = encoder_inputs_mask.sum(-1) + input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0) + x += self.position_encoding(input_positions) + + x = self.dropout_pre(x) + + # inverted the attention mask + input_shape = encoder_inputs.size() + extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) + + for lyr in self.encoders: + x = lyr(x, extended_attention_mask)[0] + x = self.layer_norm(x) + + return self.dropout_post(x), encoder_inputs_mask diff --git a/diffusers3/pipelines/deprecated/spectrogram_diffusion/midi_utils.py b/diffusers3/pipelines/deprecated/spectrogram_diffusion/midi_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e777e844935e76427cd92ead59b7d23eb8b1cbe5 --- /dev/null +++ b/diffusers3/pipelines/deprecated/spectrogram_diffusion/midi_utils.py @@ -0,0 +1,667 @@ +# Copyright 2022 The Music Spectrogram Diffusion Authors. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
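+
+# Utilities for converting MIDI files into the token sequences consumed by
+# SpectrogramDiffusionPipeline: a MIDI file is parsed into a note_seq.NoteSequence,
+# turned into timed note events, indexed against audio frame times, run-length
+# encoded, and finally tokenized. A rough usage sketch (the file name is illustrative):
+#
+#     processor = MidiProcessor()
+#     input_tokens = processor("song.mid")  # one list of token ids per audio segment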
+ +import dataclasses +import math +import os +from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F + +from ....utils import is_note_seq_available +from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH + + +if is_note_seq_available(): + import note_seq +else: + raise ImportError("Please install note-seq via `pip install note-seq`") + + +INPUT_FEATURE_LENGTH = 2048 + +SAMPLE_RATE = 16000 +HOP_SIZE = 320 +FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE) + +DEFAULT_STEPS_PER_SECOND = 100 +DEFAULT_MAX_SHIFT_SECONDS = 10 +DEFAULT_NUM_VELOCITY_BINS = 1 + +SLAKH_CLASS_PROGRAMS = { + "Acoustic Piano": 0, + "Electric Piano": 4, + "Chromatic Percussion": 8, + "Organ": 16, + "Acoustic Guitar": 24, + "Clean Electric Guitar": 26, + "Distorted Electric Guitar": 29, + "Acoustic Bass": 32, + "Electric Bass": 33, + "Violin": 40, + "Viola": 41, + "Cello": 42, + "Contrabass": 43, + "Orchestral Harp": 46, + "Timpani": 47, + "String Ensemble": 48, + "Synth Strings": 50, + "Choir and Voice": 52, + "Orchestral Hit": 55, + "Trumpet": 56, + "Trombone": 57, + "Tuba": 58, + "French Horn": 60, + "Brass Section": 61, + "Soprano/Alto Sax": 64, + "Tenor Sax": 66, + "Baritone Sax": 67, + "Oboe": 68, + "English Horn": 69, + "Bassoon": 70, + "Clarinet": 71, + "Pipe": 73, + "Synth Lead": 80, + "Synth Pad": 88, +} + + +@dataclasses.dataclass +class NoteRepresentationConfig: + """Configuration note representations.""" + + onsets_only: bool + include_ties: bool + + +@dataclasses.dataclass +class NoteEventData: + pitch: int + velocity: Optional[int] = None + program: Optional[int] = None + is_drum: Optional[bool] = None + instrument: Optional[int] = None + + +@dataclasses.dataclass +class NoteEncodingState: + """Encoding state for note transcription, keeping track of active pitches.""" + + # velocity bin for active pitches and programs + active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) + + +@dataclasses.dataclass +class EventRange: + type: str + min_value: int + max_value: int + + +@dataclasses.dataclass +class Event: + type: str + value: int + + +class Tokenizer: + def __init__(self, regular_ids: int): + # The special tokens: 0=PAD, 1=EOS, and 2=UNK + self._num_special_tokens = 3 + self._num_regular_tokens = regular_ids + + def encode(self, token_ids): + encoded = [] + for token_id in token_ids: + if not 0 <= token_id < self._num_regular_tokens: + raise ValueError( + f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})" + ) + encoded.append(token_id + self._num_special_tokens) + + # Add EOS token + encoded.append(1) + + # Pad to till INPUT_FEATURE_LENGTH + encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded)) + + return encoded + + +class Codec: + """Encode and decode events. + + Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from + Python before encoding or after decoding with GenericTokenVocabulary. This class is more lightweight and does not + include things like EOS or UNK token handling. + + To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required + and specified separately. + """ + + def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]): + """Define Codec. + + Args: + max_shift_steps: Maximum number of shift steps that can be encoded. 
+ steps_per_second: Shift steps will be interpreted as having a duration of + 1 / steps_per_second. + event_ranges: Other supported event types and their ranges. + """ + self.steps_per_second = steps_per_second + self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps) + self._event_ranges = [self._shift_range] + event_ranges + # Ensure all event types have unique names. + assert len(self._event_ranges) == len({er.type for er in self._event_ranges}) + + @property + def num_classes(self) -> int: + return sum(er.max_value - er.min_value + 1 for er in self._event_ranges) + + # The next couple methods are simplified special case methods just for shift + # events that are intended to be used from within autograph functions. + + def is_shift_event_index(self, index: int) -> bool: + return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value) + + @property + def max_shift_steps(self) -> int: + return self._shift_range.max_value + + def encode_event(self, event: Event) -> int: + """Encode an event to an index.""" + offset = 0 + for er in self._event_ranges: + if event.type == er.type: + if not er.min_value <= event.value <= er.max_value: + raise ValueError( + f"Event value {event.value} is not within valid range " + f"[{er.min_value}, {er.max_value}] for type {event.type}" + ) + return offset + event.value - er.min_value + offset += er.max_value - er.min_value + 1 + + raise ValueError(f"Unknown event type: {event.type}") + + def event_type_range(self, event_type: str) -> Tuple[int, int]: + """Return [min_id, max_id] for an event type.""" + offset = 0 + for er in self._event_ranges: + if event_type == er.type: + return offset, offset + (er.max_value - er.min_value) + offset += er.max_value - er.min_value + 1 + + raise ValueError(f"Unknown event type: {event_type}") + + def decode_event_index(self, index: int) -> Event: + """Decode an event index to an Event.""" + offset = 0 + for er in self._event_ranges: + if offset <= index <= offset + er.max_value - er.min_value: + return Event(type=er.type, value=er.min_value + index - offset) + offset += er.max_value - er.min_value + 1 + + raise ValueError(f"Unknown event index: {index}") + + +@dataclasses.dataclass +class ProgramGranularity: + # both tokens_map_fn and program_map_fn should be idempotent + tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]] + program_map_fn: Callable[[int], int] + + +def drop_programs(tokens, codec: Codec): + """Drops program change events from a token sequence.""" + min_program_id, max_program_id = codec.event_type_range("program") + return tokens[(tokens < min_program_id) | (tokens > max_program_id)] + + +def programs_to_midi_classes(tokens, codec): + """Modifies program events to be the first program in the MIDI class.""" + min_program_id, max_program_id = codec.event_type_range("program") + is_program = (tokens >= min_program_id) & (tokens <= max_program_id) + return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens) + + +PROGRAM_GRANULARITIES = { + # "flat" granularity; drop program change tokens and set NoteSequence + # programs to zero + "flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0), + # map each program to the first program in its MIDI class + "midi_class": ProgramGranularity( + tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8) + ), + # leave programs as is + "full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, 
program_map_fn=lambda program: program), +} + + +def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1): + """ + equivalent of tf.signal.frame + """ + signal_length = signal.shape[axis] + if pad_end: + frames_overlap = frame_length - frame_step + rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap) + pad_size = int(frame_length - rest_samples) + + if pad_size != 0: + pad_axis = [0] * signal.ndim + pad_axis[axis] = pad_size + signal = F.pad(signal, pad_axis, "constant", pad_value) + frames = signal.unfold(axis, frame_length, frame_step) + return frames + + +def program_to_slakh_program(program): + # this is done very hackily, probably should use a custom mapping + for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True): + if program >= slakh_program: + return slakh_program + + +def audio_to_frames( + samples, + hop_size: int, + frame_rate: int, +) -> Tuple[Sequence[Sequence[int]], torch.Tensor]: + """Convert audio samples to non-overlapping frames and frame times.""" + frame_size = hop_size + samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant") + + # Split audio into frames. + frames = frame( + torch.Tensor(samples).unsqueeze(0), + frame_length=frame_size, + frame_step=frame_size, + pad_end=False, # TODO check why its off by 1 here when True + ) + + num_frames = len(samples) // frame_size + + times = np.arange(num_frames) / frame_rate + return frames, times + + +def note_sequence_to_onsets_and_offsets_and_programs( + ns: note_seq.NoteSequence, +) -> Tuple[Sequence[float], Sequence[NoteEventData]]: + """Extract onset & offset times and pitches & programs from a NoteSequence. + + The onset & offset times will not necessarily be in sorted order. + + Args: + ns: NoteSequence from which to extract onsets and offsets. + + Returns: + times: A list of note onset and offset times. values: A list of NoteEventData objects where velocity is zero for + note + offsets. + """ + # Sort by program and pitch and put offsets before onsets as a tiebreaker for + # subsequent stable sort. 
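+    # (encode_and_index_events later sorts event times with np.argsort(..., kind="stable"), so
+    # when an offset and an onset share the same timestamp the offset is encoded first.)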
+ notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch)) + times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes] + values = [ + NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) + for note in notes + if not note.is_drum + ] + [ + NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) + for note in notes + ] + return times, values + + +def num_velocity_bins_from_codec(codec: Codec): + """Get number of velocity bins from event codec.""" + lo, hi = codec.event_type_range("velocity") + return hi - lo + + +# segment an array into segments of length n +def segment(a, n): + return [a[i : i + n] for i in range(0, len(a), n)] + + +def velocity_to_bin(velocity, num_velocity_bins): + if velocity == 0: + return 0 + else: + return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY) + + +def note_event_data_to_events( + state: Optional[NoteEncodingState], + value: NoteEventData, + codec: Codec, +) -> Sequence[Event]: + """Convert note event data to a sequence of events.""" + if value.velocity is None: + # onsets only, no program or velocity + return [Event("pitch", value.pitch)] + else: + num_velocity_bins = num_velocity_bins_from_codec(codec) + velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins) + if value.program is None: + # onsets + offsets + velocities only, no programs + if state is not None: + state.active_pitches[(value.pitch, 0)] = velocity_bin + return [Event("velocity", velocity_bin), Event("pitch", value.pitch)] + else: + if value.is_drum: + # drum events use a separate vocabulary + return [Event("velocity", velocity_bin), Event("drum", value.pitch)] + else: + # program + velocity + pitch + if state is not None: + state.active_pitches[(value.pitch, value.program)] = velocity_bin + return [ + Event("program", value.program), + Event("velocity", velocity_bin), + Event("pitch", value.pitch), + ] + + +def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]: + """Output program and pitch events for active notes plus a final tie event.""" + events = [] + for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]): + if state.active_pitches[(pitch, program)]: + events += [Event("program", program), Event("pitch", pitch)] + events.append(Event("tie", 0)) + return events + + +def encode_and_index_events( + state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None +): + """Encode a sequence of timed events and index to audio frame times. + + Encodes time shifts as repeated single step shifts for later run length encoding. + + Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio + frame. This can be used e.g. to prepend events representing the current state to a targets segment. + + Args: + state: Initial event encoding state. + event_times: Sequence of event times. + event_values: Sequence of event values. + encode_event_fn: Function that transforms event value into a sequence of one + or more Event objects. + codec: An Codec object that maps Event objects to indices. + frame_times: Time for every audio frame. + encoding_state_to_events_fn: Function that transforms encoding state into a + sequence of one or more Event objects. + + Returns: + events: Encoded events and shifts. event_start_indices: Corresponding start event index for every audio frame. 
+ Note: one event can correspond to multiple audio indices due to sampling rate differences. This makes + splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of + another. + event_end_indices: Corresponding end event index for every audio frame. Used + to ensure when slicing that one chunk ends where the next begins. Should always be true that + event_end_indices[i] = event_start_indices[i + 1]. + state_events: Encoded "state" events representing the encoding state before + each event. + state_event_indices: Corresponding state event index for every audio frame. + """ + indices = np.argsort(event_times, kind="stable") + event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices] + event_values = [event_values[i] for i in indices] + + events = [] + state_events = [] + event_start_indices = [] + state_event_indices = [] + + cur_step = 0 + cur_event_idx = 0 + cur_state_event_idx = 0 + + def fill_event_start_indices_to_cur_step(): + while ( + len(event_start_indices) < len(frame_times) + and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second + ): + event_start_indices.append(cur_event_idx) + state_event_indices.append(cur_state_event_idx) + + for event_step, event_value in zip(event_steps, event_values): + while event_step > cur_step: + events.append(codec.encode_event(Event(type="shift", value=1))) + cur_step += 1 + fill_event_start_indices_to_cur_step() + cur_event_idx = len(events) + cur_state_event_idx = len(state_events) + if encoding_state_to_events_fn: + # Dump state to state events *before* processing the next event, because + # we want to capture the state prior to the occurrence of the event. + for e in encoding_state_to_events_fn(state): + state_events.append(codec.encode_event(e)) + + for e in encode_event_fn(state, event_value, codec): + events.append(codec.encode_event(e)) + + # After the last event, continue filling out the event_start_indices array. + # The inequality is not strict because if our current step lines up exactly + # with (the start of) an audio frame, we need to add an additional shift event + # to "cover" that frame. + while cur_step / codec.steps_per_second <= frame_times[-1]: + events.append(codec.encode_event(Event(type="shift", value=1))) + cur_step += 1 + fill_event_start_indices_to_cur_step() + cur_event_idx = len(events) + + # Now fill in event_end_indices. We need this extra array to make sure that + # when we slice events, each slice ends exactly where the subsequent slice + # begins. 
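+    # By construction, event_end_indices[i] == event_start_indices[i + 1] for every audio frame i.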
+ event_end_indices = event_start_indices[1:] + [len(events)] + + events = np.array(events).astype(np.int32) + state_events = np.array(state_events).astype(np.int32) + event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH) + event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH) + state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH) + + outputs = [] + for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices): + outputs.append( + { + "inputs": events, + "event_start_indices": start_indices, + "event_end_indices": end_indices, + "state_events": state_events, + "state_event_indices": event_indices, + } + ) + + return outputs + + +def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"): + """Extract target sequence corresponding to audio token segment.""" + features = features.copy() + start_idx = features["event_start_indices"][0] + end_idx = features["event_end_indices"][-1] + + features[feature_key] = features[feature_key][start_idx:end_idx] + + if state_events_end_token is not None: + # Extract the state events corresponding to the audio start token, and + # prepend them to the targets array. + state_event_start_idx = features["state_event_indices"][0] + state_event_end_idx = state_event_start_idx + 1 + while features["state_events"][state_event_end_idx - 1] != state_events_end_token: + state_event_end_idx += 1 + features[feature_key] = np.concatenate( + [ + features["state_events"][state_event_start_idx:state_event_end_idx], + features[feature_key], + ], + axis=0, + ) + + return features + + +def map_midi_programs( + feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs" +) -> Mapping[str, Any]: + """Apply MIDI program map to token sequences.""" + granularity = PROGRAM_GRANULARITIES[granularity_type] + + feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec) + return feature + + +def run_length_encode_shifts_fn( + features, + codec: Codec, + feature_key: str = "inputs", + state_change_event_types: Sequence[str] = (), +) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]: + """Return a function that run-length encodes shifts for a given codec. + + Args: + codec: The Codec to use for shift events. + feature_key: The feature key for which to run-length encode shifts. + state_change_event_types: A list of event types that represent state + changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones + will be removed. + + Returns: + A preprocessing function that run-length encodes single-step shifts. + """ + state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types] + + def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]: + """Combine leading/interior shifts, trim trailing shifts. + + Args: + features: Dict of features to process. + + Returns: + A dict of features. + """ + events = features[feature_key] + + shift_steps = 0 + total_shift_steps = 0 + output = np.array([], dtype=np.int32) + + current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32) + + for event in events: + if codec.is_shift_event_index(event): + shift_steps += 1 + total_shift_steps += 1 + + else: + # If this event is a state change and has the same value as the current + # state, we can skip it entirely. 
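+                # (e.g. two identical consecutive velocity events collapse to a single one; the
+                # current_state array below records the last value seen per state-change type.)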
+ is_redundant = False + for i, (min_index, max_index) in enumerate(state_change_event_ranges): + if (min_index <= event) and (event <= max_index): + if current_state[i] == event: + is_redundant = True + current_state[i] = event + if is_redundant: + continue + + # Once we've reached a non-shift event, RLE all previous shift events + # before outputting the non-shift event. + if shift_steps > 0: + shift_steps = total_shift_steps + while shift_steps > 0: + output_steps = np.minimum(codec.max_shift_steps, shift_steps) + output = np.concatenate([output, [output_steps]], axis=0) + shift_steps -= output_steps + output = np.concatenate([output, [event]], axis=0) + + features[feature_key] = output + return features + + return run_length_encode_shifts(features) + + +def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig): + tie_token = codec.encode_event(Event("tie", 0)) + state_events_end_token = tie_token if note_representation_config.include_ties else None + + features = extract_sequence_with_indices( + features, state_events_end_token=state_events_end_token, feature_key="inputs" + ) + + features = map_midi_programs(features, codec) + + features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"]) + + return features + + +class MidiProcessor: + def __init__(self): + self.codec = Codec( + max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND, + steps_per_second=DEFAULT_STEPS_PER_SECOND, + event_ranges=[ + EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), + EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS), + EventRange("tie", 0, 0), + EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM), + EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), + ], + ) + self.tokenizer = Tokenizer(self.codec.num_classes) + self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True) + + def __call__(self, midi: Union[bytes, os.PathLike, str]): + if not isinstance(midi, bytes): + with open(midi, "rb") as f: + midi = f.read() + + ns = note_seq.midi_to_note_sequence(midi) + ns_sus = note_seq.apply_sustain_control_changes(ns) + + for note in ns_sus.notes: + if not note.is_drum: + note.program = program_to_slakh_program(note.program) + + samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE)) + + _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE) + times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus) + + events = encode_and_index_events( + state=NoteEncodingState(), + event_times=times, + event_values=values, + frame_times=frame_times, + codec=self.codec, + encode_event_fn=note_event_data_to_events, + encoding_state_to_events_fn=note_encoding_state_to_events, + ) + + events = [ + note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events + ] + input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events] + + return input_tokens diff --git a/diffusers3/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py b/diffusers3/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..1259f0bf056aa40ad111b4f282210687430ad68f --- /dev/null +++ b/diffusers3/pipelines/deprecated/spectrogram_diffusion/notes_encoder.py @@ -0,0 +1,86 @@ +# Copyright 2022 The Music Spectrogram Diffusion Authors. +# Copyright 2024 The HuggingFace Team. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +from transformers.modeling_utils import ModuleUtilsMixin +from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin + + +class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): + @register_to_config + def __init__( + self, + max_length: int, + vocab_size: int, + d_model: int, + dropout_rate: float, + num_layers: int, + num_heads: int, + d_kv: int, + d_ff: int, + feed_forward_proj: str, + is_decoder: bool = False, + ): + super().__init__() + + self.token_embedder = nn.Embedding(vocab_size, d_model) + + self.position_encoding = nn.Embedding(max_length, d_model) + self.position_encoding.weight.requires_grad = False + + self.dropout_pre = nn.Dropout(p=dropout_rate) + + t5config = T5Config( + vocab_size=vocab_size, + d_model=d_model, + num_heads=num_heads, + d_kv=d_kv, + d_ff=d_ff, + dropout_rate=dropout_rate, + feed_forward_proj=feed_forward_proj, + is_decoder=is_decoder, + is_encoder_decoder=False, + ) + + self.encoders = nn.ModuleList() + for lyr_num in range(num_layers): + lyr = T5Block(t5config) + self.encoders.append(lyr) + + self.layer_norm = T5LayerNorm(d_model) + self.dropout_post = nn.Dropout(p=dropout_rate) + + def forward(self, encoder_input_tokens, encoder_inputs_mask): + x = self.token_embedder(encoder_input_tokens) + + seq_length = encoder_input_tokens.shape[1] + inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device) + x += self.position_encoding(inputs_positions) + + x = self.dropout_pre(x) + + # inverted the attention mask + input_shape = encoder_input_tokens.size() + extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) + + for lyr in self.encoders: + x = lyr(x, extended_attention_mask)[0] + x = self.layer_norm(x) + + return self.dropout_post(x), encoder_inputs_mask diff --git a/diffusers3/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py b/diffusers3/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..b8ac8e1416bf7049f3835f62f5762aa237c34890 --- /dev/null +++ b/diffusers3/pipelines/deprecated/spectrogram_diffusion/pipeline_spectrogram_diffusion.py @@ -0,0 +1,269 @@ +# Copyright 2022 The Music Spectrogram Diffusion Authors. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Any, Callable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ....models import T5FilmDecoder +from ....schedulers import DDPMScheduler +from ....utils import is_onnx_available, logging +from ....utils.torch_utils import randn_tensor + + +if is_onnx_available(): + from ...onnx_utils import OnnxRuntimeModel + +from ...pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from .continuous_encoder import SpectrogramContEncoder +from .notes_encoder import SpectrogramNotesEncoder + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +TARGET_FEATURE_LENGTH = 256 + + +class SpectrogramDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for unconditional audio generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + notes_encoder ([`SpectrogramNotesEncoder`]): + continuous_encoder ([`SpectrogramContEncoder`]): + decoder ([`T5FilmDecoder`]): + A [`T5FilmDecoder`] to denoise the encoded audio latents. + scheduler ([`DDPMScheduler`]): + A scheduler to be used in combination with `decoder` to denoise the encoded audio latents. + melgan ([`OnnxRuntimeModel`]): + """ + + _optional_components = ["melgan"] + + def __init__( + self, + notes_encoder: SpectrogramNotesEncoder, + continuous_encoder: SpectrogramContEncoder, + decoder: T5FilmDecoder, + scheduler: DDPMScheduler, + melgan: OnnxRuntimeModel if is_onnx_available() else Any, + ) -> None: + super().__init__() + + # From MELGAN + self.min_value = math.log(1e-5) # Matches MelGAN training. + self.max_value = 4.0 # Largest value for most examples + self.n_dims = 128 + + self.register_modules( + notes_encoder=notes_encoder, + continuous_encoder=continuous_encoder, + decoder=decoder, + scheduler=scheduler, + melgan=melgan, + ) + + def scale_features(self, features, output_range=(-1.0, 1.0), clip=False): + """Linearly scale features to network outputs range.""" + min_out, max_out = output_range + if clip: + features = torch.clip(features, self.min_value, self.max_value) + # Scale to [0, 1]. + zero_one = (features - self.min_value) / (self.max_value - self.min_value) + # Scale to [min_out, max_out]. + return zero_one * (max_out - min_out) + min_out + + def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False): + """Invert by linearly scaling network outputs to features range.""" + min_out, max_out = input_range + outputs = torch.clip(outputs, min_out, max_out) if clip else outputs + # Scale to [0, 1]. + zero_one = (outputs - min_out) / (max_out - min_out) + # Scale to [self.min_value, self.max_value]. 
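+ # (Illustrative: with the defaults set in __init__, min_value = log(1e-5),
+ # i.e. about -11.51, and max_value = 4.0, so a network output of -1.0 maps
+ # back to about -11.51 and an output of +1.0 maps back to 4.0, inverting
+ # scale_features above.)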
+ return zero_one * (self.max_value - self.min_value) + self.min_value + + def encode(self, input_tokens, continuous_inputs, continuous_mask): + tokens_mask = input_tokens > 0 + tokens_encoded, tokens_mask = self.notes_encoder( + encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask + ) + + continuous_encoded, continuous_mask = self.continuous_encoder( + encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask + ) + + return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] + + def decode(self, encodings_and_masks, input_tokens, noise_time): + timesteps = noise_time + if not torch.is_tensor(timesteps): + timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device) + elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: + timesteps = timesteps[None].to(input_tokens.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device) + + logits = self.decoder( + encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps + ) + return logits + + @torch.no_grad() + def __call__( + self, + input_tokens: List[List[int]], + generator: Optional[torch.Generator] = None, + num_inference_steps: int = 100, + return_dict: bool = True, + output_type: str = "np", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ) -> Union[AudioPipelineOutput, Tuple]: + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + r""" + The call function to the pipeline for generation. + + Args: + input_tokens (`List[List[int]]`): + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated audio. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. 
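+
+ Note:
+ Returning audio with `output_type="np"` requires the optional `melgan` component and an available
+ ONNX runtime; with any other `output_type` (e.g. `"mel"`), the raw mel spectrogram segments are
+ returned instead.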
+ + Example: + + ```py + >>> from diffusers import SpectrogramDiffusionPipeline, MidiProcessor + + >>> pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion") + >>> pipe = pipe.to("cuda") + >>> processor = MidiProcessor() + + >>> # Download MIDI from: wget http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_2.mid + >>> output = pipe(processor("beethoven_hammerklavier_2.mid")) + + >>> audio = output.audios[0] + ``` + + Returns: + [`pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + + pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32) + full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32) + ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device) + + for i, encoder_input_tokens in enumerate(input_tokens): + if i == 0: + encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to( + device=self.device, dtype=self.decoder.dtype + ) + # The first chunk has no previous context. + encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device) + else: + # The full song pipeline does not feed in a context feature, so the mask + # will be all 0s after the feature converter. Because we know we're + # feeding in a full context chunk from the previous prediction, set it + # to all 1s. + encoder_continuous_mask = ones + + encoder_continuous_inputs = self.scale_features( + encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True + ) + + encodings_and_masks = self.encode( + input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), + continuous_inputs=encoder_continuous_inputs, + continuous_mask=encoder_continuous_mask, + ) + + # Sample encoder_continuous_inputs shaped gaussian noise to begin loop + x = randn_tensor( + shape=encoder_continuous_inputs.shape, + generator=generator, + device=self.device, + dtype=self.decoder.dtype, + ) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + # Denoising diffusion loop + for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + output = self.decode( + encodings_and_masks=encodings_and_masks, + input_tokens=x, + noise_time=t / self.scheduler.config.num_train_timesteps, # rescale to [0, 1) + ) + + # Compute previous output: x_t -> x_t-1 + x = self.scheduler.step(output, t, x, generator=generator).prev_sample + + mel = self.scale_to_features(x, input_range=[-1.0, 1.0]) + encoder_continuous_inputs = mel[:1] + pred_mel = mel.cpu().float().numpy() + + full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1) + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, full_pred_mel) + + logger.info("Generated segment", i) + + if output_type == "np" and not is_onnx_available(): + raise ValueError( + "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." + ) + elif output_type == "np" and self.melgan is None: + raise ValueError( + "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
+ ) + + if output_type == "np": + output = self.melgan(input_features=full_pred_mel.astype(np.float32)) + else: + output = full_pred_mel + + if not return_dict: + return (output,) + + return AudioPipelineOutput(audios=output) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/__init__.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..36cf1a33ce6ada8e718aabadb9a706737aee30bd --- /dev/null +++ b/diffusers3/pipelines/deprecated/stable_diffusion_variants/__init__.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] + + _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] + _import_structure["pipeline_stable_diffusion_pix2pix_zero"] = ["StableDiffusionPix2PixZeroPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_cycle_diffusion import CycleDiffusionPipeline + from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy + from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline + from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline + from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..777be883cb9dfc86f8a5268620a746214da7dba4 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_cycle_diffusion.py @@ -0,0 +1,949 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler +from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def posterior_sample(scheduler, latents, timestep, clean_latents, generator, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + if prev_timestep <= 0: + return clean_latents + + # 2. 
compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # direction pointing to x_t + e_t = (latents - alpha_prod_t ** (0.5) * clean_latents) / (1 - alpha_prod_t) ** (0.5) + dir_xt = (1.0 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * e_t + noise = std_dev_t * randn_tensor( + clean_latents.shape, dtype=clean_latents.dtype, device=clean_latents.device, generator=generator + ) + prev_latents = alpha_prod_t_prev ** (0.5) * clean_latents + dir_xt + noise + + return prev_latents + + +def compute_noise(scheduler, prev_latents, latents, timestep, noise_pred, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) + + # 4. Clip "predicted x_0" + if scheduler.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 5. compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred + + noise = (prev_latents - (alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction)) / ( + variance ** (0.5) * eta + ) + return noise + + +class CycleDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): + r""" + Pipeline for text-guided image to image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can only be an + instance of [`DDIMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. 
Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + image = image.to(device=device, dtype=dtype) + + batch_size = image.shape[0] + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt * num_images_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) + + # add noise to latents using the timestep + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + clean_latents = init_latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents, clean_latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + source_prompt: Union[str, List[str]], + image: PipelineImageInput = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + source_guidance_scale: Optional[float] = 1, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be used as the starting point. Can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + source_guidance_scale (`float`, *optional*, defaults to 1): + Guidance scale for the source prompt. This is useful to control the amount of influence the source + prompt has for encoding. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Example: + + ```py + import requests + import torch + from PIL import Image + from io import BytesIO + + from diffusers import CycleDiffusionPipeline, DDIMScheduler + + # load the pipeline + # make sure you're logged in with `huggingface-cli login` + model_id_or_path = "CompVis/stable-diffusion-v1-4" + scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") + pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") + + # let's download an initial image + url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png" + response = requests.get(url) + init_image = Image.open(BytesIO(response.content)).convert("RGB") + init_image = init_image.resize((512, 512)) + init_image.save("horse.png") + + # let's specify a prompt + source_prompt = "An astronaut riding a horse" + prompt = "An astronaut riding an elephant" + + # call the pipeline + image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.8, + guidance_scale=2, + source_guidance_scale=1, + ).images[0] + + image.save("horse_to_elephant.png") + + # let's try another example + # See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion + url = ( + "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png" + ) + response = requests.get(url) + init_image = Image.open(BytesIO(response.content)).convert("RGB") + init_image = init_image.resize((512, 512)) + init_image.save("black.png") + + source_prompt = "A black colored car" + prompt = "A blue colored car" + + # call the pipeline + torch.manual_seed(0) + image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.85, + guidance_scale=3, + source_guidance_scale=1, + ).images[0] + + image.save("black_to_blue.png") + ``` + + Returns: + 
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 1. Check inputs + self.check_inputs(prompt, strength, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds_tuple = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + source_prompt_embeds_tuple = self.encode_prompt( + source_prompt, device, num_images_per_prompt, do_classifier_free_guidance, None, clip_skip=clip_skip + ) + if prompt_embeds_tuple[1] is not None: + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + else: + prompt_embeds = prompt_embeds_tuple[0] + if source_prompt_embeds_tuple[1] is not None: + source_prompt_embeds = torch.cat([source_prompt_embeds_tuple[1], source_prompt_embeds_tuple[0]]) + else: + source_prompt_embeds = source_prompt_embeds_tuple[0] + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents, clean_latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + source_latents = latents + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + generator = extra_step_kwargs.pop("generator", None) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + source_latent_model_input = ( + torch.cat([source_latents] * 2) if do_classifier_free_guidance else source_latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + source_latent_model_input = self.scheduler.scale_model_input(source_latent_model_input, t) + + # predict the noise residual + if do_classifier_free_guidance: + concat_latent_model_input = torch.stack( + [ + source_latent_model_input[0], + latent_model_input[0], + source_latent_model_input[1], + latent_model_input[1], + ], + dim=0, + ) + concat_prompt_embeds = torch.stack( + [ + source_prompt_embeds[0], + prompt_embeds[0], + source_prompt_embeds[1], + prompt_embeds[1], + ], + dim=0, + ) + else: + concat_latent_model_input = torch.cat( + [ + source_latent_model_input, + latent_model_input, + ], + dim=0, + ) + concat_prompt_embeds = torch.cat( + [ + source_prompt_embeds, + prompt_embeds, + ], + dim=0, + ) + + concat_noise_pred = self.unet( + concat_latent_model_input, + t, + cross_attention_kwargs=cross_attention_kwargs, + encoder_hidden_states=concat_prompt_embeds, + ).sample + + # perform guidance + if do_classifier_free_guidance: + ( + source_noise_pred_uncond, + noise_pred_uncond, + source_noise_pred_text, + noise_pred_text, + ) = concat_noise_pred.chunk(4, dim=0) + + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + source_noise_pred = source_noise_pred_uncond + source_guidance_scale * ( + source_noise_pred_text - source_noise_pred_uncond + ) + + else: + (source_noise_pred, noise_pred) = concat_noise_pred.chunk(2, dim=0) + + # Sample source_latents from the posterior distribution. + prev_source_latents = posterior_sample( + self.scheduler, source_latents, t, clean_latents, generator=generator, **extra_step_kwargs + ) + # Compute noise. + noise = compute_noise( + self.scheduler, prev_source_latents, source_latents, t, source_noise_pred, **extra_step_kwargs + ) + source_latents = prev_source_latents + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, variance_noise=noise, **extra_step_kwargs + ).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..0aa5e68bfcb4b276cafea63fce6e2a5f3e2f79c2 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -0,0 +1,542 @@ +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ....configuration_utils import FrozenDict +from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ....utils import deprecate, logging +from ...onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def preprocess(image): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return 2.0 * image - 1.0 + + +def preprocess_mask(mask, scale_factor=8): + mask = mask.convert("L") + w, h = mask.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL.Image.NEAREST) + mask = np.array(mask).astype(np.float32) / 255.0 + mask = np.tile(mask, (4, 1, 1)) + mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? + mask = 1 - mask # repaint white, keep black + return mask + + +class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. This is a *legacy feature* for Onnx pipelines to + provide compatibility with StableDiffusionInpaintPipelineLegacy and may be removed in the future. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
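+ Returns:
+ `np.ndarray`: the prompt embeddings. When classifier-free guidance is used, the negative and
+ positive embeddings are concatenated along the batch axis (as done just below in this method),
+ so the first dimension is `2 * batch_size * num_images_per_prompt`; otherwise it is
+ `batch_size * num_images_per_prompt`.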
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[np.ndarray, PIL.Image.Image] = None, + mask_image: Union[np.ndarray, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[np.random.RandomState] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`nd.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. This is the image whose masked region will be inpainted. + mask_image (`nd.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a + PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should + contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.uu + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter will be modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (?) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if generator is None: + generator = np.random + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + if isinstance(image, PIL.Image.Image): + image = preprocess(image) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
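+ # Illustrative arithmetic (made-up values, not defaults of this pipeline): with
+ # guidance_scale = 7.5, an unconditional prediction of 0.2 and a text-conditioned prediction
+ # of 0.5 for some latent element, the guidance step further below yields
+ # 0.2 + 7.5 * (0.5 - 0.2) = 2.45, i.e. the update is pushed well past the conditioned
+ # prediction. With guidance_scale <= 1.0 the extra unconditional pass is skipped entirely.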
+ do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + latents_dtype = prompt_embeds.dtype + image = image.astype(latents_dtype) + + # encode the init image into latents and scale the latents + init_latents = self.vae_encoder(sample=image)[0] + init_latents = 0.18215 * init_latents + + # Expand init_latents for batch_size and num_images_per_prompt + init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) + init_latents_orig = init_latents + + # preprocess mask + if not isinstance(mask_image, np.ndarray): + mask_image = preprocess_mask(mask_image, 8) + mask_image = mask_image.astype(latents_dtype) + mask = np.concatenate([mask_image] * num_images_per_prompt, axis=0) + + # check sizes + if not mask.shape == init_latents.shape: + raise ValueError("The mask and image should be the same size!") + + # get the original timestep using init_timestep + offset = self.scheduler.config.get("steps_offset", 0) + init_timestep = int(num_inference_steps * strength) + offset + init_timestep = min(init_timestep, num_inference_steps) + + timesteps = self.scheduler.timesteps.numpy()[-init_timestep] + timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) + + # add noise to latents using the timesteps + noise = generator.randn(*init_latents.shape).astype(latents_dtype) + init_latents = self.scheduler.add_noise( + torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps) + ) + init_latents = init_latents.numpy() + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (?) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ? 
in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + latents = init_latents + + t_start = max(num_inference_steps - init_timestep + offset, 0) + timesteps = self.scheduler.timesteps[t_start:].numpy() + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[ + 0 + ] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ).prev_sample + + latents = latents.numpy() + + init_latents_proper = self.scheduler.add_noise( + torch.from_numpy(init_latents_orig), torch.from_numpy(noise), torch.from_numpy(np.array([t])) + ) + + init_latents_proper = init_latents_proper.numpy() + + latents = (init_latents_proper * mask) + (latents * (1 - mask)) + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + # There will throw an error if use safety_checker batchsize>1 + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..ce7ad3b0dfe94500173b411e5cdbf1bad608d6d1 --- /dev/null +++ 
b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py @@ -0,0 +1,784 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....configuration_utils import FrozenDict +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) + + +def preprocess_image(image, batch_size): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def preprocess_mask(mask, batch_size, scale_factor=8): + if not isinstance(mask, torch.Tensor): + mask = mask.convert("L") + w, h = mask.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) + mask = np.array(mask).astype(np.float32) / 255.0 + mask = np.tile(mask, (4, 1, 1)) + mask = np.vstack([mask[None]] * batch_size) + mask = 1 - mask # repaint white, keep black + mask = torch.from_numpy(mask) + return mask + + else: + valid_mask_channel_sizes = [1, 3] + # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W) + if mask.shape[3] in valid_mask_channel_sizes: + mask = mask.permute(0, 3, 1, 2) + elif mask.shape[1] not in valid_mask_channel_sizes: + raise ValueError( + f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension," + f" but received mask of shape {tuple(mask.shape)}" + ) + # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape + mask = mask.mean(dim=1, keepdim=True) + h, w = mask.shape[-2:] + h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8 + mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) + return mask + + +class StableDiffusionInpaintPipelineLegacy( + DiffusionPipeline, 
TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin +): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + In addition the pipeline inherits the following loading methods: + - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] + - *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] + - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] + + as well as the following saving methods: + - *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + deprecation_message = ( + f"The class {self.__class__} is deprecated and will be removed in v1.0.0. You can achieve exactly the same functionality" + "by loading your model into `StableDiffusionInpaintPipeline` instead. See https://github.com/huggingface/diffusers/pull/3533" + "for more information." + ) + deprecate("legacy is outdated", "1.0.0", deprecation_message, standard_warn=False) + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
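+ # For example, with clip_skip = 1 the index `-(clip_skip + 1)` above is -2, i.e. the hidden
+ # states of the penultimate encoder layer are used; clip_skip = 2 selects the layer before
+ # that, and so on.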
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
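+ # Note (behavior of the upstream diffusers safety checker, assumed unchanged in this vendored
+ # copy): the call below returns the images together with a per-image NSFW flag list, and any
+ # image flagged as NSFW is returned blacked out.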
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, num_images_per_prompt, dtype, device, generator): + image = image.to(device=device, dtype=dtype) + init_latent_dist = self.vae.encode(image).latent_dist + init_latents = init_latent_dist.sample(generator=generator) + init_latents = self.vae.config.scaling_factor * init_latents + + # Expand init_latents for batch_size and num_images_per_prompt + init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) + init_latents_orig = init_latents + + # add noise to latents using the timesteps + noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + return latents, init_latents_orig, noise + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.Tensor, PIL.Image.Image] = None, + mask_image: Union[torch.Tensor, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + add_predicted_noise: Optional[bool] = False, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. This is the image whose masked region will be inpainted. + mask_image (`torch.Tensor` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a + PIL image, it will be converted to a single channel (luminance) before use. If mask is a tensor, the + expected shape should be either `(B, H, W, C)` or `(B, C, H, W)`, where C is 1 or 3. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. 
When `strength` + is 1, the denoising process will be run on the masked area for the full number of iterations specified + in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to + that region the larger the `strength`. If `strength` is 0, no inpainting will occur. + num_inference_steps (`int`, *optional*, defaults to 50): + The reference number of denoising steps. More denoising steps usually lead to a higher quality image at + the expense of slower inference. This parameter will be modulated by `strength`, as explained above. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + add_predicted_noise (`bool`, *optional*, defaults to True): + Use predicted noise instead of random noise when constructing noisy versions of the original image in + the reverse diffusion process + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + # 1. Check inputs + self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess image and mask + if not isinstance(image, torch.Tensor): + image = preprocess_image(image, batch_size) + + mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + # encode the init image into latents and scale the latents + latents, init_latents_orig, noise = self.prepare_latents( + image, latent_timestep, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + + # 7. Prepare mask latent + mask = mask_image.to(device=device, dtype=latents.dtype) + mask = torch.cat([mask] * num_images_per_prompt) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + # masking + if add_predicted_noise: + init_latents_proper = self.scheduler.add_noise( + init_latents_orig, noise_pred_uncond, torch.tensor([t]) + ) + else: + init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) + + latents = (init_latents_proper * mask) + (latents * (1 - mask)) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # use original latents corresponding to unmasked portions of the image + latents = (init_latents_orig * mask) + (latents * (1 - mask)) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py new file mode 100644 index 0000000000000000000000000000000000000000..9e91986896bd4ec46560a8cc4f03845ccade4c61 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_model_editing.py @@ -0,0 +1,830 @@ +# Copyright 2024 TIME Authors and The HuggingFace Team. All rights reserved." +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import PNDMScheduler +from ....schedulers.scheduling_utils import SchedulerMixin +from ....utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +AUGS_CONST = ["A photo of ", "An image of ", "A picture of "] + + +class StableDiffusionModelEditingPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + r""" + Pipeline for text-to-image model editing. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + with_to_k ([`bool`]): + Whether to edit the key projection matrices along with the value projection matrices. + with_augs ([`list`]): + Textual augmentations to apply while editing the text-to-image model. Set to `[]` for no augmentations. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: SchedulerMixin, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + with_to_k: bool = True, + with_augs: list = AUGS_CONST, + ): + super().__init__() + + if isinstance(scheduler, PNDMScheduler): + logger.error("PNDMScheduler for this pipeline is currently not supported.") + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + self.with_to_k = with_to_k + self.with_augs = with_augs + + # get cross-attention layers + ca_layers = [] + + def append_ca(net_): + if net_.__class__.__name__ == "CrossAttention": + ca_layers.append(net_) + elif hasattr(net_, "children"): + for net__ in net_.children(): + append_ca(net__) + + # recursively find all cross-attention layers in unet + for net in self.unet.named_children(): + if "down" in net[0]: + append_ca(net[1]) + elif "up" in net[0]: + append_ca(net[1]) + elif "mid" in net[0]: + append_ca(net[1]) + + # get projection matrices + self.ca_clip_layers = [l for l in ca_layers if l.to_v.in_features == 768] + self.projection_matrices = [l.to_v for l in self.ca_clip_layers] + self.og_matrices = [copy.deepcopy(l.to_v) for l in self.ca_clip_layers] + if self.with_to_k: + self.projection_matrices = self.projection_matrices + [l.to_k for l in self.ca_clip_layers] + self.og_matrices = self.og_matrices + [copy.deepcopy(l.to_k) for l in self.ca_clip_layers] + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will 
be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def edit_model( + self, + source_prompt: str, + destination_prompt: str, + lamb: float = 0.1, + restart_params: bool = True, + ): + r""" + Apply model editing via closed-form solution (see Eq. 5 in the TIME [paper](https://arxiv.org/abs/2303.08084)). + + Args: + source_prompt (`str`): + The source prompt containing the concept to be edited. + destination_prompt (`str`): + The destination prompt. Must contain all words from `source_prompt` with additional ones to specify the + target edit. + lamb (`float`, *optional*, defaults to 0.1): + The lambda parameter specifying the regularization intesity. Smaller values increase the editing power. + restart_params (`bool`, *optional*, defaults to True): + Restart the model parameters to their pre-trained version before editing. This is done to avoid edit + compounding. When it is `False`, edits accumulate. 
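+
+ Example (an illustrative sketch that mirrors the end-to-end example in `__call__` below; the
+ checkpoint name is only an example):
+
+ ```py
+ >>> from diffusers import StableDiffusionModelEditingPipeline
+
+ >>> pipe = StableDiffusionModelEditingPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
+ >>> pipe.edit_model("A pack of roses", "A pack of blue roses", lamb=0.1)
+ >>> image = pipe("A field of roses").images[0]
+ ```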
+ """ + + # restart LDM parameters + if restart_params: + num_ca_clip_layers = len(self.ca_clip_layers) + for idx_, l in enumerate(self.ca_clip_layers): + l.to_v = copy.deepcopy(self.og_matrices[idx_]) + self.projection_matrices[idx_] = l.to_v + if self.with_to_k: + l.to_k = copy.deepcopy(self.og_matrices[num_ca_clip_layers + idx_]) + self.projection_matrices[num_ca_clip_layers + idx_] = l.to_k + + # set up sentences + old_texts = [source_prompt] + new_texts = [destination_prompt] + # add augmentations + base = old_texts[0] if old_texts[0][0:1] != "A" else "a" + old_texts[0][1:] + for aug in self.with_augs: + old_texts.append(aug + base) + base = new_texts[0] if new_texts[0][0:1] != "A" else "a" + new_texts[0][1:] + for aug in self.with_augs: + new_texts.append(aug + base) + + # prepare input k* and v* + old_embs, new_embs = [], [] + for old_text, new_text in zip(old_texts, new_texts): + text_input = self.tokenizer( + [old_text, new_text], + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] + old_emb, new_emb = text_embeddings + old_embs.append(old_emb) + new_embs.append(new_emb) + + # identify corresponding destinations for each token in old_emb + idxs_replaces = [] + for old_text, new_text in zip(old_texts, new_texts): + tokens_a = self.tokenizer(old_text).input_ids + tokens_b = self.tokenizer(new_text).input_ids + tokens_a = [self.tokenizer.encode("a ")[1] if self.tokenizer.decode(t) == "an" else t for t in tokens_a] + tokens_b = [self.tokenizer.encode("a ")[1] if self.tokenizer.decode(t) == "an" else t for t in tokens_b] + num_orig_tokens = len(tokens_a) + idxs_replace = [] + j = 0 + for i in range(num_orig_tokens): + curr_token = tokens_a[i] + while tokens_b[j] != curr_token: + j += 1 + idxs_replace.append(j) + j += 1 + while j < 77: + idxs_replace.append(j) + j += 1 + while len(idxs_replace) < 77: + idxs_replace.append(76) + idxs_replaces.append(idxs_replace) + + # prepare batch: for each pair of setences, old context and new values + contexts, valuess = [], [] + for old_emb, new_emb, idxs_replace in zip(old_embs, new_embs, idxs_replaces): + context = old_emb.detach() + values = [] + with torch.no_grad(): + for layer in self.projection_matrices: + values.append(layer(new_emb[idxs_replace]).detach()) + contexts.append(context) + valuess.append(values) + + # edit the model + for layer_num in range(len(self.projection_matrices)): + # mat1 = \lambda W + \sum{v k^T} + mat1 = lamb * self.projection_matrices[layer_num].weight + + # mat2 = \lambda I + \sum{k k^T} + mat2 = lamb * torch.eye( + self.projection_matrices[layer_num].weight.shape[1], + device=self.projection_matrices[layer_num].weight.device, + ) + + # aggregate sums for mat1, mat2 + for context, values in zip(contexts, valuess): + context_vector = context.reshape(context.shape[0], context.shape[1], 1) + context_vector_T = context.reshape(context.shape[0], 1, context.shape[1]) + value_vector = values[layer_num].reshape(values[layer_num].shape[0], values[layer_num].shape[1], 1) + for_mat1 = (value_vector @ context_vector_T).sum(dim=0) + for_mat2 = (context_vector @ context_vector_T).sum(dim=0) + mat1 += for_mat1 + mat2 += for_mat2 + + # update projection matrix + self.projection_matrices[layer_num].weight = torch.nn.Parameter(mat1 @ torch.inverse(mat2)) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = 
None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. 
+ callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + ```py + >>> import torch + >>> from diffusers import StableDiffusionModelEditingPipeline + + >>> model_ckpt = "CompVis/stable-diffusion-v1-4" + >>> pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt) + + >>> pipe = pipe.to("cuda") + + >>> source_prompt = "A pack of roses" + >>> destination_prompt = "A pack of blue roses" + >>> pipe.edit_model(source_prompt, destination_prompt) + + >>> prompt = "A field of roses" + >>> image = pipe(prompt).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. 
Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py new file mode 100644 index 0000000000000000000000000000000000000000..be21900ab55a65c33350862e8f3634b9f4213ca8 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_paradigms.py @@ -0,0 +1,796 @@ +# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import DDPMParallelScheduler + >>> from diffusers import StableDiffusionParadigmsPipeline + + >>> scheduler = DDPMParallelScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler") + + >>> pipe = StableDiffusionParadigmsPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> ngpu, batch_per_device = torch.cuda.device_count(), 5 + >>> pipe.wrapped_unet = torch.nn.DataParallel(pipe.unet, device_ids=[d for d in range(ngpu)]) + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, parallel=ngpu * batch_per_device, num_inference_steps=1000).images[0] + ``` +""" + + +class StableDiffusionParadigmsPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using a parallelized version of Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. 
+ unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # attribute to wrap the unet with torch.nn.DataParallel when running multiple denoising steps on multiple GPUs + self.wrapped_unet = self.unet + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _cumsum(self, input, dim, debug=False): + if debug: + # cumsum_cuda_kernel does not have a deterministic implementation + # so perform cumsum on cpu for debugging purposes + return torch.cumsum(input.cpu().float(), dim=dim).to(input.device) + else: + return torch.cumsum(input, dim=dim) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + parallel: int = 10, + tolerance: float = 0.1, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + debug: bool = False, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + parallel (`int`, *optional*, defaults to 10): + The batch size to use when doing parallel sampling. More parallelism may lead to faster inference but + requires higher memory usage and can also require more total FLOPs. + tolerance (`float`, *optional*, defaults to 0.1): + The error tolerance for determining when to slide the batch window forward for parallel sampling. Lower + tolerance usually leads to less or no degradation. Higher tolerance is faster but can risk degradation + of sample quality. The tolerance is specified as a ratio of the scheduler's noise magnitude. 
+ guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + debug (`bool`, *optional*, defaults to `False`): + Whether or not to run in debug mode. In debug mode, `torch.cumsum` is evaluated using the CPU. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + extra_step_kwargs.pop("generator", None) + + # # 7. Denoising loop + scheduler = self.scheduler + parallel = min(parallel, len(scheduler.timesteps)) + + begin_idx = 0 + end_idx = parallel + latents_time_evolution_buffer = torch.stack([latents] * (len(scheduler.timesteps) + 1)) + + # We must make sure the noise of stochastic schedulers such as DDPM is sampled only once per timestep. + # Sampling inside the parallel denoising loop will mess this up, so we pre-sample the noise vectors outside the denoising loop. + noise_array = torch.zeros_like(latents_time_evolution_buffer) + for j in range(len(scheduler.timesteps)): + base_noise = randn_tensor( + shape=latents.shape, generator=generator, device=latents.device, dtype=prompt_embeds.dtype + ) + noise = (self.scheduler._get_variance(scheduler.timesteps[j]) ** 0.5) * base_noise + noise_array[j] = noise.clone() + + # We specify the error tolerance as a ratio of the scheduler's noise magnitude. We similarly compute the error tolerance + # outside of the denoising loop to avoid recomputing it at every step. 
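The loop above pre-samples one noise tensor per timestep so that every sweep over the parallel window reuses exactly the same stochasticity. A toy, torch-only sketch of that idea; the shapes and the `variances` stand-in for `scheduler._get_variance(t)` are made up:

```py
import torch

num_timesteps, latent_shape = 4, (1, 4, 8, 8)
generator = torch.Generator().manual_seed(0)
variances = torch.linspace(0.1, 0.4, num_timesteps)  # stand-in for scheduler._get_variance(t)

# sample all per-timestep noise once, outside the denoising loop
noise_array = torch.zeros(num_timesteps, *latent_shape)
for j in range(num_timesteps):
    base_noise = torch.randn(latent_shape, generator=generator)
    noise_array[j] = variances[j].sqrt() * base_noise  # scaled like in the pipeline above

# any later re-evaluation of the window sees identical noise_array[j]
print(noise_array.shape)  # torch.Size([4, 1, 4, 8, 8])
```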
+ # We will be dividing the norm of the noise, so we store its inverse here to avoid a division at every step. + inverse_variance_norm = 1.0 / torch.tensor( + [scheduler._get_variance(scheduler.timesteps[j]) for j in range(len(scheduler.timesteps))] + [0] + ).to(noise_array.device) + latent_dim = noise_array[0, 0].numel() + inverse_variance_norm = inverse_variance_norm[:, None] / latent_dim + + scaled_tolerance = tolerance**2 + + with self.progress_bar(total=num_inference_steps) as progress_bar: + steps = 0 + while begin_idx < len(scheduler.timesteps): + # these have shape (parallel_dim, 2*batch_size, ...) + # parallel_len is at most parallel, but could be less if we are at the end of the timesteps + # we are processing batch window of timesteps spanning [begin_idx, end_idx) + parallel_len = end_idx - begin_idx + + block_prompt_embeds = torch.stack([prompt_embeds] * parallel_len) + block_latents = latents_time_evolution_buffer[begin_idx:end_idx] + block_t = scheduler.timesteps[begin_idx:end_idx, None].repeat(1, batch_size * num_images_per_prompt) + t_vec = block_t + if do_classifier_free_guidance: + t_vec = t_vec.repeat(1, 2) + + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([block_latents] * 2, dim=1) if do_classifier_free_guidance else block_latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t_vec) + + # if parallel_len is small, no need to use multiple GPUs + net = self.wrapped_unet if parallel_len > 3 else self.unet + # predict the noise residual, shape is now [parallel_len * 2 * batch_size * num_images_per_prompt, ...] + model_output = net( + latent_model_input.flatten(0, 1), + t_vec.flatten(0, 1), + encoder_hidden_states=block_prompt_embeds.flatten(0, 1), + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + per_latent_shape = model_output.shape[1:] + if do_classifier_free_guidance: + model_output = model_output.reshape( + parallel_len, 2, batch_size * num_images_per_prompt, *per_latent_shape + ) + noise_pred_uncond, noise_pred_text = model_output[:, 0], model_output[:, 1] + model_output = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + model_output = model_output.reshape( + parallel_len * batch_size * num_images_per_prompt, *per_latent_shape + ) + + block_latents_denoise = scheduler.batch_step_no_noise( + model_output=model_output, + timesteps=block_t.flatten(0, 1), + sample=block_latents.flatten(0, 1), + **extra_step_kwargs, + ).reshape(block_latents.shape) + + # back to shape (parallel_dim, batch_size, ...) + # now we want to add the pre-sampled noise + # parallel sampling algorithm requires computing the cumulative drift from the beginning + # of the window, so we need to compute cumulative sum of the deltas and the pre-sampled noises. 
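Inside the window, classifier-free guidance is applied to a whole batch of timesteps at once by reshaping the model output to `(parallel, 2, batch, ...)`, with index 0 the unconditional branch and index 1 the text-conditioned branch. A toy illustration with made-up shapes:

```py
import torch

parallel_len, batch, guidance_scale = 3, 2, 7.5
model_output = torch.randn(parallel_len * 2 * batch, 4, 8, 8)  # fake UNet output

per_latent_shape = model_output.shape[1:]
model_output = model_output.reshape(parallel_len, 2, batch, *per_latent_shape)
uncond, text = model_output[:, 0], model_output[:, 1]

# standard CFG combination, applied to every timestep in the window simultaneously
guided = uncond + guidance_scale * (text - uncond)
guided = guided.reshape(parallel_len * batch, *per_latent_shape)
print(guided.shape)  # torch.Size([6, 4, 8, 8])
```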
+ delta = block_latents_denoise - block_latents + cumulative_delta = self._cumsum(delta, dim=0, debug=debug) + cumulative_noise = self._cumsum(noise_array[begin_idx:end_idx], dim=0, debug=debug) + + # if we are using an ODE-like scheduler (like DDIM), we don't want to add noise + if scheduler._is_ode_scheduler: + cumulative_noise = 0 + + block_latents_new = ( + latents_time_evolution_buffer[begin_idx][None,] + cumulative_delta + cumulative_noise + ) + cur_error = torch.linalg.norm( + (block_latents_new - latents_time_evolution_buffer[begin_idx + 1 : end_idx + 1]).reshape( + parallel_len, batch_size * num_images_per_prompt, -1 + ), + dim=-1, + ).pow(2) + error_ratio = cur_error * inverse_variance_norm[begin_idx + 1 : end_idx + 1] + + # find the first index of the vector error_ratio that is greater than error tolerance + # we can shift the window for the next iteration up to this index + error_ratio = torch.nn.functional.pad( + error_ratio, (0, 0, 0, 1), value=1e9 + ) # handle the case when everything is below ratio, by padding the end of parallel_len dimension + any_error_at_time = torch.max(error_ratio > scaled_tolerance, dim=1).values.int() + ind = torch.argmax(any_error_at_time).item() + + # compute the new begin and end idxs for the window + new_begin_idx = begin_idx + min(1 + ind, parallel) + new_end_idx = min(new_begin_idx + parallel, len(scheduler.timesteps)) + + # store the computed latents for the current window in the global buffer + latents_time_evolution_buffer[begin_idx + 1 : end_idx + 1] = block_latents_new + # initialize the new sliding window latents with the end of the current window, + # should be better than random initialization + latents_time_evolution_buffer[end_idx : new_end_idx + 1] = latents_time_evolution_buffer[end_idx][ + None, + ] + + steps += 1 + + progress_bar.update(new_begin_idx - begin_idx) + if callback is not None and steps % callback_steps == 0: + callback(begin_idx, block_t[begin_idx], latents_time_evolution_buffer[begin_idx]) + + begin_idx = new_begin_idx + end_idx = new_end_idx + + latents = latents_time_evolution_buffer[-1] + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py new file mode 100644 index 0000000000000000000000000000000000000000..2978972200c7ae9dc2d967a698753bd564e9a913 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_pix2pix_zero.py @@ -0,0 +1,1310 @@ +# Copyright 2024 Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
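The sliding-window advance in the parallel pipeline above picks the first timestep whose error ratio still exceeds the tolerance and jumps the window start past everything before it. A toy sketch of that index arithmetic with made-up numbers:

```py
import torch

parallel, begin_idx, num_timesteps = 4, 0, 10
scaled_tolerance = 0.1**2
error_ratio = torch.tensor([[1e-5], [3e-5], [2e-2], [9e-1]])  # (parallel, batch), fabricated

# pad with a huge value so argmax is well defined even when everything converged
error_ratio = torch.nn.functional.pad(error_ratio, (0, 0, 0, 1), value=1e9)
any_error_at_time = torch.max(error_ratio > scaled_tolerance, dim=1).values.int()
ind = torch.argmax(any_error_at_time).item()  # first timestep still above tolerance

new_begin_idx = begin_idx + min(1 + ind, parallel)
new_end_idx = min(new_begin_idx + parallel, num_timesteps)
print(new_begin_idx, new_end_idx)  # 3 7: the first two converged timesteps are skipped
```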
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + BlipForConditionalGeneration, + BlipProcessor, + CLIPImageProcessor, + CLIPTextModel, + CLIPTokenizer, +) + +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import Attention +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler +from ....schedulers.scheduling_ddim_inverse import DDIMInverseScheduler +from ....utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class Pix2PixInversionPipelineOutput(BaseOutput, TextualInversionLoaderMixin): + """ + Output class for Stable Diffusion pipelines. + + Args: + latents (`torch.Tensor`) + inverted latents tensor + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + """ + + latents: torch.Tensor + images: Union[List[PIL.Image.Image], np.ndarray] + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + + >>> from diffusers import DDIMScheduler, StableDiffusionPix2PixZeroPipeline + + + >>> def download(embedding_url, local_filepath): + ... r = requests.get(embedding_url) + ... with open(local_filepath, "wb") as f: + ... f.write(r.content) + + + >>> model_ckpt = "CompVis/stable-diffusion-v1-4" + >>> pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16) + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.to("cuda") + + >>> prompt = "a high resolution painting of a cat in the style of van gough" + >>> source_emb_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/cat.pt" + >>> target_emb_url = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/dog.pt" + + >>> for url in [source_emb_url, target_emb_url]: + ... download(url, url.split("/")[-1]) + + >>> src_embeds = torch.load(source_emb_url.split("/")[-1]) + >>> target_embeds = torch.load(target_emb_url.split("/")[-1]) + >>> images = pipeline( + ... prompt, + ... source_embeds=src_embeds, + ... target_embeds=target_embeds, + ... 
num_inference_steps=50, + ... cross_attention_guidance_amount=0.15, + ... ).images + + >>> images[0].save("edited_image_dog.png") + ``` +""" + +EXAMPLE_INVERT_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from transformers import BlipForConditionalGeneration, BlipProcessor + >>> from diffusers import DDIMScheduler, DDIMInverseScheduler, StableDiffusionPix2PixZeroPipeline + + >>> import requests + >>> from PIL import Image + + >>> captioner_id = "Salesforce/blip-image-captioning-base" + >>> processor = BlipProcessor.from_pretrained(captioner_id) + >>> model = BlipForConditionalGeneration.from_pretrained( + ... captioner_id, torch_dtype=torch.float16, low_cpu_mem_usage=True + ... ) + + >>> sd_model_ckpt = "CompVis/stable-diffusion-v1-4" + >>> pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained( + ... sd_model_ckpt, + ... caption_generator=model, + ... caption_processor=processor, + ... torch_dtype=torch.float16, + ... safety_checker=None, + ... ) + + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.enable_model_cpu_offload() + + >>> img_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/test_images/cats/cat_6.png" + + >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB").resize((512, 512)) + >>> # generate caption + >>> caption = pipeline.generate_caption(raw_image) + + >>> # "a photography of a cat with flowers and dai dai daie - daie - daie kasaii" + >>> inv_latents = pipeline.invert(caption, image=raw_image).latents + >>> # we need to generate source and target embeds + + >>> source_prompts = ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"] + + >>> target_prompts = ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"] + + >>> source_embeds = pipeline.get_embeds(source_prompts) + >>> target_embeds = pipeline.get_embeds(target_prompts) + >>> # the latents can then be used to edit a real image + >>> # when using Stable Diffusion 2 or other models that use v-prediction + >>> # set `cross_attention_guidance_amount` to 0.01 or less to avoid input latent gradient explosion + + >>> image = pipeline( + ... caption, + ... source_embeds=source_embeds, + ... target_embeds=target_embeds, + ... num_inference_steps=50, + ... cross_attention_guidance_amount=0.15, + ... generator=generator, + ... latents=inv_latents, + ... negative_prompt=caption, + ... ).images[0] + >>> image.save("edited_image.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +def prepare_unet(unet: UNet2DConditionModel): + """Modifies the UNet (`unet`) to perform Pix2Pix Zero optimizations.""" + pix2pix_zero_attn_procs = {} + for name in unet.attn_processors.keys(): + module_name = name.replace(".processor", "") + module = unet.get_submodule(module_name) + if "attn2" in name: + pix2pix_zero_attn_procs[name] = Pix2PixZeroAttnProcessor(is_pix2pix_zero=True) + module.requires_grad_(True) + else: + pix2pix_zero_attn_procs[name] = Pix2PixZeroAttnProcessor(is_pix2pix_zero=False) + module.requires_grad_(False) + + unet.set_attn_processor(pix2pix_zero_attn_procs) + return unet + + +class Pix2PixZeroL2Loss: + def __init__(self): + self.loss = 0.0 + + def compute_loss(self, predictions, targets): + self.loss += ((predictions - targets) ** 2).sum((1, 2)).mean(0) + + +class Pix2PixZeroAttnProcessor: + """An attention processor class to store the attention weights. + In Pix2Pix Zero, it happens during computations in the cross-attention blocks.""" + + def __init__(self, is_pix2pix_zero=False): + self.is_pix2pix_zero = is_pix2pix_zero + if self.is_pix2pix_zero: + self.reference_cross_attn_map = {} + + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + timestep=None, + loss=None, + ): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + if self.is_pix2pix_zero and timestep is not None: + # new bookkeeping to save the attention weights. + if loss is None: + self.reference_cross_attn_map[timestep.item()] = attention_probs.detach().cpu() + # compute loss + elif loss is not None: + prev_attn_probs = self.reference_cross_attn_map.pop(timestep.item()) + loss.compute_loss(attention_probs, prev_attn_probs.to(attention_probs.device)) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class StableDiffusionPix2PixZeroPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for pixel-level image editing using Pix2Pix Zero. Based on Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. 
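`Pix2PixZeroAttnProcessor` above caches cross-attention maps keyed by timestep on the reference pass (`loss is None`) and, on the guided pass, pops the cached map and accumulates an L2 loss against the new one. A tiny, hypothetical stand-in for that bookkeeping with toy shapes:

```py
import torch

reference_maps = {}


def record_or_compare(attention_probs, timestep, loss=None):
    if loss is None:
        # reference pass: remember the map for this timestep
        reference_maps[timestep] = attention_probs.detach()
    else:
        # guided pass: compare against the stored reference map
        prev = reference_maps.pop(timestep)
        loss["value"] = loss.get("value", 0.0) + ((attention_probs - prev) ** 2).sum((1, 2)).mean(0)


attn = torch.rand(8, 64, 77)                 # (heads * batch, query_len, key_len), made up
record_or_compare(attn, timestep=981)        # reference pass
loss = {}
record_or_compare(attn + 0.01, timestep=981, loss=loss)  # guided pass
print(loss["value"])                         # small L2 gap between the two maps
```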
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`], or [`DDPMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + requires_safety_checker (bool): + Whether the pipeline requires a safety checker. We recommend setting it to True if you're using the + pipeline publicly. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = [ + "safety_checker", + "feature_extractor", + "caption_generator", + "caption_processor", + "inverse_scheduler", + ] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDPMScheduler, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler], + feature_extractor: CLIPImageProcessor, + safety_checker: StableDiffusionSafetyChecker, + inverse_scheduler: DDIMInverseScheduler, + caption_generator: BlipForConditionalGeneration, + caption_processor: BlipProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + caption_processor=caption_processor, + caption_generator=caption_generator, + inverse_scheduler=inverse_scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
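With `output_hidden_states=True` the text encoder returns a tuple of hidden states (embedding output plus one entry per transformer layer), and `clip_skip=1` selects the pre-final layer via `hidden_states[-(clip_skip + 1)]`, as in the branch above. A toy index check (the layer labels are made up):

```py
num_layers = 12
# embeddings output plus one hidden state per layer, as the text encoder typically returns
hidden_states = tuple(f"layer_{i}" for i in range(num_layers + 1))

clip_skip = 1
selected = hidden_states[-(clip_skip + 1)]
print(selected)  # layer_11, i.e. one layer before the final entry layer_12
```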
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
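The `repeat`/`view` pattern above duplicates each prompt embedding `num_images_per_prompt` times in an mps-friendly way. A short sketch with made-up shapes:

```py
import torch

prompt_embeds = torch.randn(2, 77, 768)  # (batch, seq_len, dim), fabricated
num_images_per_prompt = 3

bs_embed, seq_len, _ = prompt_embeds.shape
# repeat along the sequence axis, then fold the copies back into the batch dimension
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
print(prompt_embeds.shape)  # torch.Size([6, 77, 768])
```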
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + source_embeds, + target_embeds, + callback_steps, + prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if source_embeds is None and target_embeds is None: + raise ValueError("`source_embeds` and `target_embeds` cannot be undefined.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
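The deprecated `decode_latents` above maps the VAE decoder output from roughly [-1, 1] into [0, 1] and converts it to channels-last float32 numpy. A toy sketch where a `tanh` tensor stands in for `self.vae.decode(...)`:

```py
import torch

decoded = torch.tanh(torch.randn(1, 3, 64, 64))  # pretend VAE output in [-1, 1]
image = (decoded / 2 + 0.5).clamp(0, 1)          # rescale to [0, 1]
image = image.cpu().permute(0, 2, 3, 1).float().numpy()  # NCHW -> NHWC numpy
print(image.shape, image.min() >= 0.0, image.max() <= 1.0)  # (1, 64, 64, 3) True True
```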
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def generate_caption(self, images): + """Generates caption for a given image.""" + text = "a photography of" + + prev_device = self.caption_generator.device + + device = self._execution_device + inputs = self.caption_processor(images, text, return_tensors="pt").to( + device=device, dtype=self.caption_generator.dtype + ) + self.caption_generator.to(device) + outputs = self.caption_generator.generate(**inputs, max_new_tokens=128) + + # offload caption generator + self.caption_generator.to(prev_device) + + caption = self.caption_processor.batch_decode(outputs, skip_special_tokens=True)[0] + return caption + + def construct_direction(self, embs_source: torch.Tensor, embs_target: torch.Tensor): + """Constructs the edit direction to steer the image generation process semantically.""" + return (embs_target.mean(0) - embs_source.mean(0)).unsqueeze(0) + + @torch.no_grad() + def get_embeds(self, prompt: List[str], batch_size: int = 16) -> torch.Tensor: + num_prompts = len(prompt) + embeds = [] + for i in range(0, num_prompts, batch_size): + prompt_slice = prompt[i : i + batch_size] + + input_ids = self.tokenizer( + prompt_slice, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ).input_ids + + input_ids = input_ids.to(self.text_encoder.device) + embeds.append(self.text_encoder(input_ids)[0]) + + return torch.cat(embeds, dim=0).mean(0)[None] + + def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0) + else: + latents = self.vae.encode(image).latent_dist.sample(generator) + + latents = self.vae.config.scaling_factor * latents + + if batch_size != latents.shape[0]: + if batch_size % latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
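`construct_direction` above builds the edit direction as the difference of the mean target and mean source prompt embeddings. A toy sketch with fabricated shapes (real embeddings would come from `get_embeds` over several source/target prompts):

```py
import torch

source_embeds = torch.randn(3, 77, 768)  # e.g. three "cat ..." prompt embeddings
target_embeds = torch.randn(3, 77, 768)  # e.g. three "dog ..." prompt embeddings

edit_direction = (target_embeds.mean(0) - source_embeds.mean(0)).unsqueeze(0)
print(edit_direction.shape)  # torch.Size([1, 77, 768])

# during editing this direction is added to the conditional prompt embedding:
# prompt_embeds_edit[1:2] += edit_direction
```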
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_latents_per_image = batch_size // latents.shape[0] + latents = torch.cat([latents] * additional_latents_per_image, dim=0) + else: + raise ValueError( + f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts." + ) + else: + latents = torch.cat([latents], dim=0) + + return latents + + def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int): + pred_type = self.inverse_scheduler.config.prediction_type + alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep] + + beta_prod_t = 1 - alpha_prod_t + + if pred_type == "epsilon": + return model_output + elif pred_type == "sample": + return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5) + elif pred_type == "v_prediction": + return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`" + ) + + def auto_corr_loss(self, hidden_states, generator=None): + reg_loss = 0.0 + for i in range(hidden_states.shape[0]): + for j in range(hidden_states.shape[1]): + noise = hidden_states[i : i + 1, j : j + 1, :, :] + while True: + roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item() + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2 + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2 + + if noise.shape[2] <= 8: + break + noise = F.avg_pool2d(noise, kernel_size=2) + return reg_loss + + def kl_divergence(self, hidden_states): + mean = hidden_states.mean() + var = hidden_states.var() + return var + mean**2 - 1 - torch.log(var + 1e-7) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + source_embeds: torch.Tensor = None, + target_embeds: torch.Tensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + cross_attention_guidance_amount: float = 0.1, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + source_embeds (`torch.Tensor`): + Source concept embeddings. Generation of the embeddings as per the [original + paper](https://arxiv.org/abs/2302.03027). Used in discovering the edit direction. + target_embeds (`torch.Tensor`): + Target concept embeddings. Generation of the embeddings as per the [original + paper](https://arxiv.org/abs/2302.03027). Used in discovering the edit direction. 
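The `kl_divergence` regularizer above measures how far a tensor is from an IID standard normal via `var + mean**2 - 1 - log(var)`. A quick sanity check with fabricated inputs (not taken from the pipeline):

```py
import torch


def kl_to_standard_normal(x):
    mean, var = x.mean(), x.var()
    return var + mean**2 - 1 - torch.log(var + 1e-7)


x = torch.randn(1, 4, 64, 64)
print(kl_to_standard_normal(x))        # close to 0 for standard-normal-looking samples
print(kl_to_standard_normal(x + 2.0))  # roughly 4, since the mean shift contributes mean**2
```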
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + cross_attention_guidance_amount (`float`, defaults to 0.1): + Amount of guidance needed from the reference cross-attention maps. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + # 0. Define the spatial resolutions. + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + source_embeds, + target_embeds, + callback_steps, + prompt_embeds, + ) + + # 3. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Generate the inverted noise from the input image or any other image + # generated from the input prompt. + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + latents_init = latents.clone() + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. Rejig the UNet so that we can obtain the cross-attenion maps and + # use them for guiding the subsequent image generation. + self.unet = prepare_unet(self.unet) + + # 7. Denoising loop where we obtain the cross-attention maps. 
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs={"timestep": t}, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Compute the edit directions. + edit_direction = self.construct_direction(source_embeds, target_embeds).to(prompt_embeds.device) + + # 9. Edit the prompt embeddings as per the edit directions discovered. + prompt_embeds_edit = prompt_embeds.clone() + prompt_embeds_edit[1:2] += edit_direction + + # 10. Second denoising loop to generate the edited image. + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + latents = latents_init + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # we want to learn the latent such that it steers the generation + # process towards the edited direction, so make the make initial + # noise learnable + x_in = latent_model_input.detach().clone() + x_in.requires_grad = True + + # optimizer + opt = torch.optim.SGD([x_in], lr=cross_attention_guidance_amount) + + with torch.enable_grad(): + # initialize loss + loss = Pix2PixZeroL2Loss() + + # predict the noise residual + noise_pred = self.unet( + x_in, + t, + encoder_hidden_states=prompt_embeds_edit.detach(), + cross_attention_kwargs={"timestep": t, "loss": loss}, + ).sample + + loss.loss.backward(retain_graph=False) + opt.step() + + # recompute the noise + noise_pred = self.unet( + x_in.detach(), + t, + encoder_hidden_states=prompt_embeds_edit, + cross_attention_kwargs={"timestep": None}, + ).sample + + latents = x_in.detach().chunk(2)[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not 
output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING) + def invert( + self, + prompt: Optional[str] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 50, + guidance_scale: float = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + cross_attention_guidance_amount: float = 0.1, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + lambda_auto_corr: float = 20.0, + lambda_kl: float = 20.0, + num_reg_steps: int = 5, + num_auto_corr_rolls: int = 5, + ): + r""" + Function used to generate inverted latents given a prompt and image. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch which will be used for conditioning. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 1): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
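The second denoising loop of `__call__` above makes the noisy latent a leaf tensor and takes one SGD step (with `cross_attention_guidance_amount` as the learning rate) against the cross-attention L2 loss before detaching it again. A minimal stand-in where a toy quadratic loss replaces `Pix2PixZeroL2Loss`; all tensors and values are illustrative:

```py
import torch

latent_model_input = torch.randn(2, 4, 64, 64)     # CFG-duplicated latents, fabricated
reference = latent_model_input.clone() + 0.1        # stands in for the reference attention maps
cross_attention_guidance_amount = 0.1

x_in = latent_model_input.detach().clone()
x_in.requires_grad = True
opt = torch.optim.SGD([x_in], lr=cross_attention_guidance_amount)

with torch.enable_grad():
    # toy quadratic loss in place of the cross-attention map comparison
    loss = ((x_in - reference) ** 2).sum((1, 2, 3)).mean(0)
    loss.backward()
    opt.step()

latents = x_in.detach().chunk(2)[0]  # undo the CFG duplication, as in the pipeline
print(latents.shape)  # torch.Size([1, 4, 64, 64])
```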
+            cross_attention_guidance_amount (`float`, defaults to 0.1):
+                Amount of guidance needed from the reference cross-attention maps.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function will be
+                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function will be called. If not specified, the callback will be
+                called at every step.
+            lambda_auto_corr (`float`, *optional*, defaults to 20.0):
+                Lambda parameter to control the auto-correlation regularization.
+            lambda_kl (`float`, *optional*, defaults to 20.0):
+                Lambda parameter to control the Kullback-Leibler divergence regularization.
+            num_reg_steps (`int`, *optional*, defaults to 5):
+                Number of regularization loss steps.
+            num_auto_corr_rolls (`int`, *optional*, defaults to 5):
+                Number of auto-correlation roll steps.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.Pix2PixInversionPipelineOutput`] or
+            `tuple`:
+            [`~pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.Pix2PixInversionPipelineOutput`] if
+            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the inverted
+            latents tensor and the second is the corresponding decoded image.
+        """
+        # 1. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+        if cross_attention_kwargs is None:
+            cross_attention_kwargs = {}
+
+        device = self._execution_device
+        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        do_classifier_free_guidance = guidance_scale > 1.0
+
+        # 2. Preprocess image
+        image = self.image_processor.preprocess(image)
+
+        # 3. Prepare latent variables
+        latents = self.prepare_image_latents(image, batch_size, self.vae.dtype, device, generator)
+
+        # 4. Encode input prompt
+        num_images_per_prompt = 1
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            device,
+            num_images_per_prompt,
+            do_classifier_free_guidance,
+            prompt_embeds=prompt_embeds,
+        )
+        # For classifier free guidance, we need to do two forward passes.
+        # Here we concatenate the unconditional and text embeddings into a single batch
+        # to avoid doing two forward passes
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        # 5. Prepare timesteps
+        self.inverse_scheduler.set_timesteps(num_inference_steps, device=device)
+        timesteps = self.inverse_scheduler.timesteps
+
+        # 6. Rejig the UNet so that we can obtain the cross-attention maps and
+        # use them for guiding the subsequent image generation.
+        self.unet = prepare_unet(self.unet)
+
+        # 7. Denoising loop where we obtain the cross-attention maps.
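+        # Editor's note - hedged sketch, not part of the original comments: the loop below mirrors the sampling loop
+        # in `__call__`, but runs under `self.inverse_scheduler`, so the clean image latents are walked back towards
+        # noise (DDIM-style inversion). Because `prepare_unet` has swapped in timestep-aware attention processors,
+        # each forward pass here also records the cross-attention maps keyed by `t` (passed via
+        # `cross_attention_kwargs={"timestep": t}`); the editing loop in `__call__` later regresses against those
+        # stored maps through `Pix2PixZeroL2Loss`. A typical (illustrative, hypothetical variable names) usage is:
+        #
+        #     inv = pipeline.invert(prompt=caption, image=raw_image)
+        #     edited = pipeline(
+        #         prompt=caption,
+        #         source_embeds=source_embeds,   # embeddings of captions for the source concept
+        #         target_embeds=target_embeds,   # embeddings of captions for the target concept
+        #         latents=inv.latents,           # inverted latents returned above
+        #     ).images[0]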
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs={"timestep": t}, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # regularization of the noise prediction + with torch.enable_grad(): + for _ in range(num_reg_steps): + if lambda_auto_corr > 0: + for _ in range(num_auto_corr_rolls): + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_ac = self.auto_corr_loss(var_epsilon, generator=generator) + l_ac.backward() + + grad = var.grad.detach() / num_auto_corr_rolls + noise_pred = noise_pred - lambda_auto_corr * grad + + if lambda_kl > 0: + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_kld = self.kl_divergence(var_epsilon) + l_kld.backward() + + grad = var.grad.detach() + noise_pred = noise_pred - lambda_kl * grad + + noise_pred = noise_pred.detach() + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + inverted_latents = latents.detach().clone() + + # 8. 
Post-processing + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (inverted_latents, image) + + return Pix2PixInversionPipelineOutput(latents=inverted_latents, images=image) diff --git a/diffusers3/pipelines/deprecated/stochastic_karras_ve/__init__.py b/diffusers3/pipelines/deprecated/stochastic_karras_ve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15c9a8c27f98dd7e1913bd57dfd5e8dae71172b4 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stochastic_karras_ve/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_stochastic_karras_ve": ["KarrasVePipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_stochastic_karras_ve import KarrasVePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/diffusers3/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..023edb4ce4bd766b364aaeafcdfb2a2c1eaf9980 --- /dev/null +++ b/diffusers3/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py @@ -0,0 +1,128 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple, Union + +import torch + +from ....models import UNet2DModel +from ....schedulers import KarrasVeScheduler +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class KarrasVePipeline(DiffusionPipeline): + r""" + Pipeline for unconditional image generation. + + Parameters: + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image. + scheduler ([`KarrasVeScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. + """ + + # add type hints for linting + unet: UNet2DModel + scheduler: KarrasVeScheduler + + def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. 
+ generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + img_size = self.unet.config.sample_size + shape = (batch_size, 3, img_size, img_size) + + model = self.unet + + # sample x_0 ~ N(0, sigma_0^2 * I) + sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma + + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # here sigma_t == t_i from the paper + sigma = self.scheduler.schedule[t] + sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0 + + # 1. Select temporarily increased noise level sigma_hat + # 2. Add new noise to move from sample_i to sample_hat + sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator) + + # 3. Predict the noise residual given the noise magnitude `sigma_hat` + # The model inputs and output are adjusted by following eq. (213) in [1]. + model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample + + # 4. Evaluate dx/dt at sigma_hat + # 5. Take Euler step from sigma to sigma_prev + step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat) + + if sigma_prev != 0: + # 6. Apply 2nd order correction + # The model inputs and output are adjusted by following eq. (213) in [1]. 
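+                # Editor's note - hedged summary of the correction below (see Algorithm 2 in Karras et al., 2022):
+                # the denoiser is evaluated a second time at the Euler estimate `step_output.prev_sample` with noise
+                # level `sigma_prev`, and `step_correct` then combines the two slopes, roughly
+                #
+                #     d_hat  = (sample_hat - denoised_hat) / sigma_hat      # slope already computed in `step`
+                #     d_prev = (sample_prev - denoised_prev) / sigma_prev   # slope from the second evaluation below
+                #     sample = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (d_hat + d_prev)
+                #
+                # The exact bookkeeping lives in `KarrasVeScheduler.step_correct`; this is only a summary.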
+ model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample + step_output = self.scheduler.step_correct( + model_output, + sigma_hat, + sigma_prev, + sample_hat, + step_output.prev_sample, + step_output["derivative"], + ) + sample = step_output.prev_sample + + sample = (sample / 2 + 0.5).clamp(0, 1) + image = sample.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/versatile_diffusion/__init__.py b/diffusers3/pipelines/deprecated/versatile_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea6ef6e2f65b96aebebdf72cb80135003e4f08d --- /dev/null +++ b/diffusers3/pipelines/deprecated/versatile_diffusion/__init__.py @@ -0,0 +1,71 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + + _dummy_objects.update( + { + "VersatileDiffusionDualGuidedPipeline": VersatileDiffusionDualGuidedPipeline, + "VersatileDiffusionImageVariationPipeline": VersatileDiffusionImageVariationPipeline, + "VersatileDiffusionPipeline": VersatileDiffusionPipeline, + "VersatileDiffusionTextToImagePipeline": VersatileDiffusionTextToImagePipeline, + } + ) +else: + _import_structure["modeling_text_unet"] = ["UNetFlatConditionModel"] + _import_structure["pipeline_versatile_diffusion"] = ["VersatileDiffusionPipeline"] + _import_structure["pipeline_versatile_diffusion_dual_guided"] = ["VersatileDiffusionDualGuidedPipeline"] + _import_structure["pipeline_versatile_diffusion_image_variation"] = ["VersatileDiffusionImageVariationPipeline"] + _import_structure["pipeline_versatile_diffusion_text_to_image"] = ["VersatileDiffusionTextToImagePipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + VersatileDiffusionDualGuidedPipeline, + VersatileDiffusionImageVariationPipeline, + VersatileDiffusionPipeline, + VersatileDiffusionTextToImagePipeline, + ) + else: + from .pipeline_versatile_diffusion import VersatileDiffusionPipeline + from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline + from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline + from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/diffusers3/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py b/diffusers3/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py new file mode 100644 index 0000000000000000000000000000000000000000..3937e87f63c91e0837628f1af2cc7a012166644e --- /dev/null +++ b/diffusers3/pipelines/deprecated/versatile_diffusion/modeling_text_unet.py @@ -0,0 +1,2518 @@ +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from diffusers.utils import deprecate + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin +from ....models.activations import get_activation +from ....models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnAddedKVProcessor2_0, + AttnProcessor, +) +from ....models.embeddings import ( + GaussianFourierProjection, + ImageHintTimeEmbedding, + ImageProjection, + ImageTimeEmbedding, + TextImageProjection, + TextImageTimeEmbedding, + TextTimeEmbedding, + TimestepEmbedding, + Timesteps, +) +from ....models.resnet import ResnetBlockCondNorm2D +from ....models.transformers.dual_transformer_2d import DualTransformer2DModel +from ....models.transformers.transformer_2d import Transformer2DModel +from ....models.unets.unet_2d_condition import UNet2DConditionOutput +from ....utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers +from ....utils.torch_utils import apply_freeu + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def get_down_block( + down_block_type, + num_layers, + in_channels, + out_channels, + temb_channels, + add_downsample, + resnet_eps, + resnet_act_fn, + num_attention_heads, + transformer_layers_per_block, + attention_type, + attention_head_dim, + resnet_groups=None, + cross_attention_dim=None, + downsample_padding=None, + dual_cross_attention=False, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", + resnet_skip_time_act=False, + resnet_out_scale_factor=1.0, + cross_attention_norm=None, + dropout=0.0, +): + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownBlockFlat": + return DownBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif down_block_type == "CrossAttnDownBlockFlat": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockFlat") + return CrossAttnDownBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + dropout=dropout, + add_downsample=add_downsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + 
resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{down_block_type} is not supported.") + + +def get_up_block( + up_block_type, + num_layers, + in_channels, + out_channels, + prev_output_channel, + temb_channels, + add_upsample, + resnet_eps, + resnet_act_fn, + num_attention_heads, + transformer_layers_per_block, + resolution_idx, + attention_type, + attention_head_dim, + resnet_groups=None, + cross_attention_dim=None, + dual_cross_attention=False, + use_linear_projection=False, + only_cross_attention=False, + upcast_attention=False, + resnet_time_scale_shift="default", + resnet_skip_time_act=False, + resnet_out_scale_factor=1.0, + cross_attention_norm=None, + dropout=0.0, +): + up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type + if up_block_type == "UpBlockFlat": + return UpBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + elif up_block_type == "CrossAttnUpBlockFlat": + if cross_attention_dim is None: + raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockFlat") + return CrossAttnUpBlockFlat( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + prev_output_channel=prev_output_channel, + temb_channels=temb_channels, + dropout=dropout, + add_upsample=add_upsample, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + cross_attention_dim=cross_attention_dim, + num_attention_heads=num_attention_heads, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{up_block_type} is not supported.") + + +class FourierEmbedder(nn.Module): + def __init__(self, num_freqs=64, temperature=100): + super().__init__() + + self.num_freqs = num_freqs + self.temperature = temperature + + freq_bands = temperature ** (torch.arange(num_freqs) / num_freqs) + freq_bands = freq_bands[None, None, None] + self.register_buffer("freq_bands", freq_bands, persistent=False) + + def __call__(self, x): + x = self.freq_bands * x.unsqueeze(-1) + return torch.stack((x.sin(), x.cos()), dim=-1).permute(0, 1, 3, 4, 2).reshape(*x.shape[:2], -1) + + +class GLIGENTextBoundingboxProjection(nn.Module): + def __init__(self, positive_len, out_dim, feature_type, fourier_freqs=8): + super().__init__() + self.positive_len = positive_len + self.out_dim = out_dim + + self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs) + self.position_dim = fourier_freqs * 2 * 4 # 2: sin/cos, 4: xyxy + + if isinstance(out_dim, tuple): + out_dim = out_dim[0] + + if feature_type == "text-only": + self.linears = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len])) + + elif feature_type == "text-image": + self.linears_text = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 512), + nn.SiLU(), + nn.Linear(512, 512), + nn.SiLU(), + nn.Linear(512, out_dim), + ) + self.linears_image = nn.Sequential( + nn.Linear(self.positive_len + self.position_dim, 
512),
+                nn.SiLU(),
+                nn.Linear(512, 512),
+                nn.SiLU(),
+                nn.Linear(512, out_dim),
+            )
+            self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
+            self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
+
+        self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))
+
+    def forward(
+        self,
+        boxes,
+        masks,
+        positive_embeddings=None,
+        phrases_masks=None,
+        image_masks=None,
+        phrases_embeddings=None,
+        image_embeddings=None,
+    ):
+        masks = masks.unsqueeze(-1)
+
+        xyxy_embedding = self.fourier_embedder(boxes)
+        xyxy_null = self.null_position_feature.view(1, 1, -1)
+        xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null
+
+        if positive_embeddings:
+            positive_null = self.null_positive_feature.view(1, 1, -1)
+            positive_embeddings = positive_embeddings * masks + (1 - masks) * positive_null
+
+            objs = self.linears(torch.cat([positive_embeddings, xyxy_embedding], dim=-1))
+        else:
+            phrases_masks = phrases_masks.unsqueeze(-1)
+            image_masks = image_masks.unsqueeze(-1)
+
+            text_null = self.null_text_feature.view(1, 1, -1)
+            image_null = self.null_image_feature.view(1, 1, -1)
+
+            phrases_embeddings = phrases_embeddings * phrases_masks + (1 - phrases_masks) * text_null
+            image_embeddings = image_embeddings * image_masks + (1 - image_masks) * image_null
+
+            objs_text = self.linears_text(torch.cat([phrases_embeddings, xyxy_embedding], dim=-1))
+            objs_image = self.linears_image(torch.cat([image_embeddings, xyxy_embedding], dim=-1))
+            objs = torch.cat([objs_text, objs_image], dim=1)
+
+        return objs
+
+
+class UNetFlatConditionModel(ModelMixin, ConfigMixin):
+    r"""
+    A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
+    shaped output.
+
+    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
+    for all models (such as downloading or saving).
+
+    Parameters:
+        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
+            Height and width of input/output sample.
+        in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
+        out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
+        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
+        flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
+            Whether to flip the sin to cos in the time embedding.
+        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
+        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat")`):
+            The tuple of downsample blocks to use.
+        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockFlatCrossAttn"`):
+            Block type for the middle of the UNet. It can be one of `UNetMidBlockFlatCrossAttn`, `UNetMidBlockFlat`, or
+            `UNetMidBlockFlatSimpleCrossAttn`. If `None`, the mid block layer is skipped.
+        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat")`):
+            The tuple of upsample blocks to use.
+        only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
+            Whether to include self-attention in the basic transformer blocks, see
+            [`~models.attention.BasicTransformerBlock`].
+        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
+            The tuple of output channels for each block.
+        layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
+        downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
+        mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
+        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
+        norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
+            If `None`, normalization and activation layers are skipped in post-processing.
+        norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
+        cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
+            The dimension of the cross attention features.
+        transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]`, *optional*, defaults to 1):
+            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
+            [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`],
+            [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`].
+        reverse_transformer_layers_per_block (`Tuple[Tuple]`, *optional*, defaults to `None`):
+            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
+            blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
+            [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`],
+            [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`].
+        encoder_hid_dim (`int`, *optional*, defaults to `None`):
+            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
+            dimension to `cross_attention_dim`.
+        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
+            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
+            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
+        attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
+        num_attention_heads (`int`, *optional*):
+            The number of attention heads. If not defined, defaults to `attention_head_dim`.
+        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
+            for ResNet blocks (see [`~models.resnet.ResnetBlockFlat`]). Choose from `default` or `scale_shift`.
+        class_embed_type (`str`, *optional*, defaults to `None`):
+            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
+            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
+        addition_embed_type (`str`, *optional*, defaults to `None`):
+            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
+            "text". "text" will use the `TextTimeEmbedding` layer.
+        addition_time_embed_dim (`int`, *optional*, defaults to `None`):
+            Dimension for the timestep embeddings.
+        num_class_embeds (`int`, *optional*, defaults to `None`):
+            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
+            class conditioning with `class_embed_type` equal to `None`.
+ time_embedding_type (`str`, *optional*, defaults to `positional`): + The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. + time_embedding_dim (`int`, *optional*, defaults to `None`): + An optional override for the dimension of the projected time embedding. + time_embedding_act_fn (`str`, *optional*, defaults to `None`): + Optional activation function to use only once on the time embeddings before they are passed to the rest of + the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. + timestep_post_act (`str`, *optional*, defaults to `None`): + The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. + time_cond_proj_dim (`int`, *optional*, defaults to `None`): + The dimension of `cond_proj` layer in the timestep embedding. + conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, + *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, + *optional*): The dimension of the `class_labels` input when + `class_embed_type="projection"`. Required when `class_embed_type="projection"`. + class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time + embeddings with the class embeddings. + mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): + Whether to use cross attention with the mid block when using the `UNetMidBlockFlatSimpleCrossAttn`. If + `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the + `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` + otherwise. + """ + + _supports_gradient_checkpointing = True + _no_split_modules = ["BasicTransformerBlock", "ResnetBlockFlat", "CrossAttnUpBlockFlat"] + + @register_to_config + def __init__( + self, + sample_size: Optional[int] = None, + in_channels: int = 4, + out_channels: int = 4, + center_input_sample: bool = False, + flip_sin_to_cos: bool = True, + freq_shift: int = 0, + down_block_types: Tuple[str] = ( + "CrossAttnDownBlockFlat", + "CrossAttnDownBlockFlat", + "CrossAttnDownBlockFlat", + "DownBlockFlat", + ), + mid_block_type: Optional[str] = "UNetMidBlockFlatCrossAttn", + up_block_types: Tuple[str] = ( + "UpBlockFlat", + "CrossAttnUpBlockFlat", + "CrossAttnUpBlockFlat", + "CrossAttnUpBlockFlat", + ), + only_cross_attention: Union[bool, Tuple[bool]] = False, + block_out_channels: Tuple[int] = (320, 640, 1280, 1280), + layers_per_block: Union[int, Tuple[int]] = 2, + downsample_padding: int = 1, + mid_block_scale_factor: float = 1, + dropout: float = 0.0, + act_fn: str = "silu", + norm_num_groups: Optional[int] = 32, + norm_eps: float = 1e-5, + cross_attention_dim: Union[int, Tuple[int]] = 1280, + transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, + reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None, + encoder_hid_dim: Optional[int] = None, + encoder_hid_dim_type: Optional[str] = None, + attention_head_dim: Union[int, Tuple[int]] = 8, + num_attention_heads: Optional[Union[int, Tuple[int]]] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + class_embed_type: Optional[str] = None, + addition_embed_type: Optional[str] = None, + addition_time_embed_dim: Optional[int] = None, + num_class_embeds: Optional[int] = None, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + 
resnet_skip_time_act: bool = False, + resnet_out_scale_factor: int = 1.0, + time_embedding_type: str = "positional", + time_embedding_dim: Optional[int] = None, + time_embedding_act_fn: Optional[str] = None, + timestep_post_act: Optional[str] = None, + time_cond_proj_dim: Optional[int] = None, + conv_in_kernel: int = 3, + conv_out_kernel: int = 3, + projection_class_embeddings_input_dim: Optional[int] = None, + attention_type: str = "default", + class_embeddings_concat: bool = False, + mid_block_only_cross_attention: Optional[bool] = None, + cross_attention_norm: Optional[str] = None, + addition_embed_type_num_heads=64, + ): + super().__init__() + + self.sample_size = sample_size + + if num_attention_heads is not None: + raise ValueError( + "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." + ) + + # If `num_attention_heads` is not defined (which is the case for most models) + # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. + # The reason for this behavior is to correct for incorrectly named variables that were introduced + # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 + # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking + # which is why we correct for the naming here. + num_attention_heads = num_attention_heads or attention_head_dim + + # Check inputs + if len(down_block_types) != len(up_block_types): + raise ValueError( + f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." + ) + + if len(block_out_channels) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." + ) + + if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." + ) + + if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." 
+ ) + + if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): + raise ValueError( + f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." + ) + if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: + for layer_number_per_block in transformer_layers_per_block: + if isinstance(layer_number_per_block, list): + raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") + + # input + conv_in_padding = (conv_in_kernel - 1) // 2 + self.conv_in = LinearMultiDim( + in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding + ) + + # time + if time_embedding_type == "fourier": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 + if time_embed_dim % 2 != 0: + raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") + self.time_proj = GaussianFourierProjection( + time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos + ) + timestep_input_dim = time_embed_dim + elif time_embedding_type == "positional": + time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 + + self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) + timestep_input_dim = block_out_channels[0] + else: + raise ValueError( + f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." + ) + + self.time_embedding = TimestepEmbedding( + timestep_input_dim, + time_embed_dim, + act_fn=act_fn, + post_act_fn=timestep_post_act, + cond_proj_dim=time_cond_proj_dim, + ) + + if encoder_hid_dim_type is None and encoder_hid_dim is not None: + encoder_hid_dim_type = "text_proj" + self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) + logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") + + if encoder_hid_dim is None and encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." + ) + + if encoder_hid_dim_type == "text_proj": + self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) + elif encoder_hid_dim_type == "text_image_proj": + # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)` + self.encoder_hid_proj = TextImageProjection( + text_embed_dim=encoder_hid_dim, + image_embed_dim=cross_attention_dim, + cross_attention_dim=cross_attention_dim, + ) + elif encoder_hid_dim_type == "image_proj": + # Kandinsky 2.2 + self.encoder_hid_proj = ImageProjection( + image_embed_dim=encoder_hid_dim, + cross_attention_dim=cross_attention_dim, + ) + elif encoder_hid_dim_type is not None: + raise ValueError( + f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'." 
+ ) + else: + self.encoder_hid_proj = None + + # class embedding + if class_embed_type is None and num_class_embeds is not None: + self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) + elif class_embed_type == "timestep": + self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) + elif class_embed_type == "identity": + self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) + elif class_embed_type == "projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" + ) + # The projection `class_embed_type` is the same as the timestep `class_embed_type` except + # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings + # 2. it projects from an arbitrary input dimension. + # + # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. + # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. + # As a result, `TimestepEmbedding` can be passed arbitrary vectors. + self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif class_embed_type == "simple_projection": + if projection_class_embeddings_input_dim is None: + raise ValueError( + "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" + ) + self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) + else: + self.class_embedding = None + + if addition_embed_type == "text": + if encoder_hid_dim is not None: + text_time_embedding_from_dim = encoder_hid_dim + else: + text_time_embedding_from_dim = cross_attention_dim + + self.add_embedding = TextTimeEmbedding( + text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads + ) + elif addition_embed_type == "text_image": + # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much + # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use + # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)` + self.add_embedding = TextImageTimeEmbedding( + text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim + ) + elif addition_embed_type == "text_time": + self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) + self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) + elif addition_embed_type == "image": + # Kandinsky 2.2 + self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) + elif addition_embed_type == "image_hint": + # Kandinsky 2.2 ControlNet + self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) + elif addition_embed_type is not None: + raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") + + if time_embedding_act_fn is None: + self.time_embed_act = None + else: + self.time_embed_act = get_activation(time_embedding_act_fn) + + self.down_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + if isinstance(only_cross_attention, bool): + if mid_block_only_cross_attention is None: + mid_block_only_cross_attention = only_cross_attention + + only_cross_attention = [only_cross_attention] * len(down_block_types) + + if mid_block_only_cross_attention is None: + mid_block_only_cross_attention = False + + if isinstance(num_attention_heads, int): + num_attention_heads = (num_attention_heads,) * len(down_block_types) + + if isinstance(attention_head_dim, int): + attention_head_dim = (attention_head_dim,) * len(down_block_types) + + if isinstance(cross_attention_dim, int): + cross_attention_dim = (cross_attention_dim,) * len(down_block_types) + + if isinstance(layers_per_block, int): + layers_per_block = [layers_per_block] * len(down_block_types) + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) + + if class_embeddings_concat: + # The time embeddings are concatenated with the class embeddings. 
The dimension of the + # time embeddings passed to the down, middle, and up blocks is twice the dimension of the + # regular time embeddings + blocks_time_embed_dim = time_embed_dim * 2 + else: + blocks_time_embed_dim = time_embed_dim + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = get_down_block( + down_block_type, + num_layers=layers_per_block[i], + transformer_layers_per_block=transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + temb_channels=blocks_time_embed_dim, + add_downsample=not is_final_block, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim[i], + num_attention_heads=num_attention_heads[i], + downsample_padding=downsample_padding, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + dropout=dropout, + ) + self.down_blocks.append(down_block) + + # mid + if mid_block_type == "UNetMidBlockFlatCrossAttn": + self.mid_block = UNetMidBlockFlatCrossAttn( + transformer_layers_per_block=transformer_layers_per_block[-1], + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + dropout=dropout, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_time_scale_shift=resnet_time_scale_shift, + cross_attention_dim=cross_attention_dim[-1], + num_attention_heads=num_attention_heads[-1], + resnet_groups=norm_num_groups, + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + elif mid_block_type == "UNetMidBlockFlatSimpleCrossAttn": + self.mid_block = UNetMidBlockFlatSimpleCrossAttn( + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + dropout=dropout, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + cross_attention_dim=cross_attention_dim[-1], + attention_head_dim=attention_head_dim[-1], + resnet_groups=norm_num_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + skip_time_act=resnet_skip_time_act, + only_cross_attention=mid_block_only_cross_attention, + cross_attention_norm=cross_attention_norm, + ) + elif mid_block_type == "UNetMidBlockFlat": + self.mid_block = UNetMidBlockFlat( + in_channels=block_out_channels[-1], + temb_channels=blocks_time_embed_dim, + dropout=dropout, + num_layers=0, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + output_scale_factor=mid_block_scale_factor, + resnet_groups=norm_num_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + add_attention=False, + ) + elif mid_block_type is None: + self.mid_block = None + else: + raise ValueError(f"unknown mid_block_type : {mid_block_type}") + + # count how many layers upsample the images + self.num_upsamplers = 0 + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + 
reversed_num_attention_heads = list(reversed(num_attention_heads)) + reversed_layers_per_block = list(reversed(layers_per_block)) + reversed_cross_attention_dim = list(reversed(cross_attention_dim)) + reversed_transformer_layers_per_block = ( + list(reversed(transformer_layers_per_block)) + if reverse_transformer_layers_per_block is None + else reverse_transformer_layers_per_block + ) + only_cross_attention = list(reversed(only_cross_attention)) + + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + is_final_block = i == len(block_out_channels) - 1 + + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] + + # add upsample block for all BUT final layer + if not is_final_block: + add_upsample = True + self.num_upsamplers += 1 + else: + add_upsample = False + + up_block = get_up_block( + up_block_type, + num_layers=reversed_layers_per_block[i] + 1, + transformer_layers_per_block=reversed_transformer_layers_per_block[i], + in_channels=input_channel, + out_channels=output_channel, + prev_output_channel=prev_output_channel, + temb_channels=blocks_time_embed_dim, + add_upsample=add_upsample, + resnet_eps=norm_eps, + resnet_act_fn=act_fn, + resolution_idx=i, + resnet_groups=norm_num_groups, + cross_attention_dim=reversed_cross_attention_dim[i], + num_attention_heads=reversed_num_attention_heads[i], + dual_cross_attention=dual_cross_attention, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention[i], + upcast_attention=upcast_attention, + resnet_time_scale_shift=resnet_time_scale_shift, + attention_type=attention_type, + resnet_skip_time_act=resnet_skip_time_act, + resnet_out_scale_factor=resnet_out_scale_factor, + cross_attention_norm=cross_attention_norm, + attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, + dropout=dropout, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_num_groups is not None: + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps + ) + + self.conv_act = get_activation(act_fn) + + else: + self.conv_norm_out = None + self.conv_act = None + + conv_out_padding = (conv_out_kernel - 1) // 2 + self.conv_out = LinearMultiDim( + block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding + ) + + if attention_type in ["gated", "gated-text-image"]: + positive_len = 768 + if isinstance(cross_attention_dim, int): + positive_len = cross_attention_dim + elif isinstance(cross_attention_dim, (list, tuple)): + positive_len = cross_attention_dim[0] + + feature_type = "text-only" if attention_type == "gated" else "text-image" + self.position_net = GLIGENTextBoundingboxProjection( + positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type + ) + + @property + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. 
+ """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor() + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def set_attention_slice(self, slice_size): + r""" + Enable sliced attention computation. + + When this option is enabled, the attention module splits the input tensor in slices to compute attention in + several steps. This is useful for saving some memory in exchange for a small decrease in speed. + + Args: + slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): + When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If + `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. 
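+
+        Example (editor's illustrative sketch; assumes `unet` is an instance of this model):
+
+        ```py
+        # halve the attention head size in every sliceable layer
+        unet.set_attention_slice("auto")
+        # or save the most memory by computing one slice at a time
+        unet.set_attention_slice("max")
+        ```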
+ """ + sliceable_head_dims = [] + + def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + sliceable_head_dims.append(module.sliceable_head_dim) + + for child in module.children(): + fn_recursive_retrieve_sliceable_dims(child) + + # retrieve number of attention layers + for module in self.children(): + fn_recursive_retrieve_sliceable_dims(module) + + num_sliceable_layers = len(sliceable_head_dims) + + if slice_size == "auto": + # half the attention head size is usually a good trade-off between + # speed and memory + slice_size = [dim // 2 for dim in sliceable_head_dims] + elif slice_size == "max": + # make smallest slice possible + slice_size = num_sliceable_layers * [1] + + slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size + + if len(slice_size) != len(sliceable_head_dims): + raise ValueError( + f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" + f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." + ) + + for i in range(len(slice_size)): + size = slice_size[i] + dim = sliceable_head_dims[i] + if size is not None and size > dim: + raise ValueError(f"size {size} has to be smaller or equal to {dim}.") + + # Recursively walk through all the children. + # Any children which exposes the set_attention_slice method + # gets the message + def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): + if hasattr(module, "set_attention_slice"): + module.set_attention_slice(slice_size.pop()) + + for child in module.children(): + fn_recursive_set_attention_slice(child, slice_size) + + reversed_slice_size = list(reversed(slice_size)) + for module in self.children(): + fn_recursive_set_attention_slice(module, reversed_slice_size) + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def enable_freeu(self, s1, s2, b1, b2): + r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stage blocks where they are being applied. + + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that + are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate the "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
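+
+        Example (editor's illustrative sketch; `unet` is assumed to be an instance of this model, and the values are
+        only a commonly reported starting point - check the FreeU repository for per-model recommendations):
+
+        ```py
+        unet.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
+        # ... run inference ...
+        unet.disable_freeu()
+        ```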
+ """ + for i, upsample_block in enumerate(self.up_blocks): + setattr(upsample_block, "s1", s1) + setattr(upsample_block, "s2", s2) + setattr(upsample_block, "b1", b1) + setattr(upsample_block, "b2", b2) + + def disable_freeu(self): + """Disables the FreeU mechanism.""" + freeu_keys = {"s1", "s2", "b1", "b2"} + for i, upsample_block in enumerate(self.up_blocks): + for k in freeu_keys: + if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: + setattr(upsample_block, k, None) + + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) + are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is ๐Ÿงช experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is ๐Ÿงช experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + def unload_lora(self): + """Unloads LoRA weights.""" + deprecate( + "unload_lora", + "0.28.0", + "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters().", + ) + for module in self.modules(): + if hasattr(module, "set_lora_layer"): + module.set_lora_layer(None) + + def forward( + self, + sample: torch.Tensor, + timestep: Union[torch.Tensor, float, int], + encoder_hidden_states: torch.Tensor, + class_labels: Optional[torch.Tensor] = None, + timestep_cond: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, + down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + mid_block_additional_residual: Optional[torch.Tensor] = None, + down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[UNet2DConditionOutput, Tuple]: + r""" + The [`UNetFlatConditionModel`] forward method. + + Args: + sample (`torch.Tensor`): + The noisy input tensor with the following shape `(batch, channel, height, width)`. + timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. + encoder_hidden_states (`torch.Tensor`): + The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. + class_labels (`torch.Tensor`, *optional*, defaults to `None`): + Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. + timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): + Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed + through the `self.time_embedding` layer to obtain the timestep embeddings. + attention_mask (`torch.Tensor`, *optional*, defaults to `None`): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. 
If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): + A tuple of tensors that if specified are added to the residuals of down unet blocks. + mid_block_additional_residual: (`torch.Tensor`, *optional*): + A tensor that if specified is added to the residual of the middle unet block. + encoder_attention_mask (`torch.Tensor`): + A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If + `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, + which adds large negative values to the attention scores corresponding to "discard" tokens. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. + added_cond_kwargs: (`dict`, *optional*): + A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that + are passed along to the UNet blocks. + down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + additional residuals to be added to UNet long skip connections from down blocks to up blocks for + example from ControlNet side model(s) + mid_block_additional_residual (`torch.Tensor`, *optional*): + additional residual to be added to UNet mid block output, for example from ControlNet side model + down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*): + additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s) + + Returns: + [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: + If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, + otherwise a `tuple` is returned where the first element is the sample tensor. + """ + # By default samples have to be AT least a multiple of the overall upsampling factor. + # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). + # However, the upsampling interpolation output size can be forced to fit any upsampling size + # on the fly if necessary. + default_overall_up_factor = 2**self.num_upsamplers + + # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` + forward_upsample_size = False + upsample_size = None + + for dim in sample.shape[-2:]: + if dim % default_overall_up_factor != 0: + # Forward upsample size to force interpolation output size. 
+ forward_upsample_size = True + break + + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None: + encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + # 0. center input if necessary + if self.config.center_input_sample: + sample = 2 * sample - 1.0 + + # 1. time + timesteps = timestep + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = sample.device.type == "mps" + if isinstance(timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(sample.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(sample.shape[0]) + + t_emb = self.time_proj(timesteps) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # but time_embedding might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. + t_emb = t_emb.to(dtype=sample.dtype) + + emb = self.time_embedding(t_emb, timestep_cond) + aug_emb = None + + if self.class_embedding is not None: + if class_labels is None: + raise ValueError("class_labels should be provided when num_class_embeds > 0") + + if self.config.class_embed_type == "timestep": + class_labels = self.time_proj(class_labels) + + # `Timesteps` does not contain any weights and will always return f32 tensors + # there might be better ways to encapsulate this. 
+ class_labels = class_labels.to(dtype=sample.dtype) + + class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) + + if self.config.class_embeddings_concat: + emb = torch.cat([emb, class_emb], dim=-1) + else: + emb = emb + class_emb + + if self.config.addition_embed_type == "text": + aug_emb = self.add_embedding(encoder_hidden_states) + elif self.config.addition_embed_type == "text_image": + # Kandinsky 2.1 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" + ) + + image_embs = added_cond_kwargs.get("image_embeds") + text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) + aug_emb = self.add_embedding(text_embs, image_embs) + elif self.config.addition_embed_type == "text_time": + # SDXL - style + if "text_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" + ) + text_embeds = added_cond_kwargs.get("text_embeds") + if "time_ids" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" + ) + time_ids = added_cond_kwargs.get("time_ids") + time_embeds = self.add_time_proj(time_ids.flatten()) + time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) + add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) + add_embeds = add_embeds.to(emb.dtype) + aug_emb = self.add_embedding(add_embeds) + elif self.config.addition_embed_type == "image": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" + ) + image_embs = added_cond_kwargs.get("image_embeds") + aug_emb = self.add_embedding(image_embs) + elif self.config.addition_embed_type == "image_hint": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" + ) + image_embs = added_cond_kwargs.get("image_embeds") + hint = added_cond_kwargs.get("hint") + aug_emb, hint = self.add_embedding(image_embs, hint) + sample = torch.cat([sample, hint], dim=1) + + emb = emb + aug_emb if aug_emb is not None else emb + + if self.time_embed_act is not None: + emb = self.time_embed_act(emb) + + if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": + encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": + # Kandinsky 2.1 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + + image_embeds = added_cond_kwargs.get("image_embeds") + encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) + 
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": + # Kandinsky 2.2 - style + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + encoder_hidden_states = self.encoder_hid_proj(image_embeds) + elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": + if "image_embeds" not in added_cond_kwargs: + raise ValueError( + f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" + ) + image_embeds = added_cond_kwargs.get("image_embeds") + image_embeds = self.encoder_hid_proj(image_embeds) + encoder_hidden_states = (encoder_hidden_states, image_embeds) + + # 2. pre-process + sample = self.conv_in(sample) + + # 2.5 GLIGEN position net + if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: + cross_attention_kwargs = cross_attention_kwargs.copy() + gligen_args = cross_attention_kwargs.pop("gligen") + cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} + + # 3. down + lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + + is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None + # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets + is_adapter = down_intrablock_additional_residuals is not None + # maintain backward compatibility for legacy usage, where + # T2I-Adapter and ControlNet both use down_block_additional_residuals arg + # but can only use one or the other + if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: + deprecate( + "T2I should not use down_block_additional_residuals", + "1.3.0", + "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ + and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ + for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. 
", + standard_warn=False, + ) + down_intrablock_additional_residuals = down_block_additional_residuals + is_adapter = True + + down_block_res_samples = (sample,) + for downsample_block in self.down_blocks: + if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: + # For t2i-adapter CrossAttnDownBlockFlat + additional_residuals = {} + if is_adapter and len(down_intrablock_additional_residuals) > 0: + additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) + + sample, res_samples = downsample_block( + hidden_states=sample, + temb=emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + **additional_residuals, + ) + else: + sample, res_samples = downsample_block(hidden_states=sample, temb=emb) + if is_adapter and len(down_intrablock_additional_residuals) > 0: + sample += down_intrablock_additional_residuals.pop(0) + + down_block_res_samples += res_samples + + if is_controlnet: + new_down_block_res_samples = () + + for down_block_res_sample, down_block_additional_residual in zip( + down_block_res_samples, down_block_additional_residuals + ): + down_block_res_sample = down_block_res_sample + down_block_additional_residual + new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) + + down_block_res_samples = new_down_block_res_samples + + # 4. mid + if self.mid_block is not None: + if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: + sample = self.mid_block( + sample, + emb, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + cross_attention_kwargs=cross_attention_kwargs, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = self.mid_block(sample, emb) + + # To support T2I-Adapter-XL + if ( + is_adapter + and len(down_intrablock_additional_residuals) > 0 + and sample.shape == down_intrablock_additional_residuals[0].shape + ): + sample += down_intrablock_additional_residuals.pop(0) + + if is_controlnet: + sample = sample + mid_block_additional_residual + + # 5. up + for i, upsample_block in enumerate(self.up_blocks): + is_final_block = i == len(self.up_blocks) - 1 + + res_samples = down_block_res_samples[-len(upsample_block.resnets) :] + down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] + + # if we have not reached the final block and need to forward the + # upsample size, we do it here + if not is_final_block and forward_upsample_size: + upsample_size = down_block_res_samples[-1].shape[2:] + + if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + upsample_size=upsample_size, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + ) + else: + sample = upsample_block( + hidden_states=sample, + temb=emb, + res_hidden_states_tuple=res_samples, + upsample_size=upsample_size, + scale=lora_scale, + ) + + # 6. 
post-process + if self.conv_norm_out: + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (sample,) + + return UNet2DConditionOutput(sample=sample) + + +class LinearMultiDim(nn.Linear): + def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs): + in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features) + if out_features is None: + out_features = in_features + out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features) + self.in_features_multidim = in_features + self.out_features_multidim = out_features + super().__init__(np.array(in_features).prod(), np.array(out_features).prod()) + + def forward(self, input_tensor, *args, **kwargs): + shape = input_tensor.shape + n_dim = len(self.in_features_multidim) + input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_features) + output_tensor = super().forward(input_tensor) + output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_features_multidim) + return output_tensor + + +class ResnetBlockFlat(nn.Module): + def __init__( + self, + *, + in_channels, + out_channels=None, + dropout=0.0, + temb_channels=512, + groups=32, + groups_out=None, + pre_norm=True, + eps=1e-6, + time_embedding_norm="default", + use_in_shortcut=None, + second_dim=4, + **kwargs, + ): + super().__init__() + self.pre_norm = pre_norm + self.pre_norm = True + + in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels) + self.in_channels_prod = np.array(in_channels).prod() + self.channels_multidim = in_channels + + if out_channels is not None: + out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels) + out_channels_prod = np.array(out_channels).prod() + self.out_channels_multidim = out_channels + else: + out_channels_prod = self.in_channels_prod + self.out_channels_multidim = self.channels_multidim + self.time_embedding_norm = time_embedding_norm + + if groups_out is None: + groups_out = groups + + self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, eps=eps, affine=True) + self.conv1 = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0) + + if temb_channels is not None: + self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels_prod) + else: + self.time_emb_proj = None + + self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, eps=eps, affine=True) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels_prod, out_channels_prod, kernel_size=1, padding=0) + + self.nonlinearity = nn.SiLU() + + self.use_in_shortcut = ( + self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut + ) + + self.conv_shortcut = None + if self.use_in_shortcut: + self.conv_shortcut = torch.nn.Conv2d( + self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0 + ) + + def forward(self, input_tensor, temb): + shape = input_tensor.shape + n_dim = len(self.channels_multidim) + input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_channels_prod, 1, 1) + input_tensor = input_tensor.view(-1, self.in_channels_prod, 1, 1) + + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states) + hidden_states = 
self.nonlinearity(hidden_states) + hidden_states = self.conv1(hidden_states) + + if temb is not None: + temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None] + hidden_states = hidden_states + temb + + hidden_states = self.norm2(hidden_states) + hidden_states = self.nonlinearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = input_tensor + hidden_states + + output_tensor = output_tensor.view(*shape[0:-n_dim], -1) + output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_channels_multidim) + + return output_tensor + + +class DownBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + LinearMultiDim( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None + ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: + output_states = () + + for resnet in self.resnets: + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +class CrossAttnDownBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + downsample_padding: int = 1, + add_downsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + 
only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + LinearMultiDim( + out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" + ) + ] + ) + else: + self.downsamplers = None + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + additional_residuals: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: + output_states = () + + blocks = list(zip(self.resnets, self.attentions)) + + for i, (resnet, attn) in enumerate(blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + # apply additional residuals to the output of the last pair of resnet and 
attention blocks + if i == len(blocks) - 1 and additional_residuals is not None: + hidden_states = hidden_states + additional_residuals + + output_states = output_states + (hidden_states,) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states) + + output_states = output_states + (hidden_states,) + + return hidden_states, output_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UpBlock2D with UpBlock2D->UpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim +class UpBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + prev_output_channel: int, + out_channels: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlockFlat( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.Tensor, + res_hidden_states_tuple: Tuple[torch.Tensor, ...], + temb: Optional[torch.Tensor] = None, + upsample_size: Optional[int] = None, + *args, + **kwargs, + ) -> torch.Tensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + for resnet in self.resnets: + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb, use_reentrant=False + ) + else: + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), hidden_states, temb + ) + else: + hidden_states = resnet(hidden_states, temb) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.CrossAttnUpBlock2D with CrossAttnUpBlock2D->CrossAttnUpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim +class CrossAttnUpBlockFlat(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + cross_attention_dim: int = 1280, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + resnets = [] + attentions = [] + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + for i in range(num_layers): + res_skip_channels = in_channels if (i == num_layers - 1) else out_channels + resnet_in_channels = prev_output_channel if i == 0 else out_channels + + resnets.append( + ResnetBlockFlat( + in_channels=resnet_in_channels + res_skip_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + 
upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) + else: + self.upsamplers = None + + self.gradient_checkpointing = False + self.resolution_idx = resolution_idx + + def forward( + self, + hidden_states: torch.Tensor, + res_hidden_states_tuple: Tuple[torch.Tensor, ...], + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + upsample_size: Optional[int] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") + + is_freeu_enabled = ( + getattr(self, "s1", None) + and getattr(self, "s2", None) + and getattr(self, "b1", None) + and getattr(self, "b2", None) + ) + + for resnet, attn in zip(self.resnets, self.attentions): + # pop res hidden states + res_hidden_states = res_hidden_states_tuple[-1] + res_hidden_states_tuple = res_hidden_states_tuple[:-1] + + # FreeU: Only operate on the first two stages + if is_freeu_enabled: + hidden_states, res_hidden_states = apply_freeu( + self.resolution_idx, + hidden_states, + res_hidden_states, + s1=self.s1, + s2=self.s2, + b1=self.b1, + b2=self.b2, + ) + + hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + else: + hidden_states = resnet(hidden_states, temb) + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states, upsample_size) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2D with UNetMidBlock2D->UNetMidBlockFlat, ResnetBlock2D->ResnetBlockFlat +class UNetMidBlockFlat(nn.Module): + """ + A 2D UNet mid-block [`UNetMidBlockFlat`] with multiple residual blocks and optional attention blocks. + + Args: + in_channels (`int`): The number of input channels. 
+ temb_channels (`int`): The number of temporal embedding channels. + dropout (`float`, *optional*, defaults to 0.0): The dropout rate. + num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. + resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks. + resnet_time_scale_shift (`str`, *optional*, defaults to `default`): + The type of normalization to apply to the time embeddings. This can help to improve the performance of the + model on tasks with long-range temporal dependencies. + resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks. + resnet_groups (`int`, *optional*, defaults to 32): + The number of groups to use in the group normalization layers of the resnet blocks. + attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks. + resnet_pre_norm (`bool`, *optional*, defaults to `True`): + Whether to use pre-normalization for the resnet blocks. + add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks. + attention_head_dim (`int`, *optional*, defaults to 1): + Dimension of a single attention head. The number of attention heads is determined based on this value and + the number of input channels. + output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor. + + Returns: + `torch.Tensor`: The output of the last residual block, which is a tensor of shape `(batch_size, in_channels, + height, width)`. + + """ + + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", # default, spatial + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + attn_groups: Optional[int] = None, + resnet_pre_norm: bool = True, + add_attention: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + ): + super().__init__() + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + self.add_attention = add_attention + + if attn_groups is None: + attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None + + # there is always at least one resnet + if resnet_time_scale_shift == "spatial": + resnets = [ + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ] + else: + resnets = [ + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + if attention_head_dim is None: + logger.warning( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." 
+ ) + attention_head_dim = in_channels + + for _ in range(num_layers): + if self.add_attention: + attentions.append( + Attention( + in_channels, + heads=in_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=attn_groups, + spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + else: + attentions.append(None) + + if resnet_time_scale_shift == "spatial": + resnets.append( + ResnetBlockCondNorm2D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm="spatial", + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + ) + ) + else: + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if attn is not None: + hidden_states = attn(hidden_states, temb=temb) + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn with UNetMidBlock2DCrossAttn->UNetMidBlockFlatCrossAttn, ResnetBlock2D->ResnetBlockFlat +class UNetMidBlockFlatCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + out_channels: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + transformer_layers_per_block: Union[int, Tuple[int]] = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_groups_out: Optional[int] = None, + resnet_pre_norm: bool = True, + num_attention_heads: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + upcast_attention: bool = False, + attention_type: str = "default", + ): + super().__init__() + + out_channels = out_channels or in_channels + self.in_channels = in_channels + self.out_channels = out_channels + + self.has_cross_attention = True + self.num_attention_heads = num_attention_heads + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + # support for variable transformer layers per block + if isinstance(transformer_layers_per_block, int): + transformer_layers_per_block = [transformer_layers_per_block] * num_layers + + resnet_groups_out = resnet_groups_out or resnet_groups + + # there is always at least one resnet + resnets = [ + ResnetBlockFlat( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + groups_out=resnet_groups_out, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + 
attentions = [] + + for i in range(num_layers): + if not dual_cross_attention: + attentions.append( + Transformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=transformer_layers_per_block[i], + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups_out, + use_linear_projection=use_linear_projection, + upcast_attention=upcast_attention, + attention_type=attention_type, + ) + ) + else: + attentions.append( + DualTransformer2DModel( + num_attention_heads, + out_channels // num_attention_heads, + in_channels=out_channels, + num_layers=1, + cross_attention_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + ) + ) + resnets.append( + ResnetBlockFlat( + in_channels=out_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups_out, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") + + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(resnet), + hidden_states, + temb, + **ckpt_kwargs, + ) + else: + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + cross_attention_kwargs=cross_attention_kwargs, + attention_mask=attention_mask, + encoder_attention_mask=encoder_attention_mask, + return_dict=False, + )[0] + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +# Copied from diffusers.models.unets.unet_2d_blocks.UNetMidBlock2DSimpleCrossAttn with UNetMidBlock2DSimpleCrossAttn->UNetMidBlockFlatSimpleCrossAttn, ResnetBlock2D->ResnetBlockFlat +class UNetMidBlockFlatSimpleCrossAttn(nn.Module): + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + cross_attention_dim: int = 1280, + 
skip_time_act: bool = False, + only_cross_attention: bool = False, + cross_attention_norm: Optional[str] = None, + ): + super().__init__() + + self.has_cross_attention = True + + self.attention_head_dim = attention_head_dim + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + + self.num_heads = in_channels // self.attention_head_dim + + # there is always at least one resnet + resnets = [ + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ] + attentions = [] + + for _ in range(num_layers): + processor = ( + AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() + ) + + attentions.append( + Attention( + query_dim=in_channels, + cross_attention_dim=in_channels, + heads=self.num_heads, + dim_head=self.attention_head_dim, + added_kv_proj_dim=cross_attention_dim, + norm_num_groups=resnet_groups, + bias=True, + upcast_softmax=True, + only_cross_attention=only_cross_attention, + cross_attention_norm=cross_attention_norm, + processor=processor, + ) + ) + resnets.append( + ResnetBlockFlat( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + skip_time_act=skip_time_act, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward( + self, + hidden_states: torch.Tensor, + temb: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") + + if attention_mask is None: + # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. + mask = None if encoder_hidden_states is None else encoder_attention_mask + else: + # when attention_mask is defined: we don't even check for encoder_attention_mask. + # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. + # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. 
+ # then we can simplify this whole if/else block to: + # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask + mask = attention_mask + + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + # attn + hidden_states = attn( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=mask, + **cross_attention_kwargs, + ) + + # resnet + hidden_states = resnet(hidden_states, temb) + + return hidden_states diff --git a/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..c8dc18e2e8ac0facc006f9a1bf48b22ba7a3bdd8 --- /dev/null +++ b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py @@ -0,0 +1,421 @@ +import inspect +from typing import Callable, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel + +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import logging +from ...pipeline_utils import DiffusionPipeline +from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline +from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline +from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class VersatileDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
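+
+        Examples:
+
+        A condensed sketch of the task-specific entry points this wrapper exposes (`text_to_image`,
+        `image_variation`, and `dual_guided`); the per-method docstrings below carry the full argument lists
+        and more complete examples:
+
+        ```py
+        >>> import torch
+        >>> from diffusers import VersatileDiffusionPipeline
+
+        >>> pipe = VersatileDiffusionPipeline.from_pretrained(
+        ...     "shi-labs/versatile-diffusion", torch_dtype=torch.float16
+        ... ).to("cuda")
+
+        >>> # text-to-image
+        >>> image = pipe.text_to_image("an astronaut riding a horse on mars").images[0]
+
+        >>> # image variation, reusing the generated image as the visual prompt
+        >>> variation = pipe.image_variation(image).images[0]
+        ```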
+ """ + + tokenizer: CLIPTokenizer + image_feature_extractor: CLIPImageProcessor + text_encoder: CLIPTextModel + image_encoder: CLIPVisionModel + image_unet: UNet2DConditionModel + text_unet: UNet2DConditionModel + vae: AutoencoderKL + scheduler: KarrasDiffusionSchedulers + + def __init__( + self, + tokenizer: CLIPTokenizer, + image_feature_extractor: CLIPImageProcessor, + text_encoder: CLIPTextModel, + image_encoder: CLIPVisionModel, + image_unet: UNet2DConditionModel, + text_unet: UNet2DConditionModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + image_feature_extractor=image_feature_extractor, + text_encoder=text_encoder, + image_encoder=image_encoder, + image_unet=image_unet, + text_unet=text_unet, + vae=vae, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + @torch.no_grad() + def image_variation( + self, + image: Union[torch.Tensor, PIL.Image.Image], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): + The image prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + + >>> pipe = VersatileDiffusionPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe.image_variation(image, generator=generator).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + expected_components = inspect.signature(VersatileDiffusionImageVariationPipeline.__init__).parameters.keys() + components = {name: component for name, component in self.components.items() if name in expected_components} + return VersatileDiffusionImageVariationPipeline(**components)( + image=image, + height=height, + width=width, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + eta=eta, + generator=generator, + latents=latents, + output_type=output_type, + return_dict=return_dict, + callback=callback, + callback_steps=callback_steps, + ) + + @torch.no_grad() + def text_to_image( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. 
+ height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionPipeline + >>> import torch + + >>> pipe = VersatileDiffusionPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe.text_to_image("an astronaut riding on a horse on mars", generator=generator).images[0] + >>> image.save("./astronaut.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. 
+ """ + expected_components = inspect.signature(VersatileDiffusionTextToImagePipeline.__init__).parameters.keys() + components = {name: component for name, component in self.components.items() if name in expected_components} + temp_pipeline = VersatileDiffusionTextToImagePipeline(**components) + output = temp_pipeline( + prompt=prompt, + height=height, + width=width, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + eta=eta, + generator=generator, + latents=latents, + output_type=output_type, + return_dict=return_dict, + callback=callback, + callback_steps=callback_steps, + ) + # swap the attention blocks back to the original state + temp_pipeline._swap_unet_attention_blocks() + + return output + + @torch.no_grad() + def dual_guided( + self, + prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], + image: Union[str, List[str]], + text_to_image_strength: float = 0.5, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. 
+ output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + >>> text = "a red car in the sun" + + >>> pipe = VersatileDiffusionPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> text_to_image_strength = 0.75 + + >>> image = pipe.dual_guided( + ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator + ... ).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + + expected_components = inspect.signature(VersatileDiffusionDualGuidedPipeline.__init__).parameters.keys() + components = {name: component for name, component in self.components.items() if name in expected_components} + temp_pipeline = VersatileDiffusionDualGuidedPipeline(**components) + output = temp_pipeline( + prompt=prompt, + image=image, + text_to_image_strength=text_to_image_strength, + height=height, + width=width, + num_inference_steps=num_inference_steps, + guidance_scale=guidance_scale, + num_images_per_prompt=num_images_per_prompt, + eta=eta, + generator=generator, + latents=latents, + output_type=output_type, + return_dict=return_dict, + callback=callback, + callback_steps=callback_steps, + ) + temp_pipeline._revert_dual_attention() + + return output diff --git a/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py new file mode 100644 index 0000000000000000000000000000000000000000..2212651fbb5bf712ca624618e379684f1442af85 --- /dev/null +++ b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -0,0 +1,561 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.utils.checkpoint +from transformers import ( + CLIPImageProcessor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .modeling_text_unet import UNetFlatConditionModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): + r""" + Pipeline for image-text dual-guided generation using Versatile Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + bert ([`LDMBertModel`]): + Text-encoder model based on [`~transformers.BERT`]. + tokenizer ([`~transformers.BertTokenizer`]): + A `BertTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "bert->unet->vqvae" + + tokenizer: CLIPTokenizer + image_feature_extractor: CLIPImageProcessor + text_encoder: CLIPTextModelWithProjection + image_encoder: CLIPVisionModelWithProjection + image_unet: UNet2DConditionModel + text_unet: UNetFlatConditionModel + vae: AutoencoderKL + scheduler: KarrasDiffusionSchedulers + + _optional_components = ["text_unet"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + image_feature_extractor: CLIPImageProcessor, + text_encoder: CLIPTextModelWithProjection, + image_encoder: CLIPVisionModelWithProjection, + image_unet: UNet2DConditionModel, + text_unet: UNetFlatConditionModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + self.register_modules( + tokenizer=tokenizer, + image_feature_extractor=image_feature_extractor, + text_encoder=text_encoder, + image_encoder=image_encoder, + image_unet=image_unet, + text_unet=text_unet, + vae=vae, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + if self.text_unet is not None and ( + "dual_cross_attention" not in self.image_unet.config or not self.image_unet.config.dual_cross_attention + ): + # if loading from a universal checkpoint rather than a saved dual-guided pipeline + self._convert_to_dual_attention() + + def remove_unused_weights(self): + self.register_modules(text_unet=None) + + def _convert_to_dual_attention(self): + """ + Replace image_unet's `Transformer2DModel` blocks with `DualTransformer2DModel` that contains transformer blocks + from both `image_unet` and `text_unet` + """ + for name, module in self.image_unet.named_modules(): + if isinstance(module, Transformer2DModel): + parent_name, index = name.rsplit(".", 1) + index = int(index) + + image_transformer = self.image_unet.get_submodule(parent_name)[index] + text_transformer = self.text_unet.get_submodule(parent_name)[index] + + config = image_transformer.config + dual_transformer = DualTransformer2DModel( + num_attention_heads=config.num_attention_heads, + attention_head_dim=config.attention_head_dim, + in_channels=config.in_channels, + num_layers=config.num_layers, + dropout=config.dropout, + norm_num_groups=config.norm_num_groups, + cross_attention_dim=config.cross_attention_dim, + attention_bias=config.attention_bias, + sample_size=config.sample_size, + num_vector_embeds=config.num_vector_embeds, + activation_fn=config.activation_fn, + num_embeds_ada_norm=config.num_embeds_ada_norm, + ) + dual_transformer.transformers[0] = image_transformer + dual_transformer.transformers[1] = text_transformer + + self.image_unet.get_submodule(parent_name)[index] = dual_transformer + self.image_unet.register_to_config(dual_cross_attention=True) + + def _revert_dual_attention(self): + """ + Revert the image_unet `DualTransformer2DModel` blocks back to `Transformer2DModel` with image_unet weights Call + this function if you reuse `image_unet` in another pipeline, e.g. 
`VersatileDiffusionPipeline` + """ + for name, module in self.image_unet.named_modules(): + if isinstance(module, DualTransformer2DModel): + parent_name, index = name.rsplit(".", 1) + index = int(index) + self.image_unet.get_submodule(parent_name)[index] = module.transformers[0] + + self.image_unet.register_to_config(dual_cross_attention=False) + + def _encode_text_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + """ + + def normalize_embeddings(encoder_output): + embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) + embeds_pooled = encoder_output.text_embeds + embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) + return embeds + + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids + + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = normalize_embeddings(prompt_embeds) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
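The comment above describes the classifier-free-guidance batching used throughout these pipelines: the unconditional and conditional contexts are concatenated so one batched UNet call replaces two. A rough sketch of that idea, with random tensors and a `toy_unet` placeholder standing in for real embeddings and the UNet:

```py
# Sketch of classifier-free-guidance batching: one batched forward pass, then
# split and recombine with the guidance scale. Shapes and toy_unet are
# illustrative stand-ins for real CLIP embeddings and the image UNet.
import torch

guidance_scale = 7.5
uncond_embeds = torch.randn(1, 77, 768)   # embeddings of ""
prompt_embeds = torch.randn(1, 77, 768)   # embeddings of the prompt
latents = torch.randn(1, 4, 64, 64)


def toy_unet(sample, context):            # placeholder for image_unet(...)
    return torch.randn_like(sample)


context = torch.cat([uncond_embeds, prompt_embeds])        # one batch of two
noise_pred = toy_unet(torch.cat([latents] * 2), context)   # single forward pass
noise_uncond, noise_text = noise_pred.chunk(2)
noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)
```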
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def _encode_image_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + """ + + def normalize_embeddings(encoder_output): + embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) + embeds = self.image_encoder.visual_projection(embeds) + embeds_pooled = embeds[:, 0:1] + embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) + return embeds + + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + image_input = self.image_feature_extractor(images=prompt, return_tensors="pt") + pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) + image_embeddings = self.image_encoder(pixel_values) + image_embeddings = normalize_embeddings(image_embeddings) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size + uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt") + pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) + negative_prompt_embeds = self.image_encoder(pixel_values) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and conditional embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs(self, prompt, image, height, width, callback_steps): + if not isinstance(prompt, str) and not isinstance(prompt, PIL.Image.Image) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` `PIL.Image` or `list` but is {type(prompt)}") + if not isinstance(image, str) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list): + raise ValueError(f"`image` has to be of type `str` `PIL.Image` or `list` but is {type(image)}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def set_transformer_params(self, mix_ratio: float = 0.5, condition_types: Tuple = ("text", "image")): + for name, module in self.image_unet.named_modules(): + if isinstance(module, DualTransformer2DModel): + module.mix_ratio = mix_ratio + + for i, type in enumerate(condition_types): + if type == "text": + module.condition_lengths[i] = self.text_encoder.config.max_position_embeddings + module.transformer_index_for_condition[i] = 1 # use the second (text) transformer + else: + module.condition_lengths[i] = 257 + module.transformer_index_for_condition[i] = 0 # use the first (image) transformer + + @torch.no_grad() + def __call__( + self, + prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], + image: Union[str, List[str]], + text_to_image_strength: float = 0.5, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
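`set_transformer_params` above routes each slice of the concatenated context to one of the two transformers and blends their outputs by `mix_ratio` (the `text_to_image_strength`). A rough, self-contained sketch of that blending; the branch functions and shapes are toys, and 77 is assumed as CLIP's usual max token length:

```py
# Rough sketch of the dual-conditioning blend: split the concatenated context by
# condition_lengths, send each slice to its branch, mix the outputs by mix_ratio.
import torch

mix_ratio = 0.75                      # text_to_image_strength
condition_lengths = [77, 257]         # text tokens, image tokens

hidden_states = torch.randn(2, 4096, 320)
context = torch.randn(2, sum(condition_lengths), 768)
text_ctx, image_ctx = context.split(condition_lengths, dim=1)


def text_branch(h, ctx):              # stand-in for the text transformer
    return h + 0.01 * ctx.mean()


def image_branch(h, ctx):             # stand-in for the image transformer
    return h + 0.02 * ctx.mean()


out = mix_ratio * text_branch(hidden_states, text_ctx) + (1 - mix_ratio) * image_branch(hidden_states, image_ctx)
```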
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionDualGuidedPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + >>> text = "a red car in the sun" + + >>> pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe.remove_unused_weights() + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> text_to_image_strength = 0.75 + + >>> image = pipe( + ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator + ... ).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.image_unet.config.sample_size * self.vae_scale_factor + width = width or self.image_unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, image, height, width, callback_steps) + + # 2. Define call parameters + prompt = [prompt] if not isinstance(prompt, list) else prompt + image = [image] if not isinstance(image, list) else image + batch_size = len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompts + prompt_embeds = self._encode_text_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) + image_embeddings = self._encode_image_prompt(image, device, num_images_per_prompt, do_classifier_free_guidance) + dual_prompt_embeddings = torch.cat([prompt_embeds, image_embeddings], dim=1) + prompt_types = ("text", "image") + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.image_unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + dual_prompt_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Combine the attention blocks of the image and text UNets + self.set_transformer_params(text_to_image_strength, prompt_types) + + # 8. Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=dual_prompt_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py new file mode 100644 index 0000000000000000000000000000000000000000..62d3e83a4790f82f182a360b0cd0d6c20fb231e2 --- /dev/null +++ b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -0,0 +1,402 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
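After the denoising loop above, the latents are un-scaled, decoded by the VAE, and mapped from `[-1, 1]` back to `[0, 1]` before postprocessing. A tiny numeric sketch of that last conversion; the identity-style `toy_vae_decode` and the 0.18215 scaling factor (the usual SD value) are illustrative assumptions:

```py
# Latent -> image conversion: un-scale, decode, then shift/clamp to [0, 1].
import torch

scaling_factor = 0.18215                  # typical SD VAE scaling factor (assumption)
latents = torch.randn(1, 4, 64, 64)


def toy_vae_decode(z):                    # placeholder for self.vae.decode(...)
    return torch.tanh(z)                  # pretend pixel values in [-1, 1]


image = toy_vae_decode(latents / scaling_factor)
image = (image / 2 + 0.5).clamp(0, 1)     # ready for VaeImageProcessor.postprocess
```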
+ +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.utils.checkpoint +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): + r""" + Pipeline for image variation using Versatile Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + bert ([`LDMBertModel`]): + Text-encoder model based on [`~transformers.BERT`]. + tokenizer ([`~transformers.BertTokenizer`]): + A `BertTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "bert->unet->vqvae" + + image_feature_extractor: CLIPImageProcessor + image_encoder: CLIPVisionModelWithProjection + image_unet: UNet2DConditionModel + vae: AutoencoderKL + scheduler: KarrasDiffusionSchedulers + + def __init__( + self, + image_feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection, + image_unet: UNet2DConditionModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + self.register_modules( + image_feature_extractor=image_feature_extractor, + image_encoder=image_encoder, + image_unet=image_unet, + vae=vae, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). 
+ """ + + def normalize_embeddings(encoder_output): + embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) + embeds = self.image_encoder.visual_projection(embeds) + embeds_pooled = embeds[:, 0:1] + embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) + return embeds + + if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4: + prompt = list(prompt) + + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + image_input = self.image_feature_extractor(images=prompt, return_tensors="pt") + pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) + image_embeddings = self.image_encoder(pixel_values) + image_embeddings = normalize_embeddings(image_embeddings) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_images: List[str] + if negative_prompt is None: + uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, PIL.Image.Image): + uncond_images = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_images = negative_prompt + + uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt") + pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) + negative_prompt_embeds = self.image_encoder(pixel_values) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and conditional embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs + def check_inputs(self, image, height, width, callback_steps): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): + The image prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. 
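The sizing rules referenced above all hang off `vae_scale_factor`: `height`/`width` default to `sample_size * vae_scale_factor`, must be divisible by 8, and the latents live at `1 / vae_scale_factor` of the pixel resolution. A small numeric sketch; `sample_size=64`, 4 latent channels, and the `block_out_channels` tuple are typical values assumed for illustration:

```py
# Default resolution and latent shape, using typical SD-style config values.
sample_size = 64
block_out_channels = (128, 256, 512, 512)
vae_scale_factor = 2 ** (len(block_out_channels) - 1)          # 8

height = width = sample_size * vae_scale_factor                # 512 x 512 default
assert height % 8 == 0 and width % 8 == 0
latent_shape = (1, 4, height // vae_scale_factor, width // vae_scale_factor)
print(vae_scale_factor, latent_shape)                          # 8 (1, 4, 64, 64)
```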
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionImageVariationPipeline + >>> import torch + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + + >>> # let's download an initial image + >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" + + >>> response = requests.get(url) + >>> image = Image.open(BytesIO(response.content)).convert("RGB") + + >>> pipe = VersatileDiffusionImageVariationPipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe(image, generator=generator).images[0] + >>> image.save("./car_variation.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.image_unet.config.sample_size * self.vae_scale_factor + width = width or self.image_unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(image, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + image_embeddings = self._encode_prompt( + image, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.image_unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py new file mode 100644 index 0000000000000000000000000000000000000000..de4c2ac9b7f49808b4070fb269bac1222ab59826 --- /dev/null +++ b/diffusers3/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -0,0 +1,480 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import torch +import torch.utils.checkpoint +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer + +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .modeling_text_unet import UNetFlatConditionModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using Versatile Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. 
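The denoising loop above only invokes the callback every `callback_steps` iterations and reports a step index divided by the scheduler's order. A tiny sketch of that cadence; all values below are illustrative:

```py
# Callback cadence: fire every callback_steps iterations, report i // order.
callback_steps = 10
scheduler_order = 1                                  # getattr(self.scheduler, "order", 1)


def callback(step: int, timestep: int, latents):
    print(f"step={step} timestep={timestep}")


timesteps = range(981, -1, -20)                      # stand-in for scheduler.timesteps
for i, t in enumerate(timesteps):
    if i % callback_steps == 0:
        callback(i // scheduler_order, t, None)      # latents omitted in this toy
```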
+ bert ([`LDMBertModel`]): + Text-encoder model based on [`~transformers.BERT`]. + tokenizer ([`~transformers.BertTokenizer`]): + A `BertTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "bert->unet->vqvae" + + tokenizer: CLIPTokenizer + image_feature_extractor: CLIPImageProcessor + text_encoder: CLIPTextModelWithProjection + image_unet: UNet2DConditionModel + text_unet: UNetFlatConditionModel + vae: AutoencoderKL + scheduler: KarrasDiffusionSchedulers + + _optional_components = ["text_unet"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + image_unet: UNet2DConditionModel, + text_unet: UNetFlatConditionModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + image_unet=image_unet, + text_unet=text_unet, + vae=vae, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + if self.text_unet is not None: + self._swap_unet_attention_blocks() + + def _swap_unet_attention_blocks(self): + """ + Swap the `Transformer2DModel` blocks between the image and text UNets + """ + for name, module in self.image_unet.named_modules(): + if isinstance(module, Transformer2DModel): + parent_name, index = name.rsplit(".", 1) + index = int(index) + self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = ( + self.text_unet.get_submodule(parent_name)[index], + self.image_unet.get_submodule(parent_name)[index], + ) + + def remove_unused_weights(self): + self.register_modules(text_unet=None) + + def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). 
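`_swap_unet_attention_blocks` above exchanges transformer blocks between the image and text UNets by splitting the dotted module name into a parent path and an index. A minimal sketch of that `named_modules` / `get_submodule` swap on toy modules; `TinyUNet` is an illustrative stand-in for the real UNets:

```py
# Swap entries of two ModuleLists given a dotted module name like "attentions.0".
import torch.nn as nn


class TinyUNet(nn.Module):                           # illustrative stand-in UNet
    def __init__(self, tag):
        super().__init__()
        self.attentions = nn.ModuleList([nn.Linear(4, 4)])
        self.attentions[0].tag = tag


image_unet, text_unet = TinyUNet("image"), TinyUNet("text")

name = "attentions.0"                                # as yielded by named_modules()
parent_name, index = name.rsplit(".", 1)
index = int(index)
image_unet.get_submodule(parent_name)[index], text_unet.get_submodule(parent_name)[index] = (
    text_unet.get_submodule(parent_name)[index],
    image_unet.get_submodule(parent_name)[index],
)
print(image_unet.attentions[0].tag)                  # "text"
```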
+ """ + + def normalize_embeddings(encoder_output): + embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) + embeds_pooled = encoder_output.text_embeds + embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) + return embeds + + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids + + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = normalize_embeddings(prompt_embeds) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + + ```py + >>> from diffusers import VersatileDiffusionTextToImagePipeline + >>> import torch + + >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( + ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 + ... ) + >>> pipe.remove_unused_weights() + >>> pipe = pipe.to("cuda") + + >>> generator = torch.Generator(device="cuda").manual_seed(0) + >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0] + >>> image.save("./astronaut.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.image_unet.config.sample_size * self.vae_scale_factor + width = width or self.image_unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.image_unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/deprecated/vq_diffusion/__init__.py b/diffusers3/pipelines/deprecated/vq_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..070903377c7188415af0417d4839d74a8a34dc01 --- /dev/null +++ b/diffusers3/pipelines/deprecated/vq_diffusion/__init__.py @@ -0,0 +1,57 @@ +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + LearnedClassifierFreeSamplingEmbeddings, + VQDiffusionPipeline, + ) + + _dummy_objects.update( + { + "LearnedClassifierFreeSamplingEmbeddings": LearnedClassifierFreeSamplingEmbeddings, + "VQDiffusionPipeline": VQDiffusionPipeline, + } + ) +else: + _import_structure["pipeline_vq_diffusion"] = ["LearnedClassifierFreeSamplingEmbeddings", "VQDiffusionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import ( + LearnedClassifierFreeSamplingEmbeddings, + VQDiffusionPipeline, + ) + else: + from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/diffusers3/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py b/diffusers3/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..8dee000df05f6a0196b53277bbe254c53e73e39d --- /dev/null +++ b/diffusers3/pipelines/deprecated/vq_diffusion/pipeline_vq_diffusion.py @@ -0,0 +1,325 @@ +# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin, Transformer2DModel, VQModel +from ....schedulers import VQDiffusionScheduler +from ....utils import logging +from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin): + """ + Utility class for storing learned text embeddings for classifier free sampling + """ + + @register_to_config + def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None): + super().__init__() + + self.learnable = learnable + + if self.learnable: + assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" + assert length is not None, "learnable=True requires `length` to be set" + + embeddings = torch.zeros(length, hidden_size) + else: + embeddings = None + + self.embeddings = torch.nn.Parameter(embeddings) + + +class VQDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using VQ Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vqvae ([`VQModel`]): + Vector Quantized Variational Auto-Encoder (VAE) model to encode and decode images to and from latent + representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + transformer ([`Transformer2DModel`]): + A conditional `Transformer2DModel` to denoise the encoded image latents. + scheduler ([`VQDiffusionScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
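+        learned_classifier_free_sampling_embeddings ([`LearnedClassifierFreeSamplingEmbeddings`]):
+            Container for optionally learned embeddings that serve as the unconditional prompt embeddings for
+            classifier-free guidance; when `learnable=False`, embeddings of an empty prompt are used instead.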
+ """ + + vqvae: VQModel + text_encoder: CLIPTextModel + tokenizer: CLIPTokenizer + transformer: Transformer2DModel + learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings + scheduler: VQDiffusionScheduler + + def __init__( + self, + vqvae: VQModel, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + transformer: Transformer2DModel, + scheduler: VQDiffusionScheduler, + learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings, + ): + super().__init__() + + self.register_modules( + vqvae=vqvae, + transformer=transformer, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, + ) + + def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0] + + # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. + # While CLIP does normalize the pooled output of the text transformer when combining + # the image and text embeddings, CLIP does not directly normalize the last hidden state. + # + # CLIP normalizing the pooled output. + # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 + prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True) + + # duplicate text embeddings for each generation per prompt + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + if self.learned_classifier_free_sampling_embeddings.learnable: + negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings + negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1) + else: + uncond_tokens = [""] * batch_size + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0] + # See comment for normalizing text embeddings + negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + num_inference_steps: int = 100, + guidance_scale: float = 5.0, + truncation_rate: float = 1.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)): + Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at + most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above + `truncation_rate` are set to zero. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor` of shape (batch), *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Must be valid embedding indices.If not provided, a latents tensor will be generated of + completely masked latent pixels. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. 
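+
+        Examples:
+
+        A minimal usage sketch; the checkpoint name below is illustrative of a compatible VQ-Diffusion checkpoint and
+        the top-level `diffusers` import mirrors the examples in the other pipelines:
+
+        ```py
+        >>> from diffusers import VQDiffusionPipeline
+
+        >>> # load a VQ-Diffusion checkpoint and move it to the GPU
+        >>> pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
+        >>> pipe = pipe.to("cuda")
+
+        >>> # generate and save a single image for a text prompt
+        >>> image = pipe("teddy bear playing in the pool").images[0]
+        >>> image.save("./teddy_bear.png")
+        ```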
+ """ + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # get the initial completely masked latents unless the user supplied it + + latents_shape = (batch_size, self.transformer.num_latent_pixels) + if latents is None: + mask_class = self.transformer.num_vector_embeds - 1 + latents = torch.full(latents_shape, mask_class).to(self.device) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): + raise ValueError( + "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0," + f" {self.transformer.num_vector_embeds - 1} (inclusive)." + ) + latents = latents.to(self.device) + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=self.device) + + timesteps_tensor = self.scheduler.timesteps.to(self.device) + + sample = latents + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the sample if we are doing classifier free guidance + latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample + + # predict the un-noised image + # model_output == `log_p_x_0` + model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample + + if do_classifier_free_guidance: + model_output_uncond, model_output_text = model_output.chunk(2) + model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) + model_output -= torch.logsumexp(model_output, dim=1, keepdim=True) + + model_output = self.truncate(model_output, truncation_rate) + + # remove `log(0)`'s (`-inf`s) + model_output = model_output.clamp(-70) + + # compute the previous noisy sample x_t -> x_t-1 + sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + callback(i, t, sample) + + embedding_channels = self.vqvae.config.vq_embed_dim + embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) + embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape) + image = self.vqvae.decode(embeddings, force_not_quantize=True).sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) + + def truncate(self, log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor: + """ + Truncates `log_p_x_0` such that for each column vector, the total cumulative probability is `truncation_rate` + The lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to + zero. 
+ """ + sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True) + sorted_p_x_0 = torch.exp(sorted_log_p_x_0) + keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate + + # Ensure that at least the largest probability is not zeroed out + all_true = torch.full_like(keep_mask[:, 0:1, :], True) + keep_mask = torch.cat((all_true, keep_mask), dim=1) + keep_mask = keep_mask[:, :-1, :] + + keep_mask = keep_mask.gather(1, indices.argsort(1)) + + rv = log_p_x_0.clone() + + rv[~keep_mask] = -torch.inf # -inf = log(0) + + return rv diff --git a/diffusers3/pipelines/dit/__init__.py b/diffusers3/pipelines/dit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe2a94f3cba77d867f97111a41895918842adc27 --- /dev/null +++ b/diffusers3/pipelines/dit/__init__.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule + + +_import_structure = {"pipeline_dit": ["DiTPipeline"]} + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from .pipeline_dit import DiTPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) diff --git a/diffusers3/pipelines/dit/pipeline_dit.py b/diffusers3/pipelines/dit/pipeline_dit.py new file mode 100644 index 0000000000000000000000000000000000000000..14321b5f33cfae58771d7d363dd63a37348226d4 --- /dev/null +++ b/diffusers3/pipelines/dit/pipeline_dit.py @@ -0,0 +1,236 @@ +# Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) +# William Peebles and Saining Xie +# +# Copyright (c) 2021 OpenAI +# MIT License +# +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Optional, Tuple, Union + +import torch + +from ...models import AutoencoderKL, DiTTransformer2DModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class DiTPipeline(DiffusionPipeline): + r""" + Pipeline for image generation based on a Transformer backbone instead of a UNet. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + transformer ([`DiTTransformer2DModel`]): + A class conditioned `DiTTransformer2DModel` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
+ """ + + model_cpu_offload_seq = "transformer->vae" + + def __init__( + self, + transformer: DiTTransformer2DModel, + vae: AutoencoderKL, + scheduler: KarrasDiffusionSchedulers, + id2label: Optional[Dict[int, str]] = None, + ): + super().__init__() + self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler) + + # create a imagenet -> id dictionary for easier use + self.labels = {} + if id2label is not None: + for key, value in id2label.items(): + for label in value.split(","): + self.labels[label.lstrip().rstrip()] = int(key) + self.labels = dict(sorted(self.labels.items())) + + def get_label_ids(self, label: Union[str, List[str]]) -> List[int]: + r""" + + Map label strings from ImageNet to corresponding class ids. + + Parameters: + label (`str` or `dict` of `str`): + Label strings to be mapped to class ids. + + Returns: + `list` of `int`: + Class ids to be processed by pipeline. + """ + + if not isinstance(label, list): + label = list(label) + + for l in label: + if l not in self.labels: + raise ValueError( + f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." + ) + + return [self.labels[l] for l in label] + + @torch.no_grad() + def __call__( + self, + class_labels: List[int], + guidance_scale: float = 4.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + num_inference_steps: int = 50, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + The call function to the pipeline for generation. + + Args: + class_labels (List[int]): + List of ImageNet class labels for the images to be generated. + guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + num_inference_steps (`int`, *optional*, defaults to 250): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. 
+ + Examples: + + ```py + >>> from diffusers import DiTPipeline, DPMSolverMultistepScheduler + >>> import torch + + >>> pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16) + >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe = pipe.to("cuda") + + >>> # pick words from Imagenet class labels + >>> pipe.labels # to print all available words + + >>> # pick words that exist in ImageNet + >>> words = ["white shark", "umbrella"] + + >>> class_ids = pipe.get_label_ids(words) + + >>> generator = torch.manual_seed(33) + >>> output = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator) + + >>> image = output.images[0] # label 'white shark' + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + + batch_size = len(class_labels) + latent_size = self.transformer.config.sample_size + latent_channels = self.transformer.config.in_channels + + latents = randn_tensor( + shape=(batch_size, latent_channels, latent_size, latent_size), + generator=generator, + device=self._execution_device, + dtype=self.transformer.dtype, + ) + latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents + + class_labels = torch.tensor(class_labels, device=self._execution_device).reshape(-1) + class_null = torch.tensor([1000] * batch_size, device=self._execution_device) + class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + for t in self.progress_bar(self.scheduler.timesteps): + if guidance_scale > 1: + half = latent_model_input[: len(latent_model_input) // 2] + latent_model_input = torch.cat([half, half], dim=0) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + timesteps = t + if not torch.is_tensor(timesteps): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(timesteps, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device) + elif len(timesteps.shape) == 0: + timesteps = timesteps[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timesteps = timesteps.expand(latent_model_input.shape[0]) + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, timestep=timesteps, class_labels=class_labels_input + ).sample + + # perform guidance + if guidance_scale > 1: + eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] + cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) + + half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps) + eps = torch.cat([half_eps, half_eps], dim=0) + + noise_pred = torch.cat([eps, rest], dim=1) + + # learned sigma + if self.transformer.config.out_channels // 2 == latent_channels: + model_output, _ = torch.split(noise_pred, latent_channels, dim=1) + else: + model_output = noise_pred + + # compute previous image: x_t -> x_t-1 + latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample + + if guidance_scale > 1: + latents, _ = latent_model_input.chunk(2, dim=0) + else: + latents = latent_model_input + + latents = 1 / self.vae.config.scaling_factor * latents + samples = self.vae.decode(latents).sample + + samples = (samples / 2 + 0.5).clamp(0, 1) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + samples = samples.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + samples = self.numpy_to_pil(samples) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (samples,) + + return ImagePipelineOutput(images=samples) diff --git a/diffusers3/pipelines/flux/__init__.py b/diffusers3/pipelines/flux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e43a7ab753cd9ca8429f9ba57158563da9247363 --- /dev/null +++ b/diffusers3/pipelines/flux/__init__.py @@ -0,0 +1,53 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["FluxPipelineOutput"]} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_flux"] = ["FluxPipeline"] + _import_structure["pipeline_flux_controlnet"] = ["FluxControlNetPipeline"] + _import_structure["pipeline_flux_img2img"] = ["FluxImg2ImgPipeline"] + _import_structure["pipeline_flux_inpaint"] = ["FluxInpaintPipeline"] +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from 
...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_flux import FluxPipeline + from .pipeline_flux_controlnet import FluxControlNetPipeline + from .pipeline_flux_img2img import FluxImg2ImgPipeline + from .pipeline_flux_inpaint import FluxInpaintPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/flux/__pycache__/__init__.cpython-38.pyc b/diffusers3/pipelines/flux/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f042942ece372df89dbb74127037113a0bb9c1c Binary files /dev/null and b/diffusers3/pipelines/flux/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/pipelines/flux/__pycache__/pipeline_flux.cpython-38.pyc b/diffusers3/pipelines/flux/__pycache__/pipeline_flux.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65fdd76ff4501f0bcfc577262408bd01aed1bb38 Binary files /dev/null and b/diffusers3/pipelines/flux/__pycache__/pipeline_flux.cpython-38.pyc differ diff --git a/diffusers3/pipelines/flux/__pycache__/pipeline_output.cpython-38.pyc b/diffusers3/pipelines/flux/__pycache__/pipeline_output.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9f98ef6bbdab20fd744fd6cc7605f19495168af Binary files /dev/null and b/diffusers3/pipelines/flux/__pycache__/pipeline_output.cpython-38.pyc differ diff --git a/diffusers3/pipelines/flux/pipeline_flux.py b/diffusers3/pipelines/flux/pipeline_flux.py new file mode 100644 index 0000000000000000000000000000000000000000..bb214885da1c9eb15bd02a84540144f08ae899d6 --- /dev/null +++ b/diffusers3/pipelines/flux/pipeline_flux.py @@ -0,0 +1,771 @@ +# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast + +from ...image_processor import VaeImageProcessor +from ...loaders import FluxLoraLoaderMixin, FromSingleFileMixin +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import FluxTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import FluxPipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import FluxPipeline + + >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + >>> prompt = "A cat holding a sign that says hello world" + >>> # Depending on the variant being used, the pipeline call will slightly vary. + >>> # Refer to the pipeline documentation for more details. + >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0] + >>> image.save("flux.png") + ``` +""" + + +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class FluxPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin): + r""" + The Flux pipeline for text-to-image generation. + + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Args: + transformer ([`FluxTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`T5EncoderModel`]): + [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically + the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`T5TokenizerFast`): + Second Tokenizer of class + [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + text_encoder_2: T5EncoderModel, + tokenizer_2: T5TokenizerFast, + transformer: FluxTransformer2DModel, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = 64 + + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 512, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer_2( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_length=False, + return_overflowing_tokens=False, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] + + dtype = self.text_encoder_2.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + ): + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_overflowing_tokens=False, + return_length=False, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : 
-1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) + + # Use pooled output of CLIPTextModel + prompt_embeds = prompt_embeds.pooler_output + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds + + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 512, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
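+
+        Returns:
+            `Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]`: The T5 prompt embeddings, the pooled
+            CLIP prompt embeddings, and a zero-initialized `text_ids` tensor of shape `(sequence_length, 3)`.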
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # We only use the pooled prompt output from the CLIPTextModel + pooled_prompt_embeds = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + ) + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt_2, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + if self.text_encoder is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype + text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) + + return prompt_embeds, pooled_prompt_embeds, text_ids + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + prompt_embeds=None, + pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + @staticmethod + def _prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids.reshape( + latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @staticmethod + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + height = height // vae_scale_factor + width = width // vae_scale_factor + + latents = latents.view(batch_size, height, width, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) + + return latents + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. 
+ """ + self.vae.disable_tiling() + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + height = 2 * (int(height) // self.vae_scale_factor) + width = 2 * (int(width) // self.vae_scale_factor) + + shape = (batch_size, num_channels_latents, height, width) + + if latents is not None: + latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) + return latents.to(device=device, dtype=dtype), latent_image_ids + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + + latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) + + return latents, latent_image_ids + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` + is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated + images. 
+ """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + ( + prompt_embeds, + pooled_prompt_embeds, + text_ids, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + # 6. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return FluxPipelineOutput(images=image) diff --git a/diffusers3/pipelines/flux/pipeline_flux_controlnet.py b/diffusers3/pipelines/flux/pipeline_flux_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..275c1b21d29898782dcbbc6ca3ad1f8b6700682b --- /dev/null +++ b/diffusers3/pipelines/flux/pipeline_flux_controlnet.py @@ -0,0 +1,923 @@ +# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
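+# NOTE: this module is a local copy of the upstream `diffusers` Flux ControlNet pipeline
+# (the "Copied from diffusers.pipelines.flux..." markers below track the original sources),
+# vendored under `diffusers3`, presumably so it can be patched independently of the
+# installed `diffusers` package.
+#
+# A minimal usage sketch, mirroring the EXAMPLE_DOC_STRING further down; the model ids
+# are placeholders and both a Flux base checkpoint and a Flux ControlNet checkpoint are
+# assumed to be available:
+#
+#   controlnet = FluxControlNetModel.from_pretrained("<flux-controlnet>", torch_dtype=torch.bfloat16)
+#   pipe = FluxControlNetPipeline.from_pretrained("<flux-base>", controlnet=controlnet, torch_dtype=torch.bfloat16).to("cuda")
+#   image = pipe(prompt, control_image=control_image, controlnet_conditioning_scale=0.6,
+#                num_inference_steps=28, guidance_scale=3.5).images[0]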
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from transformers import ( + CLIPTextModel, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FluxLoraLoaderMixin, FromSingleFileMixin +from ...models.autoencoders import AutoencoderKL +from ...models.controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel +from ...models.transformers import FluxTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import FluxPipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers.utils import load_image + >>> from diffusers import FluxControlNetPipeline + >>> from diffusers import FluxControlNetModel + + >>> controlnet_model = "InstantX/FLUX.1-dev-controlnet-canny" + >>> controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16) + >>> pipe = FluxControlNetPipeline.from_pretrained( + ... base_model, controlnet=controlnet, torch_dtype=torch.bfloat16 + ... ) + >>> pipe.to("cuda") + >>> control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg") + >>> prompt = "A girl in city, 25 years old, cool, futuristic" + >>> image = pipe( + ... prompt, + ... control_image=control_image, + ... controlnet_conditioning_scale=0.6, + ... num_inference_steps=28, + ... guidance_scale=3.5, + ... ).images[0] + >>> image.save("flux.png") + ``` +""" + + +# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. 
+ sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class FluxControlNetPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin): + r""" + The Flux pipeline for text-to-image generation. + + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Args: + transformer ([`FluxTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`T5EncoderModel`]): + [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically + the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`T5TokenizerFast`): + Second Tokenizer of class + [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + text_encoder_2: T5EncoderModel, + tokenizer_2: T5TokenizerFast, + transformer: FluxTransformer2DModel, + controlnet: Union[ + FluxControlNetModel, List[FluxControlNetModel], Tuple[FluxControlNetModel], FluxMultiControlNetModel + ], + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + controlnet=controlnet, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = 64 + + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 512, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer_2( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_length=False, + return_overflowing_tokens=False, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] + + dtype = self.text_encoder_2.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + ): + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_overflowing_tokens=False, + return_length=False, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= 
text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) + + # Use pooled output of CLIPTextModel + prompt_embeds = prompt_embeds.pooler_output + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds + + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 512, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # We only use the pooled prompt output from the CLIPTextModel + pooled_prompt_embeds = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + ) + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt_2, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + if self.text_encoder is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype + text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) + + return prompt_embeds, pooled_prompt_embeds, text_ids + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + prompt_embeds=None, + pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids + def _prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids.reshape( + latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + height = height // vae_scale_factor + width = width // vae_scale_factor + + latents = latents.view(batch_size, height, width, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) + + return latents + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + height = 2 * (int(height) // self.vae_scale_factor) + width = 2 * (int(width) // self.vae_scale_factor) + + shape = (batch_size, num_channels_latents, height, width) + + if latents is not None: + latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) + return latents.to(device=device, dtype=dtype), latent_image_ids + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + + latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) + + return latents, latent_image_ids + + # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + if isinstance(image, torch.Tensor): + pass + else: + image = self.image_processor.preprocess(image, height=height, width=width) + + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + control_image: PipelineImageInput = None, + control_mode: Optional[Union[int, List[int]]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + control_mode (`int` or `List[int]`,, *optional*, defaults to None): + The control mode when applying ControlNet-Union. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. 
+ joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` + is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated + images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + dtype = self.transformer.dtype + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + ( + prompt_embeds, + pooled_prompt_embeds, + text_ids, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + # 3. 
Prepare control image + num_channels_latents = self.transformer.config.in_channels // 4 + if isinstance(self.controlnet, FluxControlNetModel): + control_image = self.prepare_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=dtype, + ) + height, width = control_image.shape[-2:] + + # vae encode + control_image = self.vae.encode(control_image).latent_dist.sample() + control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + # pack + height_control_image, width_control_image = control_image.shape[2:] + control_image = self._pack_latents( + control_image, + batch_size * num_images_per_prompt, + num_channels_latents, + height_control_image, + width_control_image, + ) + + # set control mode + if control_mode is not None: + control_mode = torch.tensor(control_mode).to(device, dtype=torch.long) + control_mode = control_mode.reshape([-1, 1]) + + elif isinstance(self.controlnet, FluxMultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=dtype, + ) + height, width = control_image_.shape[-2:] + + # vae encode + control_image_ = self.vae.encode(control_image_).latent_dist.sample() + control_image_ = (control_image_ - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + # pack + height_control_image, width_control_image = control_image_.shape[2:] + control_image_ = self._pack_latents( + control_image_, + batch_size * num_images_per_prompt, + num_channels_latents, + height_control_image, + width_control_image, + ) + + control_images.append(control_image_) + + control_image = control_images + + # set control mode + control_mode_ = [] + if isinstance(control_mode, list): + for cmode in control_mode: + if cmode is None: + control_mode_.append(-1) + else: + control_mode_.append(cmode) + control_mode = torch.tensor(control_mode_).to(device, dtype=torch.long) + control_mode = control_mode.reshape([-1, 1]) + + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # 6. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.tensor([guidance_scale], device=device) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + # controlnet + controlnet_block_samples, controlnet_single_block_samples = self.controlnet( + hidden_states=latents, + controlnet_cond=control_image, + controlnet_mode=control_mode, + conditioning_scale=controlnet_conditioning_scale, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + ) + + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + controlnet_block_samples=controlnet_block_samples, + controlnet_single_block_samples=controlnet_single_block_samples, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + + + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return FluxPipelineOutput(images=image) diff --git a/diffusers3/pipelines/flux/pipeline_flux_img2img.py b/diffusers3/pipelines/flux/pipeline_flux_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..bee4f6ce52e7b5285ac0e8a3fbd64be691518281 --- /dev/null +++ b/diffusers3/pipelines/flux/pipeline_flux_img2img.py @@ -0,0 +1,844 @@ +# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FluxLoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import FluxTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import FluxPipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + + >>> from diffusers import FluxImg2ImgPipeline + >>> from diffusers.utils import load_image + + >>> device = "cuda" + >>> pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> init_image = load_image(url).resize((1024, 1024)) + + >>> prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k" + + >>> images = pipe( + ... prompt=prompt, image=init_image, num_inference_steps=4, strength=0.95, guidance_scale=0.0 + ... 
).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class FluxImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin): + r""" + The Flux pipeline for image inpainting. + + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Args: + transformer ([`FluxTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`T5EncoderModel`]): + [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically + the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`T5TokenizerFast`): + Second Tokenizer of class + [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + text_encoder_2: T5EncoderModel, + tokenizer_2: T5TokenizerFast, + transformer: FluxTransformer2DModel, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = 64 + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 512, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer_2( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_length=False, + return_overflowing_tokens=False, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_2(prompt, 
padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] + + dtype = self.text_encoder_2.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + ): + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_overflowing_tokens=False, + return_length=False, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) + + # Use pooled output of CLIPTextModel + prompt_embeds = prompt_embeds.pooler_output + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 512, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # We only use the pooled prompt output from the CLIPTextModel + pooled_prompt_embeds = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + ) + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt_2, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + if self.text_encoder is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype + text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) + + return prompt_embeds, pooled_prompt_embeds, text_ids + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_inpaint.StableDiffusion3InpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + return image_latents + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(num_inference_steps * strength, num_inference_steps) + + t_start = int(max(num_inference_steps - init_timestep, 0)) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return 
timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt, + prompt_2, + strength, + height, + width, + prompt_embeds=None, + pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids + def _prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids.reshape( + latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + height = height // vae_scale_factor + width = width // vae_scale_factor + + latents = latents.view(batch_size, height, width, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) + + return latents + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + height = 2 * (int(height) // self.vae_scale_factor) + width = 2 * (int(width) // self.vae_scale_factor) + + shape = (batch_size, num_channels_latents, height, width) + latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) + + if latents is not None: + return latents.to(device=device, dtype=dtype), latent_image_ids + + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + image_latents = torch.cat([image_latents], dim=0) + + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.scale_noise(image_latents, timestep, noise) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + return latents, latent_image_ids + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.6, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. 
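                As an illustrative aside (not part of the upstream docstring): because `get_timesteps` keeps only the
                tail of the schedule, `strength` effectively decides how many of `num_inference_steps` are actually
                executed. A minimal sketch of that arithmetic, assuming a scheduler order of 1:

                ```py
                >>> num_inference_steps, strength = 28, 0.6
                >>> t_start = int(max(num_inference_steps - min(num_inference_steps * strength, num_inference_steps), 0))
                >>> num_inference_steps - t_start  # denoising steps actually run
                17
                ```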
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
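                A hedged sketch of such a callback (the function name and print format are illustrative, not part of
                the pipeline API): the callable receives the pipeline, the step index, the timestep, and the requested
                tensors, and should return a dict whose entries (e.g. `latents`) replace the pipeline's own values.

                ```py
                >>> def log_and_passthrough(pipe, step, timestep, callback_kwargs):
                ...     # assumes "latents" was listed in `callback_on_step_end_tensor_inputs`
                ...     print(f"step {step}: t={int(timestep)}, latents {tuple(callback_kwargs['latents'].shape)}")
                ...     return callback_kwargs
                ```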
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` + is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated + images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + strength, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + + # 2. Preprocess image + init_image = self.image_processor.preprocess(image, height=height, width=width) + init_image = init_image.to(dtype=torch.float32) + + # 3. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + ( + prompt_embeds, + pooled_prompt_embeds, + text_ids, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + # 4.Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = (int(height) // self.vae_scale_factor) * (int(width) // self.vae_scale_factor) + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + + latents, latent_image_ids = self.prepare_latents( + init_image, + latent_timestep, + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + # 6. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return FluxPipelineOutput(images=image) diff --git a/diffusers3/pipelines/flux/pipeline_flux_inpaint.py b/diffusers3/pipelines/flux/pipeline_flux_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..4603367002415fcc9eab77154fb6c8e4cfeb4886 --- /dev/null +++ b/diffusers3/pipelines/flux/pipeline_flux_inpaint.py @@ -0,0 +1,1009 @@ +# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
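Both Flux pipelines in this repo tokenize the VAE latents into 2x2 patches before the transformer
(`_pack_latents` / `_unpack_latents`), and the resulting sequence length is what `calculate_shift` uses to pick
`mu`. Below is a rough, self-contained sketch of that packing for a 1024x1024 image, assuming `vae_scale_factor == 16`
and 16 latent channels (`transformer.config.in_channels // 4`); it is illustrative only, not part of the upstream
code. At this resolution `image_seq_len` equals the default `max_seq_len` (4096), so `calculate_shift` returns
`max_shift` (1.16).

```py
>>> import torch
>>> b, c = 1, 16                      # batch, latent channels (assumed: in_channels // 4)
>>> h = w = 2 * (1024 // 16)          # latent grid built in prepare_latents -> 128
>>> latents = torch.randn(b, c, h, w)
>>> packed = (
...     latents.view(b, c, h // 2, 2, w // 2, 2)
...     .permute(0, 2, 4, 1, 3, 5)
...     .reshape(b, (h // 2) * (w // 2), c * 4)
... )
>>> packed.shape  # one 64-dim token per 2x2 patch; 4096 == image_seq_len for 1024x1024
torch.Size([1, 4096, 64])
```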
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FluxLoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import FluxTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import FluxPipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import FluxInpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = FluxInpaintPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + >>> source = load_image(img_url) + >>> mask = load_image(mask_url) + >>> image = pipe(prompt=prompt, image=source, mask_image=mask).images[0] + >>> image.save("flux_inpainting.png") + ``` +""" + + +# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.16, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. 
+ num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class FluxInpaintPipeline(DiffusionPipeline, FluxLoraLoaderMixin): + r""" + The Flux pipeline for image inpainting. + + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Args: + transformer ([`FluxTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`T5EncoderModel`]): + [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically + the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). 
+ tokenizer_2 (`T5TokenizerFast`): + Second Tokenizer of class + [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + text_encoder_2: T5EncoderModel, + tokenizer_2: T5TokenizerFast, + transformer: FluxTransformer2DModel, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, + vae_latent_channels=self.vae.config.latent_channels, + do_normalize=False, + do_binarize=True, + do_convert_grayscale=True, + ) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = 64 + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 512, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = self.tokenizer_2( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_length=False, + return_overflowing_tokens=False, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] + + dtype = self.text_encoder_2.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + ): + device = device or self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = 
len(prompt) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_overflowing_tokens=False, + return_length=False, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) + + # Use pooled output of CLIPTextModel + prompt_embeds = prompt_embeds.pooler_output + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 512, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
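        A minimal usage sketch (assuming `pipe` is an already-instantiated pipeline; the prompts are illustrative):
        `prompt` feeds the pooled CLIP encoder, while `prompt_2` (or `prompt` again, if omitted) feeds the T5 encoder.

        ```py
        >>> prompt_embeds, pooled_prompt_embeds, text_ids = pipe.encode_prompt(
        ...     prompt="a face of a yellow cat",                       # CLIP -> pooled_prompt_embeds
        ...     prompt_2="a high resolution photo of a yellow cat",    # T5 -> prompt_embeds
        ...     num_images_per_prompt=1,
        ...     max_sequence_length=512,
        ... )
        ```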
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # We only use the pooled prompt output from the CLIPTextModel + pooled_prompt_embeds = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + ) + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt_2, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + if self.text_encoder is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype + text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) + + return prompt_embeds, pooled_prompt_embeds, text_ids + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_inpaint.StableDiffusion3InpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + return image_latents + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(num_inference_steps * strength, num_inference_steps) + + t_start = int(max(num_inference_steps - init_timestep, 0)) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt, + prompt_2, + image, + mask_image, + strength, + height, + width, + output_type, + prompt_embeds=None, + pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + max_sequence_length=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible 
by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." 
+ ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids + def _prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids.reshape( + latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + height = height // vae_scale_factor + width = width // vae_scale_factor + + latents = latents.view(batch_size, height, width, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2) + + return latents + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + height = 2 * (int(height) // self.vae_scale_factor) + width = 2 * (int(width) // self.vae_scale_factor) + + shape = (batch_size, num_channels_latents, height, width) + latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) + + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = self.scheduler.scale_noise(image_latents, timestep, noise) + else: + noise = latents.to(device) + latents = noise + + noise = self._pack_latents(noise, batch_size, num_channels_latents, height, width) + image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width) + latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) + return latents, noise, image_latents, latent_image_ids + + def prepare_mask_latents( + self, + mask, + masked_image, + batch_size, + num_channels_latents, + num_images_per_prompt, + height, + width, + dtype, + device, + generator, + ): + height = 2 * (int(height) // self.vae_scale_factor) + width = 2 * (int(width) // self.vae_scale_factor) + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate(mask, size=(height, width)) + mask = mask.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 16: + masked_image_latents = masked_image + else: + masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator) + + masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + masked_image_latents = self._pack_latents( + masked_image_latents, + batch_size, + num_channels_latents, + height, + width, + ) + mask = self._pack_latents( + mask.repeat(1, num_channels_latents, 1, 1), + batch_size, + num_channels_latents, + height, + width, + ) + + return mask, masked_image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.6, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask + are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a + single channel (luminance) before use. 
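                For instance, a hand-built PIL mask could look like the following sketch (the size and the repainted
                region are arbitrary): white marks the area to be regenerated, black is kept.

                ```py
                >>> import numpy as np
                >>> from PIL import Image
                >>> m = np.zeros((1024, 1024), dtype=np.uint8)
                >>> m[256:768, 256:768] = 255          # repaint only the central square
                >>> mask_image = Image.fromarray(m, mode="L")
                ```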
If it's a numpy array or pytorch tensor, it should contain one + color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, + H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, + 1)`, or `(H, W)`. + mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`): + `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask + latents tensor will ge generated by `mask_image`. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + padding_mask_crop (`int`, *optional*, defaults to `None`): + The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to + image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region + with the same aspect ration of the image and contains all masked area, and then expand that area based + on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before + resizing to the original image size for inpainting. This is useful when the masked area is small while + the image is large and contain information irrelevant for inpainting, such as background. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
+            joint_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, defaults to 512): Maximum sequence length to use with the `prompt`.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
+            is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
+            images.
+        """
+
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            image,
+            mask_image,
+            strength,
+            height,
+            width,
+            output_type=output_type,
+            prompt_embeds=prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            padding_mask_crop=padding_mask_crop,
+            max_sequence_length=max_sequence_length,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._joint_attention_kwargs = joint_attention_kwargs
+        self._interrupt = False
+
+        # 2. Preprocess mask and image
+        if padding_mask_crop is not None:
+            crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
+            resize_mode = "fill"
+        else:
+            crops_coords = None
+            resize_mode = "default"
+
+        original_image = image
+        init_image = self.image_processor.preprocess(
+            image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
+        )
+        init_image = init_image.to(dtype=torch.float32)
+
+        # 3.
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + ( + prompt_embeds, + pooled_prompt_embeds, + text_ids, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + # 4.Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) + image_seq_len = (int(height) // self.vae_scale_factor) * (int(width) // self.vae_scale_factor) + mu = calculate_shift( + image_seq_len, + self.scheduler.config.base_image_seq_len, + self.scheduler.config.max_image_seq_len, + self.scheduler.config.base_shift, + self.scheduler.config.max_shift, + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + mu=mu, + ) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + num_channels_transformer = self.transformer.config.in_channels + + latents, noise, image_latents, latent_image_ids = self.prepare_latents( + init_image, + latent_timestep, + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + mask_condition = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is None: + masked_image = init_image * (mask_condition < 0.5) + else: + masked_image = masked_image_latents + + mask, masked_image_latents = self.prepare_mask_latents( + mask_condition, + masked_image, + batch_size, + num_channels_latents, + num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + ) + + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + # 6. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # for 64 channel transformer only. + init_latents_proper = image_latents + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.scale_noise( + init_latents_proper, torch.tensor([noise_timestep]), noise + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return FluxPipelineOutput(images=image) diff --git a/diffusers3/pipelines/flux/pipeline_output.py b/diffusers3/pipelines/flux/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d98fb5bf6000eaae99ded8be09c6e6e62cfd1a --- /dev/null +++ b/diffusers3/pipelines/flux/pipeline_output.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class FluxPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] diff --git a/diffusers3/pipelines/free_init_utils.py b/diffusers3/pipelines/free_init_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1fb67592ca4f8cf6a420bf7acb94f4772d963646 --- /dev/null +++ b/diffusers3/pipelines/free_init_utils.py @@ -0,0 +1,187 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Tuple, Union + +import torch +import torch.fft as fft + +from ..utils.torch_utils import randn_tensor + + +class FreeInitMixin: + r"""Mixin class for FreeInit.""" + + def enable_free_init( + self, + num_iters: int = 3, + use_fast_sampling: bool = False, + method: str = "butterworth", + order: int = 4, + spatial_stop_frequency: float = 0.25, + temporal_stop_frequency: float = 0.25, + ): + """Enables the FreeInit mechanism as in https://arxiv.org/abs/2312.07537. + + This implementation has been adapted from the [official repository](https://github.com/TianxingWu/FreeInit). + + Args: + num_iters (`int`, *optional*, defaults to `3`): + Number of FreeInit noise re-initialization iterations. + use_fast_sampling (`bool`, *optional*, defaults to `False`): + Whether or not to speedup sampling procedure at the cost of probably lower quality results. Enables the + "Coarse-to-Fine Sampling" strategy, as mentioned in the paper, if set to `True`. + method (`str`, *optional*, defaults to `butterworth`): + Must be one of `butterworth`, `ideal` or `gaussian` to use as the filtering method for the FreeInit low + pass filter. + order (`int`, *optional*, defaults to `4`): + Order of the filter used in `butterworth` method. Larger values lead to `ideal` method behaviour + whereas lower values lead to `gaussian` method behaviour. + spatial_stop_frequency (`float`, *optional*, defaults to `0.25`): + Normalized stop frequency for spatial dimensions. Must be between 0 to 1. Referred to as `d_s` in the + original implementation. + temporal_stop_frequency (`float`, *optional*, defaults to `0.25`): + Normalized stop frequency for temporal dimensions. Must be between 0 to 1. Referred to as `d_t` in the + original implementation. 
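+
+        Example (illustrative sketch; it assumes a FreeInit-capable video pipeline, e.g. an AnimateDiff-style
+        pipeline, is already loaded as `pipe`, and the prompt and settings below are placeholders):
+
+        ```py
+        >>> pipe.enable_free_init(num_iters=3, method="butterworth", order=4)
+        >>> frames = pipe(prompt="a panda playing guitar", num_inference_steps=25).frames[0]
+        >>> pipe.disable_free_init()
+        ```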
+ """ + self._free_init_num_iters = num_iters + self._free_init_use_fast_sampling = use_fast_sampling + self._free_init_method = method + self._free_init_order = order + self._free_init_spatial_stop_frequency = spatial_stop_frequency + self._free_init_temporal_stop_frequency = temporal_stop_frequency + + def disable_free_init(self): + """Disables the FreeInit mechanism if enabled.""" + self._free_init_num_iters = None + + @property + def free_init_enabled(self): + return hasattr(self, "_free_init_num_iters") and self._free_init_num_iters is not None + + def _get_free_init_freq_filter( + self, + shape: Tuple[int, ...], + device: Union[str, torch.dtype], + filter_type: str, + order: float, + spatial_stop_frequency: float, + temporal_stop_frequency: float, + ) -> torch.Tensor: + r"""Returns the FreeInit filter based on filter type and other input conditions.""" + + time, height, width = shape[-3], shape[-2], shape[-1] + mask = torch.zeros(shape) + + if spatial_stop_frequency == 0 or temporal_stop_frequency == 0: + return mask + + if filter_type == "butterworth": + + def retrieve_mask(x): + return 1 / (1 + (x / spatial_stop_frequency**2) ** order) + elif filter_type == "gaussian": + + def retrieve_mask(x): + return math.exp(-1 / (2 * spatial_stop_frequency**2) * x) + elif filter_type == "ideal": + + def retrieve_mask(x): + return 1 if x <= spatial_stop_frequency * 2 else 0 + else: + raise NotImplementedError("`filter_type` must be one of gaussian, butterworth or ideal") + + for t in range(time): + for h in range(height): + for w in range(width): + d_square = ( + ((spatial_stop_frequency / temporal_stop_frequency) * (2 * t / time - 1)) ** 2 + + (2 * h / height - 1) ** 2 + + (2 * w / width - 1) ** 2 + ) + mask[..., t, h, w] = retrieve_mask(d_square) + + return mask.to(device) + + def _apply_freq_filter(self, x: torch.Tensor, noise: torch.Tensor, low_pass_filter: torch.Tensor) -> torch.Tensor: + r"""Noise reinitialization.""" + # FFT + x_freq = fft.fftn(x, dim=(-3, -2, -1)) + x_freq = fft.fftshift(x_freq, dim=(-3, -2, -1)) + noise_freq = fft.fftn(noise, dim=(-3, -2, -1)) + noise_freq = fft.fftshift(noise_freq, dim=(-3, -2, -1)) + + # frequency mix + high_pass_filter = 1 - low_pass_filter + x_freq_low = x_freq * low_pass_filter + noise_freq_high = noise_freq * high_pass_filter + x_freq_mixed = x_freq_low + noise_freq_high # mix in freq domain + + # IFFT + x_freq_mixed = fft.ifftshift(x_freq_mixed, dim=(-3, -2, -1)) + x_mixed = fft.ifftn(x_freq_mixed, dim=(-3, -2, -1)).real + + return x_mixed + + def _apply_free_init( + self, + latents: torch.Tensor, + free_init_iteration: int, + num_inference_steps: int, + device: torch.device, + dtype: torch.dtype, + generator: torch.Generator, + ): + if free_init_iteration == 0: + self._free_init_initial_noise = latents.detach().clone() + else: + latent_shape = latents.shape + + free_init_filter_shape = (1, *latent_shape[1:]) + free_init_freq_filter = self._get_free_init_freq_filter( + shape=free_init_filter_shape, + device=device, + filter_type=self._free_init_method, + order=self._free_init_order, + spatial_stop_frequency=self._free_init_spatial_stop_frequency, + temporal_stop_frequency=self._free_init_temporal_stop_frequency, + ) + + current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 + diffuse_timesteps = torch.full((latent_shape[0],), current_diffuse_timestep).long() + + z_t = self.scheduler.add_noise( + original_samples=latents, noise=self._free_init_initial_noise, timesteps=diffuse_timesteps.to(device) + ).to(dtype=torch.float32) + + 
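+            # FreeInit noise re-initialization: the low-frequency content of the re-noised latents z_t is kept,
+            # while the high-frequency content is replaced with fresh Gaussian noise (z_rand) by the filter below.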
z_rand = randn_tensor( + shape=latent_shape, + generator=generator, + device=device, + dtype=torch.float32, + ) + latents = self._apply_freq_filter(z_t, z_rand, low_pass_filter=free_init_freq_filter) + latents = latents.to(dtype) + + # Coarse-to-Fine Sampling for faster inference (can lead to lower quality) + if self._free_init_use_fast_sampling: + num_inference_steps = max( + 1, int(num_inference_steps / self._free_init_num_iters * (free_init_iteration + 1)) + ) + + if num_inference_steps > 0: + self.scheduler.set_timesteps(num_inference_steps, device=device) + + return latents, self.scheduler.timesteps diff --git a/diffusers3/pipelines/free_noise_utils.py b/diffusers3/pipelines/free_noise_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dc0071a494e30d135f80b611def70c3d967ecf90 --- /dev/null +++ b/diffusers3/pipelines/free_noise_utils.py @@ -0,0 +1,596 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ..models.attention import BasicTransformerBlock, FreeNoiseTransformerBlock +from ..models.resnet import Downsample2D, ResnetBlock2D, Upsample2D +from ..models.transformers.transformer_2d import Transformer2DModel +from ..models.unets.unet_motion_model import ( + AnimateDiffTransformer3D, + CrossAttnDownBlockMotion, + DownBlockMotion, + UpBlockMotion, +) +from ..pipelines.pipeline_utils import DiffusionPipeline +from ..utils import logging +from ..utils.torch_utils import randn_tensor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class SplitInferenceModule(nn.Module): + r""" + A wrapper module class that splits inputs along a specified dimension before performing a forward pass. + + This module is useful when you need to perform inference on large tensors in a memory-efficient way by breaking + them into smaller chunks, processing each chunk separately, and then reassembling the results. + + Args: + module (`nn.Module`): + The underlying PyTorch module that will be applied to each chunk of split inputs. + split_size (`int`, defaults to `1`): + The size of each chunk after splitting the input tensor. + split_dim (`int`, defaults to `0`): + The dimension along which the input tensors are split. + input_kwargs_to_split (`List[str]`, defaults to `["hidden_states"]`): + A list of keyword arguments (strings) that represent the input tensors to be split. + + Workflow: + 1. The keyword arguments specified in `input_kwargs_to_split` are split into smaller chunks using + `torch.split()` along the dimension `split_dim` and with a chunk size of `split_size`. + 2. The `module` is invoked once for each split with both the split inputs and any unchanged arguments + that were passed. + 3. The output tensors from each split are concatenated back together along `split_dim` before returning. 
+ + Example: + ```python + >>> import torch + >>> import torch.nn as nn + + >>> model = nn.Linear(1000, 1000) + >>> split_module = SplitInferenceModule(model, split_size=2, split_dim=0, input_kwargs_to_split=["input"]) + + >>> input_tensor = torch.randn(42, 1000) + >>> # Will split the tensor into 21 slices of shape [2, 1000]. + >>> output = split_module(input=input_tensor) + ``` + + It is also possible to nest `SplitInferenceModule` across different split dimensions for more complex + multi-dimensional splitting. + """ + + def __init__( + self, + module: nn.Module, + split_size: int = 1, + split_dim: int = 0, + input_kwargs_to_split: List[str] = ["hidden_states"], + ) -> None: + super().__init__() + + self.module = module + self.split_size = split_size + self.split_dim = split_dim + self.input_kwargs_to_split = set(input_kwargs_to_split) + + def forward(self, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + r"""Forward method for the `SplitInferenceModule`. + + This method processes the input by splitting specified keyword arguments along a given dimension, running the + underlying module on each split, and then concatenating the results. The splitting is controlled by the + `split_size` and `split_dim` parameters specified during initialization. + + Args: + *args (`Any`): + Positional arguments that are passed directly to the `module` without modification. + **kwargs (`Dict[str, torch.Tensor]`): + Keyword arguments passed to the underlying `module`. Only keyword arguments whose names match the + entries in `input_kwargs_to_split` and are of type `torch.Tensor` will be split. The remaining keyword + arguments are passed unchanged. + + Returns: + `Union[torch.Tensor, Tuple[torch.Tensor]]`: + The outputs obtained from `SplitInferenceModule` are the same as if the underlying module was inferred + without it. + - If the underlying module returns a single tensor, the result will be a single concatenated tensor + along the same `split_dim` after processing all splits. + - If the underlying module returns a tuple of tensors, each element of the tuple will be concatenated + along the `split_dim` across all splits, and the final result will be a tuple of concatenated tensors. + """ + split_inputs = {} + + # 1. Split inputs that were specified during initialization and also present in passed kwargs + for key in list(kwargs.keys()): + if key not in self.input_kwargs_to_split or not torch.is_tensor(kwargs[key]): + continue + split_inputs[key] = torch.split(kwargs[key], self.split_size, self.split_dim) + kwargs.pop(key) + + # 2. Invoke forward pass across each split + results = [] + for split_input in zip(*split_inputs.values()): + inputs = dict(zip(split_inputs.keys(), split_input)) + inputs.update(kwargs) + + intermediate_tensor_or_tensor_tuple = self.module(*args, **inputs) + results.append(intermediate_tensor_or_tensor_tuple) + + # 3. Concatenate split restuls to obtain final outputs + if isinstance(results[0], torch.Tensor): + return torch.cat(results, dim=self.split_dim) + elif isinstance(results[0], tuple): + return tuple([torch.cat(x, dim=self.split_dim) for x in zip(*results)]) + else: + raise ValueError( + "In order to use the SplitInferenceModule, it is necessary for the underlying `module` to either return a torch.Tensor or a tuple of torch.Tensor's." 
+ ) + + +class AnimateDiffFreeNoiseMixin: + r"""Mixin class for [FreeNoise](https://arxiv.org/abs/2310.15169).""" + + def _enable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): + r"""Helper function to enable FreeNoise in transformer blocks.""" + + for motion_module in block.motion_modules: + num_transformer_blocks = len(motion_module.transformer_blocks) + + for i in range(num_transformer_blocks): + if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): + motion_module.transformer_blocks[i].set_free_noise_properties( + self._free_noise_context_length, + self._free_noise_context_stride, + self._free_noise_weighting_scheme, + ) + else: + assert isinstance(motion_module.transformer_blocks[i], BasicTransformerBlock) + basic_transfomer_block = motion_module.transformer_blocks[i] + + motion_module.transformer_blocks[i] = FreeNoiseTransformerBlock( + dim=basic_transfomer_block.dim, + num_attention_heads=basic_transfomer_block.num_attention_heads, + attention_head_dim=basic_transfomer_block.attention_head_dim, + dropout=basic_transfomer_block.dropout, + cross_attention_dim=basic_transfomer_block.cross_attention_dim, + activation_fn=basic_transfomer_block.activation_fn, + attention_bias=basic_transfomer_block.attention_bias, + only_cross_attention=basic_transfomer_block.only_cross_attention, + double_self_attention=basic_transfomer_block.double_self_attention, + positional_embeddings=basic_transfomer_block.positional_embeddings, + num_positional_embeddings=basic_transfomer_block.num_positional_embeddings, + context_length=self._free_noise_context_length, + context_stride=self._free_noise_context_stride, + weighting_scheme=self._free_noise_weighting_scheme, + ).to(device=self.device, dtype=self.dtype) + + motion_module.transformer_blocks[i].load_state_dict( + basic_transfomer_block.state_dict(), strict=True + ) + motion_module.transformer_blocks[i].set_chunk_feed_forward( + basic_transfomer_block._chunk_size, basic_transfomer_block._chunk_dim + ) + + def _disable_free_noise_in_block(self, block: Union[CrossAttnDownBlockMotion, DownBlockMotion, UpBlockMotion]): + r"""Helper function to disable FreeNoise in transformer blocks.""" + + for motion_module in block.motion_modules: + num_transformer_blocks = len(motion_module.transformer_blocks) + + for i in range(num_transformer_blocks): + if isinstance(motion_module.transformer_blocks[i], FreeNoiseTransformerBlock): + free_noise_transfomer_block = motion_module.transformer_blocks[i] + + motion_module.transformer_blocks[i] = BasicTransformerBlock( + dim=free_noise_transfomer_block.dim, + num_attention_heads=free_noise_transfomer_block.num_attention_heads, + attention_head_dim=free_noise_transfomer_block.attention_head_dim, + dropout=free_noise_transfomer_block.dropout, + cross_attention_dim=free_noise_transfomer_block.cross_attention_dim, + activation_fn=free_noise_transfomer_block.activation_fn, + attention_bias=free_noise_transfomer_block.attention_bias, + only_cross_attention=free_noise_transfomer_block.only_cross_attention, + double_self_attention=free_noise_transfomer_block.double_self_attention, + positional_embeddings=free_noise_transfomer_block.positional_embeddings, + num_positional_embeddings=free_noise_transfomer_block.num_positional_embeddings, + ).to(device=self.device, dtype=self.dtype) + + motion_module.transformer_blocks[i].load_state_dict( + free_noise_transfomer_block.state_dict(), strict=True + ) + motion_module.transformer_blocks[i].set_chunk_feed_forward( + 
free_noise_transfomer_block._chunk_size, free_noise_transfomer_block._chunk_dim + ) + + def _check_inputs_free_noise( + self, + prompt, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + num_frames, + ) -> None: + if not isinstance(prompt, (str, dict)): + raise ValueError(f"Expected `prompt` to have type `str` or `dict` but found {type(prompt)=}") + + if negative_prompt is not None: + if not isinstance(negative_prompt, (str, dict)): + raise ValueError( + f"Expected `negative_prompt` to have type `str` or `dict` but found {type(negative_prompt)=}" + ) + + if prompt_embeds is not None or negative_prompt_embeds is not None: + raise ValueError("`prompt_embeds` and `negative_prompt_embeds` is not supported in FreeNoise yet.") + + frame_indices = [isinstance(x, int) for x in prompt.keys()] + frame_prompts = [isinstance(x, str) for x in prompt.values()] + min_frame = min(list(prompt.keys())) + max_frame = max(list(prompt.keys())) + + if not all(frame_indices): + raise ValueError("Expected integer keys in `prompt` dict for FreeNoise.") + if not all(frame_prompts): + raise ValueError("Expected str values in `prompt` dict for FreeNoise.") + if min_frame != 0: + raise ValueError("The minimum frame index in `prompt` dict must be 0 as a starting prompt is necessary.") + if max_frame >= num_frames: + raise ValueError( + f"The maximum frame index in `prompt` dict must be lesser than {num_frames=} and follow 0-based indexing." + ) + + def _encode_prompt_free_noise( + self, + prompt: Union[str, Dict[int, str]], + num_frames: int, + device: torch.device, + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + negative_prompt: Optional[Union[str, Dict[int, str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ) -> torch.Tensor: + if negative_prompt is None: + negative_prompt = "" + + # Ensure that we have a dictionary of prompts + if isinstance(prompt, str): + prompt = {0: prompt} + if isinstance(negative_prompt, str): + negative_prompt = {0: negative_prompt} + + self._check_inputs_free_noise(prompt, negative_prompt, prompt_embeds, negative_prompt_embeds, num_frames) + + # Sort the prompts based on frame indices + prompt = dict(sorted(prompt.items())) + negative_prompt = dict(sorted(negative_prompt.items())) + + # Ensure that we have a prompt for the last frame index + prompt[num_frames - 1] = prompt[list(prompt.keys())[-1]] + negative_prompt[num_frames - 1] = negative_prompt[list(negative_prompt.keys())[-1]] + + frame_indices = list(prompt.keys()) + frame_prompts = list(prompt.values()) + frame_negative_indices = list(negative_prompt.keys()) + frame_negative_prompts = list(negative_prompt.values()) + + # Generate and interpolate positive prompts + prompt_embeds, _ = self.encode_prompt( + prompt=frame_prompts, + device=device, + num_images_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=False, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=clip_skip, + ) + + shape = (num_frames, *prompt_embeds.shape[1:]) + prompt_interpolation_embeds = prompt_embeds.new_zeros(shape) + + for i in range(len(frame_indices) - 1): + start_frame = frame_indices[i] + end_frame = frame_indices[i + 1] + start_tensor = prompt_embeds[i].unsqueeze(0) + end_tensor = prompt_embeds[i + 1].unsqueeze(0) + + prompt_interpolation_embeds[start_frame : end_frame + 1] = 
self._free_noise_prompt_interpolation_callback(
+                start_frame, end_frame, start_tensor, end_tensor
+            )
+
+        # Generate and interpolate negative prompts
+        negative_prompt_embeds = None
+        negative_prompt_interpolation_embeds = None
+
+        if do_classifier_free_guidance:
+            _, negative_prompt_embeds = self.encode_prompt(
+                prompt=[""] * len(frame_negative_prompts),
+                device=device,
+                num_images_per_prompt=num_videos_per_prompt,
+                do_classifier_free_guidance=True,
+                negative_prompt=frame_negative_prompts,
+                prompt_embeds=None,
+                negative_prompt_embeds=None,
+                lora_scale=lora_scale,
+                clip_skip=clip_skip,
+            )
+
+            negative_prompt_interpolation_embeds = negative_prompt_embeds.new_zeros(shape)
+
+            for i in range(len(frame_negative_indices) - 1):
+                start_frame = frame_negative_indices[i]
+                end_frame = frame_negative_indices[i + 1]
+                start_tensor = negative_prompt_embeds[i].unsqueeze(0)
+                end_tensor = negative_prompt_embeds[i + 1].unsqueeze(0)
+
+                negative_prompt_interpolation_embeds[
+                    start_frame : end_frame + 1
+                ] = self._free_noise_prompt_interpolation_callback(start_frame, end_frame, start_tensor, end_tensor)
+
+        prompt_embeds = prompt_interpolation_embeds
+        negative_prompt_embeds = negative_prompt_interpolation_embeds
+
+        if do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        return prompt_embeds, negative_prompt_embeds
+
+    def _prepare_latents_free_noise(
+        self,
+        batch_size: int,
+        num_channels_latents: int,
+        num_frames: int,
+        height: int,
+        width: int,
+        dtype: torch.dtype,
+        device: torch.device,
+        generator: Optional[torch.Generator] = None,
+        latents: Optional[torch.Tensor] = None,
+    ):
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        context_num_frames = (
+            self._free_noise_context_length if self._free_noise_noise_type == "repeat_context" else num_frames
+        )
+
+        shape = (
+            batch_size,
+            num_channels_latents,
+            context_num_frames,
+            height // self.vae_scale_factor,
+            width // self.vae_scale_factor,
+        )
+
+        if latents is None:
+            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            if self._free_noise_noise_type == "random":
+                return latents
+        else:
+            if latents.size(2) == num_frames:
+                return latents
+            elif latents.size(2) != self._free_noise_context_length:
+                raise ValueError(
+                    f"You have passed `latents` as a parameter to FreeNoise. 
The expected number of frames is either {num_frames} or {self._free_noise_context_length}, but found {latents.size(2)}" + ) + latents = latents.to(device) + + if self._free_noise_noise_type == "shuffle_context": + for i in range(self._free_noise_context_length, num_frames, self._free_noise_context_stride): + # ensure window is within bounds + window_start = max(0, i - self._free_noise_context_length) + window_end = min(num_frames, window_start + self._free_noise_context_stride) + window_length = window_end - window_start + + if window_length == 0: + break + + indices = torch.LongTensor(list(range(window_start, window_end))) + shuffled_indices = indices[torch.randperm(window_length, generator=generator)] + + current_start = i + current_end = min(num_frames, current_start + window_length) + if current_end == current_start + window_length: + # batch of frames perfectly fits the window + latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] + else: + # handle the case where the last batch of frames does not fit perfectly with the window + prefix_length = current_end - current_start + shuffled_indices = shuffled_indices[:prefix_length] + latents[:, :, current_start:current_end] = latents[:, :, shuffled_indices] + + elif self._free_noise_noise_type == "repeat_context": + num_repeats = (num_frames + self._free_noise_context_length - 1) // self._free_noise_context_length + latents = torch.cat([latents] * num_repeats, dim=2) + + latents = latents[:, :, :num_frames] + return latents + + def _lerp( + self, start_index: int, end_index: int, start_tensor: torch.Tensor, end_tensor: torch.Tensor + ) -> torch.Tensor: + num_indices = end_index - start_index + 1 + interpolated_tensors = [] + + for i in range(num_indices): + alpha = i / (num_indices - 1) + interpolated_tensor = (1 - alpha) * start_tensor + alpha * end_tensor + interpolated_tensors.append(interpolated_tensor) + + interpolated_tensors = torch.cat(interpolated_tensors) + return interpolated_tensors + + def enable_free_noise( + self, + context_length: Optional[int] = 16, + context_stride: int = 4, + weighting_scheme: str = "pyramid", + noise_type: str = "shuffle_context", + prompt_interpolation_callback: Optional[ + Callable[[DiffusionPipeline, int, int, torch.Tensor, torch.Tensor], torch.Tensor] + ] = None, + ) -> None: + r""" + Enable long video generation using FreeNoise. + + Args: + context_length (`int`, defaults to `16`, *optional*): + The number of video frames to process at once. It's recommended to set this to the maximum frames the + Motion Adapter was trained with (usually 16/24/32). If `None`, the default value from the motion + adapter config is used. + context_stride (`int`, *optional*): + Long videos are generated by processing many frames. FreeNoise processes these frames in sliding + windows of size `context_length`. Context stride allows you to specify how many frames to skip between + each window. For example, a context length of 16 and context stride of 4 would process 24 frames as: + [0, 15], [4, 19], [8, 23] (0-based indexing) + weighting_scheme (`str`, defaults to `pyramid`): + Weighting scheme for averaging latents after accumulation in FreeNoise blocks. The following weighting + schemes are supported currently: + - "flat" + Performs weighting averaging with a flat weight pattern: [1, 1, 1, 1, 1]. + - "pyramid" + Performs weighted averaging with a pyramid like weight pattern: [1, 2, 3, 2, 1]. 
+                    - "delayed_reverse_sawtooth"
+                        Performs weighted averaging with low weights for earlier frames and high-to-low weights for
+                        later frames: [0.01, 0.01, 3, 2, 1].
+            noise_type (`str`, defaults to "shuffle_context"):
+                Must be one of ["shuffle_context", "repeat_context", "random"].
+                    - "shuffle_context"
+                        Shuffles a fixed batch of `context_length` latents to create a final latent of size
+                        `num_frames`. This is usually the best setting for most generation scenarios. However, there
+                        might be noticeable repetition in the kinds of motion/animation generated.
+                    - "repeat_context"
+                        Repeats a fixed batch of `context_length` latents to create a final latent of size
+                        `num_frames`.
+                    - "random"
+                        The final latents are random without any repetition.
+        """
+
+        allowed_weighting_scheme = ["flat", "pyramid", "delayed_reverse_sawtooth"]
+        allowed_noise_type = ["shuffle_context", "repeat_context", "random"]
+
+        if context_length > self.motion_adapter.config.motion_max_seq_length:
+            logger.warning(
+                f"You have set {context_length=} which is greater than {self.motion_adapter.config.motion_max_seq_length=}. This can lead to bad generation results."
+            )
+        if weighting_scheme not in allowed_weighting_scheme:
+            raise ValueError(
+                f"The parameter `weighting_scheme` must be one of {allowed_weighting_scheme}, but got {weighting_scheme=}"
+            )
+        if noise_type not in allowed_noise_type:
+            raise ValueError(f"The parameter `noise_type` must be one of {allowed_noise_type}, but got {noise_type=}")
+
+        self._free_noise_context_length = context_length or self.motion_adapter.config.motion_max_seq_length
+        self._free_noise_context_stride = context_stride
+        self._free_noise_weighting_scheme = weighting_scheme
+        self._free_noise_noise_type = noise_type
+        self._free_noise_prompt_interpolation_callback = prompt_interpolation_callback or self._lerp
+
+        if hasattr(self.unet.mid_block, "motion_modules"):
+            blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks]
+        else:
+            blocks = [*self.unet.down_blocks, *self.unet.up_blocks]
+
+        for block in blocks:
+            self._enable_free_noise_in_block(block)
+
+    def disable_free_noise(self) -> None:
+        r"""Disable the FreeNoise sampling mechanism."""
+        self._free_noise_context_length = None
+
+        if hasattr(self.unet.mid_block, "motion_modules"):
+            blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks]
+        else:
+            blocks = [*self.unet.down_blocks, *self.unet.up_blocks]
+
+        for block in blocks:
+            self._disable_free_noise_in_block(block)
+
+    def _enable_split_inference_motion_modules_(
+        self, motion_modules: List[AnimateDiffTransformer3D], spatial_split_size: int
+    ) -> None:
+        for motion_module in motion_modules:
+            motion_module.proj_in = SplitInferenceModule(motion_module.proj_in, spatial_split_size, 0, ["input"])
+
+            for i in range(len(motion_module.transformer_blocks)):
+                motion_module.transformer_blocks[i] = SplitInferenceModule(
+                    motion_module.transformer_blocks[i],
+                    spatial_split_size,
+                    0,
+                    ["hidden_states", "encoder_hidden_states"],
+                )
+
+            motion_module.proj_out = SplitInferenceModule(motion_module.proj_out, spatial_split_size, 0, ["input"])
+
+    def _enable_split_inference_attentions_(
+        self, attentions: List[Transformer2DModel], temporal_split_size: int
+    ) -> None:
+        for i in range(len(attentions)):
+            attentions[i] = SplitInferenceModule(
+                attentions[i], temporal_split_size, 0, ["hidden_states", "encoder_hidden_states"]
+            )
+
+    def
_enable_split_inference_resnets_(self, resnets: List[ResnetBlock2D], temporal_split_size: int) -> None: + for i in range(len(resnets)): + resnets[i] = SplitInferenceModule(resnets[i], temporal_split_size, 0, ["input_tensor", "temb"]) + + def _enable_split_inference_samplers_( + self, samplers: Union[List[Downsample2D], List[Upsample2D]], temporal_split_size: int + ) -> None: + for i in range(len(samplers)): + samplers[i] = SplitInferenceModule(samplers[i], temporal_split_size, 0, ["hidden_states"]) + + def enable_free_noise_split_inference(self, spatial_split_size: int = 256, temporal_split_size: int = 16) -> None: + r""" + Enable FreeNoise memory optimizations by utilizing + [`~diffusers.pipelines.free_noise_utils.SplitInferenceModule`] across different intermediate modeling blocks. + + Args: + spatial_split_size (`int`, defaults to `256`): + The split size across spatial dimensions for internal blocks. This is used in facilitating split + inference across the effective batch dimension (`[B x H x W, F, C]`) of intermediate tensors in motion + modeling blocks. + temporal_split_size (`int`, defaults to `16`): + The split size across temporal dimensions for internal blocks. This is used in facilitating split + inference across the effective batch dimension (`[B x F, H x W, C]`) of intermediate tensors in spatial + attention, resnets, downsampling and upsampling blocks. + """ + # TODO(aryan): Discuss on what's the best way to provide more control to users + blocks = [*self.unet.down_blocks, self.unet.mid_block, *self.unet.up_blocks] + for block in blocks: + if getattr(block, "motion_modules", None) is not None: + self._enable_split_inference_motion_modules_(block.motion_modules, spatial_split_size) + if getattr(block, "attentions", None) is not None: + self._enable_split_inference_attentions_(block.attentions, temporal_split_size) + if getattr(block, "resnets", None) is not None: + self._enable_split_inference_resnets_(block.resnets, temporal_split_size) + if getattr(block, "downsamplers", None) is not None: + self._enable_split_inference_samplers_(block.downsamplers, temporal_split_size) + if getattr(block, "upsamplers", None) is not None: + self._enable_split_inference_samplers_(block.upsamplers, temporal_split_size) + + @property + def free_noise_enabled(self): + return hasattr(self, "_free_noise_context_length") and self._free_noise_context_length is not None diff --git a/diffusers3/pipelines/hunyuandit/__init__.py b/diffusers3/pipelines/hunyuandit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8337399106f0585d15fa0a35f607baa2c04b203b --- /dev/null +++ b/diffusers3/pipelines/hunyuandit/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_hunyuandit"] = ["HunyuanDiTPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: 
+ from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_hunyuandit import HunyuanDiTPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/hunyuandit/pipeline_hunyuandit.py b/diffusers3/pipelines/hunyuandit/pipeline_hunyuandit.py new file mode 100644 index 0000000000000000000000000000000000000000..86089abc07b4529a93ca48a5434f78ed26950539 --- /dev/null +++ b/diffusers3/pipelines/hunyuandit/pipeline_hunyuandit.py @@ -0,0 +1,900 @@ +# Copyright 2024 HunyuanDiT Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel + +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, HunyuanDiT2DModel +from ...models.embeddings import get_2d_rotary_pos_embed +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import DDPMScheduler +from ...utils import ( + is_torch_xla_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import HunyuanDiTPipeline + + >>> pipe = HunyuanDiTPipeline.from_pretrained( + ... "Tencent-Hunyuan/HunyuanDiT-Diffusers", torch_dtype=torch.float16 + ... 
) + >>> pipe.to("cuda") + + >>> # You may also use English prompt as HunyuanDiT supports both English and Chinese + >>> # prompt = "An astronaut riding a horse" + >>> prompt = "ไธ€ไธชๅฎ‡่ˆชๅ‘˜ๅœจ้ช‘้ฉฌ" + >>> image = pipe(prompt).images[0] + ``` +""" + +STANDARD_RATIO = np.array( + [ + 1.0, # 1:1 + 4.0 / 3.0, # 4:3 + 3.0 / 4.0, # 3:4 + 16.0 / 9.0, # 16:9 + 9.0 / 16.0, # 9:16 + ] +) +STANDARD_SHAPE = [ + [(1024, 1024), (1280, 1280)], # 1:1 + [(1024, 768), (1152, 864), (1280, 960)], # 4:3 + [(768, 1024), (864, 1152), (960, 1280)], # 3:4 + [(1280, 768)], # 16:9 + [(768, 1280)], # 9:16 +] +STANDARD_AREA = [np.array([w * h for w, h in shapes]) for shapes in STANDARD_SHAPE] +SUPPORTED_SHAPE = [ + (1024, 1024), + (1280, 1280), # 1:1 + (1024, 768), + (1152, 864), + (1280, 960), # 4:3 + (768, 1024), + (864, 1152), + (960, 1280), # 3:4 + (1280, 768), # 16:9 + (768, 1280), # 9:16 +] + + +def map_to_standard_shapes(target_width, target_height): + target_ratio = target_width / target_height + closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) + closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) + width, height = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] + return width, height + + +def get_resize_crop_region_for_grid(src, tgt_size): + th = tw = tgt_size + h, w = src + + r = h / w + + # resize + if r > 1: + resize_height = th + resize_width = int(round(th / h * w)) + else: + resize_width = tw + resize_height = int(round(tw / w * h)) + + crop_top = int(round((th - resize_height) / 2.0)) + crop_left = int(round((tw - resize_width) / 2.0)) + + return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class HunyuanDiTPipeline(DiffusionPipeline): + r""" + Pipeline for English/Chinese-to-image generation using HunyuanDiT. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + HunyuanDiT uses two text encoders: [mT5](https://huggingface.co/google/mt5-base) and [bilingual CLIP](fine-tuned by + ourselves) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. We use + `sdxl-vae-fp16-fix`. + text_encoder (Optional[`~transformers.BertModel`, `~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + HunyuanDiT uses a fine-tuned [bilingual CLIP]. 
+ tokenizer (Optional[`~transformers.BertTokenizer`, `~transformers.CLIPTokenizer`]): + A `BertTokenizer` or `CLIPTokenizer` to tokenize text. + transformer ([`HunyuanDiT2DModel`]): + The HunyuanDiT model designed by Tencent Hunyuan. + text_encoder_2 (`T5EncoderModel`): + The mT5 embedder. Specifically, it is 't5-v1_1-xxl'. + tokenizer_2 (`MT5Tokenizer`): + The tokenizer for the mT5 embedder. + scheduler ([`DDPMScheduler`]): + A scheduler to be used in combination with HunyuanDiT to denoise the encoded image latents. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [ + "safety_checker", + "feature_extractor", + "text_encoder_2", + "tokenizer_2", + "text_encoder", + "tokenizer", + ] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "prompt_embeds_2", + "negative_prompt_embeds_2", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: BertModel, + tokenizer: BertTokenizer, + transformer: HunyuanDiT2DModel, + scheduler: DDPMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + text_encoder_2=T5EncoderModel, + tokenizer_2=MT5Tokenizer, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + text_encoder_2=text_encoder_2, + ) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + def encode_prompt( + self, + prompt: str, + device: torch.device = None, + dtype: torch.dtype = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + max_sequence_length: Optional[int] = None, + text_encoder_index: int = 0, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + dtype (`torch.dtype`): + torch dtype + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds` is passed directly. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. + max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. + text_encoder_index (`int`, *optional*): + Index of the text encoder to use. `0` for clip and `1` for T5. 
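+
+        Example (illustrative sketch; assumes a loaded `HunyuanDiTPipeline` instance bound to `pipe`, with the prompt
+        below used only as a placeholder):
+
+        ```py
+        >>> # text_encoder_index=0 -> bilingual CLIP embeddings, text_encoder_index=1 -> mT5 embeddings
+        >>> embeds, neg_embeds, mask, neg_mask = pipe.encode_prompt("An astronaut riding a horse", text_encoder_index=0)
+        >>> embeds_2, neg_embeds_2, mask_2, neg_mask_2 = pipe.encode_prompt("An astronaut riding a horse", text_encoder_index=1)
+        ```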
+ """ + if dtype is None: + if self.text_encoder_2 is not None: + dtype = self.text_encoder_2.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + if device is None: + device = self._execution_device + + tokenizers = [self.tokenizer, self.tokenizer_2] + text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = tokenizers[text_encoder_index] + text_encoder = text_encoders[text_encoder_index] + + if max_sequence_length is None: + if text_encoder_index == 0: + max_length = 77 + if text_encoder_index == 1: + max_length = 256 + else: + max_length = max_sequence_length + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = text_encoder( + text_input_ids.to(device), + attention_mask=prompt_attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_attention_mask = uncond_input.attention_mask.to(device) + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + attention_mask=negative_prompt_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + prompt_embeds_2=None, + negative_prompt_embeds_2=None, + prompt_attention_mask_2=None, + negative_prompt_attention_mask_2=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is None and prompt_embeds_2 is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: + raise ValueError("Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: + raise ValueError( + "Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`." + ) + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: + if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: + raise ValueError( + "`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but" + f" got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2`" + f" {negative_prompt_embeds_2.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_2: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_2: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + prompt_attention_mask_2: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask_2: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = (1024, 1024), + target_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + use_resolution_binning: bool = True, + ): + r""" + The call function to the pipeline for generation with HunyuanDiT. 
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+            height (`int`, *optional*):
+                The height in pixels of the generated image. If not provided, defaults to
+                `self.default_sample_size * self.vae_scale_factor`.
+            width (`int`, *optional*):
+                The width in pixels of the generated image. If not provided, defaults to
+                `self.default_sample_size * self.vae_scale_factor`.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 5.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            prompt_embeds_2 (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            negative_prompt_embeds_2 (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            prompt_attention_mask (`torch.Tensor`, *optional*):
+                Attention mask for the prompt. Required when `prompt_embeds` is passed directly.
+            prompt_attention_mask_2 (`torch.Tensor`, *optional*):
+                Attention mask for the prompt. Required when `prompt_embeds_2` is passed directly.
+            negative_prompt_attention_mask (`torch.Tensor`, *optional*):
+                Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly.
+            negative_prompt_attention_mask_2 (`torch.Tensor`, *optional*):
+                Attention mask for the negative prompt. Required when `negative_prompt_embeds_2` is passed directly.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+ callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A callback function or a list of callback functions to be called at the end of each denoising step. + callback_on_step_end_tensor_inputs (`List[str]`, *optional*): + A list of tensor inputs that should be passed to the callback function. If not defined, all tensor + inputs will be passed. + guidance_rescale (`float`, *optional*, defaults to 0.0): + Rescale the noise_cfg according to `guidance_rescale`. Based on findings of [Common Diffusion Noise + Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + original_size (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): + The original size of the image. Used to calculate the time ids. + target_size (`Tuple[int, int]`, *optional*): + The target size of the image. Used to calculate the time ids. + crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`): + The top left coordinates of the crop. Used to calculate the time ids. + use_resolution_binning (`bool`, *optional*, defaults to `True`): + Whether to use resolution binning or not. If `True`, the input resolution will be mapped to the closest + standard resolution. Supported resolutions are 1024x1024, 1280x1280, 1024x768, 1152x864, 1280x960, + 768x1024, 864x1152, 960x1280, 1280x768, and 768x1280. It is recommended to set this to `True`. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. default height and width + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + height = int((height // 16) * 16) + width = int((width // 16) * 16) + + if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: + width, height = map_to_standard_shapes(width, height) + height = int(height) + width = int(width) + logger.warning(f"Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}") + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. 
Encode input prompt + + ( + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt=prompt, + device=device, + dtype=self.transformer.dtype, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + max_sequence_length=77, + text_encoder_index=0, + ) + ( + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + ) = self.encode_prompt( + prompt=prompt, + device=device, + dtype=self.transformer.dtype, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds_2, + negative_prompt_embeds=negative_prompt_embeds_2, + prompt_attention_mask=prompt_attention_mask_2, + negative_prompt_attention_mask=negative_prompt_attention_mask_2, + max_sequence_length=256, + text_encoder_index=1, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7 create image_rotary_emb, style embedding & time ids + grid_height = height // 8 // self.transformer.config.patch_size + grid_width = width // 8 // self.transformer.config.patch_size + base_size = 512 // 8 // self.transformer.config.patch_size + grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) + image_rotary_emb = get_2d_rotary_pos_embed( + self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width) + ) + + style = torch.tensor([0], device=device) + + target_size = target_size or (height, width) + add_time_ids = list(original_size + target_size + crops_coords_top_left) + add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) + prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) + prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) + add_time_ids = torch.cat([add_time_ids] * 2, dim=0) + style = torch.cat([style] * 2, dim=0) + + prompt_embeds = prompt_embeds.to(device=device) + prompt_attention_mask = prompt_attention_mask.to(device=device) + prompt_embeds_2 = prompt_embeds_2.to(device=device) + prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) + add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat( + batch_size * num_images_per_prompt, 1 + ) + style = style.to(device=device).repeat(batch_size * num_images_per_prompt) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input + t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( + dtype=latent_model_input.dtype + ) + + # predict the noise residual + noise_pred = self.transformer( + latent_model_input, + t_expand, + encoder_hidden_states=prompt_embeds, + text_embedding_mask=prompt_attention_mask, + encoder_hidden_states_t5=prompt_embeds_2, + text_embedding_mask_t5=prompt_attention_mask_2, + image_meta_size=add_time_ids, + style=style, + image_rotary_emb=image_rotary_emb, + return_dict=False, + )[0] + + noise_pred, _ = noise_pred.chunk(2, dim=1) + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + prompt_embeds_2 = callback_outputs.pop("prompt_embeds_2", prompt_embeds_2) + negative_prompt_embeds_2 = callback_outputs.pop( + "negative_prompt_embeds_2", negative_prompt_embeds_2 + ) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/i2vgen_xl/__init__.py b/diffusers3/pipelines/i2vgen_xl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b24a7e4cee7fb843b9424469a05f511adfa758de --- /dev/null +++ b/diffusers3/pipelines/i2vgen_xl/__init__.py @@ -0,0 +1,46 @@ 
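+# Note: this module wires up diffusers-style lazy imports. `_LazyModule` defers importing
+# `pipeline_i2vgen_xl` until an attribute such as `I2VGenXLPipeline` is first accessed
+# (e.g. `from diffusers3.pipelines.i2vgen_xl import I2VGenXLPipeline`), and dummy objects
+# are substituted when torch/transformers are not available.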
+from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_i2vgen_xl"] = ["I2VGenXLPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_i2vgen_xl import I2VGenXLPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py b/diffusers3/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..f528b60e6ed79287bbc4bed658ab637c852c781f --- /dev/null +++ b/diffusers3/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py @@ -0,0 +1,783 @@ +# Copyright 2024 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...models import AutoencoderKL +from ...models.unets.unet_i2vgen_xl import I2VGenXLUNet +from ...schedulers import DDIMScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import I2VGenXLPipeline + >>> from diffusers.utils import export_to_gif, load_image + + >>> pipeline = I2VGenXLPipeline.from_pretrained( + ... "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16" + ... ) + >>> pipeline.enable_model_cpu_offload() + + >>> image_url = ( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/i2vgen_xl_images/img_0009.png" + ... 
) + >>> image = load_image(image_url).convert("RGB") + + >>> prompt = "Papers were floating in the air on a table in the library" + >>> negative_prompt = "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms" + >>> generator = torch.manual_seed(8888) + + >>> frames = pipeline( + ... prompt=prompt, + ... image=image, + ... num_inference_steps=50, + ... negative_prompt=negative_prompt, + ... guidance_scale=9.0, + ... generator=generator, + ... ).frames[0] + >>> video_path = export_to_gif(frames, "i2v.gif") + ``` +""" + + +@dataclass +class I2VGenXLPipelineOutput(BaseOutput): + r""" + Output class for image-to-video pipeline. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing + denoised + PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)` + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] + + +class I2VGenXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, +): + r""" + Pipeline for image-to-video generation as proposed in [I2VGenXL](https://i2vgen-xl.github.io/). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`I2VGenXLUNet`]): + A [`I2VGenXLUNet`] to denoise the encoded video latents. + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + image_encoder: CLIPVisionModelWithProjection, + feature_extractor: CLIPImageProcessor, + unet: I2VGenXLUNet, + scheduler: DDIMScheduler, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + # `do_resize=False` as we do custom resizing. + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor, do_resize=False) + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
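+    # Illustrative note: the conditional and unconditional predictions are later combined in
+    # `__call__` as
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    # so the property below skips the extra unconditional pass whenever `guidance_scale <= 1`.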
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + def encode_prompt( + self, + prompt, + device, + num_videos_per_prompt, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_videos_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. 
The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if self.do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + # Apply clip_skip to negative prompt embeds + if clip_skip is None: + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + else: + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + negative_prompt_embeds = negative_prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ negative_prompt_embeds = self.text_encoder.text_model.final_layer_norm(negative_prompt_embeds) + + if self.do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds, negative_prompt_embeds + + def _encode_image(self, image, device, num_videos_per_prompt): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.video_processor.pil_to_numpy(image) + image = self.video_processor.numpy_to_pt(image) + + # Normalize the image with CLIP training stats. + image = self.feature_extractor( + images=image, + do_normalize=True, + do_center_crop=False, + do_resize=False, + do_rescale=False, + return_tensors="pt", + ).pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + image_embeddings = image_embeddings.unsqueeze(1) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + if self.do_classifier_free_guidance: + negative_image_embeddings = torch.zeros_like(image_embeddings) + image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) + + return image_embeddings + + def decode_latents(self, latents, decode_chunk_size=None): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + if decode_chunk_size is not None: + frames = [] + for i in range(0, latents.shape[0], decode_chunk_size): + frame = self.vae.decode(latents[i : i + decode_chunk_size]).sample + frames.append(frame) + image = torch.cat(frames, dim=0) + else: + image = self.vae.decode(latents).sample + + decode_shape = (batch_size, num_frames, -1) + image.shape[2:] + video = image[None, :].reshape(decode_shape).permute(0, 2, 1, 3, 4) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + def prepare_image_latents( + self, + image, + device, + num_frames, + num_videos_per_prompt, + ): + image = image.to(device=device) + image_latents = self.vae.encode(image).latent_dist.sample() + image_latents = image_latents * self.vae.config.scaling_factor + + # Add frames dimension to image latents + image_latents = image_latents.unsqueeze(2) + + # Append a position mask for each subsequent frame + # after the intial image latent frame + frame_position_mask = [] + for frame_idx in range(num_frames - 1): + scale = (frame_idx + 1) / (num_frames - 1) + frame_position_mask.append(torch.ones_like(image_latents[:, :, :1]) * scale) + if frame_position_mask: + frame_position_mask = torch.cat(frame_position_mask, dim=2) + image_latents = torch.cat([image_latents, frame_position_mask], dim=2) + + # duplicate image_latents for each generation per prompt, using mps friendly method + image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1, 1) + + if self.do_classifier_free_guidance: + image_latents = torch.cat([image_latents] * 2) + + return image_latents + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + height: Optional[int] = 704, + width: Optional[int] = 1280, + target_fps: Optional[int] = 16, + num_frames: int = 16, + num_inference_steps: int = 50, + guidance_scale: float = 9.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + eta: float = 0.0, + num_videos_per_prompt: Optional[int] = 1, + decode_chunk_size: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = 1, + ): + r""" + The call function to the pipeline for image-to-video generation with [`I2VGenXLPipeline`]. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. 
+            image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.Tensor`):
+                Image or images to guide image generation. If you provide a tensor, it needs to be compatible with
+                [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json).
+            height (`int`, *optional*, defaults to 704):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to 1280):
+                The width in pixels of the generated image.
+            target_fps (`int`, *optional*, defaults to 16):
+                Frames per second. The rate at which the generated images shall be exported to a video after
+                generation. This is also used as a "micro-condition" during generation.
+            num_frames (`int`, *optional*, defaults to 16):
+                The number of video frames to generate.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps.
+            guidance_scale (`float`, *optional*, defaults to 9.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+            decode_chunk_size (`int`, *optional*, defaults to 1):
+                The number of frames to decode at a time. The higher the chunk size, the higher the temporal
+                consistency between frames, but also the higher the memory consumption. By default, frames are
+                decoded one at a time to keep memory usage low; increase `decode_chunk_size` if memory allows.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, image, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + self._guidance_scale = guidance_scale + + # 3.1 Encode input text prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 3.2 Encode image prompt + # 3.2.1 Image encodings. + # https://github.com/ali-vilab/i2vgen-xl/blob/2539c9262ff8a2a22fa9daecbfd13f0a2dbc32d0/tools/inferences/inference_i2vgen_entrance.py#L114 + cropped_image = _center_crop_wide(image, (width, width)) + cropped_image = _resize_bilinear( + cropped_image, (self.feature_extractor.crop_size["width"], self.feature_extractor.crop_size["height"]) + ) + image_embeddings = self._encode_image(cropped_image, device, num_videos_per_prompt) + + # 3.2.2 Image latents. + resized_image = _center_crop_wide(image, (width, height)) + image = self.video_processor.preprocess(resized_image).to(device=device, dtype=image_embeddings.dtype) + image_latents = self.prepare_image_latents( + image, + device=device, + num_frames=num_frames, + num_videos_per_prompt=num_videos_per_prompt, + ) + + # 3.3 Prepare additional conditions for the UNet. + if self.do_classifier_free_guidance: + fps_tensor = torch.tensor([target_fps, target_fps]).to(device) + else: + fps_tensor = torch.tensor([target_fps]).to(device) + fps_tensor = fps_tensor.repeat(batch_size * num_videos_per_prompt, 1).ravel() + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + fps=fps_tensor, + image_latents=image_latents, + image_embeddings=image_embeddings, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # reshape latents + batch_size, channel, frames, width, height = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) + noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channel, width, height) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # reshape latents back + latents = latents[None, :].reshape(batch_size, frames, channel, width, height).permute(0, 2, 1, 3, 4) + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 8. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size=decode_chunk_size) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 9. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return I2VGenXLPipelineOutput(frames=video) + + +# The following utilities are taken and adapted from +# https://github.com/ali-vilab/i2vgen-xl/blob/main/utils/transforms.py. + + +def _convert_pt_to_pil(image: Union[torch.Tensor, List[torch.Tensor]]): + if isinstance(image, list) and isinstance(image[0], torch.Tensor): + image = torch.cat(image, 0) + + if isinstance(image, torch.Tensor): + if image.ndim == 3: + image = image.unsqueeze(0) + + image_numpy = VaeImageProcessor.pt_to_numpy(image) + image_pil = VaeImageProcessor.numpy_to_pil(image_numpy) + image = image_pil + + return image + + +def _resize_bilinear( + image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], resolution: Tuple[int, int] +): + # First convert the images to PIL in case they are float tensors (only relevant for tests now). 
+ image = _convert_pt_to_pil(image) + + if isinstance(image, list): + image = [u.resize(resolution, PIL.Image.BILINEAR) for u in image] + else: + image = image.resize(resolution, PIL.Image.BILINEAR) + return image + + +def _center_crop_wide( + image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], resolution: Tuple[int, int] +): + # First convert the images to PIL in case they are float tensors (only relevant for tests now). + image = _convert_pt_to_pil(image) + + if isinstance(image, list): + scale = min(image[0].size[0] / resolution[0], image[0].size[1] / resolution[1]) + image = [u.resize((round(u.width // scale), round(u.height // scale)), resample=PIL.Image.BOX) for u in image] + + # center crop + x1 = (image[0].width - resolution[0]) // 2 + y1 = (image[0].height - resolution[1]) // 2 + image = [u.crop((x1, y1, x1 + resolution[0], y1 + resolution[1])) for u in image] + return image + else: + scale = min(image.size[0] / resolution[0], image.size[1] / resolution[1]) + image = image.resize((round(image.width // scale), round(image.height // scale)), resample=PIL.Image.BOX) + x1 = (image.width - resolution[0]) // 2 + y1 = (image.height - resolution[1]) // 2 + image = image.crop((x1, y1, x1 + resolution[0], y1 + resolution[1])) + return image diff --git a/diffusers3/pipelines/kandinsky/__init__.py b/diffusers3/pipelines/kandinsky/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..606f7b378a79489bbcbaa87db2040bd4196bbd8a --- /dev/null +++ b/diffusers3/pipelines/kandinsky/__init__.py @@ -0,0 +1,66 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_kandinsky"] = ["KandinskyPipeline"] + _import_structure["pipeline_kandinsky_combined"] = [ + "KandinskyCombinedPipeline", + "KandinskyImg2ImgCombinedPipeline", + "KandinskyInpaintCombinedPipeline", + ] + _import_structure["pipeline_kandinsky_img2img"] = ["KandinskyImg2ImgPipeline"] + _import_structure["pipeline_kandinsky_inpaint"] = ["KandinskyInpaintPipeline"] + _import_structure["pipeline_kandinsky_prior"] = ["KandinskyPriorPipeline", "KandinskyPriorPipelineOutput"] + _import_structure["text_encoder"] = ["MultilingualCLIP"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_kandinsky import KandinskyPipeline + from .pipeline_kandinsky_combined import ( + KandinskyCombinedPipeline, + KandinskyImg2ImgCombinedPipeline, + KandinskyInpaintCombinedPipeline, + ) + from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline + from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline + from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput + from .text_encoder import MultilingualCLIP + +else: + import sys + + sys.modules[__name__] = _LazyModule( + 
__name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/kandinsky/pipeline_kandinsky.py b/diffusers3/pipelines/kandinsky/pipeline_kandinsky.py new file mode 100644 index 0000000000000000000000000000000000000000..b2041e101564af12fc76b2a625b9ffafefaf9ffa --- /dev/null +++ b/diffusers3/pipelines/kandinsky/pipeline_kandinsky.py @@ -0,0 +1,407 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import torch +from transformers import ( + XLMRobertaTokenizer, +) + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler, DDPMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_encoder import MultilingualCLIP + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior") + >>> pipe_prior.to("cuda") + + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> negative_image_emb = out.negative_image_embeds + + >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") + >>> pipe.to("cuda") + + >>> image = pipe( + ... prompt, + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... ).images + + >>> image[0].save("cat.png") + ``` +""" + + +def get_new_h_w(h, w, scale_factor=8): + new_h = h // scale_factor**2 + if h % scale_factor**2 != 0: + new_h += 1 + new_w = w // scale_factor**2 + if w % scale_factor**2 != 0: + new_w += 1 + return new_h * scale_factor, new_w * scale_factor + + +class KandinskyPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
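+
+    Note:
+        This pipeline is the decoder stage of Kandinsky 2.1: it expects `image_embeds` / `negative_image_embeds`
+        produced by [`KandinskyPriorPipeline`], uses them (together with the text prompt) to condition the UNet
+        that denoises the MoVQ latents, and finally decodes those latents to pixels with `movq`.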
+ """ + + model_cpu_offload_seq = "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + truncation=True, + max_length=77, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + text_mask = text_inputs.attention_mask.to(device) + + prompt_embeds, text_encoder_hidden_states = self.text_encoder( + input_ids=text_input_ids, attention_mask=text_mask + ) + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + uncond_text_input_ids = uncond_input.input_ids.to(device) + uncond_text_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( + input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image_embeds: Union[torch.Tensor, List[torch.Tensor]], + negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                latents tensor will be generated by sampling with the supplied random `generator`.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that is called every `callback_steps` steps during inference with the following
+                arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called
+                at every step.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return an [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
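+
+        Note:
+            `height` and `width` are not used verbatim; they are rounded by the module-level `get_new_h_w`
+            helper so that the MoVQ latent grid divides evenly. A minimal sketch of the rounding, assuming the
+            usual Kandinsky MoVQ scale factor of 8:
+
+            ```py
+            get_new_h_w(768, 768, scale_factor=8)  # (96, 96) latent grid -> decoded at 768x768
+            get_new_h_w(500, 500, scale_factor=8)  # (64, 64) latent grid -> decoded at 512x512
+            ```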
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=prompt_embeds.dtype, device=device + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps_tensor = self.scheduler.timesteps + + num_channels_latents = self.unet.config.in_channels + + height, width = get_new_h_w(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + latents, + self.scheduler, + ) + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git 
a/diffusers3/pipelines/kandinsky/pipeline_kandinsky_combined.py b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..fe9909770376faafacb534f5ed63ac68708167ad --- /dev/null +++ b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_combined.py @@ -0,0 +1,817 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Callable, List, Optional, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, + XLMRobertaTokenizer, +) + +from ...models import PriorTransformer, UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler, DDPMScheduler, UnCLIPScheduler +from ...utils import ( + replace_example_docstring, +) +from ..pipeline_utils import DiffusionPipeline +from .pipeline_kandinsky import KandinskyPipeline +from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline +from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline +from .pipeline_kandinsky_prior import KandinskyPriorPipeline +from .text_encoder import MultilingualCLIP + + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipe = AutoPipelineForText2Image.from_pretrained( + "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" + + image = pipe(prompt=prompt, num_inference_steps=25).images[0] + ``` +""" + +IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForImage2Image + import torch + import requests + from io import BytesIO + from PIL import Image + import os + + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + image.thumbnail((768, 768)) + + image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] + ``` +""" + +INPAINT_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForInpainting + from diffusers.utils import load_image + import torch + import numpy as np + + pipe = AutoPipelineForInpainting.from_pretrained( + "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + original_image = load_image( + 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" + ) + + mask = np.zeros((768, 768), dtype=np.float32) + # Let's mask out an area above the cat's head + mask[:250, 250:-250] = 1 + + image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] + ``` +""" + + +class KandinskyCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + _load_connected_pipes = True + model_cpu_offload_seq = "text_encoder->unet->movq->prior_prior->prior_image_encoder->prior_text_encoder" + _exclude_from_cpu_offload = ["prior_prior"] + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using ๐Ÿค— + Accelerate, significantly reducing memory usage. 
Models are moved to a `torch.device('meta')` and loaded on a + GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. + Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. + """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + outputs = self.decoder_pipe( + prompt=prompt, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + ) + + self.maybe_free_model_hooks() + + return outputs + + +class KandinskyImg2ImgCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. 
+ prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + _load_connected_pipes = True + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->" "text_encoder->unet->movq" + _exclude_from_cpu_offload = ["prior_prior"] + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyImg2ImgPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
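+
+        A minimal usage sketch (illustrative; `pipe` is assumed to be an already-loaded
+        `KandinskyImg2ImgCombinedPipeline` and `init_image` any starting image):
+
+        ```py
+        pipe.enable_sequential_cpu_offload()
+        # subsequent calls run with submodule-level offloading enabled
+        result = pipe(prompt="a watercolor fox", image=init_image, strength=0.4).images[0]
+        ```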
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + strength: float = 0.3, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + outputs = self.decoder_pipe( + prompt=prompt, + image=image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + strength=strength, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + ) + + self.maybe_free_model_hooks() + + return outputs + + +class KandinskyInpaintCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + _load_connected_pipes = True + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->prior_prior->text_encoder->unet->movq" + _exclude_from_cpu_offload = ["prior_prior"] + + def __init__( + self, + text_encoder: MultilingualCLIP, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DDPMScheduler], + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyPriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyInpaintPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+            prior_num_inference_steps (`int`, *optional*, defaults to 25):
+                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
+                image at the expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 4.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
+                text `prompt`, usually at the expense of lower image quality.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                latents tensor will be generated by sampling with the supplied random `generator`.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
+                (`np.array`) or `"pt"` (`torch.Tensor`).
+            callback (`Callable`, *optional*):
+                A function that is called every `callback_steps` steps during inference with the following
+                arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called
+                at every step.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return an [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
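+
+        Note:
+            The `prior_*` arguments (`prior_guidance_scale`, `prior_num_inference_steps`) only steer the prior
+            stage that maps the text prompt to CLIP image embeddings; `guidance_scale` and `num_inference_steps`
+            control the decoder stage that denoises the MoVQ latents and produces the final inpainted image.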
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(prompt, PIL.Image.Image) else image + mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + if ( + isinstance(mask_image, (list, tuple)) + and len(mask_image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(mask_image) == 0 + ): + mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image + + outputs = self.decoder_pipe( + prompt=prompt, + image=image, + mask_image=mask_image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + ) + + self.maybe_free_model_hooks() + + return outputs diff --git a/diffusers3/pipelines/kandinsky/pipeline_kandinsky_img2img.py b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..ef5241fee5d208ad57a1f97283dc3a24cb4cde7e --- /dev/null +++ b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_img2img.py @@ -0,0 +1,500 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from PIL import Image +from transformers import ( + XLMRobertaTokenizer, +) + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_encoder import MultilingualCLIP + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... 
"kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "A red cartoon frog, 4k" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyImg2ImgPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/frog.png" + ... ) + + >>> image = pipe( + ... prompt, + ... image=init_image, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... strength=0.2, + ... ).images + + >>> image[0].save("red_frog.png") + ``` +""" + + +def get_new_h_w(h, w, scale_factor=8): + new_h = h // scale_factor**2 + if h % scale_factor**2 != 0: + new_h += 1 + new_w = w // scale_factor**2 + if w % scale_factor**2 != 0: + new_w += 1 + return new_h * scale_factor, new_w * scale_factor + + +def prepare_image(pil_image, w=512, h=512): + pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class KandinskyImg2ImgPipeline(DiffusionPipeline): + """ + Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. 
+ movq ([`VQModel`]): + MoVQ image encoder and decoder + """ + + model_cpu_offload_seq = "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + movq: VQModel, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, latents, latent_timestep, shape, dtype, device, generator, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + + shape = latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + latents = self.add_noise(latents, noise, latent_timestep) + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + text_mask = text_inputs.attention_mask.to(device) + + prompt_embeds, text_encoder_hidden_states = self.text_encoder( + input_ids=text_input_ids, attention_mask=text_mask + ) + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + uncond_text_input_ids = uncond_input.input_ids.to(device) + uncond_text_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( + input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + # add_noise method to overwrite the one in schedule because it use a different beta schedule for adding noise vs sampling + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + betas = torch.linspace(0.0001, 0.02, 1000, dtype=torch.float32) + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_cumprod = alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + + return noisy_samples + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + image_embeds: torch.Tensor, + negative_image_embeds: torch.Tensor, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + strength: float = 0.3, + guidance_scale: float = 7.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, 
List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor`, `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The CLIP image embeddings for the text prompt, which will be used to condition the image generation. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The CLIP image embeddings for the negative text prompt, which will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that is called every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
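+ + Note: `strength` and `num_inference_steps` together determine how many denoising steps are actually run on the noised `image`; for example, with `num_inference_steps=100` and `strength=0.3`, `get_timesteps` keeps only the final 30 scheduler timesteps, so roughly 30 denoising steps are performed.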
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + # 1. Define call parameters + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + + # 2. get text and image embeddings + prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=prompt_embeds.dtype, device=device + ) + + # 3. pre-processing initial image + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) + image = image.to(dtype=prompt_embeds.dtype, device=device) + + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + + timesteps_tensor, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + + # the formular to calculate timestep for add_noise is taken from the original kandinsky repo + latent_timestep = int(self.scheduler.config.num_train_timesteps * strength) - 2 + + latent_timestep = torch.tensor([latent_timestep] * batch_size, dtype=timesteps_tensor.dtype, device=device) + + num_channels_latents = self.unet.config.in_channels + + height, width = get_new_h_w(height, width, self.movq_scale_factor) + + # 5. Create initial latent + latents = self.prepare_latents( + latents, + latent_timestep, + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + self.scheduler, + ) + + # 6. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 7. post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..778b6e314c0df998497aa6319d09017b31df657c --- /dev/null +++ b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -0,0 +1,635 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from packaging import version +from PIL import Image +from transformers import ( + XLMRobertaTokenizer, +) + +from ... 
import __version__ +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDIMScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_encoder import MultilingualCLIP + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + >>> import numpy as np + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "a hat" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyInpaintPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> mask = np.zeros((768, 768), dtype=np.float32) + >>> mask[:250, 250:-250] = 1 + + >>> out = pipe( + ... prompt, + ... image=init_image, + ... mask_image=mask, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ) + + >>> image = out.images[0] + >>> image.save("cat_with_hat.png") + ``` +""" + + +def get_new_h_w(h, w, scale_factor=8): + new_h = h // scale_factor**2 + if h % scale_factor**2 != 0: + new_h += 1 + new_w = w // scale_factor**2 + if w % scale_factor**2 != 0: + new_w += 1 + return new_h * scale_factor, new_w * scale_factor + + +def prepare_mask(masks): + prepared_masks = [] + for mask in masks: + old_mask = deepcopy(mask) + for i in range(mask.shape[1]): + for j in range(mask.shape[2]): + if old_mask[0][i][j] == 1: + continue + if i != 0: + mask[:, i - 1, j] = 0 + if j != 0: + mask[:, i, j - 1] = 0 + if i != 0 and j != 0: + mask[:, i - 1, j - 1] = 0 + if i != mask.shape[1] - 1: + mask[:, i + 1, j] = 0 + if j != mask.shape[2] - 1: + mask[:, i, j + 1] = 0 + if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: + mask[:, i + 1, j + 1] = 0 + prepared_masks.append(mask) + return torch.stack(prepared_masks, dim=0) + + +def prepare_mask_and_masked_image(image, mask, height, width): + r""" + Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will + be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for + the ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. 
+ height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. + """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = 
torch.from_numpy(mask) + + mask = 1 - mask + + return mask, image + + +class KandinskyInpaintPipeline(DiffusionPipeline): + """ + Pipeline for text-guided image inpainting using Kandinsky2.1 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + text_encoder ([`MultilingualCLIP`]): + Frozen text-encoder. + tokenizer ([`XLMRobertaTokenizer`]): + Tokenizer of class + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ image encoder and decoder + """ + + model_cpu_offload_seq = "text_encoder->unet->movq" + + def __init__( + self, + text_encoder: MultilingualCLIP, + movq: VQModel, + tokenizer: XLMRobertaTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + movq=movq, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + self._warn_has_been_called = False + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + text_mask = text_inputs.attention_mask.to(device) + + prompt_embeds, text_encoder_hidden_states = self.text_encoder( + input_ids=text_input_ids, attention_mask=text_mask + ) + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + 
f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=77, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + uncond_text_input_ids = uncond_input.input_ids.to(device) + uncond_text_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( + input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask + ) + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, PIL.Image.Image], + mask_image: Union[torch.Tensor, PIL.Image.Image, np.ndarray], + image_embeds: torch.Tensor, + negative_image_embeds: torch.Tensor, + negative_prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor`, `PIL.Image.Image` or `np.ndarray`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + mask_image (`PIL.Image.Image`,`torch.Tensor` or `np.ndarray`): + `Image`, or a tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. 
You can pass a pytorch tensor as mask only if the + image you passed is a pytorch tensor, and it should contain one color channel (L) instead of 3, so the + expected shape would be either `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)` or `(H, W)`. If `image` is a PIL + image or numpy array, the mask should also be either a PIL image or a numpy array. If it is a PIL image, it + will be converted to a single channel (luminance) before use. If it is a numpy array, the expected + shape is `(H, W)`. + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The CLIP image embeddings for the text prompt, which will be used to condition the image generation. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The CLIP image embeddings for the negative text prompt, which will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages the model to generate images closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that is called every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
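+ + Note: `mask_image` is binarized at 0.5 and inverted by `prepare_mask_and_masked_image` (so that, after preprocessing, a value of 1 marks pixels to keep), and `prepare_mask` then grows the repainted region by one pixel at latent resolution before the mask and masked image are concatenated with the latents.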
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( + "0.23.0.dev0" + ): + logger.warning( + "Please note that the expected format of `mask_image` has recently been changed. " + "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. " + "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " + "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " + "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. " + "This warning will be suppressed after the first inference call and will be removed in diffusers>0.23.0" + ) + self._warn_has_been_called = True + + # Define call parameters + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=prompt_embeds.dtype, device=device + ) + + # preprocess image and mask + mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) + + image = image.to(dtype=prompt_embeds.dtype, device=device) + image = self.movq.encode(image)["latents"] + + mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device) + + image_shape = tuple(image.shape[-2:]) + mask_image = F.interpolate( + mask_image, + image_shape, + mode="nearest", + ) + mask_image = prepare_mask(mask_image) + masked_image = image * mask_image + + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + mask_image = mask_image.repeat(2, 1, 1, 1) + masked_image = masked_image.repeat(2, 1, 1, 1) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps_tensor = self.scheduler.timesteps + + num_channels_latents = self.movq.config.latent_channels + + # get h, w for latents + sample_height, sample_width = get_new_h_w(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, sample_height, sample_width), + text_encoder_hidden_states.dtype, + device, + generator, + latents, + self.scheduler, + ) + + # Check that sizes of mask, masked image and latents match with expected + num_channels_mask = mask_image.shape[1] + num_channels_masked_image = masked_image.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image !=
self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) + + added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky/pipeline_kandinsky_prior.py b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..b5152d71cb6bbdad7532a8845720ea1713ae2c5f --- /dev/null +++ b/diffusers3/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -0,0 +1,547 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
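+# +# The `KandinskyPriorPipeline` defined below maps text prompts (and, via `interpolate`, reference images) +# to CLIP image embeddings; the resulting `image_embeds` / `negative_image_embeds` are then consumed by +# the Kandinsky decoder, img2img and inpaint pipelines as conditioning inputs.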
+ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer +from ...schedulers import UnCLIPScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior") + >>> pipe_prior.to("cuda") + + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> negative_image_emb = out.negative_image_embeds + + >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1") + >>> pipe.to("cuda") + + >>> image = pipe( + ... prompt, + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... ).images + + >>> image[0].save("cat.png") + ``` +""" + +EXAMPLE_INTERPOLATE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyPriorPipeline, KandinskyPipeline + >>> from diffusers.utils import load_image + >>> import PIL + + >>> import torch + >>> from torchvision import transforms + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> img1 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> img2 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/starry_night.jpeg" + ... ) + + >>> images_texts = ["a cat", img1, img2] + >>> weights = [0.3, 0.3, 0.4] + >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights) + + >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) + >>> pipe.to("cuda") + + >>> image = pipe( + ... "", + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=150, + ... ).images[0] + + >>> image.save("starry_cat.png") + ``` +""" + + +@dataclass +class KandinskyPriorPipelineOutput(BaseOutput): + """ + Output class for KandinskyPriorPipeline. + + Args: + image_embeds (`torch.Tensor`) + clip image embeddings for text prompt + negative_image_embeds (`List[PIL.Image.Image]` or `np.ndarray`) + clip image embeddings for unconditional tokens + """ + + image_embeds: Union[torch.Tensor, np.ndarray] + negative_image_embeds: Union[torch.Tensor, np.ndarray] + + +class KandinskyPriorPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. 
+ image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + _exclude_from_cpu_offload = ["prior"] + model_cpu_offload_seq = "text_encoder->prior" + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModelWithProjection, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: UnCLIPScheduler, + image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + image_encoder=image_encoder, + image_processor=image_processor, + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) + def interpolate( + self, + images_and_prompts: List[Union[str, PIL.Image.Image, torch.Tensor]], + weights: List[float], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + negative_prior_prompt: Optional[str] = None, + negative_prompt: str = "", + guidance_scale: float = 4.0, + device=None, + ): + """ + Function invoked when using the prior pipeline for interpolation. + + Args: + images_and_prompts (`List[Union[str, PIL.Image.Image, torch.Tensor]]`): + list of prompts and images to guide the image generation. + weights: (`List[float]`): + list of weights for each condition in `images_and_prompts` + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + negative_prior_prompt (`str`, *optional*): + The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
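+ + Note: each condition is encoded to a CLIP image embedding, scaled by its weight, and the results are summed without normalization, so the `weights` are typically chosen to sum to roughly 1 (as in the example below).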
+ + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + device = device or self.device + + if len(images_and_prompts) != len(weights): + raise ValueError( + f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length" + ) + + image_embeddings = [] + for cond, weight in zip(images_and_prompts, weights): + if isinstance(cond, str): + image_emb = self( + cond, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ).image_embeds + + elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): + if isinstance(cond, PIL.Image.Image): + cond = ( + self.image_processor(cond, return_tensors="pt") + .pixel_values[0] + .unsqueeze(0) + .to(dtype=self.image_encoder.dtype, device=device) + ) + + image_emb = self.image_encoder(cond)["image_embeds"] + + else: + raise ValueError( + f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}" + ) + + image_embeddings.append(image_emb * weight) + + image_emb = torch.cat(image_embeddings).sum(dim=0, keepdim=True) + + out_zero = self( + negative_prompt, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ) + zero_image_emb = out_zero.negative_image_embeds if negative_prompt == "" else out_zero.image_embeds + + return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def get_zero_embed(self, batch_size=1, device=None): + device = device or self.device + zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to( + device=device, dtype=self.image_encoder.dtype + ) + zero_image_emb = self.image_encoder(zero_img)["image_embeds"] + zero_image_emb = zero_image_emb.repeat(batch_size, 1) + return zero_image_emb + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up 
to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + guidance_scale: float = 4.0, + output_type: Optional[str] = "pt", + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generate image. Choose between: `"np"` (`np.array`) or `"pt"` + (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
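+ + Note: when `negative_prompt` is provided, the prompt and negative prompt are encoded and denoised in a single doubled batch and the result is split into `image_embeds` and `negative_image_embeds`; when it is omitted, `negative_image_embeds` falls back to the embedding of an all-zero image (see `get_zero_embed`).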
+ + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + if isinstance(prompt, str): + prompt = [prompt] + elif not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif not isinstance(negative_prompt, list) and negative_prompt is not None: + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + # if the negative prompt is defined we double the batch size to + # directly retrieve the negative prompt embedding + if negative_prompt is not None: + prompt = prompt + negative_prompt + negative_prompt = 2 * negative_prompt + + device = self._execution_device + + batch_size = len(prompt) + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # prior + self.scheduler.set_timesteps(num_inference_steps, device=device) + prior_timesteps_tensor = self.scheduler.timesteps + + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, embedding_dim), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == prior_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = prior_timesteps_tensor[i + 1] + + latents = self.scheduler.step( + predicted_image_embedding, + timestep=t, + sample=latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + latents = self.prior.post_process_latents(latents) + + image_embeddings = latents + + # if negative prompt has been defined, we retrieve split the image embedding into two + if negative_prompt is None: + zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + + self.maybe_free_model_hooks() + else: + image_embeddings, zero_embeds = image_embeddings.chunk(2) + + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.prior_hook.offload() + + if output_type not in ["pt", "np"]: + raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") + + if output_type == "np": + image_embeddings = image_embeddings.cpu().numpy() + zero_embeds = zero_embeds.cpu().numpy() + + if not return_dict: + return (image_embeddings, zero_embeds) + + return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) diff --git a/diffusers3/pipelines/kandinsky/text_encoder.py b/diffusers3/pipelines/kandinsky/text_encoder.py new file mode 100644 index 
0000000000000000000000000000000000000000..caa0029f00ca22818819d5b76b57ec489c6da1d6 --- /dev/null +++ b/diffusers3/pipelines/kandinsky/text_encoder.py @@ -0,0 +1,27 @@ +import torch +from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel + + +class MCLIPConfig(XLMRobertaConfig): + model_type = "M-CLIP" + + def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs): + self.transformerDimensions = transformerDimSize + self.numDims = imageDimSize + super().__init__(**kwargs) + + +class MultilingualCLIP(PreTrainedModel): + config_class = MCLIPConfig + + def __init__(self, config, *args, **kwargs): + super().__init__(config, *args, **kwargs) + self.transformer = XLMRobertaModel(config) + self.LinearTransformation = torch.nn.Linear( + in_features=config.transformerDimensions, out_features=config.numDims + ) + + def forward(self, input_ids, attention_mask): + embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0] + embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] + return self.LinearTransformation(embs2), embs diff --git a/diffusers3/pipelines/kandinsky2_2/__init__.py b/diffusers3/pipelines/kandinsky2_2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67e97f161173ac8981dadf757fd8d6438307c973 --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/__init__.py @@ -0,0 +1,70 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_kandinsky2_2"] = ["KandinskyV22Pipeline"] + _import_structure["pipeline_kandinsky2_2_combined"] = [ + "KandinskyV22CombinedPipeline", + "KandinskyV22Img2ImgCombinedPipeline", + "KandinskyV22InpaintCombinedPipeline", + ] + _import_structure["pipeline_kandinsky2_2_controlnet"] = ["KandinskyV22ControlnetPipeline"] + _import_structure["pipeline_kandinsky2_2_controlnet_img2img"] = ["KandinskyV22ControlnetImg2ImgPipeline"] + _import_structure["pipeline_kandinsky2_2_img2img"] = ["KandinskyV22Img2ImgPipeline"] + _import_structure["pipeline_kandinsky2_2_inpainting"] = ["KandinskyV22InpaintPipeline"] + _import_structure["pipeline_kandinsky2_2_prior"] = ["KandinskyV22PriorPipeline"] + _import_structure["pipeline_kandinsky2_2_prior_emb2emb"] = ["KandinskyV22PriorEmb2EmbPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_kandinsky2_2 import KandinskyV22Pipeline + from .pipeline_kandinsky2_2_combined import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22InpaintCombinedPipeline, + ) + from .pipeline_kandinsky2_2_controlnet import KandinskyV22ControlnetPipeline + from .pipeline_kandinsky2_2_controlnet_img2img import KandinskyV22ControlnetImg2ImgPipeline + from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline 
+ from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline + from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline + from .pipeline_kandinsky2_2_prior_emb2emb import KandinskyV22PriorEmb2EmbPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py new file mode 100644 index 0000000000000000000000000000000000000000..471db61556f5491d985e4fbccf33457d0c5e6bdf --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -0,0 +1,320 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import torch + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import deprecate, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline + >>> import torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") + >>> pipe_prior.to("cuda") + >>> prompt = "red cat, 4k photo" + >>> out = pipe_prior(prompt) + >>> image_emb = out.image_embeds + >>> zero_image_emb = out.negative_image_embeds + >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images + >>> image[0].save("cat.png") + ``` +""" + + +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +class KandinskyV22Pipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. 
+ movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image_embeds: Union[torch.Tensor, List[torch.Tensor]], + negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + device = self._execution_device + + self._guidance_scale = guidance_scale + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + batch_size = image_embeds.shape[0] * num_images_per_prompt + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if self.do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + num_channels_latents = self.unet.config.in_channels + + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we 
are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeds = callback_outputs.pop("image_embeds", image_embeds) + negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if not output_type == "latent": + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..68334fef381195b0329f68838b47489f72f679e9 --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py @@ -0,0 +1,854 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
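+
+# Illustrative sketch (not part of the upstream API surface; model ids taken from the
+# example docstrings below): the combined pipelines in this module simply chain the
+# prior pipeline (text -> CLIP image embeddings) with the decoder pipeline
+# (embeddings -> image). The two-stage call they wrap looks roughly like this:
+#
+#   import torch
+#   from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
+#
+#   prior = KandinskyV22PriorPipeline.from_pretrained(
+#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+#   ).to("cuda")
+#   decoder = KandinskyV22Pipeline.from_pretrained(
+#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
+#   ).to("cuda")
+#
+#   out = prior("red cat, 4k photo")
+#   image = decoder(
+#       image_embeds=out.image_embeds,
+#       negative_image_embeds=out.negative_image_embeds,
+#       height=768,
+#       width=768,
+#       num_inference_steps=50,
+#   ).images[0]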
+ +from typing import Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer, UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler, UnCLIPScheduler +from ...utils import deprecate, logging, replace_example_docstring +from ..pipeline_utils import DiffusionPipeline +from .pipeline_kandinsky2_2 import KandinskyV22Pipeline +from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline +from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline +from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForText2Image + import torch + + pipe = AutoPipelineForText2Image.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" + + image = pipe(prompt=prompt, num_inference_steps=25).images[0] + ``` +""" + +IMAGE2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForImage2Image + import torch + import requests + from io import BytesIO + from PIL import Image + import os + + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + image.thumbnail((768, 768)) + + image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0] + ``` +""" + +INPAINT_EXAMPLE_DOC_STRING = """ + Examples: + ```py + from diffusers import AutoPipelineForInpainting + from diffusers.utils import load_image + import torch + import numpy as np + + pipe = AutoPipelineForInpainting.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload() + + prompt = "A fantasy landscape, Cinematic lighting" + negative_prompt = "low quality, bad quality" + + original_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" + ) + + mask = np.zeros((768, 768), dtype=np.float32) + # Let's mask out an area above the cat's head + mask[:250, 250:-250] = 1 + + image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0] + ``` +""" + + +class KandinskyV22CombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. 
+ unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + prior_image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. + """ + + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" + _load_connected_pipes = True + _exclude_from_cpu_offload = ["prior_prior"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22Pipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference of the prior pipeline. + The function is called with the following arguments: `prior_callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your prior pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference of the decoder pipeline. + The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, + step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors + as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + outputs = self.decoder_pipe( + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + self.maybe_free_model_hooks() + + return outputs + + +class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + prior_image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. 
+ """ + + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" + _load_connected_pipes = True + _exclude_from_cpu_offload = ["prior_prior"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22Img2ImgPipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. 
+ num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(image, PIL.Image.Image) else image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + outputs = self.decoder_pipe( + image=image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + strength=strength, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + callback=callback, + callback_steps=callback_steps, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + self.maybe_free_model_hooks() + return outputs + + +class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for inpainting generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + prior_prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + prior_tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + prior_scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + prior_image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. 
+ """ + + model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" + _load_connected_pipes = True + _exclude_from_cpu_offload = ["prior_prior"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + prior_prior: PriorTransformer, + prior_image_encoder: CLIPVisionModelWithProjection, + prior_text_encoder: CLIPTextModelWithProjection, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: UnCLIPScheduler, + prior_image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + prior_prior=prior_prior, + prior_image_encoder=prior_image_encoder, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + prior_image_processor=prior_image_processor, + ) + self.prior_pipe = KandinskyV22PriorPipeline( + prior=prior_prior, + image_encoder=prior_image_encoder, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_processor=prior_image_processor, + ) + self.decoder_pipe = KandinskyV22InpaintPipeline( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, + text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a + `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. + Note that offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.enable_model_cpu_offload() + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + height: int = 512, + width: int = 512, + prior_guidance_scale: float = 4.0, + prior_num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + prior_num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: + int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + prior_kwargs = {} + if kwargs.get("prior_callback", None) is not None: + prior_kwargs["callback"] = kwargs.pop("prior_callback") + deprecate( + "prior_callback", + "1.0.0", + "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + if kwargs.get("prior_callback_steps", None) is not None: + deprecate( + "prior_callback_steps", + "1.0.0", + "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps") + + prior_outputs = self.prior_pipe( + prompt=prompt, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + num_inference_steps=prior_num_inference_steps, + generator=generator, + latents=latents, + guidance_scale=prior_guidance_scale, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + **prior_kwargs, + ) + image_embeds = prior_outputs[0] + negative_image_embeds = prior_outputs[1] + + prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt + image = [image] if isinstance(image, PIL.Image.Image) else image + mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image + + if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: + prompt = (image_embeds.shape[0] // len(prompt)) * prompt + + if ( + isinstance(image, (list, tuple)) + and len(image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(image) == 0 + ): + image = (image_embeds.shape[0] // len(image)) * image + + if ( + isinstance(mask_image, (list, tuple)) + and len(mask_image) < image_embeds.shape[0] + and image_embeds.shape[0] % len(mask_image) == 0 + ): + mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image + + outputs = self.decoder_pipe( + image=image, + mask_image=mask_image, + image_embeds=image_embeds, + negative_image_embeds=negative_image_embeds, + width=width, + height=height, + num_inference_steps=num_inference_steps, + generator=generator, + guidance_scale=guidance_scale, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + **kwargs, + ) + self.maybe_free_model_hooks() + + return outputs diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..0130c3951b38b8ff404bf5747c1bb194abc351b3 --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -0,0 +1,320 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
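+
+# Illustrative sketch (assumptions noted inline; `depth_image` is a hypothetical
+# depth-estimator output, as in the example docstring below): the controlnet decoder
+# in this module conditions the UNet on an extra `hint` tensor (for example a depth
+# map) passed via `added_cond_kwargs`, alongside the prior's image embeddings.
+# Preparing such a hint mirrors the `make_hint` helper shown further down:
+#
+#   import numpy as np
+#   import torch
+#
+#   depth = np.array(depth_image)                              # (H, W) depth map
+#   hint = np.concatenate([depth[:, :, None]] * 3, axis=2)     # (H, W, 3)
+#   hint = torch.from_numpy(hint).float() / 255.0              # assumes uint8 input, scales to [0, 1]
+#   hint = hint.permute(2, 0, 1).unsqueeze(0)                  # (1, 3, H, W) batch for the pipeline
+#
+# With the default scale_factor of 8, downscale_height_and_width(768, 768) below
+# evaluates to (96, 96), i.e. the latent grid the UNet denoises.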
+ +from typing import Callable, List, Optional, Union + +import torch + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + logging, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> import numpy as np + + >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline + >>> from transformers import pipeline + >>> from diffusers.utils import load_image + + + >>> def make_hint(image, depth_estimator): + ... image = depth_estimator(image)["depth"] + ... image = np.array(image) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... detected_map = torch.from_numpy(image).float() / 255.0 + ... hint = detected_map.permute(2, 0, 1) + ... return hint + + + >>> depth_estimator = pipeline("depth-estimation") + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior = pipe_prior.to("cuda") + + >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + + >>> img = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((768, 768)) + + >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") + + >>> prompt = "A robot, 4k photo" + >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" + + >>> generator = torch.Generator(device="cuda").manual_seed(43) + + >>> image_emb, zero_image_emb = pipe_prior( + ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator + ... ).to_tuple() + + >>> images = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... hint=hint, + ... num_inference_steps=50, + ... generator=generator, + ... height=768, + ... width=768, + ... ).images + + >>> images[0].save("robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +class KandinskyV22ControlnetPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
+ + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.Tensor, List[torch.Tensor]], + negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], + hint: torch.Tensor, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + hint (`torch.Tensor`): + The controlnet condition. + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + do_classifier_free_guidance = guidance_scale > 1.0 + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + if isinstance(hint, list): + hint = torch.cat(hint, dim=0) + + batch_size = image_embeds.shape[0] * num_images_per_prompt + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + hint = hint.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps_tensor = self.scheduler.timesteps + + num_channels_latents = self.movq.config.latent_channels + + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + for i, t in enumerate(self.progress_bar(timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + 
hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + # Offload all models + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..12be1534c642c49dd04ca847b80e718969a2c3ee --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py @@ -0,0 +1,381 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from PIL import Image + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + logging, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> import numpy as np + + >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline + >>> from transformers import pipeline + >>> from diffusers.utils import load_image + + + >>> def make_hint(image, depth_estimator): + ... image = depth_estimator(image)["depth"] + ... image = np.array(image) + ... image = image[:, :, None] + ... image = np.concatenate([image, image, image], axis=2) + ... detected_map = torch.from_numpy(image).float() / 255.0 + ... hint = detected_map.permute(2, 0, 1) + ... return hint + + + >>> depth_estimator = pipeline("depth-estimation") + + >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior = pipe_prior.to("cuda") + + >>> pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( + ... 
"kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> img = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((768, 768)) + + + >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") + + >>> prompt = "A robot, 4k photo" + >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" + + >>> generator = torch.Generator(device="cuda").manual_seed(43) + + >>> img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator) + >>> negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator) + + >>> images = pipe( + ... image=img, + ... strength=0.5, + ... image_embeds=img_emb.image_embeds, + ... negative_image_embeds=negative_emb.image_embeds, + ... hint=hint, + ... num_inference_steps=50, + ... generator=generator, + ... height=768, + ... width=768, + ... ).images + + >>> images[0].save("robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image +def prepare_image(pil_image, w=512, h=512): + pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class KandinskyV22ControlnetImg2ImgPipeline(DiffusionPipeline): + """ + Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. 
+ """ + + model_cpu_offload_seq = "unet->movq" + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2_img2img.KandinskyV22Img2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.movq.encode(image).latent_dist.sample(generator) + + init_latents = self.movq.config.scaling_factor * init_latents + + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.Tensor, List[torch.Tensor]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], + hint: torch.Tensor, + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. 
Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + hint (`torch.Tensor`): + The controlnet condition. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
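+ Note: the effective number of denoising steps is roughly `int(num_inference_steps * strength)`; the remaining steps are skipped because the initial latents are obtained by noising the encoded `image` to the corresponding timestep rather than starting from pure noise (see `get_timesteps` and `prepare_latents`).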
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + device = self._execution_device + + do_classifier_free_guidance = guidance_scale > 1.0 + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + if isinstance(hint, list): + hint = torch.cat(hint, dim=0) + + batch_size = image_embeds.shape[0] + + if do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + hint = hint.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device) + + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) + image = image.to(dtype=image_embeds.dtype, device=device) + + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + latents = self.prepare_latents( + latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator + ) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + # Offload all models + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np", "pil"]: + raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") + 
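+ # movq.decode returns images in the [-1, 1] range; rescale to [0, 1] before converting to numpy / PIL.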
+ if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..899273a1a7362374cb91d66366095910ab6d8403 --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py @@ -0,0 +1,399 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from PIL import Image + +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "A red cartoon frog, 4k" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/frog.png" + ... ) + + >>> image = pipe( + ... image=init_image, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... strength=0.2, + ... 
).images + + >>> image[0].save("red_frog.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image +def prepare_image(pil_image, w=512, h=512): + pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class KandinskyV22Img2ImgPipeline(DiffusionPipeline): + """ + Pipeline for image-to-image generation using Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. + movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + init_latents = [ + self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.movq.encode(image).latent_dist.sample(generator) + + init_latents = self.movq.config.scaling_factor * init_latents + + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.Tensor, List[torch.Tensor]], + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], + negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + strength: float = 0.3, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded + again. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). 
Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + device = self._execution_device + + self._guidance_scale = guidance_scale + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + batch_size = image_embeds.shape[0] + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if self.do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. 
Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i, width, height) for i in image], dim=0) + image = image.to(dtype=image_embeds.dtype, device=device) + + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + latents = self.prepare_latents( + latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator + ) + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + added_cond_kwargs = {"image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeds = callback_outputs.pop("image_embeds", image_embeds) + negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil` ,`np` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + # post-processing + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ba7a0011a14058a01f53c34ef5dfe476e28708 --- /dev/null +++ 
b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -0,0 +1,556 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from packaging import version +from PIL import Image + +from ... import __version__ +from ...models import UNet2DConditionModel, VQModel +from ...schedulers import DDPMScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline + >>> from diffusers.utils import load_image + >>> import torch + >>> import numpy as np + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "a hat" + >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) + + >>> pipe = KandinskyV22InpaintPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> init_image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + + >>> mask = np.zeros((768, 768), dtype=np.float32) + >>> mask[:250, 250:-250] = 1 + + >>> out = pipe( + ... image=init_image, + ... mask_image=mask, + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... 
) + + >>> image = out.images[0] + >>> image.save("cat_with_hat.png") + ``` +""" + + +# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_inpaint.prepare_mask +def prepare_mask(masks): + prepared_masks = [] + for mask in masks: + old_mask = deepcopy(mask) + for i in range(mask.shape[1]): + for j in range(mask.shape[2]): + if old_mask[0][i][j] == 1: + continue + if i != 0: + mask[:, i - 1, j] = 0 + if j != 0: + mask[:, i, j - 1] = 0 + if i != 0 and j != 0: + mask[:, i - 1, j - 1] = 0 + if i != mask.shape[1] - 1: + mask[:, i + 1, j] = 0 + if j != mask.shape[2] - 1: + mask[:, i, j + 1] = 0 + if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: + mask[:, i + 1, j + 1] = 0 + prepared_masks.append(mask) + return torch.stack(prepared_masks, dim=0) + + +# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_inpaint.prepare_mask_and_masked_image +def prepare_mask_and_masked_image(image, mask, height, width): + r""" + Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will + be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for + the ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. 
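+ Note: the returned mask is inverted relative to the user-supplied `mask_image`: after binarization, `1` marks pixels to preserve and `0` marks pixels to repaint. The inpainting loop relies on this convention both when building `masked_image` and when blending the original latents back in at each step.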
+ """ + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + if mask is None: + raise ValueError("`mask_image` input cannot be undefined.") + + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + # preprocess image + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + # resize all images w.r.t passed height an width + image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + mask = 1 - mask + + return mask, image + + +class KandinskyV22InpaintPipeline(DiffusionPipeline): + """ + Pipeline for text-guided image inpainting using Kandinsky2.1 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + scheduler ([`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to generate image latents. + unet ([`UNet2DConditionModel`]): + Conditional U-Net architecture to denoise the image embedding. 
+ movq ([`VQModel`]): + MoVQ Decoder to generate the image from the latents. + """ + + model_cpu_offload_seq = "unet->movq" + _callback_tensor_inputs = ["latents", "image_embeds", "negative_image_embeds", "masked_image", "mask_image"] + + def __init__( + self, + unet: UNet2DConditionModel, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + unet=unet, + scheduler=scheduler, + movq=movq, + ) + self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) + self._warn_has_been_called = False + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + image_embeds: Union[torch.Tensor, List[torch.Tensor]], + image: Union[torch.Tensor, PIL.Image.Image], + mask_image: Union[torch.Tensor, PIL.Image.Image, np.ndarray], + negative_image_embeds: Union[torch.Tensor, List[torch.Tensor]], + height: int = 512, + width: int = 512, + num_inference_steps: int = 100, + guidance_scale: float = 4.0, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for text prompt, that will be used to condition the image generation. + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`np.array`): + Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while + black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single + channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, + so the expected shape would be `(B, H, W, 1)`. + negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`): + The clip image embeddings for negative text prompt, will be used to condition the image generation. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + """ + if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( + "0.23.0.dev0" + ): + logger.warning( + "Please note that the expected format of `mask_image` has recently been changed. " + "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " + "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " + "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " + "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. 
" + "This warning will be surpressed after the first inference call and will be removed in diffusers>0.23.0" + ) + self._warn_has_been_called = True + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + self._guidance_scale = guidance_scale + + device = self._execution_device + + if isinstance(image_embeds, list): + image_embeds = torch.cat(image_embeds, dim=0) + batch_size = image_embeds.shape[0] * num_images_per_prompt + if isinstance(negative_image_embeds, list): + negative_image_embeds = torch.cat(negative_image_embeds, dim=0) + + if self.do_classifier_free_guidance: + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( + dtype=self.unet.dtype, device=device + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # preprocess image and mask + mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) + + image = image.to(dtype=image_embeds.dtype, device=device) + image = self.movq.encode(image)["latents"] + + mask_image = mask_image.to(dtype=image_embeds.dtype, device=device) + + image_shape = tuple(image.shape[-2:]) + mask_image = F.interpolate( + mask_image, + image_shape, + mode="nearest", + ) + mask_image = prepare_mask(mask_image) + masked_image = image * mask_image + + mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) + masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) + if self.do_classifier_free_guidance: + mask_image = mask_image.repeat(2, 1, 1, 1) + masked_image = masked_image.repeat(2, 1, 1, 1) + + num_channels_latents = self.movq.config.latent_channels + + height, width = downscale_height_and_width(height, width, self.movq_scale_factor) + + # create initial latent + latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + noise = torch.clone(latents) + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) + + added_cond_kwargs = {"image_embeds": image_embeds} + noise_pred = self.unet( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=None, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred, 
variance_pred = noise_pred.split(latents.shape[1], dim=1) + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + _, variance_pred_text = variance_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) + + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + )[0] + init_latents_proper = image[:1] + init_mask = mask_image[:1] + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = init_mask * init_latents_proper + (1 - init_mask) * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeds = callback_outputs.pop("image_embeds", image_embeds) + negative_image_embeds = callback_outputs.pop("negative_image_embeds", negative_image_embeds) + masked_image = callback_outputs.pop("masked_image", masked_image) + mask_image = callback_outputs.pop("mask_image", mask_image) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + latents = mask_image[:1] * image[:1] + (1 - mask_image[:1]) * latents + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..f2134b22b40b70ab26e3225de08cead28542f34f --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -0,0 +1,549 @@ +from typing import Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer +from ...schedulers import UnCLIPScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..kandinsky import KandinskyPriorPipelineOutput +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline + >>> import 
torch + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior") + >>> pipe_prior.to("cuda") + >>> prompt = "red cat, 4k photo" + >>> image_emb, negative_image_emb = pipe_prior(prompt).to_tuple() + + >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder") + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images + >>> image[0].save("cat.png") + ``` +""" + +EXAMPLE_INTERPOLATE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline + >>> from diffusers.utils import load_image + >>> import PIL + >>> import torch + >>> from torchvision import transforms + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + >>> img1 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + >>> img2 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/starry_night.jpeg" + ... ) + >>> images_texts = ["a cat", img1, img2] + >>> weights = [0.3, 0.3, 0.4] + >>> out = pipe_prior.interpolate(images_texts, weights) + >>> pipe = KandinskyV22Pipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + >>> image = pipe( + ... image_embeds=out.image_embeds, + ... negative_image_embeds=out.negative_image_embeds, + ... height=768, + ... width=768, + ... num_inference_steps=50, + ... ).images[0] + >>> image.save("starry_cat.png") + ``` +""" + + +class KandinskyV22PriorPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + image_processor ([`CLIPImageProcessor`]): + A image_processor to be used to preprocess image from clip. 
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->prior" + _exclude_from_cpu_offload = ["prior"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "text_encoder_hidden_states", "text_mask"] + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModelWithProjection, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: UnCLIPScheduler, + image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + image_encoder=image_encoder, + image_processor=image_processor, + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) + def interpolate( + self, + images_and_prompts: List[Union[str, PIL.Image.Image, torch.Tensor]], + weights: List[float], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + negative_prior_prompt: Optional[str] = None, + negative_prompt: str = "", + guidance_scale: float = 4.0, + device=None, + ): + """ + Function invoked when using the prior pipeline for interpolation. + + Args: + images_and_prompts (`List[Union[str, PIL.Image.Image, torch.Tensor]]`): + list of prompts and images to guide the image generation. + weights: (`List[float]`): + list of weights for each condition in `images_and_prompts` + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + negative_prior_prompt (`str`, *optional*): + The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. 
+ + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + device = device or self.device + + if len(images_and_prompts) != len(weights): + raise ValueError( + f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length" + ) + + image_embeddings = [] + for cond, weight in zip(images_and_prompts, weights): + if isinstance(cond, str): + image_emb = self( + cond, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ).image_embeds.unsqueeze(0) + + elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): + if isinstance(cond, PIL.Image.Image): + cond = ( + self.image_processor(cond, return_tensors="pt") + .pixel_values[0] + .unsqueeze(0) + .to(dtype=self.image_encoder.dtype, device=device) + ) + + image_emb = self.image_encoder(cond)["image_embeds"].repeat(num_images_per_prompt, 1).unsqueeze(0) + + else: + raise ValueError( + f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}" + ) + + image_embeddings.append(image_emb * weight) + + image_emb = torch.cat(image_embeddings).sum(dim=0) + + out_zero = self( + negative_prompt, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ) + zero_image_emb = out_zero.negative_image_embeds if negative_prompt == "" else out_zero.image_embeds + + return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline.get_zero_embed + def get_zero_embed(self, batch_size=1, device=None): + device = device or self.device + zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to( + device=device, dtype=self.image_encoder.dtype + ) + zero_image_emb = self.image_encoder(zero_img)["image_embeds"] + zero_image_emb = zero_image_emb.repeat(batch_size, 1) + return zero_image_emb + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not 
torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + guidance_scale: float = 4.0, + output_type: Optional[str] = "pt", # pt only + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generate image. Choose between: `"np"` (`np.array`) or `"pt"` + (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if isinstance(prompt, str): + prompt = [prompt] + elif not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif not isinstance(negative_prompt, list) and negative_prompt is not None: + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + # if the negative prompt is defined we double the batch size to + # directly retrieve the negative prompt embedding + if negative_prompt is not None: + prompt = prompt + negative_prompt + negative_prompt = 2 * negative_prompt + + device = self._execution_device + + batch_size = len(prompt) + batch_size = batch_size * num_images_per_prompt + + self._guidance_scale = guidance_scale + + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt + ) + + # prior + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, embedding_dim), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if self.do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + self.guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == timesteps.shape[0]: + prev_timestep = None + else: + prev_timestep = timesteps[i + 1] + + latents = self.scheduler.step( + predicted_image_embedding, + timestep=t, + sample=latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = 
callback_outputs.pop("prompt_embeds", prompt_embeds) + text_encoder_hidden_states = callback_outputs.pop( + "text_encoder_hidden_states", text_encoder_hidden_states + ) + text_mask = callback_outputs.pop("text_mask", text_mask) + + latents = self.prior.post_process_latents(latents) + + image_embeddings = latents + + # if negative prompt has been defined, we retrieve split the image embedding into two + if negative_prompt is None: + zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + else: + image_embeddings, zero_embeds = image_embeddings.chunk(2) + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np"]: + raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") + + if output_type == "np": + image_embeddings = image_embeddings.cpu().numpy() + zero_embeds = zero_embeds.cpu().numpy() + + if not return_dict: + return (image_embeddings, zero_embeds) + + return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) diff --git a/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py new file mode 100644 index 0000000000000000000000000000000000000000..ec6509bb3cb59e5cc53de00a2b99f78f7abbe4ee --- /dev/null +++ b/diffusers3/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py @@ -0,0 +1,563 @@ +from typing import List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import PriorTransformer +from ...schedulers import UnCLIPScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..kandinsky import KandinskyPriorPipelineOutput +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline + >>> import torch + + >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> prompt = "red cat, 4k photo" + >>> img = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ) + >>> image_emb, nagative_image_emb = pipe_prior(prompt, image=img, strength=0.2).to_tuple() + + >>> pipe = KandinskyPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder, torch_dtype=torch.float16" + ... ) + >>> pipe.to("cuda") + + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=negative_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=100, + ... ).images + + >>> image[0].save("cat.png") + ``` +""" + +EXAMPLE_INTERPOLATE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22Pipeline + >>> from diffusers.utils import load_image + >>> import PIL + + >>> import torch + >>> from torchvision import transforms + + >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ... ) + >>> pipe_prior.to("cuda") + + >>> img1 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... 
"/kandinsky/cat.png" + ... ) + + >>> img2 = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/starry_night.jpeg" + ... ) + + >>> images_texts = ["a cat", img1, img2] + >>> weights = [0.3, 0.3, 0.4] + >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights) + + >>> pipe = KandinskyV22Pipeline.from_pretrained( + ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + + >>> image = pipe( + ... image_embeds=image_emb, + ... negative_image_embeds=zero_image_emb, + ... height=768, + ... width=768, + ... num_inference_steps=150, + ... ).images[0] + + >>> image.save("starry_cat.png") + ``` +""" + + +class KandinskyV22PriorEmb2EmbPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Kandinsky + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen image-encoder. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`UnCLIPScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->prior" + _exclude_from_cpu_offload = ["prior"] + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModelWithProjection, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: UnCLIPScheduler, + image_processor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + image_encoder=image_encoder, + image_processor=image_processor, + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING) + def interpolate( + self, + images_and_prompts: List[Union[str, PIL.Image.Image, torch.Tensor]], + weights: List[float], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + negative_prior_prompt: Optional[str] = None, + negative_prompt: str = "", + guidance_scale: float = 4.0, + device=None, + ): + """ + Function invoked when using the prior pipeline for interpolation. + + Args: + images_and_prompts (`List[Union[str, PIL.Image.Image, torch.Tensor]]`): + list of prompts and images to guide the image generation. + weights: (`List[float]`): + list of weights for each condition in `images_and_prompts` + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + negative_prior_prompt (`str`, *optional*): + The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if + `guidance_scale` is less than `1`). + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + device = device or self.device + + if len(images_and_prompts) != len(weights): + raise ValueError( + f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length" + ) + + image_embeddings = [] + for cond, weight in zip(images_and_prompts, weights): + if isinstance(cond, str): + image_emb = self( + cond, + num_inference_steps=num_inference_steps, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + negative_prompt=negative_prior_prompt, + guidance_scale=guidance_scale, + ).image_embeds.unsqueeze(0) + + elif isinstance(cond, (PIL.Image.Image, torch.Tensor)): + image_emb = self._encode_image( + cond, device=device, num_images_per_prompt=num_images_per_prompt + ).unsqueeze(0) + + else: + raise ValueError( + f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}" + ) + + image_embeddings.append(image_emb * weight) + + image_emb = torch.cat(image_embeddings).sum(dim=0) + + return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=torch.randn_like(image_emb)) + + def _encode_image( + self, + image: Union[torch.Tensor, List[PIL.Image.Image]], + device, + num_images_per_prompt, + ): + if not isinstance(image, torch.Tensor): + image = self.image_processor(image, return_tensors="pt").pixel_values.to( + dtype=self.image_encoder.dtype, device=device + ) + + image_emb = self.image_encoder(image)["image_embeds"] # B, D + image_emb = image_emb.repeat_interleave(num_images_per_prompt, dim=0) + image_emb.to(device=device) + + return image_emb + + def prepare_latents(self, emb, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + emb = emb.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + 
init_latents = emb + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline.get_zero_embed + def get_zero_embed(self, batch_size=1, device=None): + device = device or self.device + zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to( + device=device, dtype=self.image_encoder.dtype + ) + zero_image_emb = self.image_encoder(zero_img)["image_embeds"] + zero_image_emb = zero_image_emb.repeat(batch_size, 1) + return zero_image_emb + + # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]], + strength: float = 0.3, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + guidance_scale: float = 4.0, + output_type: Optional[str] = "pt", # pt only + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `emb`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. + emb (`torch.Tensor`): + The image embedding. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
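The `strength` argument of this pipeline is turned into a truncated schedule by `get_timesteps` defined earlier in the class; a quick worked example of that arithmetic, using the default `strength=0.3`:

```py
# With 25 inference steps and strength=0.3, only the last
# int(25 * 0.3) = 7 timesteps are run, starting from a partially noised embedding.
num_inference_steps, strength = 25, 0.3
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(t_start, num_inference_steps - t_start)  # 18 7
```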
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generate image. Choose between: `"np"` (`np.array`) or `"pt"` + (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`KandinskyPriorPipelineOutput`] or `tuple` + """ + + if isinstance(prompt, str): + prompt = [prompt] + elif not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif not isinstance(negative_prompt, list) and negative_prompt is not None: + raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") + + # if the negative prompt is defined we double the batch size to + # directly retrieve the negative prompt embedding + if negative_prompt is not None: + prompt = prompt + negative_prompt + negative_prompt = 2 * negative_prompt + + device = self._execution_device + + batch_size = len(prompt) + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + if not isinstance(image, List): + image = [image] + + if isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + + if isinstance(image, torch.Tensor) and image.ndim == 2: + # allow user to pass image_embeds directly + image_embeds = image.repeat_interleave(num_images_per_prompt, dim=0) + elif isinstance(image, torch.Tensor) and image.ndim != 4: + raise ValueError( + f" if pass `image` as pytorch tensor, or a list of pytorch tensor, please make sure each tensor has shape [batch_size, channels, height, width], currently {image[0].unsqueeze(0).shape}" + ) + else: + image_embeds = self._encode_image(image, device, num_images_per_prompt) + + # prior + self.scheduler.set_timesteps(num_inference_steps, device=device) + + latents = image_embeds + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size) + latents = self.prepare_latents( + latents, + latent_timestep, + batch_size // num_images_per_prompt, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + 
encoder_hidden_states=text_encoder_hidden_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == timesteps.shape[0]: + prev_timestep = None + else: + prev_timestep = timesteps[i + 1] + + latents = self.scheduler.step( + predicted_image_embedding, + timestep=t, + sample=latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + latents = self.prior.post_process_latents(latents) + + image_embeddings = latents + + # if negative prompt has been defined, we retrieve split the image embedding into two + if negative_prompt is None: + zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device) + else: + image_embeddings, zero_embeds = image_embeddings.chunk(2) + + self.maybe_free_model_hooks() + + if output_type not in ["pt", "np"]: + raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}") + + if output_type == "np": + image_embeddings = image_embeddings.cpu().numpy() + zero_embeds = zero_embeds.cpu().numpy() + + if not return_dict: + return (image_embeddings, zero_embeds) + + return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds) diff --git a/diffusers3/pipelines/kandinsky3/__init__.py b/diffusers3/pipelines/kandinsky3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a3063141b5e62682ce3ad8e8e0d02473402665 --- /dev/null +++ b/diffusers3/pipelines/kandinsky3/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_kandinsky3"] = ["Kandinsky3Pipeline"] + _import_structure["pipeline_kandinsky3_img2img"] = ["Kandinsky3Img2ImgPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_kandinsky3 import Kandinsky3Pipeline + from .pipeline_kandinsky3_img2img import Kandinsky3Img2ImgPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/kandinsky3/convert_kandinsky3_unet.py b/diffusers3/pipelines/kandinsky3/convert_kandinsky3_unet.py new file mode 100755 index 0000000000000000000000000000000000000000..5360632275b41b035f94d84d6156a6bafe84c613 --- /dev/null +++ b/diffusers3/pipelines/kandinsky3/convert_kandinsky3_unet.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +import argparse +import 
fnmatch + +from safetensors.torch import load_file + +from diffusers import Kandinsky3UNet + + +MAPPING = { + "to_time_embed.1": "time_embedding.linear_1", + "to_time_embed.3": "time_embedding.linear_2", + "in_layer": "conv_in", + "out_layer.0": "conv_norm_out", + "out_layer.2": "conv_out", + "down_samples": "down_blocks", + "up_samples": "up_blocks", + "projection_lin": "encoder_hid_proj.projection_linear", + "projection_ln": "encoder_hid_proj.projection_norm", + "feature_pooling": "add_time_condition", + "to_query": "to_q", + "to_key": "to_k", + "to_value": "to_v", + "output_layer": "to_out.0", + "self_attention_block": "attentions.0", +} + +DYNAMIC_MAP = { + "resnet_attn_blocks.*.0": "resnets_in.*", + "resnet_attn_blocks.*.1": ("attentions.*", 1), + "resnet_attn_blocks.*.2": "resnets_out.*", +} +# MAPPING = {} + + +def convert_state_dict(unet_state_dict): + """ + Args: + Convert the state dict of a U-Net model to match the key format expected by Kandinsky3UNet model. + unet_model (torch.nn.Module): The original U-Net model. unet_kandi3_model (torch.nn.Module): The Kandinsky3UNet + model to match keys with. + + Returns: + OrderedDict: The converted state dictionary. + """ + # Example of renaming logic (this will vary based on your model's architecture) + converted_state_dict = {} + for key in unet_state_dict: + new_key = key + for pattern, new_pattern in MAPPING.items(): + new_key = new_key.replace(pattern, new_pattern) + + for dyn_pattern, dyn_new_pattern in DYNAMIC_MAP.items(): + has_matched = False + if fnmatch.fnmatch(new_key, f"*.{dyn_pattern}.*") and not has_matched: + star = int(new_key.split(dyn_pattern.split(".")[0])[-1].split(".")[1]) + + if isinstance(dyn_new_pattern, tuple): + new_star = star + dyn_new_pattern[-1] + dyn_new_pattern = dyn_new_pattern[0] + else: + new_star = star + + pattern = dyn_pattern.replace("*", str(star)) + new_pattern = dyn_new_pattern.replace("*", str(new_star)) + + new_key = new_key.replace(pattern, new_pattern) + has_matched = True + + converted_state_dict[new_key] = unet_state_dict[key] + + return converted_state_dict + + +def main(model_path, output_path): + # Load your original U-Net model + unet_state_dict = load_file(model_path) + + # Initialize your Kandinsky3UNet model + config = {} + + # Convert the state dict + converted_state_dict = convert_state_dict(unet_state_dict) + + unet = Kandinsky3UNet(config) + unet.load_state_dict(converted_state_dict) + + unet.save_pretrained(output_path) + print(f"Converted model saved to {output_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Convert U-Net PyTorch model to Kandinsky3UNet format") + parser.add_argument("--model_path", type=str, required=True, help="Path to the original U-Net PyTorch model") + parser.add_argument("--output_path", type=str, required=True, help="Path to save the converted model") + + args = parser.parse_args() + main(args.model_path, args.output_path) diff --git a/diffusers3/pipelines/kandinsky3/pipeline_kandinsky3.py b/diffusers3/pipelines/kandinsky3/pipeline_kandinsky3.py new file mode 100644 index 0000000000000000000000000000000000000000..8dbae2a1909ad4d3de74e9b27cfbde40ab6eae61 --- /dev/null +++ b/diffusers3/pipelines/kandinsky3/pipeline_kandinsky3.py @@ -0,0 +1,576 @@ +from typing import Callable, Dict, List, Optional, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import Kandinsky3UNet, VQModel +from ...schedulers import DDPMScheduler 
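For reference, the key renaming performed by `convert_state_dict` in the conversion script above reduces to successive substring replacements; a toy check on one `MAPPING` entry (the checkpoint key is made up for illustration):

```py
# Hypothetical key, shown only to illustrate the substring replacement.
MAPPING = {"to_time_embed.1": "time_embedding.linear_1"}

key = "to_time_embed.1.weight"
for pattern, new_pattern in MAPPING.items():
    key = key.replace(pattern, new_pattern)
print(key)  # time_embedding.linear_1.weight
```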
+from ...utils import ( + deprecate, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AutoPipelineForText2Image + >>> import torch + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." + + >>> generator = torch.Generator(device="cpu").manual_seed(0) + >>> image = pipe(prompt, num_inference_steps=25, generator=generator).images[0] + ``` + +""" + + +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +class Kandinsky3Pipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + model_cpu_offload_seq = "text_encoder->unet->movq" + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "negative_attention_mask", + "attention_mask", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: Kandinsky3UNet, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq + ) + + def process_embeds(self, embeddings, attention_mask, cut_context): + if cut_context: + embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) + max_seq_length = attention_mask.sum(-1).max() + 1 + embeddings = embeddings[:, :max_seq_length] + attention_mask = attention_mask[:, :max_seq_length] + return embeddings, attention_mask + + @torch.no_grad() + def encode_prompt( + self, + prompt, + do_classifier_free_guidance=True, + num_images_per_prompt=1, + device=None, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + _cut_context=False, + attention_mask: Optional[torch.Tensor] = None, + negative_attention_mask: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. + negative_attention_mask (`torch.Tensor`, *optional*): + Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + max_length = 128 + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_embeds, attention_mask = self.process_embeds(prompt_embeds, attention_mask, _cut_context) + prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + attention_mask = attention_mask.repeat(num_images_per_prompt, 1) + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + if negative_prompt is not None: + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=128, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]] + negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]] + negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) + + else: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_attention_mask = torch.zeros_like(attention_mask) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + if negative_prompt_embeds.shape != prompt_embeds.shape: + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + negative_attention_mask = None + return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask + + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + attention_mask=None, + negative_attention_mask=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if negative_prompt_embeds is not None and negative_attention_mask is None: + raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`") + + if negative_prompt_embeds is not None and negative_attention_mask is not None: + if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: + raise ValueError( + "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`" + f" {negative_attention_mask.shape}." + ) + + if prompt_embeds is not None and attention_mask is None: + raise ValueError("Please provide `attention_mask` along with `prompt_embeds`") + + if prompt_embeds is not None and attention_mask is not None: + if prompt_embeds.shape[:2] != attention_mask.shape: + raise ValueError( + "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`" + f" {attention_mask.shape}." + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 25, + guidance_scale: float = 3.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = 1024, + width: Optional[int] = 1024, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + negative_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + latents=None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
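When `prompt_embeds` is passed directly, `check_inputs` above requires a matching `attention_mask` whose shape equals the embeddings' leading `(batch, seq_len)` dims; a minimal shape sketch (the 128-token length matches the tokenizer `max_length` used in `encode_prompt`, while the feature size here is purely illustrative):

```py
import torch

prompt_embeds = torch.randn(1, 128, 4096)            # (batch, seq_len, dim); dim is made up
attention_mask = torch.ones(1, 128, dtype=torch.long)

# Mirrors the check in `check_inputs`: the leading dims must agree.
assert prompt_embeds.shape[:2] == attention_mask.shape
```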
+ timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 3.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. + negative_attention_mask (`torch.Tensor`, *optional*): + Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. 
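+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+                generation. If not provided, a latents tensor is generated by sampling with the supplied `generator`.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference with the arguments
+                `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`.
+                `callback_kwargs` will include the tensors specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. Only variables listed in the
+                `._callback_tensor_inputs` attribute of the pipeline class can be included.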
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + cut_context = True + device = self._execution_device + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + attention_mask, + negative_attention_mask, + ) + + self._guidance_scale = guidance_scale + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt( + prompt, + self.do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + _cut_context=cut_context, + attention_mask=attention_mask, + negative_attention_mask=negative_attention_mask, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + height, width = downscale_height_and_width(height, width, 8) + + latents = self.prepare_latents( + (batch_size * num_images_per_prompt, 4, height, width), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=attention_mask, + return_dict=False, + )[0] + + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + + noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond + # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + attention_mask = callback_outputs.pop("attention_mask", attention_mask) + negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py b/diffusers3/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..81c45c4fb6f87e7fced3626ad86fabe3f9ec80ad --- /dev/null +++ b/diffusers3/pipelines/kandinsky3/pipeline_kandinsky3_img2img.py @@ -0,0 +1,643 @@ +import inspect +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import PIL.Image +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...models import Kandinsky3UNet, VQModel +from ...schedulers import DDPMScheduler +from ...utils import ( + deprecate, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from 
diffusers import AutoPipelineForImage2Image + >>> from diffusers.utils import load_image + >>> import torch + + >>> pipe = AutoPipelineForImage2Image.from_pretrained( + ... "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A painting of the inside of a subway train with tiny raccoons." + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" + ... ) + + >>> generator = torch.Generator(device="cpu").manual_seed(0) + >>> image = pipe(prompt, image=image, strength=0.75, num_inference_steps=25, generator=generator).images[0] + ``` +""" + + +def downscale_height_and_width(height, width, scale_factor=8): + new_height = height // scale_factor**2 + if height % scale_factor**2 != 0: + new_height += 1 + new_width = width // scale_factor**2 + if width % scale_factor**2 != 0: + new_width += 1 + return new_height * scale_factor, new_width * scale_factor + + +def prepare_image(pil_image): + arr = np.array(pil_image.convert("RGB")) + arr = arr.astype(np.float32) / 127.5 - 1 + arr = np.transpose(arr, [2, 0, 1]) + image = torch.from_numpy(arr).unsqueeze(0) + return image + + +class Kandinsky3Img2ImgPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + model_cpu_offload_seq = "text_encoder->movq->unet->movq" + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "negative_attention_mask", + "attention_mask", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + unet: Kandinsky3UNet, + scheduler: DDPMScheduler, + movq: VQModel, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, movq=movq + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start:] + + return timesteps, num_inference_steps - t_start + + def _process_embeds(self, embeddings, attention_mask, cut_context): + # return embeddings, attention_mask + if cut_context: + embeddings[attention_mask == 0] = torch.zeros_like(embeddings[attention_mask == 0]) + max_seq_length = attention_mask.sum(-1).max() + 1 + embeddings = embeddings[:, :max_seq_length] + attention_mask = attention_mask[:, :max_seq_length] + return embeddings, attention_mask + + @torch.no_grad() + def encode_prompt( + self, + prompt, + do_classifier_free_guidance=True, + num_images_per_prompt=1, + device=None, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + _cut_context=False, + attention_mask: Optional[torch.Tensor] = None, + negative_attention_mask: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
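+        Prompts are tokenized with the T5 tokenizer, padded or truncated to a maximum of 128 tokens. When
+        `_cut_context` is enabled, embeddings and attention masks are trimmed to the length of the longest
+        non-padded sequence in the batch (plus one token) and embeddings at padded positions are zeroed out.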
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. + negative_attention_mask (`torch.Tensor`, *optional*): + Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. + """ + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + max_length = 128 + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_embeds, attention_mask = self._process_embeds(prompt_embeds, attention_mask, _cut_context) + prompt_embeds = prompt_embeds * attention_mask.unsqueeze(2) + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + attention_mask = attention_mask.repeat(num_images_per_prompt, 1) + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size 
{len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + if negative_prompt is not None: + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=128, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds[:, : prompt_embeds.shape[1]] + negative_attention_mask = negative_attention_mask[:, : prompt_embeds.shape[1]] + negative_prompt_embeds = negative_prompt_embeds * negative_attention_mask.unsqueeze(2) + + else: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_attention_mask = torch.zeros_like(attention_mask) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + if negative_prompt_embeds.shape != prompt_embeds.shape: + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + negative_attention_mask = negative_attention_mask.repeat(num_images_per_prompt, 1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + negative_attention_mask = None + return prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + init_latents = [ + self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.movq.encode(image).latent_dist.sample(generator) + + init_latents = self.movq.config.scaling_factor * init_latents + + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + attention_mask=None, + negative_attention_mask=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if negative_prompt_embeds is not None and negative_attention_mask is None: + raise ValueError("Please provide `negative_attention_mask` along with `negative_prompt_embeds`") + + if negative_prompt_embeds is not None and negative_attention_mask is not None: + if negative_prompt_embeds.shape[:2] != negative_attention_mask.shape: + raise ValueError( + "`negative_prompt_embeds` and `negative_attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `negative_prompt_embeds` {negative_prompt_embeds.shape[:2]} != `negative_attention_mask`" + f" {negative_attention_mask.shape}." + ) + + if prompt_embeds is not None and attention_mask is None: + raise ValueError("Please provide `attention_mask` along with `prompt_embeds`") + + if prompt_embeds is not None and attention_mask is not None: + if prompt_embeds.shape[:2] != attention_mask.shape: + raise ValueError( + "`prompt_embeds` and `attention_mask` must have the same batch_size and token length when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape[:2]} != `attention_mask`" + f" {attention_mask.shape}." + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None, + strength: float = 0.3, + num_inference_steps: int = 25, + guidance_scale: float = 3.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + negative_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. 
When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 3.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask. Must provide if passing `prompt_embeds` directly. + negative_attention_mask (`torch.Tensor`, *optional*): + Pre-generated negative attention mask. Must provide if passing `negative_prompt_embeds` directly. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
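+
+        Because denoising starts from a noised version of `image`, earlier timesteps are skipped when `strength` is
+        below 1, so the effective number of denoising steps is approximately `int(num_inference_steps * strength)`
+        (see `get_timesteps`).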
+ + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` + + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + cut_context = True + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + attention_mask, + negative_attention_mask, + ) + + self._guidance_scale = guidance_scale + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds, attention_mask, negative_attention_mask = self.encode_prompt( + prompt, + self.do_classifier_free_guidance, + num_images_per_prompt=num_images_per_prompt, + device=device, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + _cut_context=cut_context, + attention_mask=attention_mask, + negative_attention_mask=negative_attention_mask, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + attention_mask = torch.cat([negative_attention_mask, attention_mask]).bool() + if not isinstance(image, list): + image = [image] + if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image): + raise ValueError( + f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" + ) + + image = torch.cat([prepare_image(i) for i in image], dim=0) + image = image.to(dtype=prompt_embeds.dtype, device=device) + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + # 5. Prepare latents + latents = self.movq.encode(image)["latents"] + latents = latents.repeat_interleave(num_images_per_prompt, dim=0) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + latents = self.prepare_latents( + latents, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None: + self.text_encoder_offload_hook.offload() + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=attention_mask, + )[0] + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + + noise_pred = (guidance_scale + 1.0) * noise_pred_text - guidance_scale * noise_pred_uncond + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, + t, + latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + attention_mask = callback_outputs.pop("attention_mask", attention_mask) + negative_attention_mask = callback_outputs.pop("negative_attention_mask", negative_attention_mask) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # post-processing + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `pil`, `np` and `latent` are supported not output_type={output_type}" + ) + if not output_type == "latent": + image = self.movq.decode(latents, force_not_quantize=True)["sample"] + + if output_type in ["np", "pil"]: + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + else: + image = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/kolors/__init__.py b/diffusers3/pipelines/kolors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..671d22e9f433802ec82ebe89fae4cef6c001064b --- /dev/null +++ b/diffusers3/pipelines/kolors/__init__.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_sentencepiece_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()) and is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects)) +else: + _import_structure["pipeline_kolors"] = ["KolorsPipeline"] + _import_structure["pipeline_kolors_img2img"] = ["KolorsImg2ImgPipeline"] + _import_structure["text_encoder"] = 
["ChatGLMModel"] + _import_structure["tokenizer"] = ["ChatGLMTokenizer"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()) and is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_and_sentencepiece_objects import * + + else: + from .pipeline_kolors import KolorsPipeline + from .pipeline_kolors_img2img import KolorsImg2ImgPipeline + from .text_encoder import ChatGLMModel + from .tokenizer import ChatGLMTokenizer + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/kolors/pipeline_kolors.py b/diffusers3/pipelines/kolors/pipeline_kolors.py new file mode 100644 index 0000000000000000000000000000000000000000..b682429e9744decf6859933e53c7d73bcb19f8bd --- /dev/null +++ b/diffusers3/pipelines/kolors/pipeline_kolors.py @@ -0,0 +1,1070 @@ +# Copyright 2024 Stability AI, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import KolorsPipelineOutput +from .text_encoder import ChatGLMModel +from .tokenizer import ChatGLMTokenizer + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import KolorsPipeline + + >>> pipe = KolorsPipeline.from_pretrained( + ... "Kwai-Kolors/Kolors-diffusers", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = ( + ... "A photo of a ladybug, macro, zoom, high quality, film, holding a wooden sign with the text 'KOLORS'" + ... 
) + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class KolorsPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin): + r""" + Pipeline for text-to-image generation using Kolors. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`ChatGLMModel`]): + Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b). + tokenizer (`ChatGLMTokenizer`): + Tokenizer of class + [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"False"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `Kwai-Kolors/Kolors-diffusers`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = [ + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ChatGLMModel, + tokenizer: ChatGLMTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = False, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + def encode_prompt( + self, + prompt, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + max_sequence_length: int = 256, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + """ + # from IPython import embed; embed(); exit() + device = device or self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer] + text_encoders = [self.text_encoder] + + if prompt_embeds is None: + prompt_embeds_list = [] + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_tensors="pt", + ).to(device) + output = text_encoder( + input_ids=text_inputs["input_ids"], + attention_mask=text_inputs["attention_mask"], + position_ids=text_inputs["position_ids"], + output_hidden_states=True, + ) + + # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] + # clone to have a contiguous tensor + prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() + # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] + pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = prompt_embeds_list[0] + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + negative_prompt_embeds_list = [] + + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_tensors="pt", + ).to(device) + output = text_encoder( + input_ids=uncond_input["input_ids"], + attention_mask=uncond_input["attention_mask"], + position_ids=uncond_input["position_ids"], + output_hidden_states=True, + ) + + # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] + # clone to have a contiguous tensor + negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() + # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] + negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = negative_prompt_embeds_list[0] + + bs_embed = pooled_prompt_embeds.shape[0] + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + num_inference_steps, + height, + width, + negative_prompt=None, + prompt_embeds=None, + pooled_prompt_embeds=None, + negative_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + if max_sequence_length is not None and max_sequence_length > 256: + raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
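+    #
+    # A minimal sketch of how these two properties are consumed in the denoising
+    # loop below (step 8 of `__call__`), with `w` standing for `self.guidance_scale`:
+    #
+    #     if self.do_classifier_free_guidance:
+    #         noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+    #         noise_pred = noise_pred_uncond + w * (noise_pred_text - noise_pred_uncond)
+    #
+    # so `w <= 1` (or a UNet configured with `time_cond_proj_dim`) disables
+    # classifier-free guidance entirely.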
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints + that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints + that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                will be used.
+            denoising_end (`float`, *optional*):
+                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
+                completed before it is intentionally prematurely terminated. As a result, the returned sample will
+                still retain a substantial amount of noise as determined by the discrete timesteps selected by the
+                scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
+                "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
+                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
+            guidance_scale (`float`, *optional*, defaults to 5.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                [`schedulers.DDIMScheduler`], will be ignored for others.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative pooled text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.kolors.KolorsPipelineOutput`] instead of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.kolors.KolorsPipelineOutput`] or `tuple`: [`~pipelines.kolors.KolorsPipelineOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + num_inference_steps, + height, + width, + negative_prompt, + prompt_embeds, + pooled_prompt_embeds, + negative_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + ) + + # 4. 
Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return KolorsPipelineOutput(images=image) diff --git a/diffusers3/pipelines/kolors/pipeline_kolors_img2img.py b/diffusers3/pipelines/kolors/pipeline_kolors_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..4985a80f88df49b76b7614bdb4bb05585b1942c1 --- /dev/null +++ b/diffusers3/pipelines/kolors/pipeline_kolors_img2img.py @@ -0,0 +1,1250 @@ +# Copyright 2024 Stability AI, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import KolorsPipelineOutput +from .text_encoder import ChatGLMModel +from .tokenizer import ChatGLMTokenizer + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import KolorsImg2ImgPipeline + >>> from diffusers.utils import load_image + + >>> pipe = KolorsImg2ImgPipeline.from_pretrained( + ... "Kwai-Kolors/Kolors-diffusers", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + >>> url = ( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/kolors/bunny_source.png" + ... ) + + + >>> init_image = load_image(url) + >>> prompt = "high quality image of a capybara wearing sunglasses. In the background of the image there are trees, poles, grass and other objects. At the bottom of the object there is the road., 8k, highly detailed." 
+ >>> image = pipe(prompt, image=init_image).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+            )
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+
+class KolorsImg2ImgPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin):
+    r"""
+    Pipeline for image-to-image generation using Kolors.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+    The pipeline also inherits the following loading methods:
+        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
+        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
+        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
+
+    Args:
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+        text_encoder ([`ChatGLMModel`]):
+            Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
+        tokenizer (`ChatGLMTokenizer`):
+            Tokenizer of class
+            [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
+        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+        scheduler ([`SchedulerMixin`]):
+            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
+        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"False"`):
+            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
+            `Kwai-Kolors/Kolors-diffusers`.
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder-unet->vae" + _optional_components = [ + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ChatGLMModel, + tokenizer: ChatGLMTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = False, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + # Copied from diffusers.pipelines.kolors.pipeline_kolors.KolorsPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + max_sequence_length: int = 256, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. 
+ max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + """ + # from IPython import embed; embed(); exit() + device = device or self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer] + text_encoders = [self.text_encoder] + + if prompt_embeds is None: + prompt_embeds_list = [] + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_tensors="pt", + ).to(device) + output = text_encoder( + input_ids=text_inputs["input_ids"], + attention_mask=text_inputs["attention_mask"], + position_ids=text_inputs["position_ids"], + output_hidden_states=True, + ) + + # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] + # clone to have a contiguous tensor + prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() + # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] + pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = prompt_embeds_list[0] + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + negative_prompt_embeds_list = [] + + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_tensors="pt", + ).to(device) + output = text_encoder( + input_ids=uncond_input["input_ids"], + attention_mask=uncond_input["attention_mask"], + position_ids=uncond_input["position_ids"], + output_hidden_states=True, + ) + + # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] + # clone to have a contiguous tensor + negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() + # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] + negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = negative_prompt_embeds_list[0] + + bs_embed = pooled_prompt_embeds.shape[0] + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not 
isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + num_inference_steps, + height, + width, + negative_prompt=None, + prompt_embeds=None, + pooled_prompt_embeds=None, + negative_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." 
+ ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + if max_sequence_length is not None and max_sequence_length > 256: + raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + else: + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd derivative) which leads to incorrect results. 
By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + t_start = len(self.scheduler.timesteps) - num_inference_steps + timesteps = self.scheduler.timesteps[t_start:] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start) + return timesteps, num_inference_steps + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
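+    #
+    # Note: when the UNet carries a guidance-scale embedding (`time_cond_proj_dim`
+    # is not None, e.g. guidance-distilled checkpoints), `do_classifier_free_guidance`
+    # returns False and the scale is instead fed to the model as `timestep_cond`,
+    # roughly (a sketch, mirroring the text-to-image pipeline above):
+    #
+    #     w = torch.tensor(guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+    #     timestep_cond = self.get_guidance_scale_embedding(
+    #         w, embedding_dim=self.unet.config.time_cond_proj_dim
+    #     ).to(device=device, dtype=latents.dtype)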
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_start(self): + return self._denoising_start + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + strength: float = 0.3, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): + The image(s) to modify with the pipeline. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of + `denoising_start` being declared as an integer, the value of `strength` will be ignored. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. 
+ Anything below 512 pixels won't work well for + [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints + that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints + that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ eta (`float`, *optional*, defaults to 0.0):
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+ [`schedulers.DDIMScheduler`], and is ignored for other schedulers.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.Tensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from the `prompt` input argument.
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
+ argument.
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from the `negative_prompt`
+ input argument.
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+ Pre-generated image embeddings for IP-Adapter. It should be a list of the same length as the number of
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.kolors.KolorsPipelineOutput`] instead of a plain tuple.
+ cross_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
+ explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
+ micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+ To negatively condition the generation process based on a target image resolution. It should be the
+ same as `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+ each denoising step during inference, with the following arguments: `callback_on_step_end(self:
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in
+ the `._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.kolors.KolorsPipelineOutput`] or `tuple`: [`~pipelines.kolors.KolorsPipelineOutput`] if
+ `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
+ generated images.
+ """
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ # 0.
Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + num_inference_steps, + height, + width, + negative_prompt, + prompt_embeds, + pooled_prompt_embeds, + negative_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. Prepare timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + add_noise = True if self.denoising_start is None else False + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + add_noise, + ) + + # 7. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 8. 
Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 9. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 9.1 Apply denoising_end + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." 
+ ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return KolorsPipelineOutput(images=image) diff --git a/diffusers3/pipelines/kolors/pipeline_output.py b/diffusers3/pipelines/kolors/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..310ee7e8a89b253c55dfb47e42964310c93eb56a --- /dev/null +++ b/diffusers3/pipelines/kolors/pipeline_output.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class KolorsPipelineOutput(BaseOutput): + """ + Output class for Kolors pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] diff --git a/diffusers3/pipelines/kolors/text_encoder.py b/diffusers3/pipelines/kolors/text_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb6f18a907aa1efb68b2ca6f82ca477534c3a31 --- /dev/null +++ b/diffusers3/pipelines/kolors/text_encoder.py @@ -0,0 +1,889 @@ +# Copyright 2024 ChatGLM3-6B Model Team, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn import LayerNorm +from torch.nn.utils import skip_init +from transformers import PretrainedConfig, PreTrainedModel +from transformers.modeling_outputs import BaseModelOutputWithPast + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class ChatGLMConfig(PretrainedConfig): + model_type = "chatglm" + + def __init__( + self, + num_layers=28, + padded_vocab_size=65024, + hidden_size=4096, + ffn_hidden_size=13696, + kv_channels=128, + num_attention_heads=32, + seq_length=2048, + hidden_dropout=0.0, + classifier_dropout=None, + attention_dropout=0.0, + layernorm_epsilon=1e-5, + rmsnorm=True, + apply_residual_connection_post_layernorm=False, + post_layer_norm=True, + add_bias_linear=False, + add_qkv_bias=False, + bias_dropout_fusion=True, + multi_query_attention=False, + multi_query_group_num=1, + apply_query_key_layer_scaling=True, + attention_softmax_in_fp32=True, + fp32_residual_connection=False, + quantization_bit=0, + pre_seq_len=None, + prefix_projection=False, + **kwargs, + ): + self.num_layers = num_layers + self.vocab_size = padded_vocab_size + self.padded_vocab_size = padded_vocab_size + self.hidden_size = hidden_size + self.ffn_hidden_size = ffn_hidden_size + self.kv_channels = kv_channels + self.num_attention_heads = num_attention_heads + self.seq_length = seq_length + self.hidden_dropout = hidden_dropout + self.classifier_dropout = classifier_dropout + self.attention_dropout = attention_dropout + self.layernorm_epsilon = layernorm_epsilon + self.rmsnorm = rmsnorm + self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm + self.post_layer_norm = post_layer_norm + self.add_bias_linear = add_bias_linear + self.add_qkv_bias = add_qkv_bias + self.bias_dropout_fusion = bias_dropout_fusion + self.multi_query_attention = multi_query_attention + self.multi_query_group_num = multi_query_group_num + self.apply_query_key_layer_scaling = apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = attention_softmax_in_fp32 + self.fp32_residual_connection = fp32_residual_connection + self.quantization_bit = quantization_bit + self.pre_seq_len = pre_seq_len + self.prefix_projection = prefix_projection + super().__init__(**kwargs) + + +class RMSNorm(torch.nn.Module): + def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs): + super().__init__() + self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) + self.eps = eps + + def forward(self, hidden_states: torch.Tensor): + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.eps) + + return (self.weight * hidden_states).to(input_dtype) + + +def _config_to_kwargs(args): + common_kwargs = { + "dtype": args.torch_dtype, + } + 
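+ # The `dtype` collected here is forwarded to the Linear layers built below
+ # (SelfAttention and MLP), keeping the text encoder weights in `config.torch_dtype`.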
return common_kwargs + + +class CoreAttention(torch.nn.Module): + def __init__(self, config: ChatGLMConfig, layer_number): + super(CoreAttention, self).__init__() + + self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling + self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 + if self.apply_query_key_layer_scaling: + self.attention_softmax_in_fp32 = True + self.layer_number = max(1, layer_number) + + projection_size = config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. + self.hidden_size_per_partition = projection_size + self.hidden_size_per_attention_head = projection_size // config.num_attention_heads + self.num_attention_heads_per_partition = config.num_attention_heads + + coeff = None + self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) + if self.apply_query_key_layer_scaling: + coeff = self.layer_number + self.norm_factor *= coeff + self.coeff = coeff + + self.attention_dropout = torch.nn.Dropout(config.attention_dropout) + + def forward(self, query_layer, key_layer, value_layer, attention_mask): + pytorch_major_version = int(torch.__version__.split(".")[0]) + if pytorch_major_version >= 2: + query_layer, key_layer, value_layer = [ + k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer] + ] + if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: + context_layer = torch.nn.functional.scaled_dot_product_attention( + query_layer, key_layer, value_layer, is_causal=True + ) + else: + if attention_mask is not None: + attention_mask = ~attention_mask + context_layer = torch.nn.functional.scaled_dot_product_attention( + query_layer, key_layer, value_layer, attention_mask + ) + context_layer = context_layer.permute(2, 0, 1, 3) + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.reshape(*new_context_layer_shape) + else: + # Raw attention scores + + # [b, np, sq, sk] + output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) + + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) + # [sk, b, np, hn] -> [sk, b * np, hn] + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + + # preallocting input tensor: [b * np, sq, sk] + matmul_input_buffer = torch.empty( + output_size[0] * output_size[1], + output_size[2], + output_size[3], + dtype=query_layer.dtype, + device=query_layer.device, + ) + + # Raw attention scores. 
[b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_input_buffer, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + # =========================== + # Attention probs and dropout + # =========================== + + # attention scores and attention mask [b, np, sq, sk] + if self.attention_softmax_in_fp32: + attention_scores = attention_scores.float() + if self.coeff is not None: + attention_scores = attention_scores * self.coeff + if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: + attention_mask = torch.ones( + output_size[0], 1, output_size[2], output_size[3], device=attention_scores.device, dtype=torch.bool + ) + attention_mask.tril_() + attention_mask = ~attention_mask + if attention_mask is not None: + attention_scores = attention_scores.masked_fill(attention_mask, float("-inf")) + attention_probs = F.softmax(attention_scores, dim=-1) + attention_probs = attention_probs.type_as(value_layer) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.attention_dropout(attention_probs) + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. + # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) + # change view [sk, b * np, hn] + value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.view(*new_context_layer_shape) + + return context_layer + + +def split_tensor_along_last_dim( + tensor: torch.Tensor, + num_partitions: int, + contiguous_split_chunks: bool = False, +) -> List[torch.Tensor]: + """Split a tensor along its last dimension. + + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + + Returns: + A list of Tensors + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. 
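+ # In this file the helper is used in `SelfAttention.forward` (when multi-query attention
+ # is disabled) to split the fused QKV projection [sq, b, np, 3 * hn] into three
+ # [sq, b, np, hn] chunks for query, key and value.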
+ if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +@torch.jit.script +def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: + # x: [sq, b, np, hn] + sq, _b, np, _hn = x.size(0), x.size(1), x.size(2), x.size(3) + rot_dim = rope_cache.shape[-2] * 2 + x, x_pass = x[..., :rot_dim], x[..., rot_dim:] + # truncate to support variable sizes + rope_cache = rope_cache[:sq] + xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2) + rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2) + x_out2 = torch.stack( + [ + xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], + xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1], + ], + -1, + ) + x_out2 = x_out2.flatten(3) + return torch.cat((x_out2, x_pass), dim=-1) + + +class SelfAttention(torch.nn.Module): + """Parallel self-attention layer abstract class. + + Self-attention layer takes input with size [s, b, h] and returns output of the same size. + """ + + def __init__(self, config: ChatGLMConfig, layer_number, device=None): + super(SelfAttention, self).__init__() + self.layer_number = max(1, layer_number) + + self.projection_size = config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. + self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads + self.num_attention_heads_per_partition = config.num_attention_heads + + self.multi_query_attention = config.multi_query_attention + self.qkv_hidden_size = 3 * self.projection_size + if self.multi_query_attention: + self.num_multi_query_groups_per_partition = config.multi_query_group_num + self.qkv_hidden_size = ( + self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num + ) + self.query_key_value = nn.Linear( + config.hidden_size, + self.qkv_hidden_size, + bias=config.add_bias_linear or config.add_qkv_bias, + device=device, + **_config_to_kwargs(config), + ) + + self.core_attention = CoreAttention(config, self.layer_number) + + # Output. + self.dense = nn.Linear( + self.projection_size, + config.hidden_size, + bias=config.add_bias_linear, + device=device, + **_config_to_kwargs(config), + ) + + def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): + if self.multi_query_attention: + num_attention_heads = self.num_multi_query_groups_per_partition + else: + num_attention_heads = self.num_attention_heads_per_partition + return torch.empty( + inference_max_sequence_len, + batch_size, + num_attention_heads, + self.hidden_size_per_attention_head, + dtype=dtype, + device=device, + ) + + def forward(self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True): + # hidden_states: [sq, b, h] + + # ================================================= + # Pre-allocate memory for key-values for inference. 
+ # ================================================= + # ===================== + # Query, Key, and Value + # ===================== + + # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] + mixed_x_layer = self.query_key_value(hidden_states) + + if self.multi_query_attention: + (query_layer, key_layer, value_layer) = mixed_x_layer.split( + [ + self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, + self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, + self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, + ], + dim=-1, + ) + query_layer = query_layer.view( + query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + key_layer = key_layer.view( + key_layer.size()[:-1] + + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) + ) + value_layer = value_layer.view( + value_layer.size()[:-1] + + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) + ) + else: + new_tensor_shape = mixed_x_layer.size()[:-1] + ( + self.num_attention_heads_per_partition, + 3 * self.hidden_size_per_attention_head, + ) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] + (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) + + # apply relative positional encoding (rotary embedding) + if rotary_pos_emb is not None: + query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) + key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) + + # adjust key and value for inference + if kv_cache is not None: + cache_k, cache_v = kv_cache + key_layer = torch.cat((cache_k, key_layer), dim=0) + value_layer = torch.cat((cache_v, value_layer), dim=0) + if use_cache: + kv_cache = (key_layer, value_layer) + else: + kv_cache = None + + if self.multi_query_attention: + key_layer = key_layer.unsqueeze(-2) + key_layer = key_layer.expand( + -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 + ) + key_layer = key_layer.contiguous().view( + key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + value_layer = value_layer.unsqueeze(-2) + value_layer = value_layer.expand( + -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 + ) + value_layer = value_layer.contiguous().view( + value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) + ) + + # ================================== + # core attention computation + # ================================== + + context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) + + # ================= + # Output. [sq, b, h] + # ================= + + output = self.dense(context_layer) + + return output, kv_cache + + +class MLP(torch.nn.Module): + """MLP. + + MLP will take the input with h hidden state, project it to 4*h hidden dimension, perform nonlinear transformation, + and project the state back into h hidden dimension. + """ + + def __init__(self, config: ChatGLMConfig, device=None): + super(MLP, self).__init__() + + self.add_bias = config.add_bias_linear + + # Project to 4h. 
If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf + self.dense_h_to_4h = nn.Linear( + config.hidden_size, + config.ffn_hidden_size * 2, + bias=self.add_bias, + device=device, + **_config_to_kwargs(config), + ) + + def swiglu(x): + x = torch.chunk(x, 2, dim=-1) + return F.silu(x[0]) * x[1] + + self.activation_func = swiglu + + # Project back to h. + self.dense_4h_to_h = nn.Linear( + config.ffn_hidden_size, config.hidden_size, bias=self.add_bias, device=device, **_config_to_kwargs(config) + ) + + def forward(self, hidden_states): + # [s, b, 4hp] + intermediate_parallel = self.dense_h_to_4h(hidden_states) + intermediate_parallel = self.activation_func(intermediate_parallel) + # [s, b, h] + output = self.dense_4h_to_h(intermediate_parallel) + return output + + +class GLMBlock(torch.nn.Module): + """A single transformer layer. + + Transformer layer takes input with size [s, b, h] and returns an output of the same size. + """ + + def __init__(self, config: ChatGLMConfig, layer_number, device=None): + super(GLMBlock, self).__init__() + self.layer_number = layer_number + + self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm + + self.fp32_residual_connection = config.fp32_residual_connection + + LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm + # Layernorm on the input data. + self.input_layernorm = LayerNormFunc( + config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype + ) + + # Self attention. + self.self_attention = SelfAttention(config, layer_number, device=device) + self.hidden_dropout = config.hidden_dropout + + # Layernorm on the attention output + self.post_attention_layernorm = LayerNormFunc( + config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype + ) + + # MLP + self.mlp = MLP(config, device=device) + + def forward( + self, + hidden_states, + attention_mask, + rotary_pos_emb, + kv_cache=None, + use_cache=True, + ): + # hidden_states: [s, b, h] + + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + # Self attention. + attention_output, kv_cache = self.self_attention( + layernorm_output, attention_mask, rotary_pos_emb, kv_cache=kv_cache, use_cache=use_cache + ) + + # Residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) + layernorm_input = residual + layernorm_input + + # Layer norm post the self attention. + layernorm_output = self.post_attention_layernorm(layernorm_input) + + # MLP. + mlp_output = self.mlp(layernorm_output) + + # Second residual connection. + if self.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + + output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) + output = residual + output + + return output, kv_cache + + +class GLMTransformer(torch.nn.Module): + """Transformer class.""" + + def __init__(self, config: ChatGLMConfig, device=None): + super(GLMTransformer, self).__init__() + + self.fp32_residual_connection = config.fp32_residual_connection + self.post_layer_norm = config.post_layer_norm + + # Number of layers. + self.num_layers = config.num_layers + + # Transformer layers. 
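+ # The `layer_number` passed to each GLMBlock is 1-indexed; CoreAttention uses it as the
+ # query-key scaling coefficient when `apply_query_key_layer_scaling` is enabled.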
+ def build_layer(layer_number): + return GLMBlock(config, layer_number, device=device) + + self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) + + if self.post_layer_norm: + LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm + # Final layer norm before output. + self.final_layernorm = LayerNormFunc( + config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype + ) + + self.gradient_checkpointing = False + + def _get_layer(self, layer_number): + return self.layers[layer_number] + + def forward( + self, + hidden_states, + attention_mask, + rotary_pos_emb, + kv_caches=None, + use_cache: Optional[bool] = True, + output_hidden_states: Optional[bool] = False, + ): + if not kv_caches: + kv_caches = [None for _ in range(self.num_layers)] + presents = () if use_cache else None + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + all_self_attentions = None + all_hidden_states = () if output_hidden_states else None + for index in range(self.num_layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer = self._get_layer(index) + if self.gradient_checkpointing and self.training: + layer_ret = torch.utils.checkpoint.checkpoint( + layer, hidden_states, attention_mask, rotary_pos_emb, kv_caches[index], use_cache + ) + else: + layer_ret = layer( + hidden_states, attention_mask, rotary_pos_emb, kv_cache=kv_caches[index], use_cache=use_cache + ) + hidden_states, kv_cache = layer_ret + if use_cache: + presents = presents + (kv_cache,) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # Final layer norm. + if self.post_layer_norm: + hidden_states = self.final_layernorm(hidden_states) + + return hidden_states, presents, all_hidden_states, all_self_attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + is_parallelizable = False + supports_gradient_checkpointing = True + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLMBlock"] + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + def get_masks(self, input_ids, past_key_values, padding_mask=None): + batch_size, seq_length = input_ids.shape + full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device) + full_attention_mask.tril_() + past_length = 0 + if past_key_values: + past_length = past_key_values[0][0].shape[0] + if past_length: + full_attention_mask = torch.cat( + (torch.ones(batch_size, seq_length, past_length, device=input_ids.device), full_attention_mask), dim=-1 + ) + if padding_mask is not None: + full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) + if not past_length and padding_mask is not None: + full_attention_mask -= padding_mask.unsqueeze(-1) - 1 + full_attention_mask = (full_attention_mask < 0.5).bool() + full_attention_mask.unsqueeze_(1) + return full_attention_mask + + def get_position_ids(self, input_ids, device): + batch_size, seq_length = input_ids.shape + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + return position_ids + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, GLMTransformer): + module.gradient_checkpointing = value + + +def default_init(cls, *args, **kwargs): + return cls(*args, **kwargs) + + +class Embedding(torch.nn.Module): + """Language model embeddings.""" + + def __init__(self, config: ChatGLMConfig, device=None): + super(Embedding, self).__init__() + + self.hidden_size = config.hidden_size + # Word embeddings (parallel). + self.word_embeddings = nn.Embedding( + config.padded_vocab_size, self.hidden_size, dtype=config.torch_dtype, device=device + ) + self.fp32_residual_connection = config.fp32_residual_connection + + def forward(self, input_ids): + # Embeddings. + words_embeddings = self.word_embeddings(input_ids) + embeddings = words_embeddings + # Data format change to avoid explicit tranposes : [b s h] --> [s b h]. + embeddings = embeddings.transpose(0, 1).contiguous() + # If the input flag for fp32 residual connection is set, convert for float. + if self.fp32_residual_connection: + embeddings = embeddings.float() + return embeddings + + +class RotaryEmbedding(nn.Module): + def __init__(self, dim, original_impl=False, device=None, dtype=None): + super().__init__() + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)) + self.register_buffer("inv_freq", inv_freq) + self.dim = dim + self.original_impl = original_impl + + def forward_impl(self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000): + """Enhanced Transformer with Rotary Position Embedding. + + Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ + transformers/rope/__init__.py. MIT License: + https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. 
+ """ + # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ + theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem)) + + # Create position indexes `[0, 1, ..., seq_len - 1]` + seq_idx = torch.arange(seq_len, dtype=torch.float, device=device) + + # Calculate the product of position index and $\theta_i$ + idx_theta = torch.outer(seq_idx, theta).float() + + cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) + + # this is to mimic the behaviour of complex32, else we will get different results + if dtype in (torch.float16, torch.bfloat16, torch.int8): + cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half() + return cache + + def forward(self, max_seq_len, offset=0): + return self.forward_impl(max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device) + + +class PrefixEncoder(torch.nn.Module): + """ + The torch.nn model to encode the prefix Input shape: (batch-size, prefix-length) Output shape: (batch-size, + prefix-length, 2*layers*hidden) + """ + + def __init__(self, config: ChatGLMConfig): + super().__init__() + self.prefix_projection = config.prefix_projection + if self.prefix_projection: + # Use a two-layer MLP to encode the prefix + kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2 + self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size) + self.trans = torch.nn.Sequential( + torch.nn.Linear(kv_size, config.hidden_size), + torch.nn.Tanh(), + torch.nn.Linear(config.hidden_size, kv_size), + ) + else: + self.embedding = torch.nn.Embedding( + config.pre_seq_len, config.num_layers * config.kv_channels * config.multi_query_group_num * 2 + ) + + def forward(self, prefix: torch.Tensor): + if self.prefix_projection: + prefix_tokens = self.embedding(prefix) + past_key_values = self.trans(prefix_tokens) + else: + past_key_values = self.embedding(prefix) + return past_key_values + + +class ChatGLMModel(ChatGLMPreTrainedModel): + def __init__(self, config: ChatGLMConfig, device=None, empty_init=True): + super().__init__(config) + if empty_init: + init_method = skip_init + else: + init_method = default_init + init_kwargs = {} + if device is not None: + init_kwargs["device"] = device + self.embedding = init_method(Embedding, config, **init_kwargs) + self.num_layers = config.num_layers + self.multi_query_group_num = config.multi_query_group_num + self.kv_channels = config.kv_channels + + # Rotary positional embeddings + self.seq_length = config.seq_length + rotary_dim = ( + config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels + ) + + self.rotary_pos_emb = RotaryEmbedding( + rotary_dim // 2, original_impl=config.original_rope, device=device, dtype=config.torch_dtype + ) + self.encoder = init_method(GLMTransformer, config, **init_kwargs) + self.output_layer = init_method( + nn.Linear, + config.hidden_size, + config.padded_vocab_size, + bias=False, + dtype=config.torch_dtype, + **init_kwargs, + ) + self.pre_seq_len = config.pre_seq_len + self.prefix_projection = config.prefix_projection + if self.pre_seq_len is not None: + for param in self.parameters(): + param.requires_grad = False + self.prefix_tokens = torch.arange(self.pre_seq_len).long() + self.prefix_encoder = PrefixEncoder(config) + self.dropout = torch.nn.Dropout(0.1) + + def get_input_embeddings(self): + return self.embedding.word_embeddings + + def get_prompt(self, batch_size, device, dtype=torch.half): + prefix_tokens = 
self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device) + past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) + past_key_values = past_key_values.view( + batch_size, self.pre_seq_len, self.num_layers * 2, self.multi_query_group_num, self.kv_channels + ) + # seq_len, b, nh, hidden_size + past_key_values = self.dropout(past_key_values) + past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) + return past_key_values + + def forward( + self, + input_ids, + position_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.BoolTensor] = None, + full_attention_mask: Optional[torch.BoolTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, seq_length = input_ids.shape + + if inputs_embeds is None: + inputs_embeds = self.embedding(input_ids) + + if self.pre_seq_len is not None: + if past_key_values is None: + past_key_values = self.get_prompt( + batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype + ) + if attention_mask is not None: + attention_mask = torch.cat( + [attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask], dim=-1 + ) + + if full_attention_mask is None: + if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): + full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) + + # Rotary positional embeddings + rotary_pos_emb = self.rotary_pos_emb(self.seq_length) + if position_ids is not None: + rotary_pos_emb = rotary_pos_emb[position_ids] + else: + rotary_pos_emb = rotary_pos_emb[None, :seq_length] + rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() + + # Run encoder. + hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( + inputs_embeds, + full_attention_mask, + rotary_pos_emb=rotary_pos_emb, + kv_caches=past_key_values, + use_cache=use_cache, + output_hidden_states=output_hidden_states, + ) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) diff --git a/diffusers3/pipelines/kolors/tokenizer.py b/diffusers3/pipelines/kolors/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..a7b942f4fd22ae7cfa59398e37407c2cd36e7749 --- /dev/null +++ b/diffusers3/pipelines/kolors/tokenizer.py @@ -0,0 +1,334 @@ +# Copyright 2024 ChatGLM3-6B Model Team, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import re +from typing import Dict, List, Optional, Union + +from sentencepiece import SentencePieceProcessor +from transformers import PreTrainedTokenizer +from transformers.tokenization_utils_base import BatchEncoding, EncodedInput +from transformers.utils import PaddingStrategy + + +class SPTokenizer: + def __init__(self, model_path: str): + # reload tokenizer + assert os.path.isfile(model_path), model_path + self.sp_model = SentencePieceProcessor(model_file=model_path) + + # BOS / EOS token IDs + self.n_words: int = self.sp_model.vocab_size() + self.bos_id: int = self.sp_model.bos_id() + self.eos_id: int = self.sp_model.eos_id() + self.pad_id: int = self.sp_model.unk_id() + assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() + + role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] + special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens + self.special_tokens = {} + self.index_special_tokens = {} + for token in special_tokens: + self.special_tokens[token] = self.n_words + self.index_special_tokens[self.n_words] = token + self.n_words += 1 + self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) + + def tokenize(self, s: str, encode_special_tokens=False): + if encode_special_tokens: + last_index = 0 + t = [] + for match in re.finditer(self.role_special_token_expression, s): + if last_index < match.start(): + t.extend(self.sp_model.EncodeAsPieces(s[last_index : match.start()])) + t.append(s[match.start() : match.end()]) + last_index = match.end() + if last_index < len(s): + t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) + return t + else: + return self.sp_model.EncodeAsPieces(s) + + def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: + assert isinstance(s, str) + t = self.sp_model.encode(s) + if bos: + t = [self.bos_id] + t + if eos: + t = t + [self.eos_id] + return t + + def decode(self, t: List[int]) -> str: + text, buffer = "", [] + for token in t: + if token in self.index_special_tokens: + if buffer: + text += self.sp_model.decode(buffer) + buffer = [] + text += self.index_special_tokens[token] + else: + buffer.append(token) + if buffer: + text += self.sp_model.decode(buffer) + return text + + def decode_tokens(self, tokens: List[str]) -> str: + text = self.sp_model.DecodePieces(tokens) + return text + + def convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + if token in self.special_tokens: + return self.special_tokens[token] + return self.sp_model.PieceToId(token) + + def convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + if index in self.index_special_tokens: + return self.index_special_tokens[index] + if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0: + return "" + return self.sp_model.IdToPiece(index) + + +class ChatGLMTokenizer(PreTrainedTokenizer): + vocab_files_names = {"vocab_file": "tokenizer.model"} + + model_input_names = ["input_ids", "attention_mask", "position_ids"] + + def 
__init__( + self, + vocab_file, + padding_side="left", + clean_up_tokenization_spaces=False, + encode_special_tokens=False, + **kwargs, + ): + self.name = "GLMTokenizer" + + self.vocab_file = vocab_file + self.tokenizer = SPTokenizer(vocab_file) + self.special_tokens = { + "": self.tokenizer.bos_id, + "": self.tokenizer.eos_id, + "": self.tokenizer.pad_id, + } + self.encode_special_tokens = encode_special_tokens + super().__init__( + padding_side=padding_side, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + encode_special_tokens=encode_special_tokens, + **kwargs, + ) + + def get_command(self, token): + if token in self.special_tokens: + return self.special_tokens[token] + assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" + return self.tokenizer.special_tokens[token] + + @property + def unk_token(self) -> str: + return "" + + @unk_token.setter + def unk_token(self, value: str): + self._unk_token = value + + @property + def pad_token(self) -> str: + return "" + + @pad_token.setter + def pad_token(self, value: str): + self._pad_token = value + + @property + def pad_token_id(self): + return self.get_command("") + + @property + def eos_token(self) -> str: + return "" + + @eos_token.setter + def eos_token(self, value: str): + self._eos_token = value + + @property + def eos_token_id(self): + return self.get_command("") + + @property + def vocab_size(self): + return self.tokenizer.n_words + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text, **kwargs): + return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.tokenizer.convert_token_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.tokenizer.convert_id_to_token(index) + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + return self.tokenizer.decode_tokens(tokens) + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. 
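+ Example (illustrative; the target directory is hypothetical and assumed to already exist)::
+
+ tokenizer.save_vocabulary("./exported_tokenizer")
+ # -> ("./exported_tokenizer/tokenizer.model",)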
+ """ + if os.path.isdir(save_directory): + vocab_file = os.path.join(save_directory, self.vocab_files_names["vocab_file"]) + else: + vocab_file = save_directory + + with open(self.vocab_file, "rb") as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def get_prefix_tokens(self): + prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] + return prefix_tokens + + def build_single_message(self, role, metadata, message): + assert role in ["system", "user", "assistant", "observation"], role + role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") + message_tokens = self.tokenizer.encode(message) + tokens = role_tokens + message_tokens + return tokens + + def build_chat_input(self, query, history=None, role="user"): + if history is None: + history = [] + input_ids = [] + for item in history: + content = item["content"] + if item["role"] == "system" and "tools" in item: + content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) + input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) + input_ids.extend(self.build_single_message(role, "", query)) + input_ids.extend([self.get_command("<|assistant|>")]) + return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + prefix_tokens = self.get_prefix_tokens() + token_ids_0 = prefix_tokens + token_ids_0 + if token_ids_1 is not None: + token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("")] + return token_ids_0 + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + `>= 7.5` (Volta). + return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + assert self.padding_side == "left" + + required_input = encoded_inputs[self.model_input_names[0]] + seq_length = len(required_input) + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * seq_length + + if "position_ids" not in encoded_inputs: + encoded_inputs["position_ids"] = list(range(seq_length)) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "position_ids" in encoded_inputs: + encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + + return encoded_inputs diff --git a/diffusers3/pipelines/latent_consistency_models/__init__.py b/diffusers3/pipelines/latent_consistency_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f79d3c4773f393ed689a949041d36ad77e20968 --- /dev/null +++ b/diffusers3/pipelines/latent_consistency_models/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_latent_consistency_img2img"] = ["LatentConsistencyModelImg2ImgPipeline"] + _import_structure["pipeline_latent_consistency_text2img"] = ["LatentConsistencyModelPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_latent_consistency_img2img import LatentConsistencyModelImg2ImgPipeline + from .pipeline_latent_consistency_text2img import LatentConsistencyModelPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py b/diffusers3/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py new file mode 100644 index 
0000000000000000000000000000000000000000..dd72d3c9e10e58b5c23eacca058853df10d0df65 --- /dev/null +++ b/diffusers3/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py @@ -0,0 +1,976 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import LCMScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. 
If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import AutoPipelineForImage2Image + >>> import torch + >>> import PIL + + >>> pipe = AutoPipelineForImage2Image.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") + >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. + >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) + + >>> prompt = "High altitude snowy mountains" + >>> image = PIL.Image.open("./snowy_mountains.png") + + >>> # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps. + >>> num_inference_steps = 4 + >>> images = pipe( + ... prompt=prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=8.0 + ... ).images + + >>> images[0].save("image.png") + ``` + +""" + + +class LatentConsistencyModelImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for image-to-image generation using a latent consistency model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only + supports [`LCMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + requires_safety_checker (`bool`, *optional*, defaults to `True`): + Whether the pipeline requires a safety checker component. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: LCMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
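+ Returns:
+ `Tuple[torch.Tensor, Optional[torch.Tensor]]`: A tuple `(prompt_embeds, negative_prompt_embeds)`;
+ `negative_prompt_embeds` is `None` when classifier-free guidance is disabled and no negative
+ embeddings are passed in.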
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. 
+ dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def check_inputs( + self, + prompt: Union[str, List[str]], + strength: float, + callback_steps: int, + prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def clip_skip(self): + return self._clip_skip + + @property + def do_classifier_free_guidance(self): + return False + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 4, + strength: float = 0.8, + original_inference_steps: int = None, + timesteps: List[int] = None, + guidance_scale: float = 8.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + original_inference_steps (`int`, *optional*): + The original number of inference steps use to generate a linearly-spaced timestep schedule, from which + we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule, + following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the + scheduler's `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps on the original LCM training/distillation timestep schedule are used. Must be in descending + order. 
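+ strength (`float`, *optional*, defaults to 0.8):
+ Indicates how much to transform the reference `image`. Must be between 0 and 1; higher values add more
+ noise to the encoded image latents, so the output can deviate further from the input image.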
+ guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + Note that the original latent consistency models paper uses a different CFG formulation where the + guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale > + 0`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + callback_steps, + prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided + # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the + # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts. + prompt_embeds, _ = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Encode image + image = self.image_processor.preprocess(image) + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + original_inference_steps=original_inference_steps, + strength=strength, + ) + + # 6. Prepare latent variables + original_inference_steps = ( + original_inference_steps + if original_inference_steps is not None + else self.scheduler.config.original_inference_steps + ) + latent_timestep = timesteps[:1] + if latents is None: + latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + bs = batch_size * num_images_per_prompt + + # 6. Get Guidance Scale Embedding + # NOTE: We use the Imagen CFG formulation that StableDiffusionPipeline uses rather than the original LCM paper + # CFG formulation, so we need to subtract 1 from the input guidance_scale. 
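+ # Imagen CFG formulation: cfg_noise = noise_uncond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 1.0 using CFG)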
+ # LCM CFG formulation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 0.0 using CFG) + w = torch.tensor(self.guidance_scale - 1).repeat(bs) + w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim).to( + device=device, dtype=latents.dtype + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. LCM Multistep Sampling Loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latents = latents.to(prompt_embeds.dtype) + + # model prediction (v-prediction, eps, x) + model_pred = self.unet( + latents, + t, + timestep_cond=w_embedding, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents, denoised = self.scheduler.step(model_pred, t, latents, **extra_step_kwargs, return_dict=False) + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + w_embedding = callback_outputs.pop("w_embedding", w_embedding) + denoised = callback_outputs.pop("denoised", denoised) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + denoised = denoised.to(prompt_embeds.dtype) + if not output_type == "latent": + image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = denoised + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py b/diffusers3/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py new file mode 100644 index 0000000000000000000000000000000000000000..89cafc2877fed0ab2b26d5e6b652c291ee101730 --- /dev/null +++ b/diffusers3/pipelines/latent_consistency_models/pipeline_latent_consistency_text2img.py @@ -0,0 +1,905 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import LCMScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import DiffusionPipeline + >>> import torch + + >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") + >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. + >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) + + >>> prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" + + >>> # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps. + >>> num_inference_steps = 4 + >>> images = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0).images + >>> images[0].save("image.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. 
+ sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class LatentConsistencyModelPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using a latent consistency model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only + supports [`LCMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + requires_safety_checker (`bool`, *optional*, defaults to `True`): + Whether the pipeline requires a safety checker component. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: LCMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
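+ # Note: `hidden_states` contains the embedding output plus one entry per transformer layer, so
+ # `-(clip_skip + 1)` selects the output `clip_skip` layers before the final one.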
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed + def check_inputs( + self, + prompt: Union[str, List[str]], + height: int, + width: int, + callback_steps: int, + prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def clip_skip(self): + return self._clip_skip + + @property + def do_classifier_free_guidance(self): + return False + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 4, + original_inference_steps: int = None, + timesteps: List[int] = None, + guidance_scale: float = 8.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + original_inference_steps (`int`, *optional*): + The original number of inference steps use to generate a linearly-spaced timestep schedule, from which + we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule, + following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the + scheduler's `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps on the original LCM training/distillation timestep schedule are used. Must be in descending + order. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
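+ In this pipeline the guidance signal is injected through a guidance-scale embedding passed to the
+ UNet as `timestep_cond`, rather than through a separate unconditional forward pass.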
+ Note that the original latent consistency models paper uses a different CFG formulation where the + guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale > + 0`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + # NOTE: when a LCM is distilled from an LDM via latent consistency distillation (Algorithm 1) with guided + # distillation, the forward pass of the LCM learns to approximate sampling from the LDM using CFG with the + # unconditional prompt "" (the empty string). Due to this, LCMs currently do not support negative prompts. + prompt_embeds, _ = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=None, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, original_inference_steps=original_inference_steps + ) + + # 5. Prepare latent variable + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + bs = batch_size * num_images_per_prompt + + # 6. Get Guidance Scale Embedding + # NOTE: We use the Imagen CFG formulation that StableDiffusionPipeline uses rather than the original LCM paper + # CFG formulation, so we need to subtract 1 from the input guidance_scale. 
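+ # Imagen/StableDiffusion CFG formulation: cfg_noise = noise_uncond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 1.0 using CFG)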
+ # LCM CFG formulation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 0.0 using CFG) + w = torch.tensor(self.guidance_scale - 1).repeat(bs) + w_embedding = self.get_guidance_scale_embedding(w, embedding_dim=self.unet.config.time_cond_proj_dim).to( + device=device, dtype=latents.dtype + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. LCM MultiStep Sampling Loop: + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latents = latents.to(prompt_embeds.dtype) + + # model prediction (v-prediction, eps, x) + model_pred = self.unet( + latents, + t, + timestep_cond=w_embedding, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # compute the previous noisy sample x_t -> x_t-1 + latents, denoised = self.scheduler.step(model_pred, t, latents, **extra_step_kwargs, return_dict=False) + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + w_embedding = callback_outputs.pop("w_embedding", w_embedding) + denoised = callback_outputs.pop("denoised", denoised) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + denoised = denoised.to(prompt_embeds.dtype) + if not output_type == "latent": + image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = denoised + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/latent_diffusion/__init__.py b/diffusers3/pipelines/latent_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..561f96fc71dc7b4404e09571e0b7eaa4ee02fde8 --- /dev/null +++ b/diffusers3/pipelines/latent_diffusion/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + 
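+# Lazy-import pattern used throughout diffusers: the real pipeline classes are registered only when
+# torch and transformers are importable; otherwise placeholder objects from
+# `dummy_torch_and_transformers_objects` are exposed in their place.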
+try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_latent_diffusion"] = ["LDMBertModel", "LDMTextToImagePipeline"] + _import_structure["pipeline_latent_diffusion_superresolution"] = ["LDMSuperResolutionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_latent_diffusion import LDMBertModel, LDMTextToImagePipeline + from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/diffusers3/pipelines/latent_diffusion/pipeline_latent_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..f6f3531a883554411d06e806367387b1c4e59a0a --- /dev/null +++ b/diffusers3/pipelines/latent_diffusion/pipeline_latent_diffusion.py @@ -0,0 +1,746 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint +from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer +from transformers.activations import ACT2FN +from transformers.modeling_outputs import BaseModelOutput +from transformers.utils import logging + +from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class LDMTextToImagePipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using latent diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + bert ([`LDMBertModel`]): + Text-encoder model based on [`~transformers.BERT`]. + tokenizer ([`~transformers.BertTokenizer`]): + A `BertTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. 
+ scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "bert->unet->vqvae" + + def __init__( + self, + vqvae: Union[VQModel, AutoencoderKL], + bert: PreTrainedModel, + tokenizer: PreTrainedTokenizer, + unet: Union[UNet2DModel, UNet2DConditionModel], + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + ): + super().__init__() + self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler) + self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 1.0, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 1.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> from diffusers import DiffusionPipeline + + >>> # load model and scheduler + >>> ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") + + >>> # run pipeline in inference (sample random noise and denoise) + >>> prompt = "A painting of a squirrel eating a burger" + >>> images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images + + >>> # save images + >>> for idx, image in enumerate(images): + ... 
image.save(f"squirrel-{idx}.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get unconditional embeddings for classifier free guidance + if guidance_scale != 1.0: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=77, truncation=True, return_tensors="pt" + ) + negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0] + + # get prompt text embeddings + text_input = self.tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt") + prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0] + + # get the initial random noise unless the user supplied it + latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor( + latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype + ) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + latents = latents.to(self._execution_device) + + self.scheduler.set_timesteps(num_inference_steps) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + + extra_kwargs = {} + if accepts_eta: + extra_kwargs["eta"] = eta + + for t in self.progress_bar(self.scheduler.timesteps): + if guidance_scale == 1.0: + # guidance_scale of 1 means no guidance + latents_input = latents + context = prompt_embeds + else: + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = torch.cat([latents] * 2) + context = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # predict the noise residual + noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample + # perform guidance + if guidance_scale != 1.0: + noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample + + # scale and decode the image latents with vae + latents = 1 / self.vqvae.config.scaling_factor * latents + image = self.vqvae.decode(latents).sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) + + +################################################################################ +# Code for the text transformer model +################################################################################ +""" PyTorch LDMBERT model.""" + + +logger = logging.get_logger(__name__) + +LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "ldm-bert", + # See all LDMBert models at https://huggingface.co/models?filter=ldmbert +] + + +LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json", +} + + +""" LDMBERT model configuration""" + + +class LDMBertConfig(PretrainedConfig): + model_type = "ldmbert" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} + + def __init__( + self, + vocab_size=30522, + max_position_embeddings=77, + encoder_layers=32, + encoder_ffn_dim=5120, + encoder_attention_heads=8, + head_dim=64, + encoder_layerdrop=0.0, + activation_function="gelu", + d_model=1280, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + classifier_dropout=0.0, + scale_embedding=False, + use_cache=True, + pad_token_id=0, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.head_dim = head_dim + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.encoder_layerdrop = encoder_layerdrop + self.classifier_dropout = classifier_dropout + self.use_cache = use_cache + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + + super().__init__(pad_token_id=pad_token_id, **kwargs) + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
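+ Attended positions become 0.0 and masked positions are filled with the dtype's minimum value, so the
+ result can be added directly to the attention scores before the softmax.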
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert +class LDMBertAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + head_dim: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = False, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = head_dim + self.inner_dim = head_dim * num_heads + + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias) + self.out_proj = nn.Linear(self.inner_dim, embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
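+ # (in LDMBert the stored value is `self.inner_dim = head_dim * num_heads`, which can differ from
+ # `embed_dim` since `head_dim` is configured independently)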
+ attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class LDMBertEncoderLayer(nn.Module): + def __init__(self, config: LDMBertConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = LDMBertAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + head_dim=config.head_dim, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + layer_head_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """ + Args: + hidden_states (`torch.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.Tensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert +class LDMBertPreTrainedModel(PreTrainedModel): + config_class = LDMBertConfig + base_model_prefix = "model" + _supports_gradient_checkpointing = True + _keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"] + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + 
module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (LDMBertEncoder,)): + module.gradient_checkpointing = value + + @property + def dummy_inputs(self): + pad_token = self.config.pad_token_id + input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) + dummy_inputs = { + "attention_mask": input_ids.ne(pad_token), + "input_ids": input_ids, + } + return dummy_inputs + + +class LDMBertEncoder(LDMBertPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`LDMBertEncoderLayer`]. + + Args: + config: LDMBertConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: LDMBertConfig): + super().__init__(config) + + self.dropout = config.dropout + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) + self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim) + self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layer_norm = nn.LayerNorm(embed_dim) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + seq_len = input_shape[1] + if position_ids is None: + position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1)) + embed_pos = self.embed_positions(position_ids) + + hidden_states = inputs_embeds + embed_pos + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." 
+ ) + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + hidden_states = self.layer_norm(hidden_states) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class LDMBertModel(LDMBertPreTrainedModel): + _no_split_modules = [] + + def __init__(self, config: LDMBertConfig): + super().__init__(config) + self.model = LDMBertEncoder(config) + self.to_logits = nn.Linear(config.hidden_size, config.vocab_size) + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + return outputs diff --git a/diffusers3/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py b/diffusers3/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..bb72b4d4eb8e387d596b22cca65c82aef0ab9e75 --- /dev/null +++ b/diffusers3/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py @@ -0,0 +1,189 @@ +import inspect +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.utils.checkpoint + +from ...models import UNet2DModel, VQModel +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import PIL_INTERPOLATION +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +def preprocess(image): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = np.array(image).astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +class LDMSuperResolutionPipeline(DiffusionPipeline): + r""" + A pipeline for image super-resolution using latent diffusion. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Parameters: + vqvae ([`VQModel`]): + Vector-quantized (VQ) model to encode and decode images to and from latent representations. + unet ([`UNet2DModel`]): + A `UNet2DModel` to denoise the encoded image. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`EulerDiscreteScheduler`], + [`EulerAncestralDiscreteScheduler`], [`DPMSolverMultistepScheduler`], or [`PNDMScheduler`]. + """ + + def __init__( + self, + vqvae: VQModel, + unet: UNet2DModel, + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + ): + super().__init__() + self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + image: Union[torch.Tensor, PIL.Image.Image] = None, + batch_size: Optional[int] = 1, + num_inference_steps: Optional[int] = 100, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ) -> Union[Tuple, ImagePipelineOutput]: + r""" + The call function to the pipeline for generation. + + Args: + image (`torch.Tensor` or `PIL.Image.Image`): + `Image` or tensor representing an image batch to be used as the starting point for the process. + batch_size (`int`, *optional*, defaults to 1): + Number of images to generate. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple. + + Example: + + ```py + >>> import requests + >>> from PIL import Image + >>> from io import BytesIO + >>> from diffusers import LDMSuperResolutionPipeline + >>> import torch + + >>> # load model and scheduler + >>> pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages") + >>> pipeline = pipeline.to("cuda") + + >>> # let's download an image + >>> url = ( + ... "https://user-images.githubusercontent.com/38061659/199705896-b48e17b8-b231-47cd-a270-4ffa5a93fa3e.png" + ... 
) + >>> response = requests.get(url) + >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB") + >>> low_res_img = low_res_img.resize((128, 128)) + + >>> # run pipeline in inference (sample random noise and denoise) + >>> upscaled_image = pipeline(low_res_img, num_inference_steps=100, eta=1).images[0] + >>> # save image + >>> upscaled_image.save("ldm_generated_image.png") + ``` + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, torch.Tensor): + batch_size = image.shape[0] + else: + raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}") + + if isinstance(image, PIL.Image.Image): + image = preprocess(image) + + height, width = image.shape[-2:] + + # in_channels should be 6: 3 for latents, 3 for low resolution image + latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width) + latents_dtype = next(self.unet.parameters()).dtype + + latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) + + image = image.to(device=self.device, dtype=latents_dtype) + + # set timesteps and move to the correct device + self.scheduler.set_timesteps(num_inference_steps, device=self.device) + timesteps_tensor = self.scheduler.timesteps + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_kwargs = {} + if accepts_eta: + extra_kwargs["eta"] = eta + + for t in self.progress_bar(timesteps_tensor): + # concat latents and low resolution image in the channel dimension. 
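+ # Illustrative note (not part of the upstream comment): the super-resolution UNet expects
+ # in_channels == 6 (3 latent channels plus 3 channels for the low-resolution conditioning
+ # image), so every denoising step receives the noisy latents stacked with the input image;
+ # this is also why `latents_shape` above uses `self.unet.config.in_channels // 2`.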
+ latents_input = torch.cat([latents, image], dim=1) + latents_input = self.scheduler.scale_model_input(latents_input, t) + # predict the noise residual + noise_pred = self.unet(latents_input, t).sample + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample + + # decode the image latents with the VQVAE + image = self.vqvae.decode(latents).sample + image = torch.clamp(image, -1.0, 1.0) + image = image / 2 + 0.5 + image = image.cpu().permute(0, 2, 3, 1).numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/latte/__init__.py b/diffusers3/pipelines/latte/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4296b42e125303c0e036b9f2deecc36bdf959de3 --- /dev/null +++ b/diffusers3/pipelines/latte/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_latte"] = ["LattePipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_latte import LattePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/latte/pipeline_latte.py b/diffusers3/pipelines/latte/pipeline_latte.py new file mode 100644 index 0000000000000000000000000000000000000000..d4bedf2e0e2a05ab057a08d27ace1cdeccce4610 --- /dev/null +++ b/diffusers3/pipelines/latte/pipeline_latte.py @@ -0,0 +1,881 @@ +# Copyright 2024 the Latte Team and The HuggingFace Team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import html +import inspect +import re +import urllib.parse as ul +from dataclasses import dataclass +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...models import AutoencoderKL, LatteTransformer3DModel +from ...pipelines.pipeline_utils import DiffusionPipeline +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + BACKENDS_MAPPING, + BaseOutput, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ...video_processor import VideoProcessor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import LattePipeline + >>> from diffusers.utils import export_to_gif + + >>> # You can replace the checkpoint id with "maxin-cn/Latte-1" too. + >>> pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16) + >>> # Enable memory optimizations. + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A small cactus with a happy face in the Sahara desert." + >>> videos = pipe(prompt).frames[0] + >>> export_to_gif(videos, "latte.gif") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +@dataclass +class LattePipelineOutput(BaseOutput): + frames: torch.Tensor + + +class LattePipeline(DiffusionPipeline): + r""" + Pipeline for text-to-video generation using Latte. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. Latte uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. + tokenizer (`T5Tokenizer`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + transformer ([`LatteTransformer3DModel`]): + A text conditioned `LatteTransformer3DModel` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded video latents. 
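+ 
+ Note (illustrative, not from the upstream docstring): the VAE spatial scale factor is computed in
+ `__init__` as `2 ** (len(vae.config.block_out_channels) - 1)`; assuming the usual 4-level Stable
+ Diffusion VAE this equals 8, so the default 512x512 pixel output is denoised on a 64x64 latent
+ grid (see `prepare_latents`, which divides `height` and `width` by `vae_scale_factor`).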
+ """ + + bad_punct_regex = re.compile(r"[#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~\)\(\]\[\}\{\|\\/\\*]{1,}") + + _optional_components = ["tokenizer", "text_encoder"] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + ] + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKL, + transformer: LatteTransformer3DModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor) + + # Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/utils.py + def mask_text_embeddings(self, emb, mask): + if emb.shape[0] == 1: + keep_index = mask.sum().item() + return emb[:, :, :keep_index, :], keep_index # 1, 120, 4096 -> 1 7 4096 + else: + masked_feature = emb * mask[:, None, :, None] # 1 120 4096 + return masked_feature, emb.shape[2] + + # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: str = "", + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + clean_caption: bool = False, + mask_feature: bool = True, + dtype=None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + Latte, this should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of video that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. For Latte, it's should be the embeddings of the "" string. + clean_caption (bool, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + mask_feature: (bool, defaults to `True`): + If `True`, the function will mask the text embeddings. 
+ """ + embeds_initially_provided = prompt_embeds is not None and negative_prompt_embeds is not None + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + max_length = 120 + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds_attention_mask = attention_mask + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds_attention_mask = torch.ones_like(prompt_embeds) + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + prompt_embeds_attention_mask = prompt_embeds_attention_mask.view(bs_embed, -1) + prompt_embeds_attention_mask = prompt_embeds_attention_mask.repeat(num_images_per_prompt, 1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + else: + negative_prompt_embeds = None + + # Perform additional masking. + if mask_feature and not embeds_initially_provided: + prompt_embeds = prompt_embeds.unsqueeze(1) + masked_prompt_embeds, keep_indices = self.mask_text_embeddings(prompt_embeds, prompt_embeds_attention_mask) + masked_prompt_embeds = masked_prompt_embeds.squeeze(1) + masked_negative_prompt_embeds = ( + negative_prompt_embeds[:, :keep_indices, :] if negative_prompt_embeds is not None else None + ) + + return masked_prompt_embeds, masked_negative_prompt_embeds + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + callback_on_step_end_tensor_inputs, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." 
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: str = "", + num_inference_steps: int = 50, + timesteps: Optional[List[int]] = None, + guidance_scale: float = 7.5, + num_images_per_prompt: int = 1, + video_length: int = 16, + height: int = 512, + width: int = 512, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: str = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + clean_caption: bool = True, + mask_feature: bool = True, + enable_temporal_attentions: bool = True, + decode_chunk_size: Optional[int] = None, + ) -> Union[LattePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the video generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality video at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate videos that are closely linked to the text `prompt`, + usually at the expense of lower video quality. + video_length (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated video. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. For Latte this negative prompt should be "". If not provided, + negative_prompt_embeds will be generated from `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate video. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A callback function or a list of callback functions to be called at the end of each denoising step. + callback_on_step_end_tensor_inputs (`List[str]`, *optional*): + A list of tensor inputs that should be passed to the callback function. If not defined, all tensor + inputs will be passed. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + mask_feature (`bool` defaults to `True`): If set to `True`, the text embeddings will be masked. + enable_temporal_attentions (`bool`, *optional*, defaults to `True`): Whether to enable temporal attentions + decode_chunk_size (`int`, *optional*): + The number of frames to decode at a time. Higher chunk size leads to better temporal consistency at the + expense of more memory usage. By default, the decoder decodes all frames at once for maximal quality. + For lower memory usage, reduce `decode_chunk_size`. + + Examples: + + Returns: + [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.latte.pipeline_latte.LattePipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images + """ + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default + decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else video_length + + # 1. Check inputs. 
Raise error if not correct + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + self.check_inputs( + prompt, + height, + width, + negative_prompt, + callback_on_step_end_tensor_inputs, + prompt_embeds, + negative_prompt_embeds, + ) + self._guidance_scale = guidance_scale + self._interrupt = False + + # 2. Default height and width to transformer + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clean_caption=clean_caption, + mask_feature=mask_feature, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + self._num_timesteps = len(timesteps) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + video_length, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, + encoder_hidden_states=prompt_embeds, + timestep=current_timestep, + enable_temporal_attentions=enable_temporal_attentions, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # use learned sigma? + if not ( + hasattr(self.scheduler.config, "variance_type") + and self.scheduler.config.variance_type in ["learned", "learned_range"] + ): + noise_pred = noise_pred.chunk(2, dim=1)[0] + + # compute previous video: x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latents": + video = self.decode_latents(latents, video_length, decode_chunk_size=14) + video = self.video_processor.postprocess_video(video=video, output_type=output_type) + else: + video = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return LattePipelineOutput(frames=video) + + # Similar to diffusers.pipelines.stable_video_diffusion.pipeline_stable_video_diffusion.decode_latents + def decode_latents(self, latents: torch.Tensor, video_length: int, decode_chunk_size: int = 14): + # [batch, channels, frames, height, width] -> [batch*frames, channels, height, width] + latents = latents.permute(0, 2, 1, 3, 4).flatten(0, 1) + + latents = 1 / self.vae.config.scaling_factor * latents + + forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward + accepts_num_frames = "num_frames" in set(inspect.signature(forward_vae_fn).parameters.keys()) + + # decode decode_chunk_size frames at a time to avoid OOM + frames = [] + for i in range(0, latents.shape[0], decode_chunk_size): + num_frames_in = latents[i : i + decode_chunk_size].shape[0] + decode_kwargs = {} + if accepts_num_frames: + # we only pass num_frames_in if it's expected + decode_kwargs["num_frames"] = num_frames_in + + frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample + frames.append(frame) + frames = torch.cat(frames, dim=0) + + 
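+ # Illustrative note (not part of the upstream comment): chunked decoding bounds peak VAE memory;
+ # e.g. for a single video with 16 latent frames and decode_chunk_size=14, the loop above runs the
+ # VAE decoder twice (once on 14 frames, once on the remaining 2) before the frames are stacked
+ # back together below.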
# [batch*frames, channels, height, width] -> [batch, channels, frames, height, width] + frames = frames.reshape(-1, video_length, *frames.shape[1:]).permute(0, 2, 1, 3, 4) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 + frames = frames.float() + return frames diff --git a/diffusers3/pipelines/ledits_pp/__init__.py b/diffusers3/pipelines/ledits_pp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aae3b1cb18ce96c7d79d3c44bc37cf2ae4db9720 --- /dev/null +++ b/diffusers3/pipelines/ledits_pp/__init__.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_leditspp_stable_diffusion"] = ["LEditsPPPipelineStableDiffusion"] + _import_structure["pipeline_leditspp_stable_diffusion_xl"] = ["LEditsPPPipelineStableDiffusionXL"] + + _import_structure["pipeline_output"] = ["LEditsPPDiffusionPipelineOutput", "LEditsPPDiffusionPipelineOutput"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_leditspp_stable_diffusion import ( + LEditsPPDiffusionPipelineOutput, + LEditsPPInversionPipelineOutput, + LEditsPPPipelineStableDiffusion, + ) + from .pipeline_leditspp_stable_diffusion_xl import LEditsPPPipelineStableDiffusionXL + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py b/diffusers3/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..049b89661b115afd01af789e878773d05e8fdfa5 --- /dev/null +++ b/diffusers3/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py @@ -0,0 +1,1499 @@ +import inspect +import math +from itertools import repeat +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import Attention, AttnProcessor +from ...models.lora import adjust_lora_scale_text_encoder +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler +from ...utils import ( + USE_PEFT_BACKEND, + 
deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import LEditsPPPipelineStableDiffusion + >>> from diffusers.utils import load_image + + >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png" + >>> image = load_image(img_url).convert("RGB") + + >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.1) + + >>> edited_image = pipe( + ... editing_prompt=["cherry blossom"], edit_guidance_scale=10.0, edit_threshold=0.75 + ... ).images[0] + ``` +""" + + +# Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.AttentionStore +class LeditsAttentionStore: + @staticmethod + def get_empty_store(): + return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} + + def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): + # attn.shape = batch_size * head_size, seq_len query, seq_len_key + if attn.shape[1] <= self.max_size: + bs = 1 + int(PnP) + editing_prompts + skip = 2 if PnP else 1 # skip PnP & unconditional + attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) + source_batch_size = int(attn.shape[1] // bs) + self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) + + def forward(self, attn, is_cross: bool, place_in_unet: str): + key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" + + self.step_store[key].append(attn) + + def between_steps(self, store_step=True): + if store_step: + if self.average: + if len(self.attention_store) == 0: + self.attention_store = self.step_store + else: + for key in self.attention_store: + for i in range(len(self.attention_store[key])): + self.attention_store[key][i] += self.step_store[key][i] + else: + if len(self.attention_store) == 0: + self.attention_store = [self.step_store] + else: + self.attention_store.append(self.step_store) + + self.cur_step += 1 + self.step_store = self.get_empty_store() + + def get_attention(self, step: int): + if self.average: + attention = { + key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store + } + else: + assert step is not None + attention = self.attention_store[step] + return attention + + def aggregate_attention( + self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int + ): + out = [[] for x in range(self.batch_size)] + if isinstance(res, int): + num_pixels = res**2 + resolution = (res, res) + else: + num_pixels = res[0] * res[1] + resolution = res[:2] + + for location in from_where: + for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: + for batch, item in enumerate(bs_item): + if item.shape[1] == num_pixels: + cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] + out[batch].append(cross_maps) + + out = 
torch.stack([torch.cat(x, dim=0) for x in out]) + # average over heads + out = out.sum(1) / out.shape[1] + return out + + def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): + self.step_store = self.get_empty_store() + self.attention_store = [] + self.cur_step = 0 + self.average = average + self.batch_size = batch_size + if max_size is None: + self.max_size = max_resolution**2 + elif max_size is not None and max_resolution is None: + self.max_size = max_size + else: + raise ValueError("Only allowed to set one of max_resolution or max_size") + + +# Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.GaussianSmoothing +class LeditsGaussianSmoothing: + def __init__(self, device): + kernel_size = [3, 3] + sigma = [0.5, 0.5] + + # The gaussian kernel is the product of the gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) + + self.weight = kernel.to(device) + + def __call__(self, input): + """ + Arguments: + Apply gaussian filter to input. + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. + """ + return F.conv2d(input, weight=self.weight.to(input.dtype)) + + +class LEDITSCrossAttnProcessor: + def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): + self.attnstore = attention_store + self.place_in_unet = place_in_unet + self.editing_prompts = editing_prompts + self.pnp = pnp + + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states, + attention_mask=None, + temb=None, + ): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + self.attnstore( + attention_probs, + is_cross=True, + place_in_unet=self.place_in_unet, + editing_prompts=self.editing_prompts, + PnP=self.pnp, + ) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states / attn.rescale_output_factor + return hidden_states + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. 
Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class LEditsPPPipelineStableDiffusion( + DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin +): + """ + Pipeline for textual image editing using LEDits++ with Stable Diffusion. + + This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass + documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular + device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer ([`~transformers.CLIPTokenizer`]): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of + [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will + automatically be set to [`DPMSolverMultistepScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
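A minimal usage sketch of the workflow this pipeline documents (invert first, then edit). It is illustrative only: it assumes the upstream `diffusers` export of the class (this repo ships its own copy under the vendored `diffusers3` package), a CUDA device, and it reuses the example image URL and edit prompts from the SDXL example further below.

```python
# Hedged usage sketch: invert an input image, then apply LEDITS++ edits to it.
# Assumptions: upstream diffusers export of LEditsPPPipelineStableDiffusion and a CUDA device.
import torch
from diffusers import LEditsPPPipelineStableDiffusion
from diffusers.utils import load_image

pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

image = load_image("https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg")

# 1) invert: stores init_latents/zs on the pipeline; edits always use the last inverted image(s)
_ = pipe.invert(image=image, num_inversion_steps=50, skip=0.15)

# 2) edit: one guidance scale / threshold / direction entry per editing prompt
edited = pipe(
    editing_prompt=["tennis ball", "tomato"],
    reverse_editing_direction=[True, False],
    edit_guidance_scale=[5.0, 10.0],
    edit_threshold=[0.9, 0.85],
).images[0]
```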
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler], + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler): + scheduler = DPMSolverMultistepScheduler.from_config( + scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2 + ) + logger.warning( + "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. " + "The scheduler has been changed to DPMSolverMultistepScheduler." + ) + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + self.inversion_steps = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, eta, generator=None): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + negative_prompt=None, + editing_prompt_embeddings=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if editing_prompt_embeddings is not None and negative_prompt_embeds is not None: + if editing_prompt_embeddings.shape != negative_prompt_embeds.shape: + raise ValueError( + "`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents): + # shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + + # if latents.shape != shape: + # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_unet(self, attention_store, PnP: bool = False): + attn_procs = {} + for name in self.unet.attn_processors.keys(): + if name.startswith("mid_block"): + place_in_unet = "mid" + elif name.startswith("up_blocks"): + place_in_unet = "up" + elif name.startswith("down_blocks"): + place_in_unet = "down" + else: + continue + + if "attn2" in name and place_in_unet != "mid": + attn_procs[name] = LEDITSCrossAttnProcessor( + attention_store=attention_store, + place_in_unet=place_in_unet, + pnp=PnP, + editing_prompts=self.enabled_editing_prompts, + ) + else: + attn_procs[name] = AttnProcessor() + + self.unet.set_attn_processor(attn_procs) + + def encode_prompt( + self, + device, + num_images_per_prompt, + enable_edit_guidance, + negative_prompt=None, + editing_prompt=None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + editing_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + enable_edit_guidance (`bool`): + whether to perform any editing or reconstruct the input image instead + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + editing_prompt (`str` or `List[str]`, *optional*): + Editing prompt(s) to be encoded. If not defined, one has to pass `editing_prompt_embeds` instead. + editing_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
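The `clip_skip` convention documented above (and implemented in the method body that follows) can be illustrated with a small, self-contained sketch. It is an assumption-laden illustration, not part of this method: `openai/clip-vit-large-patch14` is simply the text encoder this pipeline typically pairs with, and the prompt is arbitrary.

```python
# Sketch of the clip_skip convention: clip_skip=1 selects the pre-final hidden state,
# then applies the text model's final layer norm.
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

clip_skip = 1
tokens = tokenizer(["a photo of a cat"], padding="max_length", return_tensors="pt")
with torch.no_grad():
    out = text_encoder(tokens.input_ids, output_hidden_states=True)

# hidden_states[-1] is the last layer; -(clip_skip + 1) walks back from it
embeds = out.hidden_states[-(clip_skip + 1)]
embeds = text_encoder.text_model.final_layer_norm(embeds)
print(embeds.shape)  # torch.Size([1, 77, 768]) for this encoder
```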
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + batch_size = self.batch_size + num_edit_tokens = None + + if negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but exoected" + f"{batch_size} based on the input images. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: procecss multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = negative_prompt_embeds.dtype + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + if enable_edit_guidance: + if editing_prompt_embeds is None: + # textual inversion: procecss multi-vector tokens if necessary + # if isinstance(self, TextualInversionLoaderMixin): + # prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + + max_length = negative_prompt_embeds.shape[1] + text_inputs = self.tokenizer( + [x for item in editing_prompt for x in repeat(item, batch_size)], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + return_length=True, + ) + + num_edit_tokens = text_inputs.length - 2 # not counting startoftext and endoftext + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer( + [x for item in editing_prompt for x in repeat(item, batch_size)], + padding="longest", + return_tensors="pt", + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if ( + hasattr(self.text_encoder.config, "use_attention_mask") + and self.text_encoder.config.use_attention_mask + ): + attention_mask = text_inputs.attention_mask.to(device) + else: + 
attention_mask = None + + if clip_skip is None: + editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + editing_prompt_embeds = editing_prompt_embeds[0] + else: + editing_prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds) + + editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) + + bs_embed_edit, seq_len, _ = editing_prompt_embeds.shape + editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) + editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1) + editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + negative_prompt: Optional[Union[str, List[str]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + editing_prompt: Optional[Union[str, List[str]]] = None, + editing_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, + edit_guidance_scale: Optional[Union[float, List[float]]] = 5, + edit_warmup_steps: Optional[Union[int, List[int]]] = 0, + edit_cooldown_steps: Optional[Union[int, List[int]]] = None, + edit_threshold: Optional[Union[float, List[float]]] = 0.9, + user_mask: Optional[torch.Tensor] = None, + sem_guidance: Optional[List[torch.Tensor]] = None, + use_cross_attn_mask: bool = False, + use_intersect_mask: bool = True, + attn_store_steps: Optional[List[int]] = [], + store_averaged_over_steps: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + 
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for editing. The + [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusion.invert`] method has to be called beforehand. Edits will + always be performed for the last inverted image(s). + + Args: + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] instead of a plain + tuple. + editing_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. The image is reconstructed by setting + `editing_prompt = None`. Guidance direction of prompt should be specified via + `reverse_editing_direction`. + editing_prompt_embeds (`torch.Tensor>`, *optional*): + Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should + be specified via `reverse_editing_direction`. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): + Whether the corresponding prompt in `editing_prompt` should be increased or decreased. + edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): + Guidance scale for guiding the image generation. If provided as list values should correspond to + `editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++ + Paper](https://arxiv.org/abs/2301.12247). + edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): + Number of diffusion steps (for each prompt) for which guidance will not be applied. + edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): + Number of diffusion steps (for each prompt) after which guidance will no longer be applied. + edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): + Masking threshold of guidance. Threshold should be proportional to the image region that is modified. + 'edit_threshold' is defined as 'ฮป' of equation 12 of [LEDITS++ + Paper](https://arxiv.org/abs/2301.12247). + user_mask (`torch.Tensor`, *optional*): + User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s + implicit masks do not meet user preferences. + sem_guidance (`List[torch.Tensor]`, *optional*): + List of pre-generated guidance vectors to be applied at generation. Length of the list has to + correspond to `num_inference_steps`. + use_cross_attn_mask (`bool`, defaults to `False`): + Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask + is set to true. 
Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++
+                paper](https://arxiv.org/pdf/2311.16711.pdf).
+            use_intersect_mask (`bool`, defaults to `True`):
+                Whether the masking term is calculated as intersection of cross-attention masks and masks derived from
+                the noise estimate. Cross-attention masks are defined as 'M^1' and masks derived from the noise
+                estimate are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf).
+            attn_store_steps (`List[int]`, *optional*):
+                Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.
+            store_averaged_over_steps (`bool`, defaults to `True`):
+                Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If
+                False, attention maps for each step are stored separately. Just for visualization purposes.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            guidance_rescale (`float`, *optional*, defaults to 0.0):
+                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
+                Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
+                using zero terminal SNR.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`:
+                [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
+                `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
+                element is a list of `bool`s denoting whether the corresponding generated image likely represents
+                "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
+        """
+
+        if self.inversion_steps is None:
+            raise ValueError(
+                "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)."
+            )
+
+        eta = self.eta
+        num_images_per_prompt = 1
+        latents = self.init_latents
+
+        zs = self.zs
+        self.scheduler.set_timesteps(len(self.scheduler.timesteps))
+
+        if use_intersect_mask:
+            use_cross_attn_mask = True
+
+        if use_cross_attn_mask:
+            self.smoothing = LeditsGaussianSmoothing(self.device)
+
+        if user_mask is not None:
+            user_mask = user_mask.to(self.device)
+
+        org_prompt = ""
+
+        # 1. Check inputs.
Raise error if not correct + self.check_inputs( + negative_prompt, + editing_prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + batch_size = self.batch_size + + if editing_prompt: + enable_edit_guidance = True + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + self.enabled_editing_prompts = len(editing_prompt) + elif editing_prompt_embeds is not None: + enable_edit_guidance = True + self.enabled_editing_prompts = editing_prompt_embeds.shape[0] + else: + self.enabled_editing_prompts = 0 + enable_edit_guidance = False + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + edit_concepts, uncond_embeddings, num_edit_tokens = self.encode_prompt( + editing_prompt=editing_prompt, + device=self.device, + num_images_per_prompt=num_images_per_prompt, + enable_edit_guidance=enable_edit_guidance, + negative_prompt=negative_prompt, + editing_prompt_embeds=editing_prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if enable_edit_guidance: + text_embeddings = torch.cat([uncond_embeddings, edit_concepts]) + self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt + else: + text_embeddings = torch.cat([uncond_embeddings]) + + # 4. Prepare timesteps + # self.scheduler.set_timesteps(num_inference_steps, device=self.device) + timesteps = self.inversion_steps + t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs.shape[0] :])} + + if use_cross_attn_mask: + self.attention_store = LeditsAttentionStore( + average=store_averaged_over_steps, + batch_size=batch_size, + max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), + max_resolution=None, + ) + self.prepare_unet(self.attention_store, PnP=False) + resolution = latents.shape[-2:] + att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + None, + None, + text_embeddings.dtype, + self.device, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(eta) + + self.sem_guidance = None + self.activation_mask = None + + # 7. 
Denoising loop + num_warmup_steps = 0 + with self.progress_bar(total=len(timesteps)) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + + if enable_edit_guidance: + latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) + else: + latent_model_input = latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + text_embed_input = text_embeddings + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample + + noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] + noise_pred_uncond = noise_pred_out[0] + noise_pred_edit_concepts = noise_pred_out[1:] + + noise_guidance_edit = torch.zeros( + noise_pred_uncond.shape, + device=self.device, + dtype=noise_pred_uncond.dtype, + ) + + if sem_guidance is not None and len(sem_guidance) > i: + noise_guidance_edit += sem_guidance[i].to(self.device) + + elif enable_edit_guidance: + if self.activation_mask is None: + self.activation_mask = torch.zeros( + (len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) + ) + + if self.sem_guidance is None: + self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) + + for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): + if isinstance(edit_warmup_steps, list): + edit_warmup_steps_c = edit_warmup_steps[c] + else: + edit_warmup_steps_c = edit_warmup_steps + if i < edit_warmup_steps_c: + continue + + if isinstance(edit_guidance_scale, list): + edit_guidance_scale_c = edit_guidance_scale[c] + else: + edit_guidance_scale_c = edit_guidance_scale + + if isinstance(edit_threshold, list): + edit_threshold_c = edit_threshold[c] + else: + edit_threshold_c = edit_threshold + if isinstance(reverse_editing_direction, list): + reverse_editing_direction_c = reverse_editing_direction[c] + else: + reverse_editing_direction_c = reverse_editing_direction + + if isinstance(edit_cooldown_steps, list): + edit_cooldown_steps_c = edit_cooldown_steps[c] + elif edit_cooldown_steps is None: + edit_cooldown_steps_c = i + 1 + else: + edit_cooldown_steps_c = edit_cooldown_steps + + if i >= edit_cooldown_steps_c: + continue + + noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond + + if reverse_editing_direction_c: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c + + if user_mask is not None: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask + + if use_cross_attn_mask: + out = self.attention_store.aggregate_attention( + attention_maps=self.attention_store.step_store, + prompts=self.text_cross_attention_maps, + res=att_res, + from_where=["up", "down"], + is_cross=True, + select=self.text_cross_attention_maps.index(editing_prompt[c]), + ) + attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext + + # average over all tokens + if attn_map.shape[3] != num_edit_tokens[c]: + raise ValueError( + f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" 
+ ) + + attn_map = torch.sum(attn_map, dim=3) + + # gaussian_smoothing + attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") + attn_map = self.smoothing(attn_map).squeeze(1) + + # torch.quantile function expects float32 + if attn_map.dtype == torch.float32: + tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) + else: + tmp = torch.quantile( + attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 + ).to(attn_map.dtype) + attn_mask = torch.where( + attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 + ) + + # resolution must match latent space dimension + attn_mask = F.interpolate( + attn_mask.unsqueeze(1), + noise_guidance_edit_tmp.shape[-2:], # 64,64 + ).repeat(1, 4, 1, 1) + self.activation_mask[i, c] = attn_mask.detach().cpu() + if not use_intersect_mask: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask + + if use_intersect_mask: + if t <= 800: + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( + 1, self.unet.config.in_channels, 1, 1 + ) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + intersect_mask = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + * attn_mask + ) + + self.activation_mask[i, c] = intersect_mask.detach().cpu() + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask + + else: + # print(f"only attention mask for step {i}") + noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask + + elif not use_cross_attn_mask: + # calculate quantile + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + self.activation_mask[i, c] = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + .detach() + .cpu() + ) + + noise_guidance_edit_tmp = torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + noise_guidance_edit_tmp, + torch.zeros_like(noise_guidance_edit_tmp), + ) + + noise_guidance_edit += noise_guidance_edit_tmp + + self.sem_guidance[i] = noise_guidance_edit.detach().cpu() + + noise_pred = noise_pred_uncond + noise_guidance_edit + + if enable_edit_guidance and 
self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, + noise_pred_edit_concepts.mean(dim=0, keepdim=False), + guidance_rescale=self.guidance_rescale, + ) + + idx = t_to_idx[int(t)] + latents = self.scheduler.step( + noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs + ).prev_sample + + # step callback + if use_cross_attn_mask: + store_step = i in attn_store_steps + self.attention_store.between_steps(store_step) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + # prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 8. Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + @torch.no_grad() + def invert( + self, + image: PipelineImageInput, + source_prompt: str = "", + source_guidance_scale: float = 3.5, + num_inversion_steps: int = 30, + skip: float = 0.15, + generator: Optional[torch.Generator] = None, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + height: Optional[int] = None, + width: Optional[int] = None, + resize_mode: Optional[str] = "default", + crops_coords: Optional[Tuple[int, int, int, int]] = None, + ): + r""" + The function to the pipeline for image inversion as described by the [LEDITS++ + Paper](https://arxiv.org/abs/2301.12247). If the scheduler is set to [`~schedulers.DDIMScheduler`] the + inversion proposed by [edit-friendly DPDM](https://arxiv.org/abs/2304.06140) will be performed instead. + + Args: + image (`PipelineImageInput`): + Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect + ratio. + source_prompt (`str`, defaults to `""`): + Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled + if the `source_prompt` is `""`. + source_guidance_scale (`float`, defaults to `3.5`): + Strength of guidance during inversion. + num_inversion_steps (`int`, defaults to `30`): + Number of total performed inversion steps after discarding the initial `skip` steps. + skip (`float`, defaults to `0.15`): + Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values + will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. 
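The implicit mask that drives the edits in the denoising loop above (a Gaussian-smoothed cross-attention map, cut at the `edit_threshold` quantile and upsampled to the latent grid) can be reproduced in isolation. The sketch below uses a random tensor as a stand-in for an aggregated attention map, so it only illustrates the thresholding mechanics, not real attention values.

```python
# Stand-alone sketch of the LEDITS++ masking step used in the loop above:
# 3x3 Gaussian smoothing (sigma=0.5, reflect padding), then a per-image quantile threshold.
import torch
import torch.nn.functional as F

att_res = (16, 16)
attn_map = torch.rand(1, *att_res)  # stand-in for an aggregated cross-attention map
edit_threshold = 0.9                # keep roughly the top 10% of attention values

# kernel equivalent to the LeditsGaussianSmoothing class defined earlier in this file
coords = torch.arange(3, dtype=torch.float32) - 1.0
g = torch.exp(-((coords / (2 * 0.5)) ** 2))
kernel = g[:, None] * g[None, :]
kernel = (kernel / kernel.sum()).view(1, 1, 3, 3)

smoothed = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect")
smoothed = F.conv2d(smoothed, kernel).squeeze(1)

tmp = torch.quantile(smoothed.flatten(start_dim=1), edit_threshold, dim=1)
attn_mask = torch.where(smoothed >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0)

# upsample to the latent resolution and broadcast over the 4 latent channels
latent_mask = F.interpolate(attn_mask.unsqueeze(1), (64, 64)).repeat(1, 4, 1, 1)
print(latent_mask.shape)  # torch.Size([1, 4, 64, 64])
```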
+            generator (`torch.Generator`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make inversion
+                deterministic.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            height (`int`, *optional*, defaults to `None`):
+                The height of the preprocessed image. If `None`, `get_default_height_width()` is used to get the
+                default height.
+            width (`int`, *optional*, defaults to `None`):
+                The width of the preprocessed image. If `None`, `get_default_height_width()` is used to get the
+                default width.
+            resize_mode (`str`, *optional*, defaults to `default`):
+                The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit within
+                the specified width and height, and it may not maintain the original aspect ratio. If `fill`, will
+                resize the image to fit within the specified width and height, maintaining the aspect ratio, and then
+                center the image within the dimensions, filling empty areas with data from the image. If `crop`, will
+                resize the image to fit within the specified width and height, maintaining the aspect ratio, and then
+                center the image within the dimensions, cropping the excess. Note that resize_mode `fill` and `crop`
+                are only supported for PIL image input.
+            crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`):
+                The crop coordinates for each image in the batch. If `None`, will not crop the image.
+
+        Returns:
+            [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s)
+            and respective VAE reconstruction(s).
+        """
+        # Reset attn processor, we do not want to store attn maps during inversion
+        self.unet.set_attn_processor(AttnProcessor())
+
+        self.eta = 1.0
+
+        self.scheduler.config.timestep_spacing = "leading"
+        self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip)))
+        self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:]
+        timesteps = self.inversion_steps
+
+        # 1. encode image
+        x0, resized = self.encode_image(
+            image,
+            dtype=self.text_encoder.dtype,
+            height=height,
+            width=width,
+            resize_mode=resize_mode,
+            crops_coords=crops_coords,
+        )
+        self.batch_size = x0.shape[0]
+
+        # autoencoder reconstruction
+        image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
+        image_rec = self.image_processor.postprocess(image_rec, output_type="pil")
+
+        # 2. get embeddings
+        do_classifier_free_guidance = source_guidance_scale > 1.0
+
+        lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
+
+        uncond_embedding, text_embeddings, _ = self.encode_prompt(
+            num_images_per_prompt=1,
+            device=self.device,
+            negative_prompt=None,
+            enable_edit_guidance=do_classifier_free_guidance,
+            editing_prompt=source_prompt,
+            lora_scale=lora_scale,
+            clip_skip=clip_skip,
+        )
+
+        # 3.
find zs and xts + variance_noise_shape = (num_inversion_steps, *x0.shape) + + # intermediate latents + t_to_idx = {int(v): k for k, v in enumerate(timesteps)} + xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) + + for t in reversed(timesteps): + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) + xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t])) + xts = torch.cat([x0.unsqueeze(0), xts], dim=0) + + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + # noise maps + zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) + + with self.progress_bar(total=len(timesteps)) as progress_bar: + for t in timesteps: + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + # 1. predict noise residual + xt = xts[idx + 1] + + noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample + + if not source_prompt == "": + noise_pred_cond = self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample + noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred) + + xtm1 = xts[idx] + z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) + zs[idx] = z + + # correction to avoid error accumulation + xts[idx] = xtm1_corrected + + progress_bar.update() + + self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1) + zs = zs.flip(0) + self.zs = zs + + return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) + + @torch.no_grad() + def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): + image = self.image_processor.preprocess( + image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + resized = self.image_processor.postprocess(image=image, output_type="pil") + + if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: + logger.warning( + "Your input images far exceed the default resolution of the underlying diffusion model. " + "The output images may contain severe artifacts! " + "Consider down-sampling the input using the `height` and `width` parameters" + ) + image = image.to(dtype) + + x0 = self.vae.encode(image.to(self.device)).latent_dist.mode() + x0 = x0.to(dtype) + x0 = self.vae.config.scaling_factor * x0 + return x0, resized + + +def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) + + # 4. Clip "predicted x_0" + if scheduler.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 5. 
compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred + + # modifed so that updated xtm1 is returned as well (to avoid error accumulation) + mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + if variance > 0.0: + noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) + else: + noise = torch.tensor([0.0]).to(latents.device) + + return noise, mu_xt + (eta * variance**0.5) * noise + + +def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): + def first_order_update(model_output, sample): # timestep, prev_timestep, sample): + sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + + mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + mu_xt = scheduler.dpm_solver_first_order_update( + model_output=model_output, sample=sample, noise=torch.zeros_like(sample) + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + return noise, prev_sample + + def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): + sigma_t, sigma_s0, sigma_s1 = ( + scheduler.sigmas[scheduler.step_index + 1], + scheduler.sigmas[scheduler.step_index], + scheduler.sigmas[scheduler.step_index - 1], + ) + + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + + mu_xt = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + + return noise, prev_sample + + if scheduler.step_index is None: + scheduler._init_step_index(timestep) + + model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) + for i in range(scheduler.config.solver_order - 1): + scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] + scheduler.model_outputs[-1] = model_output + + if scheduler.lower_order_nums < 1: + noise, prev_sample = first_order_update(model_output, latents) + else: + noise, prev_sample = second_order_update(scheduler.model_outputs, latents) + + if 
scheduler.lower_order_nums < scheduler.config.solver_order: + scheduler.lower_order_nums += 1 + + # upon completion increase step index by one + scheduler._step_index += 1 + + return noise, prev_sample + + +def compute_noise(scheduler, *args): + if isinstance(scheduler, DDIMScheduler): + return compute_noise_ddim(scheduler, *args) + elif ( + isinstance(scheduler, DPMSolverMultistepScheduler) + and scheduler.config.algorithm_type == "sde-dpmsolver++" + and scheduler.config.solver_order == 2 + ): + return compute_noise_sde_dpm_pp_2nd(scheduler, *args) + else: + raise NotImplementedError diff --git a/diffusers3/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py b/diffusers3/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..995cc15f3f933e74c223fc43aaca38260164c6d5 --- /dev/null +++ b/diffusers3/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion_xl.py @@ -0,0 +1,1794 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + Attention, + AttnProcessor, + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> import PIL + >>> import requests + >>> from io import BytesIO + + >>> from diffusers import LEditsPPPipelineStableDiffusionXL + + >>> pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + + >>> def download_image(url): + ... 
response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg" + >>> image = download_image(img_url) + + >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.2) + + >>> edited_image = pipe( + ... editing_prompt=["tennis ball", "tomato"], + ... reverse_editing_direction=[True, False], + ... edit_guidance_scale=[5.0, 10.0], + ... edit_threshold=[0.9, 0.85], + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsAttentionStore +class LeditsAttentionStore: + @staticmethod + def get_empty_store(): + return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} + + def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): + # attn.shape = batch_size * head_size, seq_len query, seq_len_key + if attn.shape[1] <= self.max_size: + bs = 1 + int(PnP) + editing_prompts + skip = 2 if PnP else 1 # skip PnP & unconditional + attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) + source_batch_size = int(attn.shape[1] // bs) + self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) + + def forward(self, attn, is_cross: bool, place_in_unet: str): + key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" + + self.step_store[key].append(attn) + + def between_steps(self, store_step=True): + if store_step: + if self.average: + if len(self.attention_store) == 0: + self.attention_store = self.step_store + else: + for key in self.attention_store: + for i in range(len(self.attention_store[key])): + self.attention_store[key][i] += self.step_store[key][i] + else: + if len(self.attention_store) == 0: + self.attention_store = [self.step_store] + else: + self.attention_store.append(self.step_store) + + self.cur_step += 1 + self.step_store = self.get_empty_store() + + def get_attention(self, step: int): + if self.average: + attention = { + key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store + } + else: + assert step is not None + attention = self.attention_store[step] + return attention + + def aggregate_attention( + self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int + ): + out = [[] for x in range(self.batch_size)] + if isinstance(res, int): + num_pixels = res**2 + resolution = (res, res) + else: + num_pixels = res[0] * res[1] + resolution = res[:2] + + for location in from_where: + for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: + for batch, item in enumerate(bs_item): + if item.shape[1] == num_pixels: + cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] + out[batch].append(cross_maps) + + out = torch.stack([torch.cat(x, dim=0) for x in out]) + # average over heads + out = out.sum(1) / out.shape[1] + return out + + def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): + self.step_store = self.get_empty_store() + self.attention_store = [] + self.cur_step = 0 + self.average = average + self.batch_size = batch_size + if max_size is None: + self.max_size = max_resolution**2 + elif max_size is not None and max_resolution is None: + self.max_size = max_size + else: + raise ValueError("Only allowed to set one of max_resolution or max_size") + + +# Copied from 
diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsGaussianSmoothing +class LeditsGaussianSmoothing: + def __init__(self, device): + kernel_size = [3, 3] + sigma = [0.5, 0.5] + + # The gaussian kernel is the product of the gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) + + self.weight = kernel.to(device) + + def __call__(self, input): + """ + Arguments: + Apply gaussian filter to input. + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. + """ + return F.conv2d(input, weight=self.weight.to(input.dtype)) + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEDITSCrossAttnProcessor +class LEDITSCrossAttnProcessor: + def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): + self.attnstore = attention_store + self.place_in_unet = place_in_unet + self.editing_prompts = editing_prompts + self.pnp = pnp + + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states, + attention_mask=None, + temb=None, + ): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + self.attnstore( + attention_probs, + is_cross=True, + place_in_unet=self.place_in_unet, + editing_prompts=self.editing_prompts, + PnP=self.pnp, + ) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states = hidden_states / attn.rescale_output_factor + return hidden_states + + +class LEditsPPPipelineStableDiffusionXL( + DiffusionPipeline, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + """ + Pipeline for textual image editing using LEDits++ with Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionXLPipeline`]. Check the + superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a + particular device, etc.). 
+ + In addition the pipeline inherits the following loading methods: + - *LoRA*: [`LEditsPPPipelineStableDiffusionXL.load_lora_weights`] + - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] + + as well as the following saving methods: + - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`] + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer ([`~transformers.CLIPTokenizer`]): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will + automatically be set to [`DPMSolverMultistepScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1.0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: Union[DPMSolverMultistepScheduler, DDIMScheduler], + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler): + self.scheduler = DPMSolverMultistepScheduler.from_config( + scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2 + ) + logger.warning( + "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. " + "The scheduler has been changed to DPMSolverMultistepScheduler." + ) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + self.inversion_steps = None + + def encode_prompt( + self, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + enable_edit_guidance: bool = True, + editing_prompt: Optional[str] = None, + editing_prompt_embeds: Optional[torch.Tensor] = None, + editing_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ) -> object: + r""" + Encodes the prompt into text encoder hidden states. + + Args: + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + enable_edit_guidance (`bool`): + Whether to guide towards an editing prompt or not. + editing_prompt (`str` or `List[str]`, *optional*): + Editing prompt(s) to be encoded. If not defined and 'enable_edit_guidance' is True, one has to pass + `editing_prompt_embeds` instead. + editing_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided and 'enable_edit_guidance' is True, editing_prompt_embeds will be generated from + `editing_prompt` input argument. + editing_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated edit pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled editing_pooled_prompt_embeds will be generated from `editing_prompt` + input argument. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + batch_size = self.batch_size + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + num_edit_tokens = 0 + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + + if negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + + if batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but image inversion " + f" has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of the input images." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(negative_prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(negative_pooled_prompt_embeds) + + if enable_edit_guidance and editing_prompt_embeds is None: + editing_prompt_2 = editing_prompt + + editing_prompts = [editing_prompt, editing_prompt_2] + edit_prompt_embeds_list = [] + + for editing_prompt, tokenizer, text_encoder in zip(editing_prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + editing_prompt = self.maybe_convert_prompt(editing_prompt, tokenizer) + + max_length = negative_prompt_embeds.shape[1] + edit_concepts_input = tokenizer( + # [x for item in editing_prompt for x in repeat(item, batch_size)], + editing_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + return_length=True, + ) + num_edit_tokens = edit_concepts_input.length - 2 + + edit_concepts_embeds = text_encoder( + edit_concepts_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + editing_pooled_prompt_embeds = edit_concepts_embeds[0] + if clip_skip is None: + edit_concepts_embeds = edit_concepts_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
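+ # e.g. clip_skip=1 selects hidden_states[-3], one layer earlier than the default hidden_states[-2]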
+ edit_concepts_embeds = edit_concepts_embeds.hidden_states[-(clip_skip + 2)] + + edit_prompt_embeds_list.append(edit_concepts_embeds) + + edit_concepts_embeds = torch.concat(edit_prompt_embeds_list, dim=-1) + elif not enable_edit_guidance: + edit_concepts_embeds = None + editing_pooled_prompt_embeds = None + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + bs_embed, seq_len, _ = negative_prompt_embeds.shape + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if enable_edit_guidance: + bs_embed_edit, seq_len, _ = edit_concepts_embeds.shape + edit_concepts_embeds = edit_concepts_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + edit_concepts_embeds = edit_concepts_embeds.repeat(1, num_images_per_prompt, 1) + edit_concepts_embeds = edit_concepts_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) + + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if enable_edit_guidance: + editing_pooled_prompt_embeds = editing_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed_edit * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return ( + negative_prompt_embeds, + edit_concepts_embeds, + negative_pooled_prompt_embeds, + editing_pooled_prompt_embeds, + num_edit_tokens, + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, eta, generator=None): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + negative_prompt=None, + negative_prompt_2=None, + negative_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ): + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, device, latents): + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
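+ + Example: + A minimal usage sketch (assuming `pipe` is an instantiated pipeline exposing this method): + + >>> w = torch.tensor([7.5]) + >>> emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256) + >>> emb.shape + torch.Size([1, 256])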
+ """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.prepare_unet + def prepare_unet(self, attention_store, PnP: bool = False): + attn_procs = {} + for name in self.unet.attn_processors.keys(): + if name.startswith("mid_block"): + place_in_unet = "mid" + elif name.startswith("up_blocks"): + place_in_unet = "up" + elif name.startswith("down_blocks"): + place_in_unet = "down" + else: + continue + + if "attn2" in name and place_in_unet != "mid": + attn_procs[name] = LEDITSCrossAttnProcessor( + attention_store=attention_store, + place_in_unet=place_in_unet, + pnp=PnP, + editing_prompts=self.enabled_editing_prompts, + ) + else: + attn_procs[name] = AttnProcessor() + + self.unet.set_attn_processor(attn_procs) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + denoising_end: Optional[float] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + editing_prompt: Optional[Union[str, List[str]]] = None, + editing_prompt_embeddings: Optional[torch.Tensor] = None, + editing_pooled_prompt_embeds: Optional[torch.Tensor] = None, + reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, + edit_guidance_scale: Optional[Union[float, List[float]]] = 5, + edit_warmup_steps: Optional[Union[int, List[int]]] = 0, + edit_cooldown_steps: Optional[Union[int, List[int]]] = None, + edit_threshold: Optional[Union[float, List[float]]] = 0.9, + sem_guidance: Optional[List[torch.Tensor]] = None, + use_cross_attn_mask: bool = False, + use_intersect_mask: bool = False, + user_mask: Optional[torch.Tensor] = None, + attn_store_steps: Optional[List[int]] = [], + store_averaged_over_steps: bool = True, + clip_skip: Optional[int] = None, + callback_on_step_end: 
Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for editing. The + [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusionXL.invert`] method has to be called beforehand. Edits + will always be performed for the last inverted image(s). + + Args: + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.7): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
+ Guidance rescale factor should fix overexposure when using zero terminal SNR. + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + editing_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. The image is reconstructed by setting + `editing_prompt = None`. Guidance direction of prompt should be specified via + `reverse_editing_direction`. + editing_prompt_embeddings (`torch.Tensor`, *optional*): + Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input argument. + editing_pooled_prompt_embeddings (`torch.Tensor`, *optional*): + Pre-generated pooled edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input + argument. + reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): + Whether the corresponding prompt in `editing_prompt` should be increased or decreased. + edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): + Guidance scale for guiding the image generation. If provided as list values should correspond to + `editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++ + Paper](https://arxiv.org/abs/2301.12247). + edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): + Number of diffusion steps (for each prompt) for which guidance is not applied. + edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): + Number of diffusion steps (for each prompt) after which guidance is no longer applied. + edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): + Masking threshold of guidance. Threshold should be proportional to the image region that is modified. + 'edit_threshold' is defined as 'ฮป' of equation 12 of [LEDITS++ + Paper](https://arxiv.org/abs/2301.12247). + sem_guidance (`List[torch.Tensor]`, *optional*): + List of pre-generated guidance vectors to be applied at generation. Length of the list has to + correspond to `num_inference_steps`. + use_cross_attn_mask: + Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask + is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++ + paper](https://arxiv.org/pdf/2311.16711.pdf). + use_intersect_mask: + Whether the masking term is calculated as intersection of cross-attention masks and masks derived from + the noise estimate. 
Cross-attention mask are defined as 'M^1' and masks derived from the noise estimate + are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf). + user_mask: + User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s + implicit masks do not meet user preferences. + attn_store_steps: + Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes. + store_averaged_over_steps: + Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If + False, attention maps for each step are stores separately. Just for visualization purposes. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When + returning a tuple, the first element is a list with the generated images. + """ + if self.inversion_steps is None: + raise ValueError( + "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." + ) + + eta = self.eta + num_images_per_prompt = 1 + latents = self.init_latents + + zs = self.zs + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + + if use_intersect_mask: + use_cross_attn_mask = True + + if use_cross_attn_mask: + self.smoothing = LeditsGaussianSmoothing(self.device) + + if user_mask is not None: + user_mask = user_mask.to(self.device) + + # TODO: Check inputs + # 1. Check inputs. Raise error if not correct + # self.check_inputs( + # callback_steps, + # negative_prompt, + # negative_prompt_2, + # prompt_embeds, + # negative_prompt_embeds, + # pooled_prompt_embeds, + # negative_pooled_prompt_embeds, + # ) + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. Define call parameters + batch_size = self.batch_size + + device = self._execution_device + + if editing_prompt: + enable_edit_guidance = True + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + self.enabled_editing_prompts = len(editing_prompt) + elif editing_prompt_embeddings is not None: + enable_edit_guidance = True + self.enabled_editing_prompts = editing_prompt_embeddings.shape[0] + else: + self.enabled_editing_prompts = 0 + enable_edit_guidance = False + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + edit_prompt_embeds, + negative_pooled_prompt_embeds, + pooled_edit_embeds, + num_edit_tokens, + ) = self.encode_prompt( + device=device, + num_images_per_prompt=num_images_per_prompt, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_embeds=negative_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + enable_edit_guidance=enable_edit_guidance, + editing_prompt=editing_prompt, + editing_prompt_embeds=editing_prompt_embeddings, + editing_pooled_prompt_embeds=editing_pooled_prompt_embeds, + ) + + # 4. Prepare timesteps + # self.scheduler.set_timesteps(num_inference_steps, device=device) + + timesteps = self.inversion_steps + t_to_idx = {int(v): k for k, v in enumerate(timesteps)} + + if use_cross_attn_mask: + self.attention_store = LeditsAttentionStore( + average=store_averaged_over_steps, + batch_size=batch_size, + max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), + max_resolution=None, + ) + self.prepare_unet(self.attention_store) + resolution = latents.shape[-2:] + att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) + + # 5. Prepare latent variables + latents = self.prepare_latents(device=device, latents=latents) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(eta) + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + # 7. Prepare added time ids & embeddings + add_text_embeds = negative_pooled_prompt_embeds + add_time_ids = self._get_add_time_ids( + self.size, + crops_coords_top_left, + self.size, + dtype=negative_pooled_prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if enable_edit_guidance: + prompt_embeds = torch.cat([prompt_embeds, edit_prompt_embeds], dim=0) + add_text_embeds = torch.cat([add_text_embeds, pooled_edit_embeds], dim=0) + edit_concepts_time_ids = add_time_ids.repeat(edit_prompt_embeds.shape[0], 1) + add_time_ids = torch.cat([add_time_ids, edit_concepts_time_ids], dim=0) + self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None: + # TODO: fix image encoding + image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + image_embeds = image_embeds.to(device) + + # 8. Denoising loop + self.sem_guidance = None + self.activation_mask = None + + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] + noise_pred_uncond = noise_pred_out[0] + noise_pred_edit_concepts = noise_pred_out[1:] + + noise_guidance_edit = torch.zeros( + noise_pred_uncond.shape, + device=self.device, + dtype=noise_pred_uncond.dtype, + ) + + if sem_guidance is not None and len(sem_guidance) > i: + noise_guidance_edit += sem_guidance[i].to(self.device) + + elif enable_edit_guidance: + if self.activation_mask is None: + self.activation_mask = torch.zeros( + (len(timesteps), self.enabled_editing_prompts, *noise_pred_edit_concepts[0].shape) + ) + if self.sem_guidance is None: + self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) + + # noise_guidance_edit = torch.zeros_like(noise_guidance) + for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): + if isinstance(edit_warmup_steps, list): + edit_warmup_steps_c = edit_warmup_steps[c] + else: + edit_warmup_steps_c = edit_warmup_steps + if i < edit_warmup_steps_c: + continue + + if isinstance(edit_guidance_scale, list): + edit_guidance_scale_c = edit_guidance_scale[c] + else: + edit_guidance_scale_c = edit_guidance_scale + + if isinstance(edit_threshold, list): + edit_threshold_c = edit_threshold[c] + else: + edit_threshold_c = edit_threshold + if isinstance(reverse_editing_direction, list): + reverse_editing_direction_c = reverse_editing_direction[c] + else: + reverse_editing_direction_c = reverse_editing_direction + + if isinstance(edit_cooldown_steps, list): + edit_cooldown_steps_c = edit_cooldown_steps[c] + elif edit_cooldown_steps is None: + edit_cooldown_steps_c = i + 1 + else: + edit_cooldown_steps_c = edit_cooldown_steps + + if i >= edit_cooldown_steps_c: + continue + + noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond + + if reverse_editing_direction_c: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c + + if user_mask is not None: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask + + if use_cross_attn_mask: + out = self.attention_store.aggregate_attention( + attention_maps=self.attention_store.step_store, + prompts=self.text_cross_attention_maps, + res=att_res, + from_where=["up", "down"], + is_cross=True, + select=self.text_cross_attention_maps.index(editing_prompt[c]), + 
) + attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext + + # average over all tokens + if attn_map.shape[3] != num_edit_tokens[c]: + raise ValueError( + f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" + ) + attn_map = torch.sum(attn_map, dim=3) + + # gaussian_smoothing + attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") + attn_map = self.smoothing(attn_map).squeeze(1) + + # torch.quantile function expects float32 + if attn_map.dtype == torch.float32: + tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) + else: + tmp = torch.quantile( + attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 + ).to(attn_map.dtype) + attn_mask = torch.where( + attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 + ) + + # resolution must match latent space dimension + attn_mask = F.interpolate( + attn_mask.unsqueeze(1), + noise_guidance_edit_tmp.shape[-2:], # 64,64 + ).repeat(1, 4, 1, 1) + self.activation_mask[i, c] = attn_mask.detach().cpu() + if not use_intersect_mask: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask + + if use_intersect_mask: + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( + 1, self.unet.config.in_channels, 1, 1 + ) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + intersect_mask = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + * attn_mask + ) + + self.activation_mask[i, c] = intersect_mask.detach().cpu() + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask + + elif not use_cross_attn_mask: + # calculate quantile + noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) + noise_guidance_edit_tmp_quantile = torch.sum( + noise_guidance_edit_tmp_quantile, dim=1, keepdim=True + ) + noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp_quantile.dtype == torch.float32: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp_quantile.dtype) + + self.activation_mask[i, c] = ( + torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + torch.ones_like(noise_guidance_edit_tmp), + torch.zeros_like(noise_guidance_edit_tmp), + ) + .detach() + .cpu() + ) + + noise_guidance_edit_tmp = torch.where( + noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], + noise_guidance_edit_tmp, + torch.zeros_like(noise_guidance_edit_tmp), + ) + + noise_guidance_edit += noise_guidance_edit_tmp + + self.sem_guidance[i] = 
noise_guidance_edit.detach().cpu() + + noise_pred = noise_pred_uncond + noise_guidance_edit + + # compute the previous noisy sample x_t -> x_t-1 + if enable_edit_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, + noise_pred_edit_concepts.mean(dim=0, keepdim=False), + guidance_rescale=self.guidance_rescale, + ) + + idx = t_to_idx[int(t)] + latents = self.scheduler.step( + noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs, return_dict=False + )[0] + + # step callback + if use_cross_attn_mask: + store_step = i in attn_store_steps + self.attention_store.between_steps(store_step) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + # negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > 0 and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=None) + + @torch.no_grad() + # Modified from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.encode_image + def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): + image = self.image_processor.preprocess( + image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + resized = self.image_processor.postprocess(image=image, output_type="pil") + + if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: + logger.warning( + "Your input images far exceed the default resolution of the underlying diffusion model. " + "The output images may contain severe artifacts! 
" + "Consider down-sampling the input using the `height` and `width` parameters" + ) + image = image.to(self.device, dtype=dtype) + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + image = image.float() + self.upcast_vae() + + x0 = self.vae.encode(image).latent_dist.mode() + x0 = x0.to(dtype) + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + x0 = self.vae.config.scaling_factor * x0 + return x0, resized + + @torch.no_grad() + def invert( + self, + image: PipelineImageInput, + source_prompt: str = "", + source_guidance_scale=3.5, + negative_prompt: str = None, + negative_prompt_2: str = None, + num_inversion_steps: int = 50, + skip: float = 0.15, + generator: Optional[torch.Generator] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + num_zero_noise_steps: int = 3, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + r""" + The function to the pipeline for image inversion as described by the [LEDITS++ + Paper](https://arxiv.org/abs/2301.12247). If the scheduler is set to [`~schedulers.DDIMScheduler`] the + inversion proposed by [edit-friendly DPDM](https://arxiv.org/abs/2304.06140) will be performed instead. + + Args: + image (`PipelineImageInput`): + Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect + ratio. + source_prompt (`str`, defaults to `""`): + Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled + if the `source_prompt` is `""`. + source_guidance_scale (`float`, defaults to `3.5`): + Strength of guidance during inversion. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_inversion_steps (`int`, defaults to `50`): + Number of total performed inversion steps after discarding the initial `skip` steps. + skip (`float`, defaults to `0.15`): + Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values + will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make inversion + deterministic. + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + num_zero_noise_steps (`int`, defaults to `3`): + Number of final diffusion steps that will not renoise the current image. If no steps are set to zero + SD-XL in combination with [`DPMSolverMultistepScheduler`] will produce noise artifacts. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Returns: + [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s) + and respective VAE reconstruction(s). + """ + + # Reset attn processor, we do not want to store attn maps during inversion + self.unet.set_attn_processor(AttnProcessor()) + + self.eta = 1.0 + + self.scheduler.config.timestep_spacing = "leading" + self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) + self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] + timesteps = self.inversion_steps + + num_images_per_prompt = 1 + + device = self._execution_device + + # 0. Ensure that only uncond embedding is used if prompt = "" + if source_prompt == "": + # noise pred should only be noise_pred_uncond + source_guidance_scale = 0.0 + do_classifier_free_guidance = False + else: + do_classifier_free_guidance = source_guidance_scale > 1.0 + + # 1. prepare image + x0, resized = self.encode_image(image, dtype=self.text_encoder_2.dtype) + width = x0.shape[2] * self.vae_scale_factor + height = x0.shape[3] * self.vae_scale_factor + self.size = (height, width) + + self.batch_size = x0.shape[0] + + # 2. get embeddings + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + + if isinstance(source_prompt, str): + source_prompt = [source_prompt] * self.batch_size + + ( + negative_prompt_embeds, + prompt_embeds, + negative_pooled_prompt_embeds, + edit_pooled_prompt_embeds, + _, + ) = self.encode_prompt( + device=device, + num_images_per_prompt=num_images_per_prompt, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + editing_prompt=source_prompt, + lora_scale=text_encoder_lora_scale, + enable_edit_guidance=do_classifier_free_guidance, + ) + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + # 3. 
Prepare added time ids & embeddings + add_text_embeds = negative_pooled_prompt_embeds + add_time_ids = self._get_add_time_ids( + self.size, + crops_coords_top_left, + self.size, + dtype=negative_prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([add_text_embeds, edit_pooled_prompt_embeds], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + negative_prompt_embeds = negative_prompt_embeds.to(device) + + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(self.batch_size * num_images_per_prompt, 1) + + # autoencoder reconstruction + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image_rec = self.vae.decode( + x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator + )[0] + elif self.vae.config.force_upcast: + x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image_rec = self.vae.decode( + x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator + )[0] + else: + image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] + + image_rec = self.image_processor.postprocess(image_rec, output_type="pil") + + # 5. find zs and xts + variance_noise_shape = (num_inversion_steps, *x0.shape) + + # intermediate latents + t_to_idx = {int(v): k for k, v in enumerate(timesteps)} + xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) + + for t in reversed(timesteps): + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) + xts[idx] = self.scheduler.add_noise(x0, noise, t.unsqueeze(0)) + xts = torch.cat([x0.unsqueeze(0), xts], dim=0) + + # noise maps + zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) + + self.scheduler.set_timesteps(len(self.scheduler.timesteps)) + + for t in self.progress_bar(timesteps): + idx = num_inversion_steps - t_to_idx[int(t)] - 1 + # 1. predict noise residual + xt = xts[idx + 1] + + latent_model_input = torch.cat([xt] * 2) if do_classifier_free_guidance else xt + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=negative_prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # 2. 
perform guidance + if do_classifier_free_guidance: + noise_pred_out = noise_pred.chunk(2) + noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] + noise_pred = noise_pred_uncond + source_guidance_scale * (noise_pred_text - noise_pred_uncond) + + xtm1 = xts[idx] + z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) + zs[idx] = z + + # correction to avoid error accumulation + xts[idx] = xtm1_corrected + + self.init_latents = xts[-1] + zs = zs.flip(0) + + if num_zero_noise_steps > 0: + zs[-num_zero_noise_steps:] = torch.zeros_like(zs[-num_zero_noise_steps:]) + self.zs = zs + return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) + + +# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_ddim +def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): + # 1. get previous step value (=t-1) + prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = scheduler.alphas_cumprod[timestep] + alpha_prod_t_prev = ( + scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod + ) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) + + # 4. Clip "predicted x_0" + if scheduler.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # 5. compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = scheduler._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 6. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred + + # modifed so that updated xtm1 is returned as well (to avoid error accumulation) + mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + if variance > 0.0: + noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) + else: + noise = torch.tensor([0.0]).to(latents.device) + + return noise, mu_xt + (eta * variance**0.5) * noise + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_sde_dpm_pp_2nd +def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): + def first_order_update(model_output, sample): # timestep, prev_timestep, sample): + sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + + mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + mu_xt = scheduler.dpm_solver_first_order_update( + model_output=model_output, sample=sample, noise=torch.zeros_like(sample) + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + return noise, prev_sample + + def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): + sigma_t, sigma_s0, sigma_s1 = ( + scheduler.sigmas[scheduler.step_index + 1], + scheduler.sigmas[scheduler.step_index], + scheduler.sigmas[scheduler.step_index - 1], + ) + + alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + + mu_xt = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + ) + + sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) + if sigma > 0.0: + noise = (prev_latents - mu_xt) / sigma + else: + noise = torch.tensor([0.0]).to(sample.device) + + prev_sample = mu_xt + sigma * noise + + return noise, prev_sample + + if scheduler.step_index is None: + scheduler._init_step_index(timestep) + + model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) + for i in range(scheduler.config.solver_order - 1): + scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] + scheduler.model_outputs[-1] = model_output + + if scheduler.lower_order_nums < 1: + noise, prev_sample = first_order_update(model_output, latents) + else: + noise, prev_sample = second_order_update(scheduler.model_outputs, latents) + + if scheduler.lower_order_nums < scheduler.config.solver_order: + scheduler.lower_order_nums += 1 + + # upon completion increase step 
index by one + scheduler._step_index += 1 + + return noise, prev_sample + + +# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise +def compute_noise(scheduler, *args): + if isinstance(scheduler, DDIMScheduler): + return compute_noise_ddim(scheduler, *args) + elif ( + isinstance(scheduler, DPMSolverMultistepScheduler) + and scheduler.config.algorithm_type == "sde-dpmsolver++" + and scheduler.config.solver_order == 2 + ): + return compute_noise_sde_dpm_pp_2nd(scheduler, *args) + else: + raise NotImplementedError diff --git a/diffusers3/pipelines/ledits_pp/pipeline_output.py b/diffusers3/pipelines/ledits_pp/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..756be82b0069aa986bc0594476e9060e114a7c84 --- /dev/null +++ b/diffusers3/pipelines/ledits_pp/pipeline_output.py @@ -0,0 +1,43 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class LEditsPPDiffusionPipelineOutput(BaseOutput): + """ + Output class for LEdits++ Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains โ€œnot-safe-for-workโ€ (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +@dataclass +class LEditsPPInversionPipelineOutput(BaseOutput): + """ + Output class for LEdits++ Diffusion pipelines. + + Args: + input_images (`List[PIL.Image.Image]` or `np.ndarray`) + List of the cropped and resized input images as PIL images of length `batch_size` or NumPy array of shape ` + (batch_size, height, width, num_channels)`. + vae_reconstruction_images (`List[PIL.Image.Image]` or `np.ndarray`) + List of VAE reconstruction of all input images as PIL images of length `batch_size` or NumPy array of shape + ` (batch_size, height, width, num_channels)`. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + vae_reconstruction_images: Union[List[PIL.Image.Image], np.ndarray] diff --git a/diffusers3/pipelines/lumina/__init__.py b/diffusers3/pipelines/lumina/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca13963597217f47b3922290abe90ed1c035c552 --- /dev/null +++ b/diffusers3/pipelines/lumina/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_lumina"] = ["LuminaText2ImgPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_lumina import LuminaText2ImgPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/lumina/pipeline_lumina.py b/diffusers3/pipelines/lumina/pipeline_lumina.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc7e8ddb954de79583fd9fff95a217159404b41 --- /dev/null +++ b/diffusers3/pipelines/lumina/pipeline_lumina.py @@ -0,0 +1,897 @@ +# Copyright 2024 Alpha-VLLM and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
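The LEdits++ SDXL inversion above caches `self.init_latents` and the per-step noise maps `self.zs`, and returns a `LEditsPPInversionPipelineOutput` holding the resized inputs and their VAE reconstructions; a later pipeline call replays those cached latents under the editing prompts. A minimal invert-then-edit sketch, assuming the vendored class matches upstream diffusers' `LEditsPPPipelineStableDiffusionXL` and using a hypothetical input image path:

```py
import torch
from diffusers import LEditsPPPipelineStableDiffusionXL
from diffusers.utils import load_image

# Load the SDXL LEdits++ pipeline (upstream class; the diffusers3 copy in this
# repo is assumed to expose the same interface).
pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Hypothetical input path.
image = load_image("example_input.jpg").convert("RGB")

# invert() runs the loop shown above: it stores pipe.init_latents and pipe.zs
# and returns the resized input plus its VAE reconstruction.
_ = pipe.invert(image=image, num_inversion_steps=50, skip=0.2)

# The edit call then replays the cached latents while steering generation with
# the editing prompts.
edited = pipe(
    editing_prompt=["sunglasses"],
    reverse_editing_direction=[False],
    edit_guidance_scale=[7.0],
    edit_threshold=[0.9],
).images[0]
```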
+ +import html +import inspect +import math +import re +import urllib.parse as ul +from typing import List, Optional, Tuple, Union + +import torch +from transformers import AutoModel, AutoTokenizer + +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL +from ...models.embeddings import get_2d_rotary_pos_embed_lumina +from ...models.transformers.lumina_nextdit2d import LuminaNextDiT2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + BACKENDS_MAPPING, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import LuminaText2ImgPipeline + + >>> pipe = LuminaText2ImgPipeline.from_pretrained( + ... "Alpha-VLLM/Lumina-Next-SFT-diffusers", torch_dtype=torch.bfloat16 + ... ) + >>> # Enable memory optimizations. + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "Upper body of a young woman in a Victorian-era outfit with brass goggles and leather straps. Background shows an industrial revolution cityscape with smoky skies and tall, metal structures" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class LuminaText2ImgPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using Lumina-T2I. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`AutoModel`]): + Frozen text-encoder. Lumina-T2I uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.AutoModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/Alpha-VLLM/tree/main/t5-v1_1-xxl) variant. + tokenizer (`AutoModel`): + Tokenizer of class + [AutoModel](https://huggingface.co/docs/transformers/model_doc/t5#transformers.AutoModel). + transformer ([`Transformer2DModel`]): + A text conditioned `Transformer2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
+ """ + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = [] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + def __init__( + self, + transformer: LuminaNextDiT2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: AutoModel, + tokenizer: AutoTokenizer, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 8 + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.max_sequence_length = 256 + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + self.default_image_size = self.default_sample_size * self.vae_scale_factor + + def _get_gemma_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clean_caption: Optional[bool] = False, + max_length: Optional[int] = None, + ): + device = device or self._execution_device + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + pad_to_multiple_of=8, + max_length=self.max_sequence_length, + truncation=True, + padding=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids.to(device) + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.max_sequence_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because Gemma can only handle sequences up to" + f" {self.max_sequence_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = self.text_encoder( + text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True + ) + prompt_embeds = prompt_embeds.hidden_states[-2] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + prompt_attention_mask = prompt_attention_mask.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, prompt_attention_mask + + # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: 
Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + clean_caption: bool = False, + **kwargs, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + Lumina-T2I, this should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For Lumina-T2I, it's should be the embeddings of the "" string. + clean_caption (`bool`, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + max_sequence_length (`int`, defaults to 256): Maximum sequence length to use for the prompt. + """ + if device is None: + device = self._execution_device + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds( + prompt=prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + clean_caption=clean_caption, + ) + + # Get negative embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt if negative_prompt is not None else "" + + # Normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + # Padding negative prompt to the same length with prompt + prompt_max_length = prompt_embeds.shape[1] + negative_text_inputs = self.tokenizer( + negative_prompt, + padding="max_length", + max_length=prompt_max_length, + truncation=True, + return_tensors="pt", + ) + negative_text_input_ids = negative_text_inputs.input_ids.to(device) + negative_prompt_attention_mask = negative_text_inputs.attention_mask.to(device) + # Get the negative prompt embeddings + negative_prompt_embeds = self.text_encoder( + negative_text_input_ids, + attention_mask=negative_prompt_attention_mask, + output_hidden_states=True, + ) + + negative_dtype = self.text_encoder.dtype + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + _, seq_len, _ = negative_prompt_embeds.shape + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=negative_dtype, device=device) + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + negative_prompt_attention_mask = negative_prompt_attention_mask.view( + batch_size * num_images_per_prompt, -1 + ) + + return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: + raise ValueError( + "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" + f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" + f" {negative_prompt_attention_mask.shape}." + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = 
re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # normalize all quotation marks to one standard + caption = re.sub(r"[`´«»“”¨]", '"', caption) + caption = re.sub(r"[‘’]", "'", caption) + + # &quot; + caption = re.sub(r"&quot;?", "", caption) + # &amp + caption = re.sub(r"&amp", "", caption) + + # ip addresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + width: Optional[int] = None, + height: Optional[int] = None, + num_inference_steps: int = 30, + timesteps: List[int] = None, + guidance_scale: float = 4.0, + negative_prompt: Union[str, List[str]] = None, + sigmas: List[float] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + clean_caption: bool = True, + max_sequence_length: int = 256, + scaling_watershed: Optional[float] = 1.0, + proportional_attn: Optional[bool] = True, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 30): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For Lumina-T2I this negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + max_sequence_length (`int` defaults to 120): + Maximum sequence length to use with the `prompt`. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + ) + cross_attention_kwargs = {} + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if proportional_attn: + cross_attention_kwargs["base_sequence_length"] = (self.default_image_size // 16) ** 2 + + scaling_factor = math.sqrt(width * height / self.default_image_size**2) + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + clean_caption=clean_caption, + max_sequence_length=max_sequence_length, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds], dim=0) + prompt_attention_mask = torch.cat([prompt_attention_mask, negative_prompt_attention_mask], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor( + [current_timestep], + dtype=dtype, + device=latent_model_input.device, + ) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand(latent_model_input.shape[0]) + + # reverse the timestep since Lumina uses t=0 as the noise and t=1 as the image + current_timestep = 1 - current_timestep / self.scheduler.config.num_train_timesteps + + # prepare image_rotary_emb for positional encoding + # dynamic scaling_factor for different resolution. + # NOTE: For `Time-aware` denosing mechanism from Lumina-Next + # https://arxiv.org/abs/2406.18583, Sec 2.3 + # NOTE: We should compute different image_rotary_emb with different timestep. + if current_timestep[0] < scaling_watershed: + linear_factor = scaling_factor + ntk_factor = 1.0 + else: + linear_factor = 1.0 + ntk_factor = scaling_factor + image_rotary_emb = get_2d_rotary_pos_embed_lumina( + self.transformer.head_dim, + 384, + 384, + linear_factor=linear_factor, + ntk_factor=ntk_factor, + ) + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=current_timestep, + encoder_hidden_states=prompt_embeds, + encoder_mask=prompt_attention_mask, + image_rotary_emb=image_rotary_emb, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + noise_pred = noise_pred.chunk(2, dim=1)[0] + + # perform guidance scale + # NOTE: For exact reproducibility reasons, we apply classifier-free guidance on only + # three channels by default. The standard approach to cfg applies it to all channels. + # This can be done by uncommenting the following line and commenting-out the line following that. + # eps, rest = model_out[:, :self.in_channels], model_out[:, self.in_channels:] + if do_classifier_free_guidance: + noise_pred_eps, noise_pred_rest = noise_pred[:, :3], noise_pred[:, 3:] + noise_pred_cond_eps, noise_pred_uncond_eps = torch.split( + noise_pred_eps, len(noise_pred_eps) // 2, dim=0 + ) + noise_pred_half = noise_pred_uncond_eps + guidance_scale * ( + noise_pred_cond_eps - noise_pred_uncond_eps + ) + noise_pred_eps = torch.cat([noise_pred_half, noise_pred_half], dim=0) + + noise_pred = torch.cat([noise_pred_eps, noise_pred_rest], dim=1) + noise_pred, _ = noise_pred.chunk(2, dim=0) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + noise_pred = -noise_pred + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + progress_bar.update() + + if not output_type == "latent": + latents = latents / self.vae.config.scaling_factor + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + else: + image = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/marigold/__init__.py b/diffusers3/pipelines/marigold/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ae03adfc11d7cdf3c233a5a735483e95ff4a29 --- /dev/null +++ b/diffusers3/pipelines/marigold/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["marigold_image_processing"] = ["MarigoldImageProcessor"] + _import_structure["pipeline_marigold_depth"] = ["MarigoldDepthOutput", "MarigoldDepthPipeline"] + _import_structure["pipeline_marigold_normals"] = ["MarigoldNormalsOutput", "MarigoldNormalsPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .marigold_image_processing import MarigoldImageProcessor + from .pipeline_marigold_depth import MarigoldDepthOutput, MarigoldDepthPipeline + from .pipeline_marigold_normals import MarigoldNormalsOutput, MarigoldNormalsPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/marigold/marigold_image_processing.py b/diffusers3/pipelines/marigold/marigold_image_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..51b9983db6f6372aff58fb99e3fa8113ef4752e3 --- /dev/null +++ b/diffusers3/pipelines/marigold/marigold_image_processing.py @@ -0,0 +1,576 @@ +from typing import List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +import torch.nn.functional as F +from PIL import Image + +from ... 
import ConfigMixin +from ...configuration_utils import register_to_config +from ...image_processor import PipelineImageInput +from ...utils import CONFIG_NAME, logging +from ...utils.import_utils import is_matplotlib_available + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class MarigoldImageProcessor(ConfigMixin): + config_name = CONFIG_NAME + + @register_to_config + def __init__( + self, + vae_scale_factor: int = 8, + do_normalize: bool = True, + do_range_check: bool = True, + ): + super().__init__() + + @staticmethod + def expand_tensor_or_array(images: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]: + """ + Expand a tensor or array to a specified number of images. + """ + if isinstance(images, np.ndarray): + if images.ndim == 2: # [H,W] -> [1,H,W,1] + images = images[None, ..., None] + if images.ndim == 3: # [H,W,C] -> [1,H,W,C] + images = images[None] + elif isinstance(images, torch.Tensor): + if images.ndim == 2: # [H,W] -> [1,1,H,W] + images = images[None, None] + elif images.ndim == 3: # [1,H,W] -> [1,1,H,W] + images = images[None] + else: + raise ValueError(f"Unexpected input type: {type(images)}") + return images + + @staticmethod + def pt_to_numpy(images: torch.Tensor) -> np.ndarray: + """ + Convert a PyTorch tensor to a NumPy image. + """ + images = images.cpu().permute(0, 2, 3, 1).float().numpy() + return images + + @staticmethod + def numpy_to_pt(images: np.ndarray) -> torch.Tensor: + """ + Convert a NumPy image to a PyTorch tensor. + """ + if np.issubdtype(images.dtype, np.integer) and not np.issubdtype(images.dtype, np.unsignedinteger): + raise ValueError(f"Input image dtype={images.dtype} cannot be a signed integer.") + if np.issubdtype(images.dtype, np.complexfloating): + raise ValueError(f"Input image dtype={images.dtype} cannot be complex.") + if np.issubdtype(images.dtype, bool): + raise ValueError(f"Input image dtype={images.dtype} cannot be boolean.") + + images = torch.from_numpy(images.transpose(0, 3, 1, 2)) + return images + + @staticmethod + def resize_antialias( + image: torch.Tensor, size: Tuple[int, int], mode: str, is_aa: Optional[bool] = None + ) -> torch.Tensor: + if not torch.is_tensor(image): + raise ValueError(f"Invalid input type={type(image)}.") + if not torch.is_floating_point(image): + raise ValueError(f"Invalid input dtype={image.dtype}.") + if image.dim() != 4: + raise ValueError(f"Invalid input dimensions; shape={image.shape}.") + + antialias = is_aa and mode in ("bilinear", "bicubic") + image = F.interpolate(image, size, mode=mode, antialias=antialias) + + return image + + @staticmethod + def resize_to_max_edge(image: torch.Tensor, max_edge_sz: int, mode: str) -> torch.Tensor: + if not torch.is_tensor(image): + raise ValueError(f"Invalid input type={type(image)}.") + if not torch.is_floating_point(image): + raise ValueError(f"Invalid input dtype={image.dtype}.") + if image.dim() != 4: + raise ValueError(f"Invalid input dimensions; shape={image.shape}.") + + h, w = image.shape[-2:] + max_orig = max(h, w) + new_h = h * max_edge_sz // max_orig + new_w = w * max_edge_sz // max_orig + + if new_h == 0 or new_w == 0: + raise ValueError(f"Extreme aspect ratio of the input image: [{w} x {h}]") + + image = MarigoldImageProcessor.resize_antialias(image, (new_h, new_w), mode, is_aa=True) + + return image + + @staticmethod + def pad_image(image: torch.Tensor, align: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + if not torch.is_tensor(image): + raise ValueError(f"Invalid input type={type(image)}.") + if 
not torch.is_floating_point(image): + raise ValueError(f"Invalid input dtype={image.dtype}.") + if image.dim() != 4: + raise ValueError(f"Invalid input dimensions; shape={image.shape}.") + + h, w = image.shape[-2:] + ph, pw = -h % align, -w % align + + image = F.pad(image, (0, pw, 0, ph), mode="replicate") + + return image, (ph, pw) + + @staticmethod + def unpad_image(image: torch.Tensor, padding: Tuple[int, int]) -> torch.Tensor: + if not torch.is_tensor(image): + raise ValueError(f"Invalid input type={type(image)}.") + if not torch.is_floating_point(image): + raise ValueError(f"Invalid input dtype={image.dtype}.") + if image.dim() != 4: + raise ValueError(f"Invalid input dimensions; shape={image.shape}.") + + ph, pw = padding + uh = None if ph == 0 else -ph + uw = None if pw == 0 else -pw + + image = image[:, :, :uh, :uw] + + return image + + @staticmethod + def load_image_canonical( + image: Union[torch.Tensor, np.ndarray, Image.Image], + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.float32, + ) -> Tuple[torch.Tensor, int]: + if isinstance(image, Image.Image): + image = np.array(image) + + image_dtype_max = None + if isinstance(image, (np.ndarray, torch.Tensor)): + image = MarigoldImageProcessor.expand_tensor_or_array(image) + if image.ndim != 4: + raise ValueError("Input image is not 2-, 3-, or 4-dimensional.") + if isinstance(image, np.ndarray): + if np.issubdtype(image.dtype, np.integer) and not np.issubdtype(image.dtype, np.unsignedinteger): + raise ValueError(f"Input image dtype={image.dtype} cannot be a signed integer.") + if np.issubdtype(image.dtype, np.complexfloating): + raise ValueError(f"Input image dtype={image.dtype} cannot be complex.") + if np.issubdtype(image.dtype, bool): + raise ValueError(f"Input image dtype={image.dtype} cannot be boolean.") + if np.issubdtype(image.dtype, np.unsignedinteger): + image_dtype_max = np.iinfo(image.dtype).max + image = image.astype(np.float32) # because torch does not have unsigned dtypes beyond torch.uint8 + image = MarigoldImageProcessor.numpy_to_pt(image) + + if torch.is_tensor(image) and not torch.is_floating_point(image) and image_dtype_max is None: + if image.dtype != torch.uint8: + raise ValueError(f"Image dtype={image.dtype} is not supported.") + image_dtype_max = 255 + + if not torch.is_tensor(image): + raise ValueError(f"Input type unsupported: {type(image)}.") + + if image.shape[1] == 1: + image = image.repeat(1, 3, 1, 1) # [N,1,H,W] -> [N,3,H,W] + if image.shape[1] != 3: + raise ValueError(f"Input image is not 1- or 3-channel: {image.shape}.") + + image = image.to(device=device, dtype=dtype) + + if image_dtype_max is not None: + image = image / image_dtype_max + + return image + + @staticmethod + def check_image_values_range(image: torch.Tensor) -> None: + if not torch.is_tensor(image): + raise ValueError(f"Invalid input type={type(image)}.") + if not torch.is_floating_point(image): + raise ValueError(f"Invalid input dtype={image.dtype}.") + if image.min().item() < 0.0 or image.max().item() > 1.0: + raise ValueError("Input image data is partially outside of the [0,1] range.") + + def preprocess( + self, + image: PipelineImageInput, + processing_resolution: Optional[int] = None, + resample_method_input: str = "bilinear", + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.float32, + ): + if isinstance(image, list): + images = None + for i, img in enumerate(image): + img = self.load_image_canonical(img, device, dtype) # [N,3,H,W] + if images is None: + images = img + else: + 
if images.shape[2:] != img.shape[2:]: + raise ValueError( + f"Input image[{i}] has incompatible dimensions {img.shape[2:]} with the previous images " + f"{images.shape[2:]}" + ) + images = torch.cat((images, img), dim=0) + image = images + del images + else: + image = self.load_image_canonical(image, device, dtype) # [N,3,H,W] + + original_resolution = image.shape[2:] + + if self.config.do_range_check: + self.check_image_values_range(image) + + if self.config.do_normalize: + image = image * 2.0 - 1.0 + + if processing_resolution is not None and processing_resolution > 0: + image = self.resize_to_max_edge(image, processing_resolution, resample_method_input) # [N,3,PH,PW] + + image, padding = self.pad_image(image, self.config.vae_scale_factor) # [N,3,PPH,PPW] + + return image, padding, original_resolution + + @staticmethod + def colormap( + image: Union[np.ndarray, torch.Tensor], + cmap: str = "Spectral", + bytes: bool = False, + _force_method: Optional[str] = None, + ) -> Union[np.ndarray, torch.Tensor]: + """ + Converts a monochrome image into an RGB image by applying the specified colormap. This function mimics the + behavior of matplotlib.colormaps, but allows the user to use the most discriminative color maps ("Spectral", + "binary") without having to install or import matplotlib. For all other cases, the function will attempt to use + the native implementation. + + Args: + image: 2D tensor of values between 0 and 1, either as np.ndarray or torch.Tensor. + cmap: Colormap name. + bytes: Whether to return the output as uint8 or floating point image. + _force_method: + Can be used to specify whether to use the native implementation (`"matplotlib"`), the efficient custom + implementation of the select color maps (`"custom"`), or rely on autodetection (`None`, default). + + Returns: + An RGB-colorized tensor corresponding to the input image. 
+ """ + if not (torch.is_tensor(image) or isinstance(image, np.ndarray)): + raise ValueError("Argument must be a numpy array or torch tensor.") + if _force_method not in (None, "matplotlib", "custom"): + raise ValueError("_force_method must be either `None`, `'matplotlib'` or `'custom'`.") + + supported_cmaps = { + "binary": [ + (1.0, 1.0, 1.0), + (0.0, 0.0, 0.0), + ], + "Spectral": [ # Taken from matplotlib/_cm.py + (0.61960784313725492, 0.003921568627450980, 0.25882352941176473), # 0.0 -> [0] + (0.83529411764705885, 0.24313725490196078, 0.30980392156862746), + (0.95686274509803926, 0.42745098039215684, 0.2627450980392157), + (0.99215686274509807, 0.68235294117647061, 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196, 0.54509803921568623), + (1.0, 1.0, 0.74901960784313726), + (0.90196078431372551, 0.96078431372549022, 0.59607843137254901), + (0.6705882352941176, 0.8666666666666667, 0.64313725490196083), + (0.4, 0.76078431372549016, 0.6470588235294118), + (0.19607843137254902, 0.53333333333333333, 0.74117647058823533), + (0.36862745098039218, 0.30980392156862746, 0.63529411764705879), # 1.0 -> [K-1] + ], + } + + def method_matplotlib(image, cmap, bytes=False): + if is_matplotlib_available(): + import matplotlib + else: + return None + + arg_is_pt, device = torch.is_tensor(image), None + if arg_is_pt: + image, device = image.cpu().numpy(), image.device + + if cmap not in matplotlib.colormaps: + raise ValueError( + f"Unexpected color map {cmap}; available options are: {', '.join(list(matplotlib.colormaps.keys()))}" + ) + + cmap = matplotlib.colormaps[cmap] + out = cmap(image, bytes=bytes) # [?,4] + out = out[..., :3] # [?,3] + + if arg_is_pt: + out = torch.tensor(out, device=device) + + return out + + def method_custom(image, cmap, bytes=False): + arg_is_np = isinstance(image, np.ndarray) + if arg_is_np: + image = torch.tensor(image) + if image.dtype == torch.uint8: + image = image.float() / 255 + else: + image = image.float() + + is_cmap_reversed = cmap.endswith("_r") + if is_cmap_reversed: + cmap = cmap[:-2] + + if cmap not in supported_cmaps: + raise ValueError( + f"Only {list(supported_cmaps.keys())} color maps are available without installing matplotlib." 
+ ) + + cmap = supported_cmaps[cmap] + if is_cmap_reversed: + cmap = cmap[::-1] + cmap = torch.tensor(cmap, dtype=torch.float, device=image.device) # [K,3] + K = cmap.shape[0] + + pos = image.clamp(min=0, max=1) * (K - 1) + left = pos.long() + right = (left + 1).clamp(max=K - 1) + + d = (pos - left.float()).unsqueeze(-1) + left_colors = cmap[left] + right_colors = cmap[right] + + out = (1 - d) * left_colors + d * right_colors + + if bytes: + out = (out * 255).to(torch.uint8) + + if arg_is_np: + out = out.numpy() + + return out + + if _force_method is None and torch.is_tensor(image) and cmap == "Spectral": + return method_custom(image, cmap, bytes) + + out = None + if _force_method != "custom": + out = method_matplotlib(image, cmap, bytes) + + if _force_method == "matplotlib" and out is None: + raise ImportError("Make sure to install matplotlib if you want to use a color map other than 'Spectral'.") + + if out is None: + out = method_custom(image, cmap, bytes) + + return out + + @staticmethod + def visualize_depth( + depth: Union[ + PIL.Image.Image, + np.ndarray, + torch.Tensor, + List[PIL.Image.Image], + List[np.ndarray], + List[torch.Tensor], + ], + val_min: float = 0.0, + val_max: float = 1.0, + color_map: str = "Spectral", + ) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: + """ + Visualizes depth maps, such as predictions of the `MarigoldDepthPipeline`. + + Args: + depth (`Union[PIL.Image.Image, np.ndarray, torch.Tensor, List[PIL.Image.Image], List[np.ndarray], + List[torch.Tensor]]`): Depth maps. + val_min (`float`, *optional*, defaults to `0.0`): Minimum value of the visualized depth range. + val_max (`float`, *optional*, defaults to `1.0`): Maximum value of the visualized depth range. + color_map (`str`, *optional*, defaults to `"Spectral"`): Color map used to convert a single-channel + depth prediction into colored representation. + + Returns: `PIL.Image.Image` or `List[PIL.Image.Image]` with depth maps visualization. 
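A hedged usage sketch of `visualize_depth` on a stand-in prediction batch; the random data and import path are assumptions for illustration only:

```py
# Hedged sketch: colorize a synthetic [N,1,H,W] depth batch (stand-in for a pipeline prediction).
import torch
from diffusers3.pipelines.marigold.marigold_image_processing import MarigoldImageProcessor

depth = torch.rand(1, 1, 480, 640)                      # values already in [0, 1]
vis = MarigoldImageProcessor.visualize_depth(depth)     # -> list of PIL.Image.Image
vis[0].save("depth_vis.png")
```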
+ """ + if val_max <= val_min: + raise ValueError(f"Invalid values range: [{val_min}, {val_max}].") + + def visualize_depth_one(img, idx=None): + prefix = "Depth" + (f"[{idx}]" if idx else "") + if isinstance(img, PIL.Image.Image): + if img.mode != "I;16": + raise ValueError(f"{prefix}: invalid PIL mode={img.mode}.") + img = np.array(img).astype(np.float32) / (2**16 - 1) + if isinstance(img, np.ndarray) or torch.is_tensor(img): + if img.ndim != 2: + raise ValueError(f"{prefix}: unexpected shape={img.shape}.") + if isinstance(img, np.ndarray): + img = torch.from_numpy(img) + if not torch.is_floating_point(img): + raise ValueError(f"{prefix}: unexected dtype={img.dtype}.") + else: + raise ValueError(f"{prefix}: unexpected type={type(img)}.") + if val_min != 0.0 or val_max != 1.0: + img = (img - val_min) / (val_max - val_min) + img = MarigoldImageProcessor.colormap(img, cmap=color_map, bytes=True) # [H,W,3] + img = PIL.Image.fromarray(img.cpu().numpy()) + return img + + if depth is None or isinstance(depth, list) and any(o is None for o in depth): + raise ValueError("Input depth is `None`") + if isinstance(depth, (np.ndarray, torch.Tensor)): + depth = MarigoldImageProcessor.expand_tensor_or_array(depth) + if isinstance(depth, np.ndarray): + depth = MarigoldImageProcessor.numpy_to_pt(depth) # [N,H,W,1] -> [N,1,H,W] + if not (depth.ndim == 4 and depth.shape[1] == 1): # [N,1,H,W] + raise ValueError(f"Unexpected input shape={depth.shape}, expecting [N,1,H,W].") + return [visualize_depth_one(img[0], idx) for idx, img in enumerate(depth)] + elif isinstance(depth, list): + return [visualize_depth_one(img, idx) for idx, img in enumerate(depth)] + else: + raise ValueError(f"Unexpected input type: {type(depth)}") + + @staticmethod + def export_depth_to_16bit_png( + depth: Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]], + val_min: float = 0.0, + val_max: float = 1.0, + ) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: + def export_depth_to_16bit_png_one(img, idx=None): + prefix = "Depth" + (f"[{idx}]" if idx else "") + if not isinstance(img, np.ndarray) and not torch.is_tensor(img): + raise ValueError(f"{prefix}: unexpected type={type(img)}.") + if img.ndim != 2: + raise ValueError(f"{prefix}: unexpected shape={img.shape}.") + if torch.is_tensor(img): + img = img.cpu().numpy() + if not np.issubdtype(img.dtype, np.floating): + raise ValueError(f"{prefix}: unexected dtype={img.dtype}.") + if val_min != 0.0 or val_max != 1.0: + img = (img - val_min) / (val_max - val_min) + img = (img * (2**16 - 1)).astype(np.uint16) + img = PIL.Image.fromarray(img, mode="I;16") + return img + + if depth is None or isinstance(depth, list) and any(o is None for o in depth): + raise ValueError("Input depth is `None`") + if isinstance(depth, (np.ndarray, torch.Tensor)): + depth = MarigoldImageProcessor.expand_tensor_or_array(depth) + if isinstance(depth, np.ndarray): + depth = MarigoldImageProcessor.numpy_to_pt(depth) # [N,H,W,1] -> [N,1,H,W] + if not (depth.ndim == 4 and depth.shape[1] == 1): + raise ValueError(f"Unexpected input shape={depth.shape}, expecting [N,1,H,W].") + return [export_depth_to_16bit_png_one(img[0], idx) for idx, img in enumerate(depth)] + elif isinstance(depth, list): + return [export_depth_to_16bit_png_one(img, idx) for idx, img in enumerate(depth)] + else: + raise ValueError(f"Unexpected input type: {type(depth)}") + + @staticmethod + def visualize_normals( + normals: Union[ + np.ndarray, + torch.Tensor, + List[np.ndarray], + List[torch.Tensor], + ], + flip_x: bool = False, + 
flip_y: bool = False, + flip_z: bool = False, + ) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: + """ + Visualizes surface normals, such as predictions of the `MarigoldNormalsPipeline`. + + Args: + normals (`Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]`): + Surface normals. + flip_x (`bool`, *optional*, defaults to `False`): Flips the X axis of the normals frame of reference. + Default direction is right. + flip_y (`bool`, *optional*, defaults to `False`): Flips the Y axis of the normals frame of reference. + Default direction is top. + flip_z (`bool`, *optional*, defaults to `False`): Flips the Z axis of the normals frame of reference. + Default direction is facing the observer. + + Returns: `PIL.Image.Image` or `List[PIL.Image.Image]` with surface normals visualization. + """ + flip_vec = None + if any((flip_x, flip_y, flip_z)): + flip_vec = torch.tensor( + [ + (-1) ** flip_x, + (-1) ** flip_y, + (-1) ** flip_z, + ], + dtype=torch.float32, + ) + + def visualize_normals_one(img, idx=None): + img = img.permute(1, 2, 0) + if flip_vec is not None: + img *= flip_vec.to(img.device) + img = (img + 1.0) * 0.5 + img = (img * 255).to(dtype=torch.uint8, device="cpu").numpy() + img = PIL.Image.fromarray(img) + return img + + if normals is None or isinstance(normals, list) and any(o is None for o in normals): + raise ValueError("Input normals is `None`") + if isinstance(normals, (np.ndarray, torch.Tensor)): + normals = MarigoldImageProcessor.expand_tensor_or_array(normals) + if isinstance(normals, np.ndarray): + normals = MarigoldImageProcessor.numpy_to_pt(normals) # [N,3,H,W] + if not (normals.ndim == 4 and normals.shape[1] == 3): + raise ValueError(f"Unexpected input shape={normals.shape}, expecting [N,3,H,W].") + return [visualize_normals_one(img, idx) for idx, img in enumerate(normals)] + elif isinstance(normals, list): + return [visualize_normals_one(img, idx) for idx, img in enumerate(normals)] + else: + raise ValueError(f"Unexpected input type: {type(normals)}") + + @staticmethod + def visualize_uncertainty( + uncertainty: Union[ + np.ndarray, + torch.Tensor, + List[np.ndarray], + List[torch.Tensor], + ], + saturation_percentile=95, + ) -> Union[PIL.Image.Image, List[PIL.Image.Image]]: + """ + Visualizes dense uncertainties, such as produced by `MarigoldDepthPipeline` or `MarigoldNormalsPipeline`. + + Args: + uncertainty (`Union[np.ndarray, torch.Tensor, List[np.ndarray], List[torch.Tensor]]`): + Uncertainty maps. + saturation_percentile (`int`, *optional*, defaults to `95`): + Specifies the percentile uncertainty value visualized with maximum intensity. + + Returns: `PIL.Image.Image` or `List[PIL.Image.Image]` with uncertainty visualization. 
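Similarly, a hedged sketch of `visualize_uncertainty` on synthetic non-negative values (data and import path are illustrative assumptions):

```py
# Hedged sketch: visualize a synthetic [N,1,H,W] uncertainty map (values must be non-negative).
import torch
from diffusers3.pipelines.marigold.marigold_image_processing import MarigoldImageProcessor

uncertainty = torch.rand(1, 1, 480, 640) * 0.2          # stand-in for an ensemble uncertainty map
vis = MarigoldImageProcessor.visualize_uncertainty(uncertainty, saturation_percentile=95)
vis[0].save("uncertainty_vis.png")
```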
+ """ + + def visualize_uncertainty_one(img, idx=None): + prefix = "Uncertainty" + (f"[{idx}]" if idx else "") + if img.min() < 0: + raise ValueError(f"{prefix}: unexected data range, min={img.min()}.") + img = img.squeeze(0).cpu().numpy() + saturation_value = np.percentile(img, saturation_percentile) + img = np.clip(img * 255 / saturation_value, 0, 255) + img = img.astype(np.uint8) + img = PIL.Image.fromarray(img) + return img + + if uncertainty is None or isinstance(uncertainty, list) and any(o is None for o in uncertainty): + raise ValueError("Input uncertainty is `None`") + if isinstance(uncertainty, (np.ndarray, torch.Tensor)): + uncertainty = MarigoldImageProcessor.expand_tensor_or_array(uncertainty) + if isinstance(uncertainty, np.ndarray): + uncertainty = MarigoldImageProcessor.numpy_to_pt(uncertainty) # [N,1,H,W] + if not (uncertainty.ndim == 4 and uncertainty.shape[1] == 1): + raise ValueError(f"Unexpected input shape={uncertainty.shape}, expecting [N,1,H,W].") + return [visualize_uncertainty_one(img, idx) for idx, img in enumerate(uncertainty)] + elif isinstance(uncertainty, list): + return [visualize_uncertainty_one(img, idx) for idx, img in enumerate(uncertainty)] + else: + raise ValueError(f"Unexpected input type: {type(uncertainty)}") diff --git a/diffusers3/pipelines/marigold/pipeline_marigold_depth.py b/diffusers3/pipelines/marigold/pipeline_marigold_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..a602ba611ea5a860f783aae7a789b7c90f417667 --- /dev/null +++ b/diffusers3/pipelines/marigold/pipeline_marigold_depth.py @@ -0,0 +1,813 @@ +# Copyright 2024 Marigold authors, PRS ETH Zurich. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# -------------------------------------------------------------------------- +# More information and citation instructions are available on the +# Marigold project website: https://marigoldmonodepth.github.io +# -------------------------------------------------------------------------- +from dataclasses import dataclass +from functools import partial +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL import Image +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +from ...image_processor import PipelineImageInput +from ...models import ( + AutoencoderKL, + UNet2DConditionModel, +) +from ...schedulers import ( + DDIMScheduler, + LCMScheduler, +) +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.import_utils import is_scipy_available +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .marigold_image_processing import MarigoldImageProcessor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ +Examples: +```py +>>> import diffusers +>>> import torch + +>>> pipe = diffusers.MarigoldDepthPipeline.from_pretrained( +... "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16 +... ).to("cuda") + +>>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg") +>>> depth = pipe(image) + +>>> vis = pipe.image_processor.visualize_depth(depth.prediction) +>>> vis[0].save("einstein_depth.png") + +>>> depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction) +>>> depth_16bit[0].save("einstein_depth_16bit.png") +``` +""" + + +@dataclass +class MarigoldDepthOutput(BaseOutput): + """ + Output class for Marigold monocular depth prediction pipeline. + + Args: + prediction (`np.ndarray`, `torch.Tensor`): + Predicted depth maps with values in the range [0, 1]. The shape is always $numimages \times 1 \times height + \times width$, regardless of whether the images were passed as a 4D array or a list. + uncertainty (`None`, `np.ndarray`, `torch.Tensor`): + Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is $numimages + \times 1 \times height \times width$. + latent (`None`, `torch.Tensor`): + Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline. + The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$. + """ + + prediction: Union[np.ndarray, torch.Tensor] + uncertainty: Union[None, np.ndarray, torch.Tensor] + latent: Union[None, torch.Tensor] + + +class MarigoldDepthPipeline(DiffusionPipeline): + """ + Pipeline for monocular depth estimation using the Marigold method: https://marigoldmonodepth.github.io. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + unet (`UNet2DConditionModel`): + Conditional U-Net to denoise the depth latent, conditioned on image latent. + vae (`AutoencoderKL`): + Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent + representations. + scheduler (`DDIMScheduler` or `LCMScheduler`): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
+ text_encoder (`CLIPTextModel`): + Text-encoder, for empty text embedding. + tokenizer (`CLIPTokenizer`): + CLIP tokenizer. + prediction_type (`str`, *optional*): + Type of predictions made by the model. + scale_invariant (`bool`, *optional*): + A model property specifying whether the predicted depth maps are scale-invariant. This value must be set in + the model config. When used together with the `shift_invariant=True` flag, the model is also called + "affine-invariant". NB: overriding this value is not supported. + shift_invariant (`bool`, *optional*): + A model property specifying whether the predicted depth maps are shift-invariant. This value must be set in + the model config. When used together with the `scale_invariant=True` flag, the model is also called + "affine-invariant". NB: overriding this value is not supported. + default_denoising_steps (`int`, *optional*): + The minimum number of denoising diffusion steps that are required to produce a prediction of reasonable + quality with the given model. This value must be set in the model config. When the pipeline is called + without explicitly setting `num_inference_steps`, the default value is used. This is required to ensure + reasonable results with various model flavors compatible with the pipeline, such as those relying on very + short denoising schedules (`LCMScheduler`) and those with full diffusion schedules (`DDIMScheduler`). + default_processing_resolution (`int`, *optional*): + The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in + the model config. When the pipeline is called without explicitly setting `processing_resolution`, the + default value is used. This is required to ensure reasonable results with various model flavors trained + with varying optimal processing resolution values. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + supported_prediction_types = ("depth", "disparity") + + def __init__( + self, + unet: UNet2DConditionModel, + vae: AutoencoderKL, + scheduler: Union[DDIMScheduler, LCMScheduler], + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + prediction_type: Optional[str] = None, + scale_invariant: Optional[bool] = True, + shift_invariant: Optional[bool] = True, + default_denoising_steps: Optional[int] = None, + default_processing_resolution: Optional[int] = None, + ): + super().__init__() + + if prediction_type not in self.supported_prediction_types: + logger.warning( + f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: " + f"{self.supported_prediction_types}." 
+ ) + + self.register_modules( + unet=unet, + vae=vae, + scheduler=scheduler, + text_encoder=text_encoder, + tokenizer=tokenizer, + ) + self.register_to_config( + prediction_type=prediction_type, + scale_invariant=scale_invariant, + shift_invariant=shift_invariant, + default_denoising_steps=default_denoising_steps, + default_processing_resolution=default_processing_resolution, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + self.scale_invariant = scale_invariant + self.shift_invariant = shift_invariant + self.default_denoising_steps = default_denoising_steps + self.default_processing_resolution = default_processing_resolution + + self.empty_text_embedding = None + + self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def check_inputs( + self, + image: PipelineImageInput, + num_inference_steps: int, + ensemble_size: int, + processing_resolution: int, + resample_method_input: str, + resample_method_output: str, + batch_size: int, + ensembling_kwargs: Optional[Dict[str, Any]], + latents: Optional[torch.Tensor], + generator: Optional[Union[torch.Generator, List[torch.Generator]]], + output_type: str, + output_uncertainty: bool, + ) -> int: + if num_inference_steps is None: + raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.") + if num_inference_steps < 1: + raise ValueError("`num_inference_steps` must be positive.") + if ensemble_size < 1: + raise ValueError("`ensemble_size` must be positive.") + if ensemble_size == 2: + logger.warning( + "`ensemble_size` == 2 results are similar to no ensembling (1); " + "consider increasing the value to at least 3." + ) + if ensemble_size > 1 and (self.scale_invariant or self.shift_invariant) and not is_scipy_available(): + raise ImportError("Make sure to install scipy if you want to use ensembling.") + if ensemble_size == 1 and output_uncertainty: + raise ValueError( + "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` " + "greater than 1." + ) + if processing_resolution is None: + raise ValueError( + "`processing_resolution` is not specified and could not be resolved from the model config." + ) + if processing_resolution < 0: + raise ValueError( + "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for " + "downsampled processing." + ) + if processing_resolution % self.vae_scale_factor != 0: + raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.") + if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): + raise ValueError( + "`resample_method_input` takes string values compatible with PIL library: " + "nearest, nearest-exact, bilinear, bicubic, area." + ) + if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): + raise ValueError( + "`resample_method_output` takes string values compatible with PIL library: " + "nearest, nearest-exact, bilinear, bicubic, area." 
+ ) + if batch_size < 1: + raise ValueError("`batch_size` must be positive.") + if output_type not in ["pt", "np"]: + raise ValueError("`output_type` must be one of `pt` or `np`.") + if latents is not None and generator is not None: + raise ValueError("`latents` and `generator` cannot be used together.") + if ensembling_kwargs is not None: + if not isinstance(ensembling_kwargs, dict): + raise ValueError("`ensembling_kwargs` must be a dictionary.") + if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("mean", "median"): + raise ValueError("`ensembling_kwargs['reduction']` can be either `'mean'` or `'median'`.") + + # image checks + num_images = 0 + W, H = None, None + if not isinstance(image, list): + image = [image] + for i, img in enumerate(image): + if isinstance(img, np.ndarray) or torch.is_tensor(img): + if img.ndim not in (2, 3, 4): + raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.") + H_i, W_i = img.shape[-2:] + N_i = 1 + if img.ndim == 4: + N_i = img.shape[0] + elif isinstance(img, Image.Image): + W_i, H_i = img.size + N_i = 1 + else: + raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.") + if W is None: + W, H = W_i, H_i + elif (W, H) != (W_i, H_i): + raise ValueError( + f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}" + ) + num_images += N_i + + # latents checks + if latents is not None: + if not torch.is_tensor(latents): + raise ValueError("`latents` must be a torch.Tensor.") + if latents.dim() != 4: + raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.") + + if processing_resolution > 0: + max_orig = max(H, W) + new_H = H * processing_resolution // max_orig + new_W = W * processing_resolution // max_orig + if new_H == 0 or new_W == 0: + raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]") + W, H = new_W, new_H + w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor + h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor + shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w) + + if latents.shape != shape_expected: + raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.") + + # generator checks + if generator is not None: + if isinstance(generator, list): + if len(generator) != num_images * ensemble_size: + raise ValueError( + "The number of generators must match the total number of ensemble members for all input images." + ) + if not all(g.device.type == generator[0].device.type for g in generator): + raise ValueError("`generator` device placement is not consistent in the list.") + elif not isinstance(generator, torch.Generator): + raise ValueError(f"Unsupported generator type: {type(generator)}.") + + return num_images + + def progress_bar(self, iterable=None, total=None, desc=None, leave=True): + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + elif not isinstance(self._progress_bar_config, dict): + raise ValueError( + f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." 
+ ) + + progress_bar_config = dict(**self._progress_bar_config) + progress_bar_config["desc"] = progress_bar_config.get("desc", desc) + progress_bar_config["leave"] = progress_bar_config.get("leave", leave) + if iterable is not None: + return tqdm(iterable, **progress_bar_config) + elif total is not None: + return tqdm(total=total, **progress_bar_config) + else: + raise ValueError("Either `total` or `iterable` has to be defined.") + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: PipelineImageInput, + num_inference_steps: Optional[int] = None, + ensemble_size: int = 1, + processing_resolution: Optional[int] = None, + match_input_resolution: bool = True, + resample_method_input: str = "bilinear", + resample_method_output: str = "bilinear", + batch_size: int = 1, + ensembling_kwargs: Optional[Dict[str, Any]] = None, + latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: str = "np", + output_uncertainty: bool = False, + output_latent: bool = False, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline. + + Args: + image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`), + `List[torch.Tensor]`: An input image or images used as an input for the depth estimation task. For + arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible + by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or + three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the + same width and height. + num_inference_steps (`int`, *optional*, defaults to `None`): + Number of denoising diffusion steps during inference. The default value `None` results in automatic + selection. The number of steps should be at least 10 with the full Marigold models, and between 1 and 4 + for Marigold-LCM models. + ensemble_size (`int`, defaults to `1`): + Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for + faster inference. + processing_resolution (`int`, *optional*, defaults to `None`): + Effective processing resolution. When set to `0`, matches the larger input image dimension. This + produces crisper predictions, but may also lead to the overall loss of global context. The default + value `None` resolves to the optimal value from the model config. + match_input_resolution (`bool`, *optional*, defaults to `True`): + When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer + side of the output will equal to `processing_resolution`. + resample_method_input (`str`, *optional*, defaults to `"bilinear"`): + Resampling method used to resize input images to `processing_resolution`. The accepted values are: + `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. + resample_method_output (`str`, *optional*, defaults to `"bilinear"`): + Resampling method used to resize output predictions to match the input resolution. The accepted values + are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. + batch_size (`int`, *optional*, defaults to `1`): + Batch size; only matters when setting `ensemble_size` or passing a tensor of images. + ensembling_kwargs (`dict`, *optional*, defaults to `None`) + Extra dictionary with arguments for precise ensembling control. 
The following options are available: + - reduction (`str`, *optional*, defaults to `"median"`): Defines the ensembling function applied in + every pixel location, can be either `"median"` or `"mean"`. + - regularizer_strength (`float`, *optional*, defaults to `0.02`): Strength of the regularizer that + pulls the aligned predictions to the unit range from 0 to 1. + - max_iter (`int`, *optional*, defaults to `2`): Maximum number of the alignment solver steps. Refer to + `scipy.optimize.minimize` function, `options` argument. + - tol (`float`, *optional*, defaults to `1e-3`): Alignment solver tolerance. The solver stops when the + tolerance is reached. + - max_res (`int`, *optional*, defaults to `None`): Resolution at which the alignment is performed; + `None` matches the `processing_resolution`. + latents (`torch.Tensor`, or `List[torch.Tensor]`, *optional*, defaults to `None`): + Latent noise tensors to replace the random initialization. These can be taken from the previous + function call's output. + generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`): + Random number generator object to ensure reproducibility. + output_type (`str`, *optional*, defaults to `"np"`): + Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted + values are: `"np"` (numpy array) or `"pt"` (torch tensor). + output_uncertainty (`bool`, *optional*, defaults to `False`): + When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that + the `ensemble_size` argument is set to a value above 2. + output_latent (`bool`, *optional*, defaults to `False`): + When enabled, the output's `latent` field contains the latent codes corresponding to the predictions + within the ensemble. These codes can be saved, modified, and used for subsequent calls with the + `latents` argument. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.marigold.MarigoldDepthOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.marigold.MarigoldDepthOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.marigold.MarigoldDepthOutput`] is returned, otherwise a + `tuple` is returned where the first element is the prediction, the second element is the uncertainty + (or `None`), and the third is the latent (or `None`). + """ + + # 0. Resolving variables. + device = self._execution_device + dtype = self.dtype + + # Model-specific optimal default values leading to fast and reasonable results. + if num_inference_steps is None: + num_inference_steps = self.default_denoising_steps + if processing_resolution is None: + processing_resolution = self.default_processing_resolution + + # 1. Check inputs. + num_images = self.check_inputs( + image, + num_inference_steps, + ensemble_size, + processing_resolution, + resample_method_input, + resample_method_output, + batch_size, + ensembling_kwargs, + latents, + generator, + output_type, + output_uncertainty, + ) + + # 2. Prepare empty text conditioning. + # Model invocation: self.tokenizer, self.text_encoder. + if self.empty_text_embedding is None: + prompt = "" + text_inputs = self.tokenizer( + prompt, + padding="do_not_pad", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024] + + # 3. Preprocess input images. 
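As an editor's aside, the `__call__` arguments documented above typically combine as in the following hedged invocation sketch; the checkpoint id is taken from the example docstring earlier in this file, ensembling requires scipy, and exposing the class from this module path is an assumption based on the diff layout:

```py
# Hedged sketch: an ensembled depth call with an uncertainty map, per the argument docs above.
import torch
from diffusers3.pipelines.marigold.pipeline_marigold_depth import MarigoldDepthPipeline
from diffusers.utils import load_image

pipe = MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")
image = load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

out = pipe(
    image,
    ensemble_size=5,                            # >= 3 recommended when ensembling (needs scipy)
    ensembling_kwargs={"reduction": "median"},
    output_uncertainty=True,
    output_type="np",
)
print(out.prediction.shape, out.uncertainty.shape)      # each (1, H, W, 1)
```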
This function loads input image or images of compatible dimensions `(H, W)`, + # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where + # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are + # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None` + # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of + # operation and leads to the most reasonable results. Using the native image resolution or any other processing + # resolution can lead to loss of either fine details or global context in the output predictions. + image, padding, original_resolution = self.image_processor.preprocess( + image, processing_resolution, resample_method_input, device, dtype + ) # [N,3,PPH,PPW] + + # 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E` + # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently. + # Latents of each such predictions across all input images and all ensemble members are represented in the + # `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded + # into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure + # reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline + # code. For example, in the Marigold-LCM video processing demo, the latents initialization of a frame is taken + # as a convex combination of the latents output of the pipeline for the previous frame and a newly-sampled + # noise. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space + # dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`. + # Model invocation: self.vae.encoder. + image_latent, pred_latent = self.prepare_latents( + image, latents, generator, ensemble_size, batch_size + ) # [N*E,4,h,w], [N*E,4,h,w] + + del image + + batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat( + batch_size, 1, 1 + ) # [B,1024,2] + + # 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`. + # The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and + # outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by + # `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded + # model. + # Model invocation: self.unet. + pred_latents = [] + + for i in self.progress_bar( + range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..." 
+ ): + batch_image_latent = image_latent[i : i + batch_size] # [B,4,h,w] + batch_pred_latent = pred_latent[i : i + batch_size] # [B,4,h,w] + effective_batch_size = batch_image_latent.shape[0] + text = batch_empty_text_embedding[:effective_batch_size] # [B,2,1024] + + self.scheduler.set_timesteps(num_inference_steps, device=device) + for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."): + batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) # [B,8,h,w] + noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] # [B,4,h,w] + batch_pred_latent = self.scheduler.step( + noise, t, batch_pred_latent, generator=generator + ).prev_sample # [B,4,h,w] + + pred_latents.append(batch_pred_latent) + + pred_latent = torch.cat(pred_latents, dim=0) # [N*E,4,h,w] + + del ( + pred_latents, + image_latent, + batch_empty_text_embedding, + batch_image_latent, + batch_pred_latent, + text, + batch_latent, + noise, + ) + + # 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`, + # which requires slight postprocessing. Decoding into pixel space happens in batches of size `batch_size`. + # Model invocation: self.vae.decoder. + prediction = torch.cat( + [ + self.decode_prediction(pred_latent[i : i + batch_size]) + for i in range(0, pred_latent.shape[0], batch_size) + ], + dim=0, + ) # [N*E,1,PPH,PPW] + + if not output_latent: + pred_latent = None + + # 7. Remove padding. The output shape is (PH, PW). + prediction = self.image_processor.unpad_image(prediction, padding) # [N*E,1,PH,PW] + + # 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N` + # groups of `E` ensemble predictions independently. For each group it computes an ensembled prediction of shape + # `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of outputs for + # each group independently, it stacks them respectively into batches of `N` almost final predictions and + # uncertainty maps. + uncertainty = None + if ensemble_size > 1: + prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:]) # [N,E,1,PH,PW] + prediction = [ + self.ensemble_depth( + prediction[i], + self.scale_invariant, + self.shift_invariant, + output_uncertainty, + **(ensembling_kwargs or {}), + ) + for i in range(num_images) + ] # [ [[1,1,PH,PW], [1,1,PH,PW]], ... ] + prediction, uncertainty = zip(*prediction) # [[1,1,PH,PW], ... ], [[1,1,PH,PW], ... ] + prediction = torch.cat(prediction, dim=0) # [N,1,PH,PW] + if output_uncertainty: + uncertainty = torch.cat(uncertainty, dim=0) # [N,1,PH,PW] + else: + uncertainty = None + + # 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match the + # input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be disabled. + # Depending on the downstream use-case, upsampling can be also chosen based on the tolerated artifacts by + # setting the `resample_method_output` parameter (e.g., to `"nearest"`). + if match_input_resolution: + prediction = self.image_processor.resize_antialias( + prediction, original_resolution, resample_method_output, is_aa=False + ) # [N,1,H,W] + if uncertainty is not None and output_uncertainty: + uncertainty = self.image_processor.resize_antialias( + uncertainty, original_resolution, resample_method_output, is_aa=False + ) # [N,1,H,W] + + # 10. Prepare the final outputs. 
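The step-4 comment above describes seeding each video frame's latent from the previous frame's output latent; a hedged sketch of that pattern follows (the blend weights and stand-in frame list are illustrative, the checkpoint id comes from the example docstring):

```py
# Hedged sketch of the latent-reuse pattern described in the step-4 comment:
# each frame's latent starts as a convex combination of the previous output latent and fresh noise.
import torch
from diffusers3.pipelines.marigold.pipeline_marigold_depth import MarigoldDepthPipeline
from diffusers.utils import load_image

pipe = MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

# Two stand-in "frames"; a real video loop would iterate decoded frames instead.
frames = [load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")] * 2

prev_latent = None
for frame in frames:
    latents = None
    if prev_latent is not None:
        # Illustrative 0.9/0.1 blend of the previous latent with fresh noise.
        latents = 0.9 * prev_latent + 0.1 * torch.randn_like(prev_latent)
    out = pipe(frame, latents=latents, output_latent=True)
    prev_latent = out.latent                     # [N*E,4,h,w], reusable on the next call
```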
+ if output_type == "np": + prediction = self.image_processor.pt_to_numpy(prediction) # [N,H,W,1] + if uncertainty is not None and output_uncertainty: + uncertainty = self.image_processor.pt_to_numpy(uncertainty) # [N,H,W,1] + + # 11. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (prediction, uncertainty, pred_latent) + + return MarigoldDepthOutput( + prediction=prediction, + uncertainty=uncertainty, + latent=pred_latent, + ) + + def prepare_latents( + self, + image: torch.Tensor, + latents: Optional[torch.Tensor], + generator: Optional[torch.Generator], + ensemble_size: int, + batch_size: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + def retrieve_latents(encoder_output): + if hasattr(encoder_output, "latent_dist"): + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + image_latent = torch.cat( + [ + retrieve_latents(self.vae.encode(image[i : i + batch_size])) + for i in range(0, image.shape[0], batch_size) + ], + dim=0, + ) # [N,4,h,w] + image_latent = image_latent * self.vae.config.scaling_factor + image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w] + + pred_latent = latents + if pred_latent is None: + pred_latent = randn_tensor( + image_latent.shape, + generator=generator, + device=image_latent.device, + dtype=image_latent.dtype, + ) # [N*E,4,h,w] + + return image_latent, pred_latent + + def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor: + if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels: + raise ValueError( + f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}." + ) + + prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W] + + prediction = prediction.mean(dim=1, keepdim=True) # [B,1,H,W] + prediction = torch.clip(prediction, -1.0, 1.0) # [B,1,H,W] + prediction = (prediction + 1.0) / 2.0 + + return prediction # [B,1,H,W] + + @staticmethod + def ensemble_depth( + depth: torch.Tensor, + scale_invariant: bool = True, + shift_invariant: bool = True, + output_uncertainty: bool = False, + reduction: str = "median", + regularizer_strength: float = 0.02, + max_iter: int = 2, + tol: float = 1e-3, + max_res: int = 1024, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """ + Ensembles the depth maps represented by the `depth` tensor with expected shape `(B, 1, H, W)`, where B is the + number of ensemble members for a given prediction of size `(H x W)`. Even though the function is designed for + depth maps, it can also be used with disparity maps as long as the input tensor values are non-negative. The + alignment happens when the predictions have one or more degrees of freedom, that is when they are either + affine-invariant (`scale_invariant=True` and `shift_invariant=True`), or just scale-invariant (only + `scale_invariant=True`). For absolute predictions (`scale_invariant=False` and `shift_invariant=False`) + alignment is skipped and only ensembling is performed. + + Args: + depth (`torch.Tensor`): + Input ensemble depth maps. + scale_invariant (`bool`, *optional*, defaults to `True`): + Whether to treat predictions as scale-invariant. + shift_invariant (`bool`, *optional*, defaults to `True`): + Whether to treat predictions as shift-invariant. 
+ output_uncertainty (`bool`, *optional*, defaults to `False`): + Whether to output uncertainty map. + reduction (`str`, *optional*, defaults to `"median"`): + Reduction method used to ensemble aligned predictions. The accepted values are: `"mean"` and + `"median"`. + regularizer_strength (`float`, *optional*, defaults to `0.02`): + Strength of the regularizer that pulls the aligned predictions to the unit range from 0 to 1. + max_iter (`int`, *optional*, defaults to `2`): + Maximum number of the alignment solver steps. Refer to `scipy.optimize.minimize` function, `options` + argument. + tol (`float`, *optional*, defaults to `1e-3`): + Alignment solver tolerance. The solver stops when the tolerance is reached. + max_res (`int`, *optional*, defaults to `1024`): + Resolution at which the alignment is performed; `None` matches the `processing_resolution`. + Returns: + A tensor of aligned and ensembled depth maps and optionally a tensor of uncertainties of the same shape: + `(1, 1, H, W)`. + """ + if depth.dim() != 4 or depth.shape[1] != 1: + raise ValueError(f"Expecting 4D tensor of shape [B,1,H,W]; got {depth.shape}.") + if reduction not in ("mean", "median"): + raise ValueError(f"Unrecognized reduction method: {reduction}.") + if not scale_invariant and shift_invariant: + raise ValueError("Pure shift-invariant ensembling is not supported.") + + def init_param(depth: torch.Tensor): + init_min = depth.reshape(ensemble_size, -1).min(dim=1).values + init_max = depth.reshape(ensemble_size, -1).max(dim=1).values + + if scale_invariant and shift_invariant: + init_s = 1.0 / (init_max - init_min).clamp(min=1e-6) + init_t = -init_s * init_min + param = torch.cat((init_s, init_t)).cpu().numpy() + elif scale_invariant: + init_s = 1.0 / init_max.clamp(min=1e-6) + param = init_s.cpu().numpy() + else: + raise ValueError("Unrecognized alignment.") + + return param + + def align(depth: torch.Tensor, param: np.ndarray) -> torch.Tensor: + if scale_invariant and shift_invariant: + s, t = np.split(param, 2) + s = torch.from_numpy(s).to(depth).view(ensemble_size, 1, 1, 1) + t = torch.from_numpy(t).to(depth).view(ensemble_size, 1, 1, 1) + out = depth * s + t + elif scale_invariant: + s = torch.from_numpy(param).to(depth).view(ensemble_size, 1, 1, 1) + out = depth * s + else: + raise ValueError("Unrecognized alignment.") + return out + + def ensemble( + depth_aligned: torch.Tensor, return_uncertainty: bool = False + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + uncertainty = None + if reduction == "mean": + prediction = torch.mean(depth_aligned, dim=0, keepdim=True) + if return_uncertainty: + uncertainty = torch.std(depth_aligned, dim=0, keepdim=True) + elif reduction == "median": + prediction = torch.median(depth_aligned, dim=0, keepdim=True).values + if return_uncertainty: + uncertainty = torch.median(torch.abs(depth_aligned - prediction), dim=0, keepdim=True).values + else: + raise ValueError(f"Unrecognized reduction method: {reduction}.") + return prediction, uncertainty + + def cost_fn(param: np.ndarray, depth: torch.Tensor) -> float: + cost = 0.0 + depth_aligned = align(depth, param) + + for i, j in torch.combinations(torch.arange(ensemble_size)): + diff = depth_aligned[i] - depth_aligned[j] + cost += (diff**2).mean().sqrt().item() + + if regularizer_strength > 0: + prediction, _ = ensemble(depth_aligned, return_uncertainty=False) + err_near = (0.0 - prediction.min()).abs().item() + err_far = (1.0 - prediction.max()).abs().item() + cost += (err_near + err_far) * regularizer_strength + + return cost + + 
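For orientation while reading the nested helpers, a hedged sketch of calling `ensemble_depth` directly on synthetic ensemble members; scipy is required because the default affine-invariant setting triggers the alignment solver:

```py
# Hedged sketch: align and reduce E=3 synthetic affine-invariant depth predictions.
import torch
from diffusers3.pipelines.marigold.pipeline_marigold_depth import MarigoldDepthPipeline

preds = torch.rand(3, 1, 64, 64)                 # [E,1,H,W] stand-in ensemble members
depth, uncertainty = MarigoldDepthPipeline.ensemble_depth(
    preds, scale_invariant=True, shift_invariant=True, output_uncertainty=True
)
print(depth.shape, uncertainty.shape)            # torch.Size([1, 1, 64, 64]) for both
```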
def compute_param(depth: torch.Tensor): + import scipy + + depth_to_align = depth.to(torch.float32) + if max_res is not None and max(depth_to_align.shape[2:]) > max_res: + depth_to_align = MarigoldImageProcessor.resize_to_max_edge(depth_to_align, max_res, "nearest-exact") + + param = init_param(depth_to_align) + + res = scipy.optimize.minimize( + partial(cost_fn, depth=depth_to_align), + param, + method="BFGS", + tol=tol, + options={"maxiter": max_iter, "disp": False}, + ) + + return res.x + + requires_aligning = scale_invariant or shift_invariant + ensemble_size = depth.shape[0] + + if requires_aligning: + param = compute_param(depth) + depth = align(depth, param) + + depth, uncertainty = ensemble(depth, return_uncertainty=output_uncertainty) + + depth_max = depth.max() + if scale_invariant and shift_invariant: + depth_min = depth.min() + elif scale_invariant: + depth_min = 0 + else: + raise ValueError("Unrecognized alignment.") + depth_range = (depth_max - depth_min).clamp(min=1e-6) + depth = (depth - depth_min) / depth_range + if output_uncertainty: + uncertainty /= depth_range + + return depth, uncertainty # [1,1,H,W], [1,1,H,W] diff --git a/diffusers3/pipelines/marigold/pipeline_marigold_normals.py b/diffusers3/pipelines/marigold/pipeline_marigold_normals.py new file mode 100644 index 0000000000000000000000000000000000000000..aa9ad36ffc35614c889165152ecaea443773cfb8 --- /dev/null +++ b/diffusers3/pipelines/marigold/pipeline_marigold_normals.py @@ -0,0 +1,690 @@ +# Copyright 2024 Marigold authors, PRS ETH Zurich. All rights reserved. +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# -------------------------------------------------------------------------- +# More information and citation instructions are available on the +# Marigold project website: https://marigoldmonodepth.github.io +# -------------------------------------------------------------------------- +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from PIL import Image +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +from ...image_processor import PipelineImageInput +from ...models import ( + AutoencoderKL, + UNet2DConditionModel, +) +from ...schedulers import ( + DDIMScheduler, + LCMScheduler, +) +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .marigold_image_processing import MarigoldImageProcessor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ +Examples: +```py +>>> import diffusers +>>> import torch + +>>> pipe = diffusers.MarigoldNormalsPipeline.from_pretrained( +... "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16 +... 
).to("cuda") + +>>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg") +>>> normals = pipe(image) + +>>> vis = pipe.image_processor.visualize_normals(normals.prediction) +>>> vis[0].save("einstein_normals.png") +``` +""" + + +@dataclass +class MarigoldNormalsOutput(BaseOutput): + """ + Output class for Marigold monocular normals prediction pipeline. + + Args: + prediction (`np.ndarray`, `torch.Tensor`): + Predicted normals with values in the range [-1, 1]. The shape is always $numimages \times 3 \times height + \times width$, regardless of whether the images were passed as a 4D array or a list. + uncertainty (`None`, `np.ndarray`, `torch.Tensor`): + Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is $numimages + \times 1 \times height \times width$. + latent (`None`, `torch.Tensor`): + Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline. + The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$. + """ + + prediction: Union[np.ndarray, torch.Tensor] + uncertainty: Union[None, np.ndarray, torch.Tensor] + latent: Union[None, torch.Tensor] + + +class MarigoldNormalsPipeline(DiffusionPipeline): + """ + Pipeline for monocular normals estimation using the Marigold method: https://marigoldmonodepth.github.io. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + unet (`UNet2DConditionModel`): + Conditional U-Net to denoise the normals latent, conditioned on image latent. + vae (`AutoencoderKL`): + Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent + representations. + scheduler (`DDIMScheduler` or `LCMScheduler`): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + text_encoder (`CLIPTextModel`): + Text-encoder, for empty text embedding. + tokenizer (`CLIPTokenizer`): + CLIP tokenizer. + prediction_type (`str`, *optional*): + Type of predictions made by the model. + use_full_z_range (`bool`, *optional*): + Whether the normals predicted by this model utilize the full range of the Z dimension, or only its positive + half. + default_denoising_steps (`int`, *optional*): + The minimum number of denoising diffusion steps that are required to produce a prediction of reasonable + quality with the given model. This value must be set in the model config. When the pipeline is called + without explicitly setting `num_inference_steps`, the default value is used. This is required to ensure + reasonable results with various model flavors compatible with the pipeline, such as those relying on very + short denoising schedules (`LCMScheduler`) and those with full diffusion schedules (`DDIMScheduler`). + default_processing_resolution (`int`, *optional*): + The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in + the model config. When the pipeline is called without explicitly setting `processing_resolution`, the + default value is used. This is required to ensure reasonable results with various model flavors trained + with varying optimal processing resolution values. 
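Complementing the basic example in the module docstring, a hedged sketch of an ensembled normals call with an uncertainty map; the checkpoint id is taken from the docstring above, and the vendored pipeline is assumed to behave like its upstream diffusers counterpart:

```py
# Hedged sketch: ensembled surface-normals prediction plus uncertainty visualization.
import torch
from diffusers3.pipelines.marigold.pipeline_marigold_normals import MarigoldNormalsPipeline
from diffusers.utils import load_image

pipe = MarigoldNormalsPipeline.from_pretrained(
    "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
).to("cuda")
image = load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")

out = pipe(image, ensemble_size=4, output_uncertainty=True)
vis = pipe.image_processor.visualize_normals(out.prediction)        # list of PIL images
unc = pipe.image_processor.visualize_uncertainty(out.uncertainty)
vis[0].save("einstein_normals.png")
unc[0].save("einstein_normals_uncertainty.png")
```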
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + supported_prediction_types = ("normals",) + + def __init__( + self, + unet: UNet2DConditionModel, + vae: AutoencoderKL, + scheduler: Union[DDIMScheduler, LCMScheduler], + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + prediction_type: Optional[str] = None, + use_full_z_range: Optional[bool] = True, + default_denoising_steps: Optional[int] = None, + default_processing_resolution: Optional[int] = None, + ): + super().__init__() + + if prediction_type not in self.supported_prediction_types: + logger.warning( + f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: " + f"{self.supported_prediction_types}." + ) + + self.register_modules( + unet=unet, + vae=vae, + scheduler=scheduler, + text_encoder=text_encoder, + tokenizer=tokenizer, + ) + self.register_to_config( + use_full_z_range=use_full_z_range, + default_denoising_steps=default_denoising_steps, + default_processing_resolution=default_processing_resolution, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + self.use_full_z_range = use_full_z_range + self.default_denoising_steps = default_denoising_steps + self.default_processing_resolution = default_processing_resolution + + self.empty_text_embedding = None + + self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def check_inputs( + self, + image: PipelineImageInput, + num_inference_steps: int, + ensemble_size: int, + processing_resolution: int, + resample_method_input: str, + resample_method_output: str, + batch_size: int, + ensembling_kwargs: Optional[Dict[str, Any]], + latents: Optional[torch.Tensor], + generator: Optional[Union[torch.Generator, List[torch.Generator]]], + output_type: str, + output_uncertainty: bool, + ) -> int: + if num_inference_steps is None: + raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.") + if num_inference_steps < 1: + raise ValueError("`num_inference_steps` must be positive.") + if ensemble_size < 1: + raise ValueError("`ensemble_size` must be positive.") + if ensemble_size == 2: + logger.warning( + "`ensemble_size` == 2 results are similar to no ensembling (1); " + "consider increasing the value to at least 3." + ) + if ensemble_size == 1 and output_uncertainty: + raise ValueError( + "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` " + "greater than 1." + ) + if processing_resolution is None: + raise ValueError( + "`processing_resolution` is not specified and could not be resolved from the model config." + ) + if processing_resolution < 0: + raise ValueError( + "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for " + "downsampled processing." + ) + if processing_resolution % self.vae_scale_factor != 0: + raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.") + if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): + raise ValueError( + "`resample_method_input` takes string values compatible with PIL library: " + "nearest, nearest-exact, bilinear, bicubic, area." + ) + if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): + raise ValueError( + "`resample_method_output` takes string values compatible with PIL library: " + "nearest, nearest-exact, bilinear, bicubic, area." 
+ ) + if batch_size < 1: + raise ValueError("`batch_size` must be positive.") + if output_type not in ["pt", "np"]: + raise ValueError("`output_type` must be one of `pt` or `np`.") + if latents is not None and generator is not None: + raise ValueError("`latents` and `generator` cannot be used together.") + if ensembling_kwargs is not None: + if not isinstance(ensembling_kwargs, dict): + raise ValueError("`ensembling_kwargs` must be a dictionary.") + if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("closest", "mean"): + raise ValueError("`ensembling_kwargs['reduction']` can be either `'closest'` or `'mean'`.") + + # image checks + num_images = 0 + W, H = None, None + if not isinstance(image, list): + image = [image] + for i, img in enumerate(image): + if isinstance(img, np.ndarray) or torch.is_tensor(img): + if img.ndim not in (2, 3, 4): + raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.") + H_i, W_i = img.shape[-2:] + N_i = 1 + if img.ndim == 4: + N_i = img.shape[0] + elif isinstance(img, Image.Image): + W_i, H_i = img.size + N_i = 1 + else: + raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.") + if W is None: + W, H = W_i, H_i + elif (W, H) != (W_i, H_i): + raise ValueError( + f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}" + ) + num_images += N_i + + # latents checks + if latents is not None: + if not torch.is_tensor(latents): + raise ValueError("`latents` must be a torch.Tensor.") + if latents.dim() != 4: + raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.") + + if processing_resolution > 0: + max_orig = max(H, W) + new_H = H * processing_resolution // max_orig + new_W = W * processing_resolution // max_orig + if new_H == 0 or new_W == 0: + raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]") + W, H = new_W, new_H + w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor + h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor + shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w) + + if latents.shape != shape_expected: + raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.") + + # generator checks + if generator is not None: + if isinstance(generator, list): + if len(generator) != num_images * ensemble_size: + raise ValueError( + "The number of generators must match the total number of ensemble members for all input images." + ) + if not all(g.device.type == generator[0].device.type for g in generator): + raise ValueError("`generator` device placement is not consistent in the list.") + elif not isinstance(generator, torch.Generator): + raise ValueError(f"Unsupported generator type: {type(generator)}.") + + return num_images + + def progress_bar(self, iterable=None, total=None, desc=None, leave=True): + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + elif not isinstance(self._progress_bar_config, dict): + raise ValueError( + f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." 
+ ) + + progress_bar_config = dict(**self._progress_bar_config) + progress_bar_config["desc"] = progress_bar_config.get("desc", desc) + progress_bar_config["leave"] = progress_bar_config.get("leave", leave) + if iterable is not None: + return tqdm(iterable, **progress_bar_config) + elif total is not None: + return tqdm(total=total, **progress_bar_config) + else: + raise ValueError("Either `total` or `iterable` has to be defined.") + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: PipelineImageInput, + num_inference_steps: Optional[int] = None, + ensemble_size: int = 1, + processing_resolution: Optional[int] = None, + match_input_resolution: bool = True, + resample_method_input: str = "bilinear", + resample_method_output: str = "bilinear", + batch_size: int = 1, + ensembling_kwargs: Optional[Dict[str, Any]] = None, + latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: str = "np", + output_uncertainty: bool = False, + output_latent: bool = False, + return_dict: bool = True, + ): + """ + Function invoked when calling the pipeline. + + Args: + image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`), + `List[torch.Tensor]`: An input image or images used as an input for the normals estimation task. For + arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible + by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or + three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the + same width and height. + num_inference_steps (`int`, *optional*, defaults to `None`): + Number of denoising diffusion steps during inference. The default value `None` results in automatic + selection. The number of steps should be at least 10 with the full Marigold models, and between 1 and 4 + for Marigold-LCM models. + ensemble_size (`int`, defaults to `1`): + Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for + faster inference. + processing_resolution (`int`, *optional*, defaults to `None`): + Effective processing resolution. When set to `0`, matches the larger input image dimension. This + produces crisper predictions, but may also lead to the overall loss of global context. The default + value `None` resolves to the optimal value from the model config. + match_input_resolution (`bool`, *optional*, defaults to `True`): + When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer + side of the output will equal to `processing_resolution`. + resample_method_input (`str`, *optional*, defaults to `"bilinear"`): + Resampling method used to resize input images to `processing_resolution`. The accepted values are: + `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. + resample_method_output (`str`, *optional*, defaults to `"bilinear"`): + Resampling method used to resize output predictions to match the input resolution. The accepted values + are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. + batch_size (`int`, *optional*, defaults to `1`): + Batch size; only matters when setting `ensemble_size` or passing a tensor of images. + ensembling_kwargs (`dict`, *optional*, defaults to `None`) + Extra dictionary with arguments for precise ensembling control. 
The following options are available: + - reduction (`str`, *optional*, defaults to `"closest"`): Defines the ensembling function applied in + every pixel location, can be either `"closest"` or `"mean"`. + latents (`torch.Tensor`, *optional*, defaults to `None`): + Latent noise tensors to replace the random initialization. These can be taken from the previous + function call's output. + generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`): + Random number generator object to ensure reproducibility. + output_type (`str`, *optional*, defaults to `"np"`): + Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted + values are: `"np"` (numpy array) or `"pt"` (torch tensor). + output_uncertainty (`bool`, *optional*, defaults to `False`): + When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that + the `ensemble_size` argument is set to a value above 2. + output_latent (`bool`, *optional*, defaults to `False`): + When enabled, the output's `latent` field contains the latent codes corresponding to the predictions + within the ensemble. These codes can be saved, modified, and used for subsequent calls with the + `latents` argument. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.marigold.MarigoldDepthOutput`] instead of a plain tuple. + + Examples: + + Returns: + [`~pipelines.marigold.MarigoldNormalsOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.marigold.MarigoldNormalsOutput`] is returned, otherwise a + `tuple` is returned where the first element is the prediction, the second element is the uncertainty + (or `None`), and the third is the latent (or `None`). + """ + + # 0. Resolving variables. + device = self._execution_device + dtype = self.dtype + + # Model-specific optimal default values leading to fast and reasonable results. + if num_inference_steps is None: + num_inference_steps = self.default_denoising_steps + if processing_resolution is None: + processing_resolution = self.default_processing_resolution + + # 1. Check inputs. + num_images = self.check_inputs( + image, + num_inference_steps, + ensemble_size, + processing_resolution, + resample_method_input, + resample_method_output, + batch_size, + ensembling_kwargs, + latents, + generator, + output_type, + output_uncertainty, + ) + + # 2. Prepare empty text conditioning. + # Model invocation: self.tokenizer, self.text_encoder. + if self.empty_text_embedding is None: + prompt = "" + text_inputs = self.tokenizer( + prompt, + padding="do_not_pad", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids.to(device) + self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024] + + # 3. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`, + # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where + # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are + # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None` + # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of + # operation and leads to the most reasonable results. 
Using the native image resolution or any other processing + # resolution can lead to loss of either fine details or global context in the output predictions. + image, padding, original_resolution = self.image_processor.preprocess( + image, processing_resolution, resample_method_input, device, dtype + ) # [N,3,PPH,PPW] + + # 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E` + # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently. + # Latents of each such predictions across all input images and all ensemble members are represented in the + # `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded + # into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure + # reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline + # code. For example, in the Marigold-LCM video processing demo, the latents initialization of a frame is taken + # as a convex combination of the latents output of the pipeline for the previous frame and a newly-sampled + # noise. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space + # dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`. + # Model invocation: self.vae.encoder. + image_latent, pred_latent = self.prepare_latents( + image, latents, generator, ensemble_size, batch_size + ) # [N*E,4,h,w], [N*E,4,h,w] + + del image + + batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat( + batch_size, 1, 1 + ) # [B,1024,2] + + # 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`. + # The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and + # outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by + # `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded + # model. + # Model invocation: self.unet. + pred_latents = [] + + for i in self.progress_bar( + range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..." + ): + batch_image_latent = image_latent[i : i + batch_size] # [B,4,h,w] + batch_pred_latent = pred_latent[i : i + batch_size] # [B,4,h,w] + effective_batch_size = batch_image_latent.shape[0] + text = batch_empty_text_embedding[:effective_batch_size] # [B,2,1024] + + self.scheduler.set_timesteps(num_inference_steps, device=device) + for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."): + batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) # [B,8,h,w] + noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] # [B,4,h,w] + batch_pred_latent = self.scheduler.step( + noise, t, batch_pred_latent, generator=generator + ).prev_sample # [B,4,h,w] + + pred_latents.append(batch_pred_latent) + + pred_latent = torch.cat(pred_latents, dim=0) # [N*E,4,h,w] + + del ( + pred_latents, + image_latent, + batch_empty_text_embedding, + batch_image_latent, + batch_pred_latent, + text, + batch_latent, + noise, + ) + + # 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`, + # which requires slight postprocessing. 
Decoding into pixel space happens in batches of size `batch_size`. + # Model invocation: self.vae.decoder. + prediction = torch.cat( + [ + self.decode_prediction(pred_latent[i : i + batch_size]) + for i in range(0, pred_latent.shape[0], batch_size) + ], + dim=0, + ) # [N*E,3,PPH,PPW] + + if not output_latent: + pred_latent = None + + # 7. Remove padding. The output shape is (PH, PW). + prediction = self.image_processor.unpad_image(prediction, padding) # [N*E,3,PH,PW] + + # 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N` + # groups of `E` ensemble predictions independently. For each group it computes an ensembled prediction of shape + # `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of outputs for + # each group independently, it stacks them respectively into batches of `N` almost final predictions and + # uncertainty maps. + uncertainty = None + if ensemble_size > 1: + prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:]) # [N,E,3,PH,PW] + prediction = [ + self.ensemble_normals(prediction[i], output_uncertainty, **(ensembling_kwargs or {})) + for i in range(num_images) + ] # [ [[1,3,PH,PW], [1,1,PH,PW]], ... ] + prediction, uncertainty = zip(*prediction) # [[1,3,PH,PW], ... ], [[1,1,PH,PW], ... ] + prediction = torch.cat(prediction, dim=0) # [N,3,PH,PW] + if output_uncertainty: + uncertainty = torch.cat(uncertainty, dim=0) # [N,1,PH,PW] + else: + uncertainty = None + + # 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match the + # input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be disabled. + # After upsampling, the native resolution normal maps are renormalized to unit length to reduce the artifacts. + # Depending on the downstream use-case, upsampling can be also chosen based on the tolerated artifacts by + # setting the `resample_method_output` parameter (e.g., to `"nearest"`). + if match_input_resolution: + prediction = self.image_processor.resize_antialias( + prediction, original_resolution, resample_method_output, is_aa=False + ) # [N,3,H,W] + prediction = self.normalize_normals(prediction) # [N,3,H,W] + if uncertainty is not None and output_uncertainty: + uncertainty = self.image_processor.resize_antialias( + uncertainty, original_resolution, resample_method_output, is_aa=False + ) # [N,1,H,W] + + # 10. Prepare the final outputs. + if output_type == "np": + prediction = self.image_processor.pt_to_numpy(prediction) # [N,H,W,3] + if uncertainty is not None and output_uncertainty: + uncertainty = self.image_processor.pt_to_numpy(uncertainty) # [N,H,W,1] + + # 11. 
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (prediction, uncertainty, pred_latent) + + return MarigoldNormalsOutput( + prediction=prediction, + uncertainty=uncertainty, + latent=pred_latent, + ) + + # Copied from diffusers.pipelines.marigold.pipeline_marigold_depth.MarigoldDepthPipeline.prepare_latents + def prepare_latents( + self, + image: torch.Tensor, + latents: Optional[torch.Tensor], + generator: Optional[torch.Generator], + ensemble_size: int, + batch_size: int, + ) -> Tuple[torch.Tensor, torch.Tensor]: + def retrieve_latents(encoder_output): + if hasattr(encoder_output, "latent_dist"): + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + image_latent = torch.cat( + [ + retrieve_latents(self.vae.encode(image[i : i + batch_size])) + for i in range(0, image.shape[0], batch_size) + ], + dim=0, + ) # [N,4,h,w] + image_latent = image_latent * self.vae.config.scaling_factor + image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w] + + pred_latent = latents + if pred_latent is None: + pred_latent = randn_tensor( + image_latent.shape, + generator=generator, + device=image_latent.device, + dtype=image_latent.dtype, + ) # [N*E,4,h,w] + + return image_latent, pred_latent + + def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor: + if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels: + raise ValueError( + f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}." + ) + + prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W] + + prediction = torch.clip(prediction, -1.0, 1.0) + + if not self.use_full_z_range: + prediction[:, 2, :, :] *= 0.5 + prediction[:, 2, :, :] += 0.5 + + prediction = self.normalize_normals(prediction) # [B,3,H,W] + + return prediction # [B,3,H,W] + + @staticmethod + def normalize_normals(normals: torch.Tensor, eps: float = 1e-6) -> torch.Tensor: + if normals.dim() != 4 or normals.shape[1] != 3: + raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.") + + norm = torch.norm(normals, dim=1, keepdim=True) + normals /= norm.clamp(min=eps) + + return normals + + @staticmethod + def ensemble_normals( + normals: torch.Tensor, output_uncertainty: bool, reduction: str = "closest" + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """ + Ensembles the normals maps represented by the `normals` tensor with expected shape `(B, 3, H, W)`, where B is + the number of ensemble members for a given prediction of size `(H x W)`. + + Args: + normals (`torch.Tensor`): + Input ensemble normals maps. + output_uncertainty (`bool`, *optional*, defaults to `False`): + Whether to output uncertainty map. + reduction (`str`, *optional*, defaults to `"closest"`): + Reduction method used to ensemble aligned predictions. The accepted values are: `"closest"` and + `"mean"`. + + Returns: + A tensor of aligned and ensembled normals maps with shape `(1, 3, H, W)` and optionally a tensor of + uncertainties of shape `(1, 1, H, W)`. 
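+
+        A minimal self-contained sketch of the two static helpers (the random tensor is purely illustrative;
+        inside the pipeline the ensemble members come from decoded predictions):
+
+        ```py
+        >>> import torch
+
+        >>> normals = torch.randn(5, 3, 64, 64)  # 5 ensemble members
+        >>> normals = MarigoldNormalsPipeline.normalize_normals(normals)
+        >>> merged, uncertainty = MarigoldNormalsPipeline.ensemble_normals(normals, output_uncertainty=True)
+        >>> merged.shape, uncertainty.shape
+        (torch.Size([1, 3, 64, 64]), torch.Size([1, 1, 64, 64]))
+        ```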
+ """ + if normals.dim() != 4 or normals.shape[1] != 3: + raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.") + if reduction not in ("closest", "mean"): + raise ValueError(f"Unrecognized reduction method: {reduction}.") + + mean_normals = normals.mean(dim=0, keepdim=True) # [1,3,H,W] + mean_normals = MarigoldNormalsPipeline.normalize_normals(mean_normals) # [1,3,H,W] + + sim_cos = (mean_normals * normals).sum(dim=1, keepdim=True) # [E,1,H,W] + sim_cos = sim_cos.clamp(-1, 1) # required to avoid NaN in uncertainty with fp16 + + uncertainty = None + if output_uncertainty: + uncertainty = sim_cos.arccos() # [E,1,H,W] + uncertainty = uncertainty.mean(dim=0, keepdim=True) / np.pi # [1,1,H,W] + + if reduction == "mean": + return mean_normals, uncertainty # [1,3,H,W], [1,1,H,W] + + closest_indices = sim_cos.argmax(dim=0, keepdim=True) # [1,1,H,W] + closest_indices = closest_indices.repeat(1, 3, 1, 1) # [1,3,H,W] + closest_normals = torch.gather(normals, 0, closest_indices) # [1,3,H,W] + + return closest_normals, uncertainty # [1,3,H,W], [1,1,H,W] diff --git a/diffusers3/pipelines/minus.py b/diffusers3/pipelines/minus.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf57cc6ce88817569605e3df34eb90920a528ae --- /dev/null +++ b/diffusers3/pipelines/minus.py @@ -0,0 +1,3 @@ +class minus_test(): + def minus(a,b): + return a - b \ No newline at end of file diff --git a/diffusers3/pipelines/musicldm/__init__.py b/diffusers3/pipelines/musicldm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed71eeb1d99b28f20f7cd94776c0303208620653 --- /dev/null +++ b/diffusers3/pipelines/musicldm/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_musicldm"] = ["MusicLDMPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_musicldm import MusicLDMPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/musicldm/pipeline_musicldm.py b/diffusers3/pipelines/musicldm/pipeline_musicldm.py new file mode 100644 index 0000000000000000000000000000000000000000..728635da6d4dc66ae2f8e465b20309ae654c0558 --- /dev/null +++ b/diffusers3/pipelines/musicldm/pipeline_musicldm.py @@ -0,0 +1,635 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import ( + ClapFeatureExtractor, + ClapModel, + ClapTextModelWithProjection, + RobertaTokenizer, + RobertaTokenizerFast, + SpeechT5HifiGan, +) + +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + is_accelerate_available, + is_accelerate_version, + is_librosa_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin + + +if is_librosa_available(): + import librosa + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import MusicLDMPipeline + >>> import torch + >>> import scipy + + >>> repo_id = "ucsd-reach/musicldm" + >>> pipe = MusicLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" + >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] + + >>> # save the audio sample as a .wav file + >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) + ``` +""" + + +class MusicLDMPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-audio generation using MusicLDM. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.ClapModel`]): + Frozen text-audio embedding model (`ClapTextModel`), specifically the + [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. + tokenizer ([`PreTrainedTokenizer`]): + A [`~transformers.RobertaTokenizer`] to tokenize text. + feature_extractor ([`~transformers.ClapFeatureExtractor`]): + Feature extractor to compute mel-spectrograms from audio waveforms. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded audio latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + vocoder ([`~transformers.SpeechT5HifiGan`]): + Vocoder of class `SpeechT5HifiGan`. 
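+
+        Note: the pooled CLAP text embedding is passed to the UNet through the `class_labels` argument rather
+        than `encoder_hidden_states` (see `__call__`), so the prompt conditioning is a 2D
+        `[batch, projection_dim]` tensor instead of a per-token sequence.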
+ """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: Union[ClapTextModelWithProjection, ClapModel], + tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], + feature_extractor: Optional[ClapFeatureExtractor], + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + vocoder: SpeechT5HifiGan, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + feature_extractor=feature_extractor, + unet=unet, + scheduler=scheduler, + vocoder=vocoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def _encode_prompt( + self, + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device (`torch.device`): + torch device + num_waveforms_per_prompt (`int`): + number of waveforms that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the audio generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLAP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder.get_text_features( + text_input_ids.to(device), + attention_mask=attention_mask.to(device), + ) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) + + ( + bs_embed, + seq_len, + ) = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + attention_mask = uncond_input.attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder.get_text_features( + uncond_input_ids, + attention_mask=attention_mask, + ) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform + def mel_spectrogram_to_waveform(self, mel_spectrogram): + if mel_spectrogram.dim() == 4: + mel_spectrogram = mel_spectrogram.squeeze(1) + + waveform = self.vocoder(mel_spectrogram) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + waveform = waveform.cpu().float() + return waveform + + # Copied from diffusers.pipelines.audioldm2.pipeline_audioldm2.AudioLDM2Pipeline.score_waveforms + def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype): + if not is_librosa_available(): + logger.info( + "Automatic scoring of the generated audio waveforms against the input prompt text requires the " + "`librosa` package to resample the generated waveforms. Returning the audios in the order they were " + "generated. To enable automatic scoring, install `librosa` with: `pip install librosa`." + ) + return audio + inputs = self.tokenizer(text, return_tensors="pt", padding=True) + resampled_audio = librosa.resample( + audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate + ) + inputs["input_features"] = self.feature_extractor( + list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate + ).input_features.type(dtype) + inputs = inputs.to(device) + + # compute the audio-text similarity score using the CLAP model + logits_per_text = self.text_encoder(**inputs).logits_per_text + # sort by the highest matching generations per prompt + indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt] + audio = torch.index_select(audio, 0, indices.reshape(-1).cpu()) + return audio + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.check_inputs + def check_inputs( + self, + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor + if audio_length_in_s < min_audio_length_in_s: + raise ValueError( + f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " + f"is {audio_length_in_s}." 
+ ) + + if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: + raise ValueError( + f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " + f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " + f"{self.vae_scale_factor}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(self.vocoder.config.model_in_dim) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def enable_model_cpu_offload(self, gpu_id=0): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. 
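+
+        A usage sketch mirroring the example at the top of this file, with offloading in place of the explicit
+        `.to("cuda")` move:
+
+        ```py
+        >>> import torch
+        >>> from diffusers import MusicLDMPipeline
+
+        >>> pipe = MusicLDMPipeline.from_pretrained("ucsd-reach/musicldm", torch_dtype=torch.float16)
+        >>> pipe.enable_model_cpu_offload()
+        >>> audio = pipe("Techno music with a strong, upbeat tempo", num_inference_steps=10).audios[0]
+        ```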
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + model_sequence = [ + self.text_encoder.text_model, + self.text_encoder.text_projection, + self.unet, + self.vae, + self.vocoder, + self.text_encoder, + ] + + hook = None + for cpu_offloaded_model in model_sequence: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. + self.final_offload_hook = hook + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_length_in_s: Optional[float] = None, + num_inference_steps: int = 200, + guidance_scale: float = 2.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + output_type: Optional[str] = "np", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. + audio_length_in_s (`int`, *optional*, defaults to 10.24): + The length of the generated audio sample in seconds. + num_inference_steps (`int`, *optional*, defaults to 200): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 2.0): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, the text encoding + model is a joint text-audio model ([`~transformers.ClapModel`]), and the tokenizer is a + `[~transformers.ClapProcessor]`, then automatic scoring will be performed between the generated outputs + and the input text. This scoring ranks the generated waveforms based on their cosine similarity to text + input in the joint text-audio embedding space. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion + model (LDM) output. + + Examples: + + Returns: + [`~pipelines.AudioPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to spectrogram height + vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate + + if audio_length_in_s is None: + audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor + + height = int(audio_length_in_s / vocoder_upsample_factor) + + original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) + if height % self.vae_scale_factor != 0: + height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor + logger.info( + f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " + f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " + f"denoising process." + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_length_in_s, + vocoder_upsample_factor, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_waveforms_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_latents, + height, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=None, + class_labels=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + # 8. Post-processing + if not output_type == "latent": + latents = 1 / self.vae.config.scaling_factor * latents + mel_spectrogram = self.vae.decode(latents).sample + else: + return AudioPipelineOutput(audios=latents) + + audio = self.mel_spectrogram_to_waveform(mel_spectrogram) + + audio = audio[:, :original_waveform_length] + + # 9. 
Automatic scoring + if num_waveforms_per_prompt > 1 and prompt is not None: + audio = self.score_waveforms( + text=prompt, + audio=audio, + num_waveforms_per_prompt=num_waveforms_per_prompt, + device=device, + dtype=prompt_embeds.dtype, + ) + + if output_type == "np": + audio = audio.numpy() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/diffusers3/pipelines/onnx_utils.py b/diffusers3/pipelines/onnx_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..11f2241c64c837bc3d0160370214f59859b331df --- /dev/null +++ b/diffusers3/pipelines/onnx_utils.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import shutil +from pathlib import Path +from typing import Optional, Union + +import numpy as np +from huggingface_hub import hf_hub_download +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging + + +if is_onnx_available(): + import onnxruntime as ort + + +logger = logging.get_logger(__name__) + +ORT_TO_NP_TYPE = { + "tensor(bool)": np.bool_, + "tensor(int8)": np.int8, + "tensor(uint8)": np.uint8, + "tensor(int16)": np.int16, + "tensor(uint16)": np.uint16, + "tensor(int32)": np.int32, + "tensor(uint32)": np.uint32, + "tensor(int64)": np.int64, + "tensor(uint64)": np.uint64, + "tensor(float16)": np.float16, + "tensor(float)": np.float32, + "tensor(double)": np.float64, +} + + +class OnnxRuntimeModel: + def __init__(self, model=None, **kwargs): + logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.") + self.model = model + self.model_save_dir = kwargs.get("model_save_dir", None) + self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME) + + def __call__(self, **kwargs): + inputs = {k: np.array(v) for k, v in kwargs.items()} + return self.model.run(None, inputs) + + @staticmethod + def load_model(path: Union[str, Path], provider=None, sess_options=None): + """ + Loads an ONNX Inference session with an ExecutionProvider. Default provider is `CPUExecutionProvider` + + Arguments: + path (`str` or `Path`): + Directory from which to load + provider(`str`, *optional*): + Onnxruntime execution provider to use for loading the model, defaults to `CPUExecutionProvider` + """ + if provider is None: + logger.info("No onnxruntime provider specified, using CPUExecutionProvider") + provider = "CPUExecutionProvider" + + return ort.InferenceSession(path, providers=[provider], sess_options=sess_options) + + def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs): + """ + Save a model and its configuration file to a directory, so that it can be re-loaded using the + [`~optimum.onnxruntime.modeling_ort.ORTModel.from_pretrained`] class method. It will always save the + latest_model_name. 
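+        The file written is the one recorded in `self.latest_model_name` (defaults to `"model.onnx"`); if an
+        external-weights file (`ONNX_EXTERNAL_WEIGHTS_NAME`) exists next to it, that file is copied as well.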
+ + Arguments: + save_directory (`str` or `Path`): + Directory where to save the model file. + file_name(`str`, *optional*): + Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to save the + model with a different name. + """ + model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME + + src_path = self.model_save_dir.joinpath(self.latest_model_name) + dst_path = Path(save_directory).joinpath(model_file_name) + try: + shutil.copyfile(src_path, dst_path) + except shutil.SameFileError: + pass + + # copy external weights (for models >2GB) + src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) + if src_path.exists(): + dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME) + try: + shutil.copyfile(src_path, dst_path) + except shutil.SameFileError: + pass + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + **kwargs, + ): + """ + Save a model to a directory, so that it can be re-loaded using the [`~OnnxModel.from_pretrained`] class + method.: + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + """ + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + # saving model weights/files + self._save_pretrained(save_directory, **kwargs) + + @classmethod + @validate_hf_hub_args + def _from_pretrained( + cls, + model_id: Union[str, Path], + token: Optional[Union[bool, str, None]] = None, + revision: Optional[Union[str, None]] = None, + force_download: bool = False, + cache_dir: Optional[str] = None, + file_name: Optional[str] = None, + provider: Optional[str] = None, + sess_options: Optional["ort.SessionOptions"] = None, + **kwargs, + ): + """ + Load a model from a directory or the HF Hub. + + Arguments: + model_id (`str` or `Path`): + Directory from which to load + token (`str` or `bool`): + Is needed to load models from a private or gated repository + revision (`str`): + Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id + cache_dir (`Union[str, Path]`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + file_name(`str`): + Overwrites the default model file name from `"model.onnx"` to `file_name`. This allows you to load + different model files from the same repository or directory. + provider(`str`): + The ONNX runtime provider, e.g. `CPUExecutionProvider` or `CUDAExecutionProvider`. 
+ kwargs (`Dict`, *optional*): + kwargs will be passed to the model during initialization + """ + model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME + # load model from local directory + if os.path.isdir(model_id): + model = OnnxRuntimeModel.load_model( + Path(model_id, model_file_name).as_posix(), provider=provider, sess_options=sess_options + ) + kwargs["model_save_dir"] = Path(model_id) + # load model from hub + else: + # download model + model_cache_path = hf_hub_download( + repo_id=model_id, + filename=model_file_name, + token=token, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + ) + kwargs["model_save_dir"] = Path(model_cache_path).parent + kwargs["latest_model_name"] = Path(model_cache_path).name + model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options) + return cls(model=model, **kwargs) + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + model_id: Union[str, Path], + force_download: bool = True, + token: Optional[str] = None, + cache_dir: Optional[str] = None, + **model_kwargs, + ): + revision = None + if len(str(model_id).split("@")) == 2: + model_id, revision = model_id.split("@") + + return cls._from_pretrained( + model_id=model_id, + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + token=token, + **model_kwargs, + ) diff --git a/diffusers3/pipelines/pag/__init__.py b/diffusers3/pipelines/pag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d8842ce911752537d69835eade836f0584476161 --- /dev/null +++ b/diffusers3/pipelines/pag/__init__.py @@ -0,0 +1,69 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_pag_controlnet_sd"] = ["StableDiffusionControlNetPAGPipeline"] + _import_structure["pipeline_pag_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPAGPipeline"] + _import_structure["pipeline_pag_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetPAGImg2ImgPipeline"] + _import_structure["pipeline_pag_hunyuandit"] = ["HunyuanDiTPAGPipeline"] + _import_structure["pipeline_pag_kolors"] = ["KolorsPAGPipeline"] + _import_structure["pipeline_pag_pixart_sigma"] = ["PixArtSigmaPAGPipeline"] + _import_structure["pipeline_pag_sd"] = ["StableDiffusionPAGPipeline"] + _import_structure["pipeline_pag_sd_3"] = ["StableDiffusion3PAGPipeline"] + _import_structure["pipeline_pag_sd_animatediff"] = ["AnimateDiffPAGPipeline"] + _import_structure["pipeline_pag_sd_xl"] = ["StableDiffusionXLPAGPipeline"] + _import_structure["pipeline_pag_sd_xl_img2img"] = ["StableDiffusionXLPAGImg2ImgPipeline"] + _import_structure["pipeline_pag_sd_xl_inpaint"] = ["StableDiffusionXLPAGInpaintPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * 
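+        # The dummy objects imported above raise an informative ImportError when instantiated if torch or
+        # transformers is unavailable.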
+ else: + from .pipeline_pag_controlnet_sd import StableDiffusionControlNetPAGPipeline + from .pipeline_pag_controlnet_sd_xl import StableDiffusionXLControlNetPAGPipeline + from .pipeline_pag_controlnet_sd_xl_img2img import StableDiffusionXLControlNetPAGImg2ImgPipeline + from .pipeline_pag_hunyuandit import HunyuanDiTPAGPipeline + from .pipeline_pag_kolors import KolorsPAGPipeline + from .pipeline_pag_pixart_sigma import PixArtSigmaPAGPipeline + from .pipeline_pag_sd import StableDiffusionPAGPipeline + from .pipeline_pag_sd_3 import StableDiffusion3PAGPipeline + from .pipeline_pag_sd_animatediff import AnimateDiffPAGPipeline + from .pipeline_pag_sd_xl import StableDiffusionXLPAGPipeline + from .pipeline_pag_sd_xl_img2img import StableDiffusionXLPAGImg2ImgPipeline + from .pipeline_pag_sd_xl_inpaint import StableDiffusionXLPAGInpaintPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/pag/pag_utils.py b/diffusers3/pipelines/pag/pag_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..728f730c9904523a48c77643dac4f3ed6a114d32 --- /dev/null +++ b/diffusers3/pipelines/pag/pag_utils.py @@ -0,0 +1,237 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from typing import Dict, List, Tuple, Union + +import torch +import torch.nn as nn + +from ...models.attention_processor import ( + Attention, + AttentionProcessor, + PAGCFGIdentitySelfAttnProcessor2_0, + PAGIdentitySelfAttnProcessor2_0, +) +from ...utils import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class PAGMixin: + r"""Mixin class for [Pertubed Attention Guidance](https://arxiv.org/abs/2403.17377v1).""" + + def _set_pag_attn_processor(self, pag_applied_layers, do_classifier_free_guidance): + r""" + Set the attention processor for the PAG layers. + """ + pag_attn_processors = self._pag_attn_processors + if pag_attn_processors is None: + raise ValueError( + "No PAG attention processors have been set. Set the attention processors by calling `set_pag_applied_layers` and passing the relevant parameters." + ) + + pag_attn_proc = pag_attn_processors[0] if do_classifier_free_guidance else pag_attn_processors[1] + + if hasattr(self, "unet"): + model: nn.Module = self.unet + else: + model: nn.Module = self.transformer + + def is_self_attn(module: nn.Module) -> bool: + r""" + Check if the module is self-attention module based on its name. 
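+            (The check itself relies on the module type and its `is_cross_attention` flag; matching against the
+            user-provided layer *names* happens separately via `re.search` below.)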
+ """ + return isinstance(module, Attention) and not module.is_cross_attention + + def is_fake_integral_match(layer_id, name): + layer_id = layer_id.split(".")[-1] + name = name.split(".")[-1] + return layer_id.isnumeric() and name.isnumeric() and layer_id == name + + for layer_id in pag_applied_layers: + # for each PAG layer input, we find corresponding self-attention layers in the unet model + target_modules = [] + + for name, module in model.named_modules(): + # Identify the following simple cases: + # (1) Self Attention layer existing + # (2) Whether the module name matches pag layer id even partially + # (3) Make sure it's not a fake integral match if the layer_id ends with a number + # For example, blocks.1, blocks.10 should be differentiable if layer_id="blocks.1" + if ( + is_self_attn(module) + and re.search(layer_id, name) is not None + and not is_fake_integral_match(layer_id, name) + ): + logger.debug(f"Applying PAG to layer: {name}") + target_modules.append(module) + + if len(target_modules) == 0: + raise ValueError(f"Cannot find PAG layer to set attention processor for: {layer_id}") + + for module in target_modules: + module.processor = pag_attn_proc + + def _get_pag_scale(self, t): + r""" + Get the scale factor for the perturbed attention guidance at timestep `t`. + """ + + if self.do_pag_adaptive_scaling: + signal_scale = self.pag_scale - self.pag_adaptive_scale * (1000 - t) + if signal_scale < 0: + signal_scale = 0 + return signal_scale + else: + return self.pag_scale + + def _apply_perturbed_attention_guidance(self, noise_pred, do_classifier_free_guidance, guidance_scale, t): + r""" + Apply perturbed attention guidance to the noise prediction. + + Args: + noise_pred (torch.Tensor): The noise prediction tensor. + do_classifier_free_guidance (bool): Whether to apply classifier-free guidance. + guidance_scale (float): The scale factor for the guidance term. + t (int): The current time step. + + Returns: + torch.Tensor: The updated noise prediction tensor after applying perturbed attention guidance. + """ + pag_scale = self._get_pag_scale(t) + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3) + noise_pred = ( + noise_pred_uncond + + guidance_scale * (noise_pred_text - noise_pred_uncond) + + pag_scale * (noise_pred_text - noise_pred_perturb) + ) + else: + noise_pred_text, noise_pred_perturb = noise_pred.chunk(2) + noise_pred = noise_pred_text + pag_scale * (noise_pred_text - noise_pred_perturb) + return noise_pred + + def _prepare_perturbed_attention_guidance(self, cond, uncond, do_classifier_free_guidance): + """ + Prepares the perturbed attention guidance for the PAG model. + + Args: + cond (torch.Tensor): The conditional input tensor. + uncond (torch.Tensor): The unconditional input tensor. + do_classifier_free_guidance (bool): Flag indicating whether to perform classifier-free guidance. + + Returns: + torch.Tensor: The prepared perturbed attention guidance tensor. + """ + + cond = torch.cat([cond] * 2, dim=0) + + if do_classifier_free_guidance: + cond = torch.cat([uncond, cond], dim=0) + return cond + + def set_pag_applied_layers( + self, + pag_applied_layers: Union[str, List[str]], + pag_attn_processors: Tuple[AttentionProcessor, AttentionProcessor] = ( + PAGCFGIdentitySelfAttnProcessor2_0(), + PAGIdentitySelfAttnProcessor2_0(), + ), + ): + r""" + Set the the self-attention layers to apply PAG. Raise ValueError if the input is invalid. 
+ + Args: + pag_applied_layers (`str` or `List[str]`): + One or more strings identifying the layer names, or a simple regex for matching multiple layers, where + PAG is to be applied. A few ways of expected usage are as follows: + - Single layers specified as - "blocks.{layer_index}" + - Multiple layers as a list - ["blocks.{layers_index_1}", "blocks.{layer_index_2}", ...] + - Multiple layers as a block name - "mid" + - Multiple layers as regex - "blocks.({layer_index_1}|{layer_index_2})" + pag_attn_processors: + (`Tuple[AttentionProcessor, AttentionProcessor]`, defaults to `(PAGCFGIdentitySelfAttnProcessor2_0(), + PAGIdentitySelfAttnProcessor2_0())`): A tuple of two attention processors. The first attention + processor is for PAG with Classifier-free guidance enabled (conditional and unconditional). The second + attention processor is for PAG with CFG disabled (unconditional only). + """ + + if not hasattr(self, "_pag_attn_processors"): + self._pag_attn_processors = None + + if not isinstance(pag_applied_layers, list): + pag_applied_layers = [pag_applied_layers] + if pag_attn_processors is not None: + if not isinstance(pag_attn_processors, tuple) or len(pag_attn_processors) != 2: + raise ValueError("Expected a tuple of two attention processors") + + for i in range(len(pag_applied_layers)): + if not isinstance(pag_applied_layers[i], str): + raise ValueError( + f"Expected either a string or a list of string but got type {type(pag_applied_layers[i])}" + ) + + self.pag_applied_layers = pag_applied_layers + self._pag_attn_processors = pag_attn_processors + + @property + def pag_scale(self) -> float: + r"""Get the scale factor for the perturbed attention guidance.""" + return self._pag_scale + + @property + def pag_adaptive_scale(self) -> float: + r"""Get the adaptive scale factor for the perturbed attention guidance.""" + return self._pag_adaptive_scale + + @property + def do_pag_adaptive_scaling(self) -> bool: + r"""Check if the adaptive scaling is enabled for the perturbed attention guidance.""" + return self._pag_adaptive_scale > 0 and self._pag_scale > 0 and len(self.pag_applied_layers) > 0 + + @property + def do_perturbed_attention_guidance(self) -> bool: + r"""Check if the perturbed attention guidance is enabled.""" + return self._pag_scale > 0 and len(self.pag_applied_layers) > 0 + + @property + def pag_attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of PAG attention processors: A dictionary contains all PAG attention processors used in the model + with the key as the name of the layer. + """ + + if self._pag_attn_processors is None: + return {} + + valid_attn_processors = {x.__class__ for x in self._pag_attn_processors} + + processors = {} + # We could have iterated through the self.components.items() and checked if a component is + # `ModelMixin` subclassed but that can include a VAE too. 
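+        # Illustration (hypothetical layer name): once `_set_pag_attn_processor` has swapped in the
+        # processors for the layers registered via `set_pag_applied_layers("mid")`, this property would
+        # return something like
+        #     {"mid_block.attentions.0.transformer_blocks.0.attn1.processor": PAGCFGIdentitySelfAttnProcessor2_0()}
+        # i.e. only the processors that were swapped in for PAG.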
+ if hasattr(self, "unet"): + denoiser_module = self.unet + elif hasattr(self, "transformer"): + denoiser_module = self.transformer + else: + raise ValueError("No denoiser module found.") + + for name, proc in denoiser_module.attn_processors.items(): + if proc.__class__ in valid_attn_processors: + processors[name] = proc + + return processors diff --git a/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd.py b/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd.py new file mode 100644 index 0000000000000000000000000000000000000000..9bac883b5c999a2d12a38621f8ca7eebdd8c067f --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd.py @@ -0,0 +1,1329 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..controlnet.multicontrolnet import MultiControlNetModel +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .pag_utils import PAGMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import AutoPipelineForText2Image, ControlNetModel, UniPCMultistepScheduler + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... 
) + >>> image = np.array(image) + + >>> # get canny image + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # load control net and stable diffusion v1-5 + >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, enable_pag=True + ... ) + + >>> # speed up diffusion process with faster scheduler and memory optimization + >>> # remove following line if xformers is not installed + >>> pipe.enable_xformers_memory_efficient_attention() + + >>> pipe.enable_model_cpu_offload() + + >>> # generate image + >>> generator = torch.manual_seed(0) + >>> image = pipe( + ... "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting", + ... guidance_scale=7.5, + ... generator=generator, + ... image=canny_image, + ... pag_scale=10, + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionControlNetPAGPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, + PAGMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
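+        pag_applied_layers (`str` or `List[str]`, *optional*, defaults to `"mid"`):
+            Self-attention layer names (or simple regexes) to which perturbed attention guidance is applied at
+            init time; see `PAGMixin.set_pag_applied_layers` for the accepted formats.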
+ """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + pag_applied_layers: Union[str, List[str]] = "mid", + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
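+                # Worked example: `clip_skip=1` selects hidden_states[-2] (the pre-final encoder layer),
+                # `clip_skip=2` selects hidden_states[-3], and so on; hidden_states[-1] is the last encoder
+                # layer, which (after the final LayerNorm) is what the default `clip_skip=None` path uses.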
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. 
[[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + transposed_image = [list(t) for t in zip(*image)] + if len(transposed_image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: if you pass`image` as a list of list, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets." + ) + for image_ in transposed_image: + self.check_image(image_, prompt, prompt_embeds) + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + else: + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError( + "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. " + "The conditioning scale must be fixed across the batch." + ) + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." + ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. 
Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
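+    # When perturbed attention guidance is also enabled, the batch carries three chunks
+    # (unconditional, text, perturbed) and `PAGMixin._apply_perturbed_attention_guidance` recombines them as
+    #     noise_pred = uncond + guidance_scale * (text - uncond) + pag_scale * (text - perturb)
+    # so `guidance_scale = 1` disables the CFG term while the PAG term can still be applied.
+    # With `pag_adaptive_scale > 0` the PAG weight decays over the schedule:
+    #     pag_scale_t = max(pag_scale - pag_adaptive_scale * (1000 - t), 0)
+    # e.g. pag_scale=3.0 and pag_adaptive_scale=0.002 give 3.0 at t=1000 and 1.4 at t=200.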
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single + ControlNet, each will be paired with each prompt in the `prompt` list. This also applies to multiple + ControlNets, where a list of image lists can be passed to batch for each prompt and each ControlNet. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. 
+ timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + The ControlNet encoder tries to recognize the content of the input image even if you remove all + prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. 
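+
+        Note: when both classifier-free guidance and perturbed attention guidance are active, prompt
+        embeddings, ControlNet inputs and IP-Adapter embeddings are expanded to three copies per sample
+        (unconditional, conditional, perturbed), so each denoising step runs the UNet on a batch roughly
+        three times the nominal batch size.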
+ """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + image, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + global_pool_conditions = ( + controlnet.config.global_pool_conditions + if isinstance(controlnet, ControlNetModel) + else controlnet.nets[0].config.global_pool_conditions + ) + guess_mode = guess_mode or global_pool_conditions + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + height, width = image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + # Nested lists as ControlNet condition + if isinstance(image[0], list): + # Transpose the nested image list + image = [list(t) for t in zip(*image)] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=guess_mode, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + added_cond_kwargs = ( + {"image_embeds": ip_adapter_image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + controlnet_prompt_embeds = prompt_embeds + + # 7.2 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + images = image if isinstance(image, list) else [image] + for i, single_image in enumerate(images): + if self.do_classifier_free_guidance: + single_image = single_image.chunk(2)[0] + + if self.do_perturbed_attention_guidance: + single_image = self._prepare_perturbed_attention_guidance( + single_image, single_image, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + single_image = torch.cat([single_image] * 2) + single_image = single_image.to(device) + images[i] = single_image + + image = images if isinstance(image, list) else images[0] + + # 8. 
Denoising loop + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + control_model_input = latent_model_input + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=guess_mode, + return_dict=False, + ) + + if guess_mode and self.do_classifier_free_guidance: + # Inferred ControlNet only for the conditional batch. + # To apply the output of ControlNet to both the unconditional and conditional batches, + # add 0 to the unconditional batch to keep it unchanged. 
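+                        # Editor's illustrative note (not part of the upstream file): the two lines below
+                        # implement the "add 0 to the unconditional batch" described above by prepending an
+                        # all-zero copy to each residual, e.g. torch.cat([torch.zeros_like(d), d]), so only
+                        # the conditional half of the batch receives the ControlNet contribution.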
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] + mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd_xl.py b/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..247fc900a7b0dd5e3189f4ee2d83d227ce52b013 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd_xl.py @@ -0,0 +1,1612 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .pag_utils import PAGMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from ..controlnet.multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # !pip install opencv-python transformers accelerate + >>> from diffusers import AutoPipelineForText2Image, ControlNetModel, AutoencoderKL + >>> from diffusers.utils import load_image + >>> import numpy as np + >>> import torch + + >>> import cv2 + >>> from PIL import Image + + >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" + >>> negative_prompt = "low quality, bad quality, sketches" + + >>> # download an image + >>> image = load_image( + ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" + ... ) + + >>> # initialize the models and pipeline + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16 + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... 
) + >>> pipe.enable_model_cpu_offload() + + >>> # get canny image + >>> image = np.array(image) + >>> image = cv2.Canny(image, 100, 200) + >>> image = image[:, :, None] + >>> image = np.concatenate([image, image, image], axis=2) + >>> canny_image = Image.fromarray(image) + + >>> # generate image + >>> image = pipe( + ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image, pag_scale=0.3 + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLControlNetPAGPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, + PAGMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): + Second frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + tokenizer_2 ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the `unet` during the denoising process. If you set multiple + ControlNets as a list, the outputs from each ControlNet are added together to create one combined + additional conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings should always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to + watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no + watermarker is used. 
+ """ + + # leave controlnet out on purpose because it iterates with unet + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + pag_applied_layers: Union[str, List[str]] = "mid", # ["down.block_2", "up.block_1.attentions_0"], "mid" + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + image, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + negative_pooled_prompt_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image + def prepare_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
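+    # Editor's illustrative note (not part of the upstream file): the property below therefore reports CFG
+    # as active only when guidance_scale > 1 and the UNet has no time_cond_proj_dim, since guidance-distilled
+    # UNets (e.g. LCM-style) receive the scale through get_guidance_scale_embedding above instead of running
+    # a separate unconditional forward pass.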
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 1.0, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition to provide guidance to the `unet` for generation. If the type is + specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted + as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or + width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, + images must be passed as a list such that each element of the list can be correctly batched for input + to a single ControlNet. 
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` + and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, pooled text embeddings are generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt + weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input + argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set + the corresponding scale as a list. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the ControlNet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the ControlNet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). 
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be the same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during inference with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as the `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance.
If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned containing the output images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + image, + None, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + negative_pooled_prompt_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
Prepare image + if isinstance(controlnet, ControlNetModel): + image = self.prepare_image( + image=image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + height, width = image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + images = [] + + for image_ in image: + image_ = self.prepare_image( + image=image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + + images.append(image_) + + image = images + height, width = image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(image, list): + original_size = original_size or image[0].shape[-2:] + else: + original_size = original_size or image.shape[-2:] + target_size = target_size or (height, width) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + images = image if isinstance(image, list) else [image] + for i, single_image in enumerate(images): + if self.do_classifier_free_guidance: + single_image = single_image.chunk(2)[0] + + if self.do_perturbed_attention_guidance: + single_image = self._prepare_perturbed_attention_guidance( + single_image, single_image, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + single_image = torch.cat([single_image] * 2) + single_image = single_image.to(device) + images[i] = single_image + + image = images if isinstance(image, list) else images[0] + + if ip_adapter_image_embeds is not None: + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + add_text_embeds = self._prepare_perturbed_attention_guidance( + add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + add_time_ids = self._prepare_perturbed_attention_guidance( + add_time_ids, negative_add_time_ids, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = 
add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + is_unet_compiled = is_compiled_module(self.unet) + is_controlnet_compiled = is_compiled_module(self.controlnet) + is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Relevant thread: + # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 + if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: + torch._inductor.cudagraph_mark_step_begin() + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + control_model_input = latent_model_input + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=image, + conditioning_scale=cond_scale, + guess_mode=False, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = ip_adapter_image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, 
return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py b/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..66398483e046ee034e9c69a8ef92b6ac99c3a240 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py @@ -0,0 +1,1685 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from diffusers.utils.import_utils import is_invisible_watermark_available + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .pag_utils import PAGMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +from ..controlnet.multicontrolnet import MultiControlNetModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> # pip install accelerate transformers safetensors diffusers + + >>> import torch + >>> import numpy as np + >>> from PIL import Image + + >>> from transformers import DPTFeatureExtractor, DPTForDepthEstimation + >>> from diffusers import ControlNetModel, StableDiffusionXLControlNetPAGImg2ImgPipeline, AutoencoderKL + >>> from diffusers.utils import load_image + + + >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda") + >>> feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-hybrid-midas") + >>> controlnet = ControlNetModel.from_pretrained( + ... "diffusers/controlnet-depth-sdxl-1.0-small", + ... variant="fp16", + ... use_safetensors="True", + ... torch_dtype=torch.float16, + ... ) + >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16) + >>> pipe = StableDiffusionXLControlNetPAGImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... controlnet=controlnet, + ... vae=vae, + ... variant="fp16", + ... use_safetensors=True, + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... ) + >>> pipe.enable_model_cpu_offload() + + + >>> def get_depth_map(image): + ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda") + ... with torch.no_grad(), torch.autocast("cuda"): + ... depth_map = depth_estimator(image).predicted_depth + + ... 
depth_map = torch.nn.functional.interpolate( + ... depth_map.unsqueeze(1), + ... size=(1024, 1024), + ... mode="bicubic", + ... align_corners=False, + ... ) + ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + ... depth_map = (depth_map - depth_min) / (depth_max - depth_min) + ... image = torch.cat([depth_map] * 3, dim=1) + ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0] + ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8)) + ... return image + + + >>> prompt = "A robot, 4k photo" + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + ... "/kandinsky/cat.png" + ... ).resize((1024, 1024)) + >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization + >>> depth_image = get_depth_map(image) + + >>> images = pipe( + ... prompt, + ... image=image, + ... control_image=depth_image, + ... strength=0.99, + ... num_inference_steps=50, + ... controlnet_conditioning_scale=controlnet_conditioning_scale, + ... ).images + >>> images[0].save("robot_cat.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionXLControlNetPAGImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, + PAGMixin, +): + r""" + Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + Second frozen text-encoder.
Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets + as a list, the outputs from each ControlNet are added together to create one combined additional + conditioning. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
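+ pag_applied_layers (`str` or `List[str]`, *optional*, defaults to `"mid"`): + Name(s) of the UNet attention layer(s) on which perturbed attention guidance is applied, e.g. + `["mid"]` or `["down.block_1", "up.block_0.attentions_0"]`; the value is forwarded to + `set_pag_applied_layers` in `__init__`.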
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], + scheduler: KarrasDiffusionSchedulers, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + pag_applied_layers: Union[str, List[str]] = "mid", # ["mid"], ["down.block_1", "up.block_0.attentions_0"] + ): + super().__init__() + + if isinstance(controlnet, (list, tuple)): + controlnet = MultiControlNetModel(controlnet) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.control_image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False + ) + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
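+ + A minimal, illustrative sketch of how these embeddings are typically requested (assuming `pipe` is an + already-loaded instance of this pipeline; the prompt strings below are placeholders): + + ```py + >>> ( + ... prompt_embeds, + ... negative_prompt_embeds, + ... pooled_prompt_embeds, + ... negative_pooled_prompt_embeds, + ... ) = pipe.encode_prompt( + ... prompt="A robot, 4k photo", + ... negative_prompt="low quality", + ... do_classifier_free_guidance=True, + ... num_images_per_prompt=1, + ... ) + ```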
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
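+ # e.g. clip_skip=1 selects hidden_states[-3], one layer earlier than the default hidden_states[-2]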
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl_img2img.StableDiffusionXLControlNetImg2ImgPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + image, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + controlnet_conditioning_scale=1.0, + control_guidance_start=0.0, + control_guidance_end=1.0, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # `prompt` needs more sophisticated handling when there are multiple + # conditionings. + if isinstance(self.controlnet, MultiControlNetModel): + if isinstance(prompt, list): + logger.warning( + f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" + " prompts. The conditionings will be fixed across the prompts." 
+ ) + + # Check `image` + is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( + self.controlnet, torch._dynamo.eval_frame.OptimizedModule + ) + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + self.check_image(image, prompt, prompt_embeds) + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if not isinstance(image, list): + raise TypeError("For multiple controlnets: `image` must be type `list`") + + # When `image` is a nested list: + # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) + elif any(isinstance(i, list) for i in image): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif len(image) != len(self.controlnet.nets): + raise ValueError( + f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." + ) + + for image_ in image: + self.check_image(image_, prompt, prompt_embeds) + else: + assert False + + # Check `controlnet_conditioning_scale` + if ( + isinstance(self.controlnet, ControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, ControlNetModel) + ): + if not isinstance(controlnet_conditioning_scale, float): + raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") + elif ( + isinstance(self.controlnet, MultiControlNetModel) + or is_compiled + and isinstance(self.controlnet._orig_mod, MultiControlNetModel) + ): + if isinstance(controlnet_conditioning_scale, list): + if any(isinstance(i, list) for i in controlnet_conditioning_scale): + raise ValueError("A single batch of multiple conditionings are supported at the moment.") + elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( + self.controlnet.nets + ): + raise ValueError( + "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" + " the same length as the number of controlnets" + ) + else: + assert False + + if not isinstance(control_guidance_start, (tuple, list)): + control_guidance_start = [control_guidance_start] + + if not isinstance(control_guidance_end, (tuple, list)): + control_guidance_end = [control_guidance_end] + + if len(control_guidance_start) != len(control_guidance_end): + raise ValueError( + f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." + ) + + if isinstance(self.controlnet, MultiControlNetModel): + if len(control_guidance_start) != len(self.controlnet.nets): + raise ValueError( + f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." + ) + + for start, end in zip(control_guidance_start, control_guidance_end): + if start >= end: + raise ValueError( + f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." 
+ ) + if start < 0.0: + raise ValueError(f"control guidance start: {start} can't be smaller than 0.") + if end > 1.0: + raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image + def check_image(self, image, prompt, prompt_embeds): + image_is_pil = isinstance(image, PIL.Image.Image) + image_is_tensor = isinstance(image, torch.Tensor) + image_is_np = isinstance(image, np.ndarray) + image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) + image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) + image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) + + if ( + not image_is_pil + and not image_is_tensor + and not image_is_np + and not image_is_pil_list + and not image_is_tensor_list + and not image_is_np_list + ): + raise TypeError( + f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" + ) + + if image_is_pil: + image_batch_size = 1 + else: + image_batch_size = len(image) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if image_batch_size != 1 and image_batch_size != prompt_batch_size: + raise ValueError( + f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image + def prepare_control_image( + self, + image, + width, + height, + batch_size, + num_images_per_prompt, + device, + dtype, + do_classifier_free_guidance=False, + guess_mode=False, + ): + image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + image_batch_size = image.shape[0] + + if image_batch_size == 1: + repeat_by = batch_size + else: + # image batch size is the same as prompt batch size + repeat_by = num_images_per_prompt + + image = image.repeat_interleave(repeat_by, dim=0) + + image = image.to(device=device, dtype=dtype) + + if do_classifier_free_guidance and not guess_mode: + image = torch.cat([image] * 2) + + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
+ ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
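+ # As a concrete illustration (a sketch of what the denoising loop below does when CFG is active):
+ # the batched prediction is split and recombined as
+ #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ # so `guidance_scale = 1` reduces to the plain text-conditioned prediction, i.e. no guidance.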
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + control_image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + strength: float = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_conditioning_scale: Union[float, List[float]] = 0.8, + guess_mode: bool = False, + control_guidance_start: Union[float, List[float]] = 0.0, + control_guidance_end: Union[float, List[float]] = 1.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The initial image will be used as the starting point for the image generation process. Can also accept + image latents as `image`, if passing latents directly, it will not be encoded again. + control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: + `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): + The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If + the type is specified as `torch.Tensor`, it is passed to ControlNet as is. 
`PIL.Image.Image` can also + be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height + and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in + init, images must be passed as a list such that each element of the list can be correctly batched for + input to a single controlnet. + height (`int`, *optional*, defaults to the size of control_image): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to the size of control_image): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added + to the residual in the original unet. If multiple ControlNets are specified in init, you can set the + corresponding scale as a list. + guess_mode (`bool`, *optional*, defaults to `False`): + In this mode, the ControlNet encoder will try best to recognize the content of the input image even if + you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. + control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): + The percentage of total steps at which the controlnet starts applying. + control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): + The percentage of total steps at which the controlnet stops applying. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. 
Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. 
with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple` containing the output images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet + + # align format for control guidance + if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): + control_guidance_start = len(control_guidance_end) * [control_guidance_start] + elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): + control_guidance_end = len(control_guidance_start) * [control_guidance_end] + elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): + mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 + control_guidance_start, control_guidance_end = ( + mult * [control_guidance_start], + mult * [control_guidance_end], + ) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + control_image, + strength, + num_inference_steps, + None, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + controlnet_conditioning_scale, + control_guidance_start, + control_guidance_end, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): + controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) + + # 3.1 Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + prompt_2, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare image and controlnet_conditioning_image + image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) + + if isinstance(controlnet, ControlNetModel): + control_image = self.prepare_control_image( + image=control_image, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + height, width = control_image.shape[-2:] + elif isinstance(controlnet, MultiControlNetModel): + control_images = [] + + for control_image_ in control_image: + control_image_ = self.prepare_control_image( + image=control_image_, + width=width, + height=height, + batch_size=batch_size * num_images_per_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + dtype=controlnet.dtype, + do_classifier_free_guidance=self.do_classifier_free_guidance, + guess_mode=False, + ) + + control_images.append(control_image_) + + control_image = control_images + height, width = control_image[0].shape[-2:] + else: + assert False + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + self._num_timesteps = len(timesteps) + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + True, + ) + + # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Create tensor stating which controlnets to keep + controlnet_keep = [] + for i in range(len(timesteps)): + keeps = [ + 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) + for s, e in zip(control_guidance_start, control_guidance_end) + ] + controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) + + # 7.2 Prepare added time ids & embeddings + if isinstance(control_image, list): + original_size = original_size or control_image[0].shape[-2:] + else: + original_size = original_size or control_image.shape[-2:] + target_size = target_size or (height, width) + + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + add_text_embeds = pooled_prompt_embeds + + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + control_images = control_image if isinstance(control_image, list) else [control_image] + for i, single_image in enumerate(control_images): + if self.do_classifier_free_guidance: + single_image = single_image.chunk(2)[0] + + if self.do_perturbed_attention_guidance: + single_image = self._prepare_perturbed_attention_guidance( + single_image, single_image, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + single_image = torch.cat([single_image] * 2) + single_image = single_image.to(device) + control_images[i] = single_image + + control_image = control_images if isinstance(control_image, list) else control_images[0] + + if ip_adapter_image_embeds is not None: + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + add_text_embeds = self._prepare_perturbed_attention_guidance( + add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + add_time_ids = self._prepare_perturbed_attention_guidance( + add_time_ids, add_neg_time_ids, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, 
add_text_embeds], dim=0) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + controlnet_prompt_embeds = prompt_embeds + controlnet_added_cond_kwargs = added_cond_kwargs + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # controlnet(s) inference + control_model_input = latent_model_input + + if isinstance(controlnet_keep[i], list): + cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] + else: + controlnet_cond_scale = controlnet_conditioning_scale + if isinstance(controlnet_cond_scale, list): + controlnet_cond_scale = controlnet_cond_scale[0] + cond_scale = controlnet_cond_scale * controlnet_keep[i] + down_block_res_samples, mid_block_res_sample = self.controlnet( + control_model_input, + t, + encoder_hidden_states=controlnet_prompt_embeds, + controlnet_cond=control_image, + conditioning_scale=cond_scale, + guess_mode=False, + added_cond_kwargs=controlnet_added_cond_kwargs, + return_dict=False, + ) + + if ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = ip_adapter_image_embeds + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + down_block_additional_residuals=down_block_res_samples, + mid_block_additional_residual=mid_block_res_sample, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids 
= callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # If we do sequential model offloading, let's offload unet and controlnet + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + self.controlnet.to("cpu") + torch.cuda.empty_cache() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_hunyuandit.py b/diffusers3/pipelines/pag/pipeline_pag_hunyuandit.py new file mode 100644 index 0000000000000000000000000000000000000000..63126cc5aae9bdf446aaf695ba6db07dabb891f0 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_hunyuandit.py @@ -0,0 +1,953 @@ +# Copyright 2024 HunyuanDiT Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from transformers import BertModel, BertTokenizer, CLIPImageProcessor, MT5Tokenizer, T5EncoderModel + +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, HunyuanDiT2DModel +from ...models.attention_processor import PAGCFGHunyuanAttnProcessor2_0, PAGHunyuanAttnProcessor2_0 +from ...models.embeddings import get_2d_rotary_pos_embed +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import DDPMScheduler +from ...utils import ( + is_torch_xla_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pag_utils import PAGMixin + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> import torch + >>> from diffusers import AutoPipelineForText2Image + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... pag_applied_layers=[14], + ... ).to("cuda") + + >>> # prompt = "an astronaut riding a horse" + >>> prompt = "ไธ€ไธชๅฎ‡่ˆชๅ‘˜ๅœจ้ช‘้ฉฌ" + >>> image = pipe(prompt, guidance_scale=4, pag_scale=3).images[0] + ``` +""" + +STANDARD_RATIO = np.array( + [ + 1.0, # 1:1 + 4.0 / 3.0, # 4:3 + 3.0 / 4.0, # 3:4 + 16.0 / 9.0, # 16:9 + 9.0 / 16.0, # 9:16 + ] +) +STANDARD_SHAPE = [ + [(1024, 1024), (1280, 1280)], # 1:1 + [(1024, 768), (1152, 864), (1280, 960)], # 4:3 + [(768, 1024), (864, 1152), (960, 1280)], # 3:4 + [(1280, 768)], # 16:9 + [(768, 1280)], # 9:16 +] +STANDARD_AREA = [np.array([w * h for w, h in shapes]) for shapes in STANDARD_SHAPE] +SUPPORTED_SHAPE = [ + (1024, 1024), + (1280, 1280), # 1:1 + (1024, 768), + (1152, 864), + (1280, 960), # 4:3 + (768, 1024), + (864, 1152), + (960, 1280), # 3:4 + (1280, 768), # 16:9 + (768, 1280), # 9:16 +] + + +def map_to_standard_shapes(target_width, target_height): + target_ratio = target_width / target_height + closest_ratio_idx = np.argmin(np.abs(STANDARD_RATIO - target_ratio)) + closest_area_idx = np.argmin(np.abs(STANDARD_AREA[closest_ratio_idx] - target_width * target_height)) + width, height = STANDARD_SHAPE[closest_ratio_idx][closest_area_idx] + return width, height + + +def get_resize_crop_region_for_grid(src, tgt_size): + th = tw = tgt_size + h, w = src + + r = h / w + + # resize + if r > 1: + resize_height = th + resize_width = int(round(th / h * w)) + else: + resize_width = tw + resize_height = int(round(tw / w * h)) + + crop_top = int(round((th - resize_height) / 2.0)) + crop_left = int(round((tw - resize_width) / 2.0)) + + return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class HunyuanDiTPAGPipeline(DiffusionPipeline, PAGMixin): + r""" + Pipeline for English/Chinese-to-image generation using HunyuanDiT and [Perturbed Attention + Guidance](https://huggingface.co/docs/diffusers/en/using-diffusers/pag). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + HunyuanDiT uses two text encoders: [mT5](https://huggingface.co/google/mt5-base) and [bilingual CLIP](fine-tuned by + ourselves) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. We use + `sdxl-vae-fp16-fix`. + text_encoder (Optional[`~transformers.BertModel`, `~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + HunyuanDiT uses a fine-tuned [bilingual CLIP]. + tokenizer (Optional[`~transformers.BertTokenizer`, `~transformers.CLIPTokenizer`]): + A `BertTokenizer` or `CLIPTokenizer` to tokenize text. + transformer ([`HunyuanDiT2DModel`]): + The HunyuanDiT model designed by Tencent Hunyuan. + text_encoder_2 (`T5EncoderModel`): + The mT5 embedder. Specifically, it is 't5-v1_1-xxl'. + tokenizer_2 (`MT5Tokenizer`): + The tokenizer for the mT5 embedder. + scheduler ([`DDPMScheduler`]): + A scheduler to be used in combination with HunyuanDiT to denoise the encoded image latents. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = [ + "safety_checker", + "feature_extractor", + "text_encoder_2", + "tokenizer_2", + "text_encoder", + "tokenizer", + ] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "prompt_embeds_2", + "negative_prompt_embeds_2", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: BertModel, + tokenizer: BertTokenizer, + transformer: HunyuanDiT2DModel, + scheduler: DDPMScheduler, + safety_checker: Optional[StableDiffusionSafetyChecker] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + requires_safety_checker: bool = True, + text_encoder_2: Optional[T5EncoderModel] = None, + tokenizer_2: Optional[MT5Tokenizer] = None, + pag_applied_layers: Union[str, List[str]] = "blocks.1", # "blocks.16.attn1", "blocks.16", "16", 16 + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + text_encoder_2=text_encoder_2, + ) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + self.set_pag_applied_layers( + pag_applied_layers, pag_attn_processors=(PAGCFGHunyuanAttnProcessor2_0(), PAGHunyuanAttnProcessor2_0()) + ) + + # Copied from diffusers.pipelines.hunyuandit.pipeline_hunyuandit.HunyuanDiTPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + device: torch.device = None, + dtype: torch.dtype = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + max_sequence_length: Optional[int] = None, + text_encoder_index: int = 0, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + dtype (`torch.dtype`): + torch dtype + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds` is passed directly. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. 
+ max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. + text_encoder_index (`int`, *optional*): + Index of the text encoder to use. `0` for clip and `1` for T5. + """ + if dtype is None: + if self.text_encoder_2 is not None: + dtype = self.text_encoder_2.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + if device is None: + device = self._execution_device + + tokenizers = [self.tokenizer, self.tokenizer_2] + text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = tokenizers[text_encoder_index] + text_encoder = text_encoders[text_encoder_index] + + if max_sequence_length is None: + if text_encoder_index == 0: + max_length = 77 + if text_encoder_index == 1: + max_length = 256 + else: + max_length = max_sequence_length + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask.to(device) + prompt_embeds = text_encoder( + text_input_ids.to(device), + attention_mask=prompt_attention_mask, + ) + prompt_embeds = prompt_embeds[0] + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_attention_mask = uncond_input.attention_mask.to(device) + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + attention_mask=negative_prompt_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.hunyuandit.pipeline_hunyuandit.HunyuanDiTPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + prompt_embeds_2=None, + negative_prompt_embeds_2=None, + prompt_attention_mask_2=None, + negative_prompt_attention_mask_2=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is None and prompt_embeds_2 is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if prompt_embeds_2 is not None and prompt_attention_mask_2 is None: + raise ValueError("Must provide `prompt_attention_mask_2` when specifying `prompt_embeds_2`.") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if negative_prompt_embeds_2 is not None and negative_prompt_attention_mask_2 is None: + raise ValueError( + "Must provide `negative_prompt_attention_mask_2` when specifying `negative_prompt_embeds_2`." 
+ ) + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_embeds_2 is not None and negative_prompt_embeds_2 is not None: + if prompt_embeds_2.shape != negative_prompt_embeds_2.shape: + raise ValueError( + "`prompt_embeds_2` and `negative_prompt_embeds_2` must have the same shape when passed directly, but" + f" got: `prompt_embeds_2` {prompt_embeds_2.shape} != `negative_prompt_embeds_2`" + f" {negative_prompt_embeds_2.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
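+ # As a concrete illustration (a sketch mirroring the denoising loop of this pipeline): with CFG
+ # active the prediction is combined as
+ #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ # and, when `guidance_rescale > 0`, it is presumably passed through `rescale_noise_cfg` defined
+ # above to counter overexposure.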
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_2: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_2: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + prompt_attention_mask_2: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask_2: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = (1024, 1024), + target_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + use_resolution_binning: bool = True, + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + The call function to the pipeline for generation with HunyuanDiT. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`): + The height in pixels of the generated image. + width (`int`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not + provided, text embeddings are generated from the `prompt` input argument. + prompt_embeds_2 (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + negative_prompt_embeds_2 (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds` is passed directly. + prompt_attention_mask_2 (`torch.Tensor`, *optional*): + Attention mask for the prompt. Required when `prompt_embeds_2` is passed directly. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. + negative_prompt_attention_mask_2 (`torch.Tensor`, *optional*): + Attention mask for the negative prompt. Required when `negative_prompt_embeds_2` is passed directly. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback_on_step_end (`Callable[[int, int, Dict], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A callback function or a list of callback functions to be called at the end of each denoising step. + callback_on_step_end_tensor_inputs (`List[str]`, *optional*): + A list of tensor inputs that should be passed to the callback function. If not defined, all tensor + inputs will be passed. + guidance_rescale (`float`, *optional*, defaults to 0.0): + Rescale the noise_cfg according to `guidance_rescale`. Based on findings of [Common Diffusion Noise + Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + original_size (`Tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): + The original size of the image. Used to calculate the time ids. + target_size (`Tuple[int, int]`, *optional*): + The target size of the image. Used to calculate the time ids. + crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to `(0, 0)`): + The top left coordinates of the crop. Used to calculate the time ids. + use_resolution_binning (`bool`, *optional*, defaults to `True`): + Whether to use resolution binning or not. If `True`, the input resolution will be mapped to the closest + standard resolution. Supported resolutions are 1024x1024, 1280x1280, 1024x768, 1152x864, 1280x960, + 768x1024, 864x1152, 960x1280, 1280x768, and 768x1280. It is recommended to set this to `True`. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + height = int((height // 16) * 16) + width = int((width // 16) * 16) + + if use_resolution_binning and (height, width) not in SUPPORTED_SHAPE: + width, height = map_to_standard_shapes(width, height) + height = int(height) + width = int(width) + logger.warning(f"Reshaped to (height, width)=({height}, {width}), Supported shapes are {SUPPORTED_SHAPE}") + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt=prompt, + device=device, + dtype=self.transformer.dtype, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + max_sequence_length=77, + text_encoder_index=0, + ) + ( + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + ) = self.encode_prompt( + prompt=prompt, + device=device, + dtype=self.transformer.dtype, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds_2, + negative_prompt_embeds=negative_prompt_embeds_2, + prompt_attention_mask=prompt_attention_mask_2, + negative_prompt_attention_mask=negative_prompt_attention_mask_2, + max_sequence_length=256, + text_encoder_index=1, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Create image_rotary_emb, style embedding & time ids + grid_height = height // 8 // self.transformer.config.patch_size + grid_width = width // 8 // self.transformer.config.patch_size + base_size = 512 // 8 // self.transformer.config.patch_size + grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size) + image_rotary_emb = get_2d_rotary_pos_embed( + self.transformer.inner_dim // self.transformer.num_heads, grid_crops_coords, (grid_height, grid_width) + ) + + style = torch.tensor([0], device=device) + + target_size = target_size or (height, width) + add_time_ids = list(original_size + target_size + crops_coords_top_left) + add_time_ids = torch.tensor([add_time_ids], dtype=prompt_embeds.dtype) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + prompt_attention_mask = self._prepare_perturbed_attention_guidance( + prompt_attention_mask, negative_prompt_attention_mask, self.do_classifier_free_guidance + ) + prompt_embeds_2 = self._prepare_perturbed_attention_guidance( + prompt_embeds_2, negative_prompt_embeds_2, self.do_classifier_free_guidance + ) + prompt_attention_mask_2 = self._prepare_perturbed_attention_guidance( + prompt_attention_mask_2, negative_prompt_attention_mask_2, self.do_classifier_free_guidance + ) + add_time_ids = torch.cat([add_time_ids] * 3, dim=0) + style = torch.cat([style] * 3, dim=0) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) + prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) + prompt_attention_mask_2 = torch.cat([negative_prompt_attention_mask_2, prompt_attention_mask_2]) + add_time_ids = torch.cat([add_time_ids] * 2, dim=0) + style = torch.cat([style] * 2, dim=0) + + prompt_embeds = prompt_embeds.to(device=device) + prompt_attention_mask = prompt_attention_mask.to(device=device) + prompt_embeds_2 = prompt_embeds_2.to(device=device) + prompt_attention_mask_2 = prompt_attention_mask_2.to(device=device) + add_time_ids = add_time_ids.to(dtype=prompt_embeds.dtype, device=device).repeat( + batch_size * num_images_per_prompt, 1 + ) + style = style.to(device=device).repeat(batch_size * num_images_per_prompt) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.transformer.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input + t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( + dtype=latent_model_input.dtype + ) + + # predict the noise residual + noise_pred = self.transformer( + latent_model_input, + t_expand, + encoder_hidden_states=prompt_embeds, + text_embedding_mask=prompt_attention_mask, + encoder_hidden_states_t5=prompt_embeds_2, + text_embedding_mask_t5=prompt_attention_mask_2, + image_meta_size=add_time_ids, + style=style, + image_rotary_emb=image_rotary_emb, + return_dict=False, + )[0] + + noise_pred, _ = noise_pred.chunk(2, dim=1) + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + prompt_embeds_2 = callback_outputs.pop("prompt_embeds_2", prompt_embeds_2) + negative_prompt_embeds_2 = callback_outputs.pop( + "negative_prompt_embeds_2", negative_prompt_embeds_2 + ) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # 9. 
Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.transformer.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/pag/pipeline_pag_kolors.py b/diffusers3/pipelines/pag/pipeline_pag_kolors.py new file mode 100644 index 0000000000000000000000000000000000000000..3255bfdfc85f4dceec8259fbfa9191db30b730e7 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_kolors.py @@ -0,0 +1,1136 @@ +# Copyright 2024 Stability AI, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import is_torch_xla_available, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..kolors.pipeline_output import KolorsPipelineOutput +from ..kolors.text_encoder import ChatGLMModel +from ..kolors.tokenizer import ChatGLMTokenizer +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pag_utils import PAGMixin + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForText2Image + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "Kwai-Kolors/Kolors-diffusers", + ... variant="fp16", + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... pag_applied_layers=["down.block_2.attentions_1", "up.block_0.attentions_1"], + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = ( + ... "A photo of a ladybug, macro, zoom, high quality, film, holding a wooden sign with the text 'KOLORS'" + ... 
) + >>> image = pipe(prompt, guidance_scale=5.5, pag_scale=1.5).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class KolorsPAGPipeline( + DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin, PAGMixin +): + r""" + Pipeline for text-to-image generation using Kolors. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`ChatGLMModel`]): + Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b). + tokenizer (`ChatGLMTokenizer`): + Tokenizer of class + [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"False"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `Kwai-Kolors/Kolors-diffusers`. + pag_applied_layers (`str` or `List[str]``, *optional*, defaults to `"mid"`): + Set the transformer attention layers where to apply the perturbed attention guidance. Can be a string or a + list of strings with "down", "mid", "up", a whole transformer block or specific transformer block attention + layers, e.g.: + ["mid"] ["down", "mid"] ["down", "mid", "up.block_1"] ["down", "mid", "up.block_1.attentions_0", + "up.block_1.attentions_1"] + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = [ + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: ChatGLMModel, + tokenizer: ChatGLMTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = False, + pag_applied_layers: Union[str, List[str]] = "mid", + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.kolors.pipeline_kolors.KolorsPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt=None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: 
Optional[torch.Tensor] = None, + max_sequence_length: int = 256, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. 
+ """ + # from IPython import embed; embed(); exit() + device = device or self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer] + text_encoders = [self.text_encoder] + + if prompt_embeds is None: + prompt_embeds_list = [] + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_tensors="pt", + ).to(device) + output = text_encoder( + input_ids=text_inputs["input_ids"], + attention_mask=text_inputs["attention_mask"], + position_ids=text_inputs["position_ids"], + output_hidden_states=True, + ) + + # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] + # clone to have a contiguous tensor + prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() + # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] + pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = prompt_embeds_list[0] + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + negative_prompt_embeds_list = [] + + for tokenizer, text_encoder in zip(tokenizers, text_encoders): + uncond_input = tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_tensors="pt", + ).to(device) + output = text_encoder( + input_ids=uncond_input["input_ids"], + attention_mask=uncond_input["attention_mask"], + position_ids=uncond_input["position_ids"], + output_hidden_states=True, + ) + + # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] + # clone to have a contiguous tensor + negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() + # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size] + negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = negative_prompt_embeds_list[0] + + bs_embed = pooled_prompt_embeds.shape[0] + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not 
isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.kolors.pipeline_kolors.KolorsPipeline.check_inputs + def check_inputs( + self, + prompt, + num_inference_steps, + height, + width, + negative_prompt=None, + prompt_embeds=None, + pooled_prompt_embeds=None, + negative_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." 
+ ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + if max_sequence_length is not None and max_sequence_length > 256: + raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
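+ # Note: guidance is also skipped when the UNet defines `time_cond_proj_dim`; the guidance
+ # scale is then injected as a timestep-conditioning embedding via `get_guidance_scale_embedding`
+ # rather than by batching an extra unconditional forward pass.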
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints + that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints + that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.kolors.KolorsPipelineOutput`] instead of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.kolors.KolorsPipelineOutput`] or `tuple`: [`~pipelines.kolors.KolorsPipelineOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + num_inference_steps, + height, + width, + negative_prompt, + prompt_embeds, + pooled_prompt_embeds, + negative_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. 
Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + add_text_embeds = self._prepare_perturbed_attention_guidance( + add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + add_time_ids = self._prepare_perturbed_attention_guidance( + add_time_ids, negative_add_time_ids, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + 
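# the expanded embeds are written back so their batch dimension matches the CFG/PAG-expanded latent batch fed to the UNet in the denoising loop below +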
ip_adapter_image_embeds[i] = image_embeds + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return KolorsPipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_pixart_sigma.py b/diffusers3/pipelines/pag/pipeline_pag_pixart_sigma.py new file mode 100644 index 0000000000000000000000000000000000000000..8e5e6cbaf5adacc6de27e1bfb44cbae4c63f3153 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_pixart_sigma.py @@ -0,0 +1,872 @@ +# Copyright 2024 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
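+ +# A sketch of how the PAG update is combined with guidance (inferred from the PAGMixin helpers imported below, not shown in this diff; see `_apply_perturbed_attention_guidance`): with classifier-free guidance active the model output is chunked into (uncond, text, perturbed) and recombined roughly as +# noise_pred = uncond + guidance_scale * (text - uncond) + pag_scale * (text - perturbed) +# so pag_scale = 0.0 reduces to plain classifier-free guidance.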
+ +import html +import inspect +import re +import urllib.parse as ul +from typing import Callable, List, Optional, Tuple, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...image_processor import PixArtImageProcessor +from ...models import AutoencoderKL, PixArtTransformer2DModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + BACKENDS_MAPPING, + deprecate, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ..pixart_alpha.pipeline_pixart_alpha import ( + ASPECT_RATIO_256_BIN, + ASPECT_RATIO_512_BIN, + ASPECT_RATIO_1024_BIN, +) +from ..pixart_alpha.pipeline_pixart_sigma import ASPECT_RATIO_2048_BIN +from .pag_utils import PAGMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForText2Image + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", + ... torch_dtype=torch.float16, + ... pag_applied_layers=["blocks.14"], + ... enable_pag=True, + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "A small cactus with a happy face in the Sahara desert" + >>> image = pipe(prompt, pag_scale=4.0, guidance_scale=1.0).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. 
Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class PixArtSigmaPAGPipeline(DiffusionPipeline, PAGMixin): + r""" + [PAG pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/pag) for text-to-image generation + using PixArt-Sigma. + """ + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder"] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKL, + transformer: PixArtTransformer2DModel, + scheduler: KarrasDiffusionSchedulers, + pag_applied_layers: Union[str, List[str]] = "blocks.1", # 1st transformer block + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.encode_prompt with 120->300 + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: str = "", + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + clean_caption: bool = False, + max_sequence_length: int = 300, + **kwargs, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + PixArt-Alpha, this should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings.
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Alpha, it's should be the embeddings of the "" + string. + clean_caption (`bool`, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. + """ + + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # See Section 3.1. of the paper. + max_length = max_sequence_length + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because T5 can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask + prompt_attention_mask = prompt_attention_mask.to(device) + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + negative_prompt_attention_mask = uncond_input.attention_mask + negative_prompt_attention_mask = 
negative_prompt_attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + else: + negative_prompt_embeds = None + negative_prompt_attention_mask = None + + return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: + raise ValueError( + "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" + f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" + f" {negative_prompt_attention_mask.shape}." + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("<person>", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @<nickname> + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0-31EF CJK Strokes + # 31F0-31FF Katakana Phonetic Extensions + # 3200-32FF Enclosed CJK Letters and Months + # 3300-33FF CJK Compatibility + # 3400-4DBF CJK Unified Ideographs Extension A + # 4DC0-4DFF Yijing Hexagram Symbols + # 4E00-9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption =
re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: str = "", + num_inference_steps: int = 20, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 4.5, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = None, + width: Optional[int] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + use_resolution_binning: bool = True, + max_sequence_length: int = 300, + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 4.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + use_resolution_binning (`bool` defaults to `True`): + If set to `True`, the requested height and width are first mapped to the closest resolutions using + `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to + the requested resolution. Useful for generating non-square images. + max_sequence_length (`int` defaults to 300): Maximum sequence length to use with the `prompt`. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + # 1. Check inputs. 
Raise error if not correct + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + if use_resolution_binning: + if self.transformer.config.sample_size == 256: + aspect_ratio_bin = ASPECT_RATIO_2048_BIN + elif self.transformer.config.sample_size == 128: + aspect_ratio_bin = ASPECT_RATIO_1024_BIN + elif self.transformer.config.sample_size == 64: + aspect_ratio_bin = ASPECT_RATIO_512_BIN + elif self.transformer.config.sample_size == 32: + aspect_ratio_bin = ASPECT_RATIO_256_BIN + else: + raise ValueError("Invalid sample size") + orig_height, orig_width = height, width + height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) + + self.check_inputs( + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Default height and width to transformer + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + clean_caption=clean_caption, + max_sequence_length=max_sequence_length, + ) + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, do_classifier_free_guidance + ) + prompt_attention_mask = self._prepare_perturbed_attention_guidance( + prompt_attention_mask, negative_prompt_attention_mask, do_classifier_free_guidance + ) + elif do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + if self.do_perturbed_attention_guidance: + original_attn_proc = self.transformer.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Prepare micro-conditions. + added_cond_kwargs = {"resolution": None, "aspect_ratio": None} + + # 7. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance, perturbed-attention guidance, or both + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=prompt_attention_mask, + timestep=current_timestep, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, do_classifier_free_guidance, guidance_scale, current_timestep + ) + elif do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # learned sigma + if self.transformer.config.out_channels // 2 == latent_channels: + noise_pred = noise_pred.chunk(2, dim=1)[0] + else: + noise_pred = noise_pred + + # compute previous image: x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + if use_resolution_binning: + image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.transformer.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_sd.py b/diffusers3/pipelines/pag/pipeline_pag_sd.py new file mode 100644 
index 0000000000000000000000000000000000000000..c6a4f7f42c84f48876d322f32dbfe4470e028755 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_sd.py @@ -0,0 +1,1050 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .pag_utils import PAGMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForText2Image + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, enable_pag=True + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, pag_scale=0.3).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionPAGPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, + PAGMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + pag_applied_layers: Union[str, List[str]] = "mid", + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. 
Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
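+
+ A minimal usage sketch (illustrative only, not part of the upstream documentation; it assumes `pipe` is an
+ already loaded `StableDiffusionPAGPipeline` instance):
+
+ ```py
+ >>> prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
+ ...     "a photo of an astronaut riding a horse",
+ ...     device=pipe.device,
+ ...     num_images_per_prompt=1,
+ ...     do_classifier_free_guidance=True,
+ ...     negative_prompt="low quality, blurry",
+ ... )
+ ```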
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
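+ # With the indexing above, `clip_skip=1` selects `hidden_states[-2]`, i.e. the output of the
+ # penultimate encoder layer; the final LayerNorm below keeps that skipped representation on the
+ # same scale as `last_hidden_state`.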
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
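+ # Note: when the UNet carries a guidance embedding (`time_cond_proj_dim` is not None, as in LCM-style
+ # guidance-distilled checkpoints), guidance is injected through `timestep_cond` in `__call__` instead,
+ # so the conditional/unconditional batch is never doubled.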
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. 
Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + None, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": ip_adapter_image_embeds} + if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) + else None + ) + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/pag/pipeline_pag_sd_3.py b/diffusers3/pipelines/pag/pipeline_pag_sd_3.py new file mode 100644 index 0000000000000000000000000000000000000000..3035509843c0305080c43ed43ec519f353907b36 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_sd_3.py @@ 
-0,0 +1,985 @@ +# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import ( + CLIPTextModelWithProjection, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin +from ...models.attention_processor import PAGCFGJointAttnProcessor2_0, PAGJointAttnProcessor2_0 +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import SD3Transformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput +from .pag_utils import PAGMixin + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForText2Image + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "stabilityai/stable-diffusion-3-medium-diffusers", + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... pag_applied_layers=["blocks.13"], + ... ) + >>> pipe.to("cuda") + >>> prompt = "A cat holding a sign that says hello world" + >>> image = pipe(prompt, guidance_scale=5.0, pag_scale=0.7).images[0] + >>> image.save("sd3_pag.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. 
+ sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusion3PAGPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin, PAGMixin): + r""" + [PAG pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/pag) for text-to-image generation + using Stable Diffusion 3. + + Args: + transformer ([`SD3Transformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, + with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` + as its dimension. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + text_encoder_3 ([`T5EncoderModel`]): + Frozen text-encoder. Stable Diffusion 3 uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_3 (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] + + def __init__( + self, + transformer: SD3Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer_2: CLIPTokenizer, + text_encoder_3: T5EncoderModel, + tokenizer_3: T5TokenizerFast, + pag_applied_layers: Union[str, List[str]] = "blocks.1", # 1st transformer block + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + text_encoder_3=text_encoder_3, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + tokenizer_3=tokenizer_3, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + self.set_pag_applied_layers( + pag_applied_layers, pag_attn_processors=(PAGCFGJointAttnProcessor2_0(), PAGJointAttnProcessor2_0()) + ) + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 256, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + if self.text_encoder_3 is None: + return torch.zeros( + ( + batch_size * num_images_per_prompt, + self.tokenizer_max_length, + self.transformer.config.joint_attention_dim, + ), + device=device, + dtype=dtype, + ) + + text_inputs = self.tokenizer_3( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] + + dtype = self.text_encoder_3.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, 
device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clip_skip: Optional[int] = None, + clip_model_index: int = 0, + ): + device = device or self._execution_device + + clip_tokenizers = [self.tokenizer, self.tokenizer_2] + clip_text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = clip_tokenizers[clip_model_index] + text_encoder = clip_text_encoders[clip_model_index] + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + pooled_prompt_embeds = prompt_embeds[0] + + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) + pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + prompt_3: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + max_sequence_length: int = 256, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + 
prompt to be encoded
+ prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+ used in all text-encoders
+ prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
+ used in all text-encoders
+ device: (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+ `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ lora_scale (`float`, *optional*):
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
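+
+ A minimal usage sketch (illustrative only, not part of the upstream documentation; it assumes `pipe` is an
+ already loaded `StableDiffusion3PAGPipeline` instance):
+
+ ```py
+ >>> (
+ ...     prompt_embeds,
+ ...     negative_prompt_embeds,
+ ...     pooled_prompt_embeds,
+ ...     negative_pooled_prompt_embeds,
+ ... ) = pipe.encode_prompt(
+ ...     prompt="A cat holding a sign that says hello world",
+ ...     prompt_2=None,
+ ...     prompt_3=None,
+ ...     do_classifier_free_guidance=True,
+ ...     negative_prompt="blurry",
+ ... )
+ ```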
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + prompt_3 = prompt_3 or prompt + prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 + + prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=0, + ) + prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( + prompt=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=1, + ) + clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) + + t5_prompt_embed = self._get_t5_prompt_embeds( + prompt=prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + clip_prompt_embeds = torch.nn.functional.pad( + clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) + ) + + prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) + pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + negative_prompt_3 = negative_prompt_3 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + negative_prompt_3 = ( + batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ )
+
+ negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds(
+ negative_prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=None,
+ clip_model_index=0,
+ )
+ negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds(
+ negative_prompt_2,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=None,
+ clip_model_index=1,
+ )
+ negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
+
+ t5_negative_prompt_embed = self._get_t5_prompt_embeds(
+ prompt=negative_prompt_3,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ negative_clip_prompt_embeds = torch.nn.functional.pad(
+ negative_clip_prompt_embeds,
+ (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]),
+ )
+
+ negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2)
+ negative_pooled_prompt_embeds = torch.cat(
+ [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1
+ )
+
+ if self.text_encoder is not None:
+ if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder, lora_scale)
+
+ if self.text_encoder_2 is not None:
+ if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.check_inputs
+ def check_inputs(
+ self,
+ prompt,
+ prompt_2,
+ prompt_3,
+ height,
+ width,
+ negative_prompt=None,
+ negative_prompt_2=None,
+ negative_prompt_3=None,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ pooled_prompt_embeds=None,
+ negative_pooled_prompt_embeds=None,
+ callback_on_step_end_tensor_inputs=None,
+ max_sequence_length=None,
+ ):
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+ if callback_on_step_end_tensor_inputs is not None and not all(
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+ ):
+ raise ValueError(
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_2 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_3 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): + raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_3 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if latents is not None: + return latents.to(device=device, dtype=dtype) + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + prompt_3: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is + will be used instead + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
+            guidance_scale (`float`, *optional*, defaults to 7.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of the [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf).
+                Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the
+                model to generate images that are closely linked to the text `prompt`, usually at the expense of
+                lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
+                is less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used instead.
+            negative_prompt_3 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+                `text_encoder_3`. If not defined, `negative_prompt` is used instead.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                latents tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
+                not provided, text embeddings will be generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt`
+                input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled text embeddings will be generated from the `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled `negative_prompt_embeds` will be generated from the
+                `negative_prompt` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
+                of a plain tuple.
+ joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + prompt_3, + height, + width, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._joint_attention_kwargs = joint_attention_kwargs + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale # + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_3=prompt_3, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + clip_skip=self.clip_skip, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + pooled_prompt_embeds = self._prepare_perturbed_attention_guidance( + pooled_prompt_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.transformer.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + # 6. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance, perturbed-attention guidance, or both + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=pooled_prompt_embeds, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.transformer.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return StableDiffusion3PipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_sd_animatediff.py b/diffusers3/pipelines/pag/pipeline_pag_sd_animatediff.py new file mode 100644 index 0000000000000000000000000000000000000000..1e81fa3a158c21a87e9cf2655ae767ac7d57d50b --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_sd_animatediff.py @@ -0,0 +1,866 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..animatediff.pipeline_output import AnimateDiffPipelineOutput +from ..free_init_utils import FreeInitMixin +from ..free_noise_utils import AnimateDiffFreeNoiseMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pag_utils import PAGMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AnimateDiffPAGPipeline, MotionAdapter, DDIMScheduler + >>> from diffusers.utils import export_to_gif + + >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE" + >>> motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-2" + >>> motion_adapter = MotionAdapter.from_pretrained(motion_adapter_id) + >>> scheduler = DDIMScheduler.from_pretrained( + ... model_id, subfolder="scheduler", beta_schedule="linear", steps_offset=1, clip_sample=False + ... ) + >>> pipe = AnimateDiffPAGPipeline.from_pretrained( + ... model_id, + ... motion_adapter=motion_adapter, + ... scheduler=scheduler, + ... pag_applied_layers=["mid"], + ... torch_dtype=torch.float16, + ... ).to("cuda") + + >>> video = pipe( + ... prompt="car, futuristic cityscape with neon lights, street, no human", + ... negative_prompt="low quality, bad quality", + ... num_inference_steps=25, + ... guidance_scale=6.0, + ... pag_scale=3.0, + ... generator=torch.Generator().manual_seed(42), + ... ).frames[0] + + >>> export_to_gif(video, "animatediff_pag.gif") + ``` +""" + + +class AnimateDiffPAGPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FreeInitMixin, + AnimateDiffFreeNoiseMixin, + PAGMixin, +): + r""" + Pipeline for text-to-video generation using + [AnimateDiff](https://huggingface.co/docs/diffusers/en/api/pipelines/animatediff) and [Perturbed Attention + Guidance](https://huggingface.co/docs/diffusers/en/using-diffusers/pag). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + motion_adapter: MotionAdapter, + scheduler: KarrasDiffusionSchedulers, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + pag_applied_layers: Union[str, List[str]] = "mid_block.*attn1", # ["mid"], ["down_blocks.1"] + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
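+                # (With `clip_skip=1`, for example, `prompt_embeds[-1][-2]` is selected, i.e. the
+                # hidden states of the penultimate encoder layer, to which `final_layer_norm` is
+                # applied below.)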
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.decode_latents + def decode_latents(self, latents, decode_chunk_size: int = 16): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + video = [] + for i in range(0, latents.shape[0], decode_chunk_size): + batch_latents = latents[i : i + decode_chunk_size] + batch_latents = self.vae.decode(batch_latents).sample + video.append(batch_latents) + + video = torch.cat(video) + video = video[None, :].reshape((batch_size, num_frames, -1) + 
video.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.pia.pipeline_pia.PIAPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.AnimateDiffPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + # If FreeNoise is enabled, generate latents as described in Equation (7) of [FreeNoise](https://arxiv.org/abs/2310.15169) + if self.free_noise_enabled: + latents = self._prepare_latents_free_noise( + batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + num_frames: Optional[int] = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + decode_chunk_size: int = 16, + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + The call function to the pipeline for generation. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": ip_adapter_image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # 8. 
Denoising loop + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat( + [latents] * (prompt_embeds.shape[0] // num_frames // latents.shape[0]) + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents, decode_chunk_size) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 10. Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (video,) + + return AnimateDiffPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/pag/pipeline_pag_sd_xl.py b/diffusers3/pipelines/pag/pipeline_pag_sd_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..18fc06c1f9b869f35132701ca740de49b84cf3c8 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_sd_xl.py @@ -0,0 +1,1333 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
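+
+# NOTE: The snippet below is a minimal usage sketch for the pipeline defined in this file, kept as
+# comments so it does not affect the module's imports. It assumes the vendored package is importable
+# as `diffusers3` (the path added by this diff) and mirrors the `EXAMPLE_DOC_STRING` further down,
+# which passes `pag_scale` at call time; it is an illustration, not part of the upstream source.
+#
+#   import torch
+#   from diffusers3.pipelines.pag.pipeline_pag_sd_xl import StableDiffusionXLPAGPipeline
+#
+#   pipe = StableDiffusionXLPAGPipeline.from_pretrained(
+#       "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+#   ).to("cuda")
+#   image = pipe(
+#       "a photo of an astronaut riding a horse on mars",
+#       guidance_scale=7.0,  # classifier-free guidance weight `w`; values > 1 enable CFG
+#       pag_scale=3.0,       # perturbed-attention guidance scale; 0.0 disables PAG
+#   ).images[0]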
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .pag_utils import PAGMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForText2Image + + >>> pipe = AutoPipelineForText2Image.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, pag_scale=0.3).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. 
+ num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLPAGPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + PAGMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + pag_applied_layers: Union[str, List[str]] = "mid", # ["mid"],["down.block_1"],["up.block_0.attentions_0"] + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following 
part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
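`prepare_extra_step_kwargs` (continued just below) forwards `eta` and `generator` only to schedulers whose `step` signature accepts them. A self-contained illustration of that introspection pattern, using a hypothetical scheduler class defined here purely for the example:

```py
import inspect

class ToyScheduler:
    def step(self, model_output, timestep, sample, generator=None):  # accepts generator but not eta
        return sample

extra_step_kwargs = {}
if "eta" in inspect.signature(ToyScheduler.step).parameters:
    extra_step_kwargs["eta"] = 0.0
if "generator" in inspect.signature(ToyScheduler.step).parameters:
    extra_step_kwargs["generator"] = None
print(extra_step_kwargs)  # {'generator': None} -- eta is silently dropped for this scheduler
```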
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
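The comment above and the `do_classifier_free_guidance` property that follows describe when classifier-free guidance is active (`guidance_scale > 1`). For reference, a minimal sketch of the CFG combination and the optional `guidance_rescale` correction applied later in the denoising loop, reusing the same math as `rescale_noise_cfg` defined earlier in this file; tensor shapes are illustrative only:

```py
import torch

guidance_scale, guidance_rescale = 5.0, 0.7
noise_pred = torch.randn(2, 4, 128, 128)  # assumed layout: [uncond, text] stacked on the batch dim

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# overexposure fix from https://arxiv.org/pdf/2305.08891.pdf, as in rescale_noise_cfg above
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_pred.std(dim=list(range(1, noise_pred.ndim)), keepdim=True)
noise_pred = guidance_rescale * (noise_pred * std_text / std_cfg) + (1 - guidance_rescale) * noise_pred
```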
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. 
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. 
Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + None, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + add_text_embeds = self._prepare_perturbed_attention_guidance( + add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + add_time_ids = self._prepare_perturbed_attention_guidance( + add_time_ids, negative_add_time_ids, self.do_classifier_free_guidance + ) + + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance, perturbed-attention guidance, or both + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = ip_adapter_image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_sd_xl_img2img.py b/diffusers3/pipelines/pag/pipeline_pag_sd_xl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..dc85aaaca37fea9f6e8893e28ba396606880f08e --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_sd_xl_img2img.py @@ -0,0 +1,1532 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .pag_utils import PAGMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForImage2Image + >>> from diffusers.utils import load_image + + >>> pipe = AutoPipelineForImage2Image.from_pretrained( + ... "stabilityai/stable-diffusion-xl-refiner-1.0", + ... torch_dtype=torch.float16, + ... enable_pag=True, + ... ) + >>> pipe = pipe.to("cuda") + >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" + + >>> init_image = load_image(url).convert("RGB") + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, image=init_image, pag_scale=0.3).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLPAGImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + PAGMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. 
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + pag_applied_layers: Union[str, List[str]] = "mid", # ["mid"], ["down.block_1", "up.block_0.attentions_0"] + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
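For reference, a minimal usage sketch of `encode_prompt` on an assembled pipeline. The checkpoint and `enable_pag=True` follow the example docstring near the top of this file; the prompts and the comments about shapes are illustrative, not asserted by this code.

```py
import torch
from diffusers import AutoPipelineForImage2Image

pipe = AutoPipelineForImage2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16,
    enable_pag=True,
).to("cuda")

(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt(
    prompt="a photo of an astronaut riding a horse on mars",
    negative_prompt="blurry, low quality",
    do_classifier_free_guidance=True,
)

# per-token embeddings are the concatenation of whichever CLIP text encoders the
# checkpoint ships (the refiner carries only the second one); the pooled embeddings
# always come from the final text encoder
print(prompt_embeds.shape, pooled_prompt_embeds.shape)
```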
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + else: + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. 
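As a concrete illustration of the two branches of `get_timesteps`, with plain integers instead of the scheduler's tensors (the numbers are illustrative):

```py
num_inference_steps = 50

# strength branch (denoising_start is None): keep only the tail of the schedule
strength = 0.3
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 15
t_start = max(num_inference_steps - init_timestep, 0)                          # 35
# -> the input image is noised to step 35 and only the last 15 steps are run

# denoising_start branch: convert the fraction into a train-timestep cutoff
denoising_start = 0.8
num_train_timesteps = 1000
discrete_timestep_cutoff = int(round(num_train_timesteps - denoising_start * num_train_timesteps))  # 200
# -> only scheduler timesteps below 200 (roughly the last 20% of the schedule) are kept

print(t_start, discrete_timestep_cutoff)  # 35 200
```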
+ discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + t_start = len(self.scheduler.timesteps) - num_inference_steps + timesteps = self.scheduler.timesteps[t_start:] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start) + return timesteps, num_inference_steps + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, 
list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
+ ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
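In code, that weight enters as the standard classifier-free-guidance combination used later in the denoising loop. A minimal sketch with stand-in tensors (the helper name is illustrative); the property defined just below additionally turns guidance off for guidance-distilled UNets, where `unet.config.time_cond_proj_dim` is set.

```py
import torch

def classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    # w weights the (conditional - unconditional) direction, as in Imagen eq. (2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

uncond = torch.randn(1, 4, 64, 64)
text = torch.randn(1, 4, 64, 64)

boosted = classifier_free_guidance(uncond, text, guidance_scale=5.0)
plain = classifier_free_guidance(uncond, text, guidance_scale=1.0)

# guidance_scale == 1 reproduces the conditional prediction exactly, so the extra
# unconditional UNet pass buys nothing, hence the `> 1` check in the property below
print(torch.allclose(plain, text))  # True
```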
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def denoising_start(self): + return self._denoising_start + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + strength: float = 0.3, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): + The image(s) to modify with the pipeline. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. 
A value of 1, therefore, essentially ignores `image`. Note that in the case of + `denoising_start` being declared as an integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. 
Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. 
Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. 
with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. When returning a tuple, the first element is a list with the generated images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + strength, + num_inference_steps, + None, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. 
Prepare timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + add_noise = True if self.denoising_start is None else False + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + add_noise, + ) + # 7. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 8. Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + add_text_embeds = self._prepare_perturbed_attention_guidance( + add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + add_time_ids = self._prepare_perturbed_attention_guidance( + add_time_ids, add_neg_time_ids, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, 
self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + # 9. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 9.1 Apply denoising_end + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." + ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = ip_adapter_image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/pag/pipeline_pag_sd_xl_inpaint.py b/diffusers3/pipelines/pag/pipeline_pag_sd_xl_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ebf4300934c4115cd23a49b723f1fc55f23849 --- /dev/null +++ b/diffusers3/pipelines/pag/pipeline_pag_sd_xl_inpaint.py @@ -0,0 +1,1764 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
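+
+# Perturbed-attention-guidance (PAG) variant of the Stable Diffusion XL inpainting pipeline,
+# exposed below as `StableDiffusionXLPAGInpaintPipeline`.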
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from .pag_utils import PAGMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import AutoPipelineForInpainting + >>> from diffusers.utils import load_image + + >>> pipe = AutoPipelineForInpainting.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... torch_dtype=torch.float16, + ... variant="fp16", + ... enable_pag=True, + ... ) + >>> pipe.to("cuda") + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = load_image(img_url).convert("RGB") + >>> mask_image = load_image(mask_url).convert("RGB") + + >>> prompt = "A majestic tiger sitting on a bench" + >>> image = pipe( + ... prompt=prompt, + ... image=init_image, + ... mask_image=mask_image, + ... num_inference_steps=50, + ... strength=0.80, + ... pag_scale=0.3, + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLPAGInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, + PAGMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config + of `stabilityai/stable-diffusion-xl-refiner-1-0`. 
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + "mask", + "masked_image_latents", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + pag_applied_layers: Union[str, List[str]] = "mid", # ["mid"], ["down.block_1", "up.block_0.attentions_0"] + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + self.set_pag_applied_layers(pag_applied_layers) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, 
uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
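+ Returns:
+ A tuple `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)`;
+ the negative embeddings may be `None` when classifier-free guidance is not used.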
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_latents + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + add_noise=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if image.shape[1] == 4: + image_latents = image.to(device=device, dtype=dtype) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + elif return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None and add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + elif add_noise: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + else: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = image_latents.to(device) + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + dtype = image.dtype + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + image_latents = image_latents.to(dtype) + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + + if masked_image is not None and masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = None + + if masked_image is not None: + if masked_image_latents is None: + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." 
+ " Make sure the number of images that you pass is divisible by the total requested batch size." + ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + else: + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd derivative) which leads to incorrect results. 
By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + t_start = len(self.scheduler.timesteps) - num_inference_steps + timesteps = self.scheduler.timesteps[t_start:] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start) + return timesteps, num_inference_steps + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
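+ # When enabled, the denoising loop combines the unconditional and text-conditioned predictions as
+ #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)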
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def denoising_start(self): + return self._denoising_start + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: torch.Tensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.9999, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + pag_scale: float = 3.0, + pag_adaptive_scale: float = 0.0, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. 
If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + padding_mask_crop (`int`, *optional*, defaults to `None`): + The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to + image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region + with the same aspect ration of the image and contains all masked area, and then expand that area based + on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before + resizing to the original image size for inpainting. This is useful when the masked area is small while + the image is large and contain information irrelevant for inpainting, such as background. + strength (`float`, *optional*, defaults to 0.9999): + Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be + between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the + `strength`. The number of denoising steps depends on the amount of noise initially added. When + `strength` is 1, added noise will be maximum and the denoising process will run for the full number of + iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked + portion of the reference `image`. Note that in the case of `denoising_start` being declared as an + integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. 
The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. 
If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. 
Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + pag_scale (`float`, *optional*, defaults to 3.0): + The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention + guidance will not be used. + pag_adaptive_scale (`float`, *optional*, defaults to 0.0): + The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is + used. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. `tuple. When returning a tuple, the first element is a list with the generated images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. 
Check inputs + self.check_inputs( + prompt, + prompt_2, + image, + mask_image, + height, + width, + strength, + None, + output_type, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + self._pag_scale = pag_scale + self._pag_adaptive_scale = pag_adaptive_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. set timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. 
Preprocess mask and image + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is not None: + masked_image = masked_image_latents + elif init_image.shape[1] == 4: + # if images are in latent space, we can't mask it + masked_image = None + else: + masked_image = init_image * (mask < 0.5) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + add_noise = True if self.denoising_start is None else False + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + add_noise=add_noise, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + if self.do_perturbed_attention_guidance: + if self.do_classifier_free_guidance: + mask, _ = mask.chunk(2) + masked_image_latents, _ = masked_image_latents.chunk(2) + mask = self._prepare_perturbed_attention_guidance(mask, mask, self.do_classifier_free_guidance) + masked_image_latents = self._prepare_perturbed_attention_guidance( + masked_image_latents, masked_image_latents, self.do_classifier_free_guidance + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + # 8.1 Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 10. Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_perturbed_attention_guidance: + prompt_embeds = self._prepare_perturbed_attention_guidance( + prompt_embeds, negative_prompt_embeds, self.do_classifier_free_guidance + ) + add_text_embeds = self._prepare_perturbed_attention_guidance( + add_text_embeds, negative_pooled_prompt_embeds, self.do_classifier_free_guidance + ) + add_time_ids = self._prepare_perturbed_attention_guidance( + add_time_ids, add_neg_time_ids, self.do_classifier_free_guidance + ) + + elif self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + for i, image_embeds in enumerate(ip_adapter_image_embeds): + negative_image_embeds = None + if self.do_classifier_free_guidance: + negative_image_embeds, image_embeds = image_embeds.chunk(2) + + if self.do_perturbed_attention_guidance: + image_embeds = self._prepare_perturbed_attention_guidance( + image_embeds, negative_image_embeds, self.do_classifier_free_guidance + ) + elif self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0) + image_embeds = image_embeds.to(device) + ip_adapter_image_embeds[i] = image_embeds + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." 
+ ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 11.1 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + if self.do_perturbed_attention_guidance: + original_attn_proc = self.unet.attn_processors + self._set_pag_attn_processor( + pag_applied_layers=self.pag_applied_layers, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0])) + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = ip_adapter_image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_perturbed_attention_guidance: + noise_pred = self._apply_perturbed_attention_guidance( + noise_pred, self.do_classifier_free_guidance, self.guidance_scale, t + ) + elif self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_perturbed_attention_guidance: + init_mask, *_ = mask.chunk(3) if self.do_classifier_free_guidance else mask.chunk(2) + else: + init_mask, *_ = mask.chunk(2) if self.do_classifier_free_guidance else mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + mask = callback_outputs.pop("mask", mask) + masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if self.do_perturbed_attention_guidance: + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/paint_by_example/__init__.py b/diffusers3/pipelines/paint_by_example/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa775f690c3d290074662c029f242df3c61e003 --- /dev/null +++ b/diffusers3/pipelines/paint_by_example/__init__.py @@ -0,0 +1,55 @@ +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Union + +import numpy as np +import PIL +from PIL import Image + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["image_encoder"] = ["PaintByExampleImageEncoder"] + _import_structure["pipeline_paint_by_example"] = ["PaintByExamplePipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .image_encoder import PaintByExampleImageEncoder + from .pipeline_paint_by_example import PaintByExamplePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/paint_by_example/image_encoder.py 
b/diffusers3/pipelines/paint_by_example/image_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd0338b1f9190e63053c086e4bbd74da9d98c54 --- /dev/null +++ b/diffusers3/pipelines/paint_by_example/image_encoder.py @@ -0,0 +1,67 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +from torch import nn +from transformers import CLIPPreTrainedModel, CLIPVisionModel + +from ...models.attention import BasicTransformerBlock +from ...utils import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class PaintByExampleImageEncoder(CLIPPreTrainedModel): + def __init__(self, config, proj_size=None): + super().__init__(config) + self.proj_size = proj_size or getattr(config, "projection_dim", 768) + + self.model = CLIPVisionModel(config) + self.mapper = PaintByExampleMapper(config) + self.final_layer_norm = nn.LayerNorm(config.hidden_size) + self.proj_out = nn.Linear(config.hidden_size, self.proj_size) + + # uncondition for scaling + self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size))) + + def forward(self, pixel_values, return_uncond_vector=False): + clip_output = self.model(pixel_values=pixel_values) + latent_states = clip_output.pooler_output + latent_states = self.mapper(latent_states[:, None]) + latent_states = self.final_layer_norm(latent_states) + latent_states = self.proj_out(latent_states) + if return_uncond_vector: + return latent_states, self.uncond_vector + + return latent_states + + +class PaintByExampleMapper(nn.Module): + def __init__(self, config): + super().__init__() + num_layers = (config.num_hidden_layers + 1) // 5 + hid_size = config.hidden_size + num_heads = 1 + self.blocks = nn.ModuleList( + [ + BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True) + for _ in range(num_layers) + ] + ) + + def forward(self, hidden_states): + for block in self.blocks: + hidden_states = block(hidden_states) + + return hidden_states diff --git a/diffusers3/pipelines/paint_by_example/pipeline_paint_by_example.py b/diffusers3/pipelines/paint_by_example/pipeline_paint_by_example.py new file mode 100644 index 0000000000000000000000000000000000000000..b225fd71edf81a88ac82818816db6ffbe9cf2480 --- /dev/null +++ b/diffusers3/pipelines/paint_by_example/pipeline_paint_by_example.py @@ -0,0 +1,626 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor + +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from .image_encoder import PaintByExampleImageEncoder + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def prepare_mask_and_masked_image(image, mask): + """ + Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the + ``image`` and ``1`` for the ``mask``. + + The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be + binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. + It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` + ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 + dimensions: ``batch x channels x height x width``. 
+ """ + if isinstance(image, torch.Tensor): + if not isinstance(mask, torch.Tensor): + raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") + + # Batch single image + if image.ndim == 3: + assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" + image = image.unsqueeze(0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Batched mask + if mask.shape[0] == image.shape[0]: + mask = mask.unsqueeze(1) + else: + mask = mask.unsqueeze(0) + + assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" + assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" + assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" + assert mask.shape[1] == 1, "Mask image must have a single channel" + + # Check image is in [-1, 1] + if image.min() < -1 or image.max() > 1: + raise ValueError("Image should be in [-1, 1] range") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # paint-by-example inverses the mask + mask = 1 - mask + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # Image as float32 + image = image.to(dtype=torch.float32) + elif isinstance(mask, torch.Tensor): + raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") + else: + if isinstance(image, PIL.Image.Image): + image = [image] + + image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0) + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, PIL.Image.Image): + mask = [mask] + + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + + # paint-by-example inverses the mask + mask = 1 - mask + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + masked_image = image * mask + + return mask, masked_image + + +class PaintByExamplePipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + + + ๐Ÿงช This is an experimental feature! + + + + Pipeline for image-guided image inpainting using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + image_encoder ([`PaintByExampleImageEncoder`]): + Encodes the example input image. The `unet` is conditioned on the example image instead of a text prompt. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + + """ + + # TODO: feature_extractor is required to encode initial images (if they are in PIL format), + # we should give a descriptive message if the pipeline doesn't have one. + + model_cpu_offload_seq = "unet->vae" + _exclude_from_cpu_offload = ["image_encoder"] + _optional_components = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + image_encoder: PaintByExampleImageEncoder, + unet: UNet2DConditionModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = False, + ): + super().__init__() + + self.register_modules( + vae=vae, + image_encoder=image_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs + def check_inputs(self, image, height, width, callback_steps): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. 
Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." + ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings, negative_prompt_embeds = self.image_encoder(image, return_uncond_vector=True) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + negative_prompt_embeds = negative_prompt_embeds.repeat(1, image_embeddings.shape[0], 1) + negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, 1, -1) + + # For classifier free guidance, we need to do two forward passes. 
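+            # Note: here `negative_prompt_embeds` is the encoder's learned unconditional vector
+            # (`PaintByExampleImageEncoder.uncond_vector`), returned by `return_uncond_vector=True` above,
+            # broadcast to match the conditional image embeddings for classifier-free guidance.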
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + @torch.no_grad() + def __call__( + self, + example_image: Union[torch.Tensor, PIL.Image.Image], + image: Union[torch.Tensor, PIL.Image.Image], + mask_image: Union[torch.Tensor, PIL.Image.Image], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + example_image (`torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): + An example image to guide image generation. + image (`torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): + `Image` or tensor representing an image batch to be inpainted (parts of the image are masked out with + `mask_image` and repainted according to `prompt`). + mask_image (`torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): + `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted, + while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel + (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the + expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Example: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + >>> from diffusers import PaintByExamplePipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = ( + ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/image/example_1.png" + ... ) + >>> mask_url = ( + ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/mask/example_1.png" + ... ) + >>> example_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/reference/example_1.jpg" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + >>> example_image = download_image(example_url).resize((512, 512)) + + >>> pipe = PaintByExamplePipeline.from_pretrained( + ... "Fantasy-Studio/Paint-by-Example", + ... torch_dtype=torch.float16, + ... ) + >>> pipe = pipe.to("cuda") + + >>> image = pipe(image=init_image, mask_image=mask_image, example_image=example_image).images[0] + >>> image + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 1. Define call parameters + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 2. Preprocess mask and image + mask, masked_image = prepare_mask_and_masked_image(image, mask_image) + height, width = masked_image.shape[-2:] + + # 3. Check inputs + self.check_inputs(example_image, height, width, callback_steps) + + # 4. Encode input image + image_embeddings = self._encode_image( + example_image, device, num_images_per_prompt, do_classifier_free_guidance + ) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. 
Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + image_embeddings.dtype, + device, + generator, + do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, masked_image_latents, mask], dim=1) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) 
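Note on the mask convention used by `prepare_mask_and_masked_image` above: unlike the standard inpainting preprocessors, Paint-by-Example inverts the mask before binarizing it, so after preprocessing a value of 1 keeps a pixel and 0 marks the region to be repainted, and `masked_image = image * mask` zeroes out exactly the repaint region. The snippet below is an illustrative sketch, not part of the diff; it assumes the Space's repo root is on `PYTHONPATH` so the module added above is importable, and it uses toy inputs.

```py
import numpy as np
import PIL.Image

# helper defined in the file added above
from diffusers3.pipelines.paint_by_example.pipeline_paint_by_example import prepare_mask_and_masked_image

# toy inputs: a flat gray 64x64 RGB image and a mask whose top half is white (i.e. to be repainted)
image = PIL.Image.fromarray(np.full((64, 64, 3), 128, dtype=np.uint8))
mask_array = np.zeros((64, 64), dtype=np.uint8)
mask_array[:32, :] = 255
mask_image = PIL.Image.fromarray(mask_array)

mask, masked_image = prepare_mask_and_masked_image(image, mask_image)

print(mask.shape, masked_image.shape)   # torch.Size([1, 1, 64, 64]) torch.Size([1, 3, 64, 64])
print(mask[0, 0, 0, 0].item())          # 0.0 -> white (repaint) pixels become 0 after inversion
print(mask[0, 0, 63, 0].item())         # 1.0 -> black (keep) pixels become 1
print(masked_image[0, 0, 0, 0].item())  # 0.0 -> the repaint region is zeroed in masked_image
```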
diff --git a/diffusers3/pipelines/pia/__init__.py b/diffusers3/pipelines/pia/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..16e8004966e58387f7dba2f6ff3175575fe0abee --- /dev/null +++ b/diffusers3/pipelines/pia/__init__.py @@ -0,0 +1,46 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_pia"] = ["PIAPipeline", "PIAPipelineOutput"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .pipeline_pia import PIAPipeline, PIAPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/pia/pipeline_pia.py b/diffusers3/pipelines/pia/pipeline_pia.py new file mode 100644 index 0000000000000000000000000000000000000000..b7dfcd39edce2e61b0a0eed214580719a18cac6d --- /dev/null +++ b/diffusers3/pipelines/pia/pipeline_pia.py @@ -0,0 +1,944 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
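For orientation, the `_LazyModule` registration in `diffusers3/pipelines/pia/__init__.py` above means that `import diffusers3.pipelines.pia` stays cheap: `pipeline_pia` is only imported once `PIAPipeline` or `PIAPipelineOutput` is actually accessed. A simplified stand-in for that idea (a sketch using PEP 562 module-level `__getattr__`, not the real `_LazyModule` implementation):

```py
# Hypothetical, simplified version of lazy submodule loading for a package
# __init__.py; the real _LazyModule used above is more general.
import importlib

_import_structure = {"pipeline_pia": ["PIAPipeline", "PIAPipelineOutput"]}

def __getattr__(name):
    # Only import the heavy submodule when one of its exported names is requested.
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```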
+ +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...models.unets.unet_motion_model import MotionAdapter +from ...schedulers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from ...utils import ( + USE_PEFT_BACKEND, + BaseOutput, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..free_init_utils import FreeInitMixin +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import EulerDiscreteScheduler, MotionAdapter, PIAPipeline + >>> from diffusers.utils import export_to_gif, load_image + + >>> adapter = MotionAdapter.from_pretrained("openmmlab/PIA-condition-adapter") + >>> pipe = PIAPipeline.from_pretrained( + ... "SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16 + ... ) + + >>> pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + >>> image = load_image( + ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true" + ... 
) + >>> image = image.resize((512, 512)) + >>> prompt = "cat in a hat" + >>> negative_prompt = "wrong white balance, dark, sketches, worst quality, low quality, deformed, distorted" + >>> generator = torch.Generator("cpu").manual_seed(0) + >>> output = pipe(image=image, prompt=prompt, negative_prompt=negative_prompt, generator=generator) + >>> frames = output.frames[0] + >>> export_to_gif(frames, "pia-animation.gif") + ``` +""" + +RANGE_LIST = [ + [1.0, 0.9, 0.85, 0.85, 0.85, 0.8], # 0 Small Motion + [1.0, 0.8, 0.8, 0.8, 0.79, 0.78, 0.75], # Moderate Motion + [1.0, 0.8, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.6, 0.5, 0.5], # Large Motion + [1.0, 0.9, 0.85, 0.85, 0.85, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.85, 0.85, 0.9, 1.0], # Loop + [1.0, 0.8, 0.8, 0.8, 0.79, 0.78, 0.75, 0.75, 0.75, 0.75, 0.75, 0.78, 0.79, 0.8, 0.8, 1.0], # Loop + [1.0, 0.8, 0.7, 0.7, 0.7, 0.7, 0.6, 0.5, 0.5, 0.6, 0.7, 0.7, 0.7, 0.7, 0.8, 1.0], # Loop + [0.5, 0.4, 0.4, 0.4, 0.35, 0.3], # Style Transfer Candidate Small Motion + [0.5, 0.4, 0.4, 0.4, 0.35, 0.35, 0.3, 0.25, 0.2], # Style Transfer Moderate Motion + [0.5, 0.2], # Style Transfer Large Motion +] + + +def prepare_mask_coef_by_statistics(num_frames: int, cond_frame: int, motion_scale: int): + assert num_frames > 0, "video_length should be greater than 0" + + assert num_frames > cond_frame, "video_length should be greater than cond_frame" + + range_list = RANGE_LIST + + assert motion_scale < len(range_list), f"motion_scale type{motion_scale} not implemented" + + coef = range_list[motion_scale] + coef = coef + ([coef[-1]] * (num_frames - len(coef))) + + order = [abs(i - cond_frame) for i in range(num_frames)] + coef = [coef[order[i]] for i in range(num_frames)] + + return coef + + +@dataclass +class PIAPipelineOutput(BaseOutput): + r""" + Output class for PIAPipeline. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + Nested list of length `batch_size` with denoised PIL image sequences of length `num_frames`, NumPy array of + shape `(batch_size, num_frames, channels, height, width, Torch tensor of shape `(batch_size, num_frames, + channels, height, width)`. + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] + + +class PIAPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, + FreeInitMixin, +): + r""" + Pipeline for text-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. 
+ unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents. + motion_adapter ([`MotionAdapter`]): + A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["feature_extractor", "image_encoder", "motion_adapter"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: Union[UNet2DConditionModel, UNetMotionModel], + scheduler: Union[ + DDIMScheduler, + PNDMScheduler, + LMSDiscreteScheduler, + EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + DPMSolverMultistepScheduler, + ], + motion_adapter: Optional[MotionAdapter] = None, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + if isinstance(unet, UNet2DConditionModel): + unet = UNetMotionModel.from_unet2d(unet, motion_adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + motion_adapter=motion_adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_masked_condition( + self, + image, + batch_size, + num_channels_latents, + num_frames, + height, + width, + dtype, + device, + generator, + motion_scale=0, + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + _, _, _, scaled_height, scaled_width = shape + + image = self.video_processor.preprocess(image) + image = image.to(device, dtype) + + if isinstance(generator, list): + image_latent = [ + self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) + ] + image_latent = torch.cat(image_latent, dim=0) + else: + image_latent = self.vae.encode(image).latent_dist.sample(generator) + + image_latent = image_latent.to(device=device, dtype=dtype) + image_latent = torch.nn.functional.interpolate(image_latent, size=[scaled_height, scaled_width]) + image_latent_padding = image_latent.clone() * self.vae.config.scaling_factor + + mask = torch.zeros((batch_size, 1, num_frames, scaled_height, scaled_width)).to(device=device, dtype=dtype) + mask_coef = prepare_mask_coef_by_statistics(num_frames, 0, motion_scale) + masked_image = torch.zeros(batch_size, 4, num_frames, scaled_height, scaled_width).to( + device=device, dtype=self.unet.dtype + ) + for f in range(num_frames): + mask[:, :, f, :, :] = mask_coef[f] + masked_image[:, :, f, :, :] = image_latent_padding.clone() + + mask = torch.cat([mask] * 2) if self.do_classifier_free_guidance else mask + masked_image = torch.cat([masked_image] * 2) if self.do_classifier_free_guidance else masked_image + + return mask, masked_image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
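As an aside on the motion mask filled by `prepare_masked_condition` above: `prepare_mask_coef_by_statistics` picks one of the presets from `RANGE_LIST`, pads it with its last value up to `num_frames`, and reorders it by distance from the conditioning frame. A small worked example (the frame count of 8 is an arbitrary assumption):

```py
# Mirrors prepare_mask_coef_by_statistics(num_frames=8, cond_frame=0, motion_scale=0)
coef = [1.0, 0.9, 0.85, 0.85, 0.85, 0.8]             # RANGE_LIST[0], "small motion" preset
num_frames, cond_frame = 8, 0
coef = coef + [coef[-1]] * (num_frames - len(coef))  # pad with the last value -> length 8
order = [abs(i - cond_frame) for i in range(num_frames)]
coef = [coef[order[i]] for i in range(num_frames)]
print(coef)  # [1.0, 0.9, 0.85, 0.85, 0.85, 0.8, 0.8, 0.8]
```

Roughly speaking, frames near the conditioning frame get a coefficient close to 1.0 (they stay close to the input image), while more distant frames get smaller coefficients and are freer to move.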
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: PipelineImageInput, + prompt: Union[str, List[str]] = None, + strength: float = 1.0, + num_frames: Optional[int] = 16, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + motion_scale: int = 0, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PipelineImageInput`): + The input image to be used for video generation. + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + strength (`float`, *optional*, defaults to 1.0): + Indicates extent to transform the reference `image`. Must be between 0 and 1. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + motion_scale: (`int`, *optional*, defaults to 0): + Parameter that controls the amount and type of motion that is added to the image. Increasing the value + increases the amount of motion, while specific ranges of values control the type of motion that is + added. Must be between 0 and 8. Set between 0-2 to only increase the amount of motion. Set between 3-5 + to create looping motion. Set between 6-8 to perform motion with image style transfer. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.pia.pipeline_pia.PIAPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.pia.pipeline_pia.PIAPipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_videos_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + prompt_embeds = prompt_embeds.repeat_interleave(repeats=num_frames, dim=0) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt) + self._num_timesteps = len(timesteps) + + # 5. Prepare latent variables + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + 4, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents=latents, + ) + mask, masked_image = self.prepare_masked_condition( + image, + batch_size * num_videos_per_prompt, + 4, + num_frames=num_frames, + height=height, + width=width, + dtype=self.unet.dtype, + device=device, + generator=generator, + motion_scale=motion_scale, + ) + if strength < 1.0: + noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) + latents = self.scheduler.add_noise(masked_image[0], noise, latent_timestep) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 8. Denoising loop + num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1 + for free_init_iter in range(num_free_init_iters): + if self.free_init_enabled: + latents, timesteps = self._apply_free_init( + latents, free_init_iter, num_inference_steps, device, latents.dtype, generator + ) + + self._num_timesteps = len(timesteps) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + with self.progress_bar(total=self._num_timesteps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, mask, masked_image], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 10. Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return PIAPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/pipeline_flax_utils.py b/diffusers3/pipelines/pipeline_flax_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c4c212873a88fbef21c8a7d528fe92efea09e7ed --- /dev/null +++ b/diffusers3/pipelines/pipeline_flax_utils.py @@ -0,0 +1,611 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
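Unlike the image pipeline earlier in this diff, `PIAPipeline` denoises 5D video latents, so the per-step concatenation above happens on `(batch, channels, frames, height, width)` tensors and in the order `[latents, mask, masked_image]`. A shape-only sketch with illustrative, assumed sizes:

```py
import torch

# (batch, channels, frames, latent_h, latent_w) -- all sizes are assumptions.
latents = torch.randn(2, 4, 16, 64, 64)       # noisy video latents
mask = torch.randn(2, 1, 16, 64, 64)          # per-frame motion coefficient, constant over space
masked_image = torch.randn(2, 4, 16, 64, 64)  # conditioning-image latents repeated per frame

# Same concatenation order as in the PIA denoising loop above.
unet_input = torch.cat([latents, mask, masked_image], dim=1)
print(unet_input.shape)  # torch.Size([2, 9, 16, 64, 64])
```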
+# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import inspect +import os +from typing import Any, Dict, List, Optional, Union + +import flax +import numpy as np +import PIL.Image +from flax.core.frozen_dict import FrozenDict +from huggingface_hub import create_repo, snapshot_download +from huggingface_hub.utils import validate_hf_hub_args +from PIL import Image +from tqdm.auto import tqdm + +from ..configuration_utils import ConfigMixin +from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin +from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin +from ..utils import ( + CONFIG_NAME, + BaseOutput, + PushToHubMixin, + http_user_agent, + is_transformers_available, + logging, +) + + +if is_transformers_available(): + from transformers import FlaxPreTrainedModel + +INDEX_FILE = "diffusion_flax_model.bin" + + +logger = logging.get_logger(__name__) + + +LOADABLE_CLASSES = { + "diffusers": { + "FlaxModelMixin": ["save_pretrained", "from_pretrained"], + "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], + "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], + }, + "transformers": { + "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], + "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], + "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], + "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], + "ProcessorMixin": ["save_pretrained", "from_pretrained"], + "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], + }, +} + +ALL_IMPORTABLE_CLASSES = {} +for library in LOADABLE_CLASSES: + ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) + + +def import_flax_or_no_model(module, class_name): + try: + # 1. First make sure that if a Flax object is present, import this one + class_obj = getattr(module, "Flax" + class_name) + except AttributeError: + # 2. If this doesn't work, it's not a model and we don't append "Flax" + class_obj = getattr(module, class_name) + except AttributeError: + raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") + + return class_obj + + +@flax.struct.dataclass +class FlaxImagePipelineOutput(BaseOutput): + """ + Output class for image pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin): + r""" + Base class for Flax-based pipelines. + + [`FlaxDiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and + provides methods for loading, downloading and saving models. It also includes methods to: + + - enable/disable the progress bar for the denoising iteration + + Class attributes: + + - **config_name** ([`str`]) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. 
+ """ + + config_name = "model_index.json" + + def register_modules(self, **kwargs): + # import it here to avoid circular import + from diffusers import pipelines + + for name, module in kwargs.items(): + if module is None: + register_dict = {name: (None, None)} + else: + # retrieve library + library = module.__module__.split(".")[0] + + # check if the module is a pipeline module + pipeline_dir = module.__module__.split(".")[-2] + path = module.__module__.split(".") + is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) + + # if library is not in LOADABLE_CLASSES, then it is a custom module. + # Or if it's a pipeline module, then the module is inside the pipeline + # folder so we set the library to module name. + if library not in LOADABLE_CLASSES or is_pipeline_module: + library = pipeline_dir + + # retrieve class_name + class_name = module.__class__.__name__ + + register_dict = {name: (library, class_name)} + + # save model index config + self.register_to_config(**register_dict) + + # set models + setattr(self, name, module) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + params: Union[Dict, FrozenDict], + push_to_hub: bool = False, + **kwargs, + ): + # TODO: handle inference_state + """ + Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its + class implements both a save and loading method. The pipeline is easily reloaded using the + [`~FlaxDiffusionPipeline.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + self.save_config(save_directory) + + model_index_dict = dict(self.config) + model_index_dict.pop("_class_name") + model_index_dict.pop("_diffusers_version") + model_index_dict.pop("_module", None) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + for pipeline_component_name in model_index_dict.keys(): + sub_model = getattr(self, pipeline_component_name) + if sub_model is None: + # edge case for saving a pipeline with safety_checker=None + continue + + model_cls = sub_model.__class__ + + save_method_name = None + # search for the model's base class in LOADABLE_CLASSES + for library_name, library_classes in LOADABLE_CLASSES.items(): + library = importlib.import_module(library_name) + for base_class, save_load_methods in library_classes.items(): + class_candidate = getattr(library, base_class, None) + if class_candidate is not None and issubclass(model_cls, class_candidate): + # if we found a suitable base class in LOADABLE_CLASSES then grab its save method + save_method_name = save_load_methods[0] + break + if save_method_name is not None: + break + + save_method = getattr(sub_model, save_method_name) + expects_params = "params" in set(inspect.signature(save_method).parameters.keys()) + + if expects_params: + save_method( + os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name] + ) + else: + save_method(os.path.join(save_directory, pipeline_component_name)) + + if push_to_hub: + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a Flax-based diffusion pipeline from pretrained pipeline weights. + + The pipeline is set in evaluation mode (`model.eval()) by default and dropout modules are deactivated. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved + using [`~FlaxDiffusionPipeline.save_pretrained`]. + dtype (`str` or `jnp.dtype`, *optional*): + Override the default `jnp.dtype` and load the model under this dtype. If `"auto"`, the dtype is + automatically derived from the model's weights. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
+ output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components) of the specific pipeline + class. The overwritten components are passed directly to the pipelines `__init__` method. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import FlaxDiffusionPipeline + + >>> # Download pipeline from huggingface.co and cache. + >>> # Requires to be logged in to Hugging Face hub, + >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) + >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", + ... variant="bf16", + ... dtype=jnp.bfloat16, + ... ) + + >>> # Download pipeline, but use a different scheduler + >>> from diffusers import FlaxDPMSolverMultistepScheduler + + >>> model_id = "runwayml/stable-diffusion-v1-5" + >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( + ... model_id, + ... subfolder="scheduler", + ... ) + + >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( + ... model_id, variant="bf16", dtype=jnp.bfloat16, scheduler=dpmpp + ... ) + >>> dpm_params["scheduler"] = dpmpp_state + ``` + """ + cache_dir = kwargs.pop("cache_dir", None) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", False) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + from_pt = kwargs.pop("from_pt", False) + use_memory_efficient_attention = kwargs.pop("use_memory_efficient_attention", False) + split_head_dim = kwargs.pop("split_head_dim", False) + dtype = kwargs.pop("dtype", None) + + # 1. 
Download the checkpoints and configs + # use snapshot download here to get it working from from_pretrained + if not os.path.isdir(pretrained_model_name_or_path): + config_dict = cls.load_config( + pretrained_model_name_or_path, + cache_dir=cache_dir, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + ) + # make sure we only download sub-folders and `diffusers` filenames + folder_names = [k for k in config_dict.keys() if not k.startswith("_")] + allow_patterns = [os.path.join(k, "*") for k in folder_names] + allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name] + + ignore_patterns = ["*.bin", "*.safetensors"] if not from_pt else [] + ignore_patterns += ["*.onnx", "*.onnx_data", "*.xml", "*.pb"] + + if cls != FlaxDiffusionPipeline: + requested_pipeline_class = cls.__name__ + else: + requested_pipeline_class = config_dict.get("_class_name", cls.__name__) + requested_pipeline_class = ( + requested_pipeline_class + if requested_pipeline_class.startswith("Flax") + else "Flax" + requested_pipeline_class + ) + + user_agent = {"pipeline_class": requested_pipeline_class} + user_agent = http_user_agent(user_agent) + + # download all allow_patterns + cached_folder = snapshot_download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, + ) + else: + cached_folder = pretrained_model_name_or_path + + config_dict = cls.load_config(cached_folder) + + # 2. Load the pipeline class, if using custom module then load it from the hub + # if we load from explicit class, let's use it + if cls != FlaxDiffusionPipeline: + pipeline_class = cls + else: + diffusers_module = importlib.import_module(cls.__module__.split(".")[0]) + class_name = ( + config_dict["_class_name"] + if config_dict["_class_name"].startswith("Flax") + else "Flax" + config_dict["_class_name"] + ) + pipeline_class = getattr(diffusers_module, class_name) + + # some modules can be passed directly to the init + # in this case they are already instantiated in `kwargs` + # extract them here + expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + + init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) + + # define init kwargs + init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict} + init_kwargs = {**init_kwargs, **passed_pipe_kwargs} + + # remove `null` components + def load_module(name, value): + if value[0] is None: + return False + if name in passed_class_obj and passed_class_obj[name] is None: + return False + return True + + init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} + + # Throw nice warnings / errors for fast accelerate loading + if len(unused_kwargs) > 0: + logger.warning( + f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." + ) + + # inference_params + params = {} + + # import it here to avoid circular import + from diffusers import pipelines + + # 3. 
Load each module in the pipeline + for name, (library_name, class_name) in init_dict.items(): + if class_name is None: + # edge case for when the pipeline was saved with safety_checker=None + init_kwargs[name] = None + continue + + is_pipeline_module = hasattr(pipelines, library_name) + loaded_sub_model = None + sub_model_should_be_defined = True + + # if the model is in a pipeline module, then we load it from the pipeline + if name in passed_class_obj: + # 1. check that passed_class_obj has correct parent class + if not is_pipeline_module: + library = importlib.import_module(library_name) + class_obj = getattr(library, class_name) + importable_classes = LOADABLE_CLASSES[library_name] + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + expected_class_obj = None + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + expected_class_obj = class_candidate + + if not issubclass(passed_class_obj[name].__class__, expected_class_obj): + raise ValueError( + f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be" + f" {expected_class_obj}" + ) + elif passed_class_obj[name] is None: + logger.warning( + f"You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note" + f" that this might lead to problems when using {pipeline_class} and is not recommended." + ) + sub_model_should_be_defined = False + else: + logger.warning( + f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" + " has the correct type" + ) + + # set passed class object + loaded_sub_model = passed_class_obj[name] + elif is_pipeline_module: + pipeline_module = getattr(pipelines, library_name) + class_obj = import_flax_or_no_model(pipeline_module, class_name) + + importable_classes = ALL_IMPORTABLE_CLASSES + class_candidates = {c: class_obj for c in importable_classes.keys()} + else: + # else we just import it from the library. + library = importlib.import_module(library_name) + class_obj = import_flax_or_no_model(library, class_name) + + importable_classes = LOADABLE_CLASSES[library_name] + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + if loaded_sub_model is None and sub_model_should_be_defined: + load_method_name = None + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + load_method_name = importable_classes[class_name][1] + + load_method = getattr(class_obj, load_method_name) + + # check if the module is in a subdirectory + if os.path.isdir(os.path.join(cached_folder, name)): + loadable_folder = os.path.join(cached_folder, name) + else: + loaded_sub_model = cached_folder + + if issubclass(class_obj, FlaxModelMixin): + loaded_sub_model, loaded_params = load_method( + loadable_folder, + from_pt=from_pt, + use_memory_efficient_attention=use_memory_efficient_attention, + split_head_dim=split_head_dim, + dtype=dtype, + ) + params[name] = loaded_params + elif is_transformers_available() and issubclass(class_obj, FlaxPreTrainedModel): + if from_pt: + # TODO(Suraj): Fix this in Transformers. 
We should be able to use `_do_init=False` here + loaded_sub_model = load_method(loadable_folder, from_pt=from_pt) + loaded_params = loaded_sub_model.params + del loaded_sub_model._params + else: + loaded_sub_model, loaded_params = load_method(loadable_folder, _do_init=False) + params[name] = loaded_params + elif issubclass(class_obj, FlaxSchedulerMixin): + loaded_sub_model, scheduler_state = load_method(loadable_folder) + params[name] = scheduler_state + else: + loaded_sub_model = load_method(loadable_folder) + + init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) + + # 4. Potentially add passed objects if expected + missing_modules = set(expected_modules) - set(init_kwargs.keys()) + passed_modules = list(passed_class_obj.keys()) + + if len(missing_modules) > 0 and missing_modules <= set(passed_modules): + for module in missing_modules: + init_kwargs[module] = passed_class_obj.get(module, None) + elif len(missing_modules) > 0: + passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs + raise ValueError( + f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." + ) + + model = pipeline_class(**init_kwargs, dtype=dtype) + return model, params + + @classmethod + def _get_signature_keys(cls, obj): + parameters = inspect.signature(obj.__init__).parameters + required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} + optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) + expected_modules = set(required_parameters.keys()) - {"self"} + + return expected_modules, optional_parameters + + @property + def components(self) -> Dict[str, Any]: + r""" + + The `self.components` property can be useful to run different pipelines with the same weights and + configurations to not have to re-allocate memory. + + Examples: + + ```py + >>> from diffusers import ( + ... FlaxStableDiffusionPipeline, + ... FlaxStableDiffusionImg2ImgPipeline, + ... ) + + >>> text2img = FlaxStableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16 + ... ) + >>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components) + ``` + + Returns: + A dictionary containing all the modules needed to initialize the pipeline. + """ + expected_modules, optional_parameters = self._get_signature_keys(self) + components = { + k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters + } + + if set(components.keys()) != expected_modules: + raise ValueError( + f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" + f" {expected_modules} to be defined, but {components} are defined." + ) + + return components + + @staticmethod + def numpy_to_pil(images): + """ + Convert a NumPy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] 
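+        # The input is expected to be a float array in [0, 1] of shape (batch, height, width, channels);
+        # values are scaled to 8-bit below, and single-channel arrays become mode-"L" (grayscale) PIL images.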
+ images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + # TODO: make it compatible with jax.lax + def progress_bar(self, iterable): + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + elif not isinstance(self._progress_bar_config, dict): + raise ValueError( + f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." + ) + + return tqdm(iterable, **self._progress_bar_config) + + def set_progress_bar_config(self, **kwargs): + self._progress_bar_config = kwargs diff --git a/diffusers3/pipelines/pipeline_loading_utils.py b/diffusers3/pipelines/pipeline_loading_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..318599f560636610c9bd812bbe311674d83ff055 --- /dev/null +++ b/diffusers3/pipelines/pipeline_loading_utils.py @@ -0,0 +1,838 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import importlib +import os +import re +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Union + +import torch +from huggingface_hub import ModelCard, model_info +from huggingface_hub.utils import validate_hf_hub_args +from packaging import version + +from .. 
import __version__ +from ..utils import ( + FLAX_WEIGHTS_NAME, + ONNX_EXTERNAL_WEIGHTS_NAME, + ONNX_WEIGHTS_NAME, + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, + deprecate, + get_class_from_dynamic_module, + is_accelerate_available, + is_peft_available, + is_transformers_available, + logging, +) +from ..utils.torch_utils import is_compiled_module + + +if is_transformers_available(): + import transformers + from transformers import PreTrainedModel + from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME + from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME + from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME + +if is_accelerate_available(): + import accelerate + from accelerate import dispatch_model + from accelerate.hooks import remove_hook_from_module + from accelerate.utils import compute_module_sizes, get_max_memory + + +INDEX_FILE = "diffusion_pytorch_model.bin" +CUSTOM_PIPELINE_FILE_NAME = "pipeline.py" +DUMMY_MODULES_FOLDER = "diffusers.utils" +TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils" +CONNECTED_PIPES_KEYS = ["prior"] + +logger = logging.get_logger(__name__) + +LOADABLE_CLASSES = { + "diffusers": { + "ModelMixin": ["save_pretrained", "from_pretrained"], + "SchedulerMixin": ["save_pretrained", "from_pretrained"], + "DiffusionPipeline": ["save_pretrained", "from_pretrained"], + "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"], + }, + "transformers": { + "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], + "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], + "PreTrainedModel": ["save_pretrained", "from_pretrained"], + "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], + "ProcessorMixin": ["save_pretrained", "from_pretrained"], + "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], + }, + "onnxruntime.training": { + "ORTModule": ["save_pretrained", "from_pretrained"], + }, +} + +ALL_IMPORTABLE_CLASSES = {} +for library in LOADABLE_CLASSES: + ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) + + +def is_safetensors_compatible(filenames, passed_components=None, folder_names=None) -> bool: + """ + Checking for safetensors compatibility: + - The model is safetensors compatible only if there is a safetensors file for each model component present in + filenames. 
+ + Converting default pytorch serialized filenames to safetensors serialized filenames: + - For models from the diffusers library, just replace the ".bin" extension with ".safetensors" + - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin" + extension is replaced with ".safetensors" + """ + passed_components = passed_components or [] + if folder_names is not None: + filenames = {f for f in filenames if os.path.split(f)[0] in folder_names} + + # extract all components of the pipeline and their associated files + components = {} + for filename in filenames: + if not len(filename.split("/")) == 2: + continue + + component, component_filename = filename.split("/") + if component in passed_components: + continue + + components.setdefault(component, []) + components[component].append(component_filename) + + # iterate over all files of a component + # check if safetensor files exist for that component + # if variant is provided check if the variant of the safetensors exists + for component, component_filenames in components.items(): + matches = [] + for component_filename in component_filenames: + filename, extension = os.path.splitext(component_filename) + + match_exists = extension == ".safetensors" + matches.append(match_exists) + + if not any(matches): + return False + + return True + + +def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]: + weight_names = [ + WEIGHTS_NAME, + SAFETENSORS_WEIGHTS_NAME, + FLAX_WEIGHTS_NAME, + ONNX_WEIGHTS_NAME, + ONNX_EXTERNAL_WEIGHTS_NAME, + ] + + if is_transformers_available(): + weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME] + + # model_pytorch, diffusion_model_pytorch, ... + weight_prefixes = [w.split(".")[0] for w in weight_names] + # .bin, .safetensors, ... 
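+    # e.g. "diffusion_pytorch_model.bin" -> prefix "diffusion_pytorch_model", suffix "bin"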
+ weight_suffixs = [w.split(".")[-1] for w in weight_names] + # -00001-of-00002 + transformers_index_format = r"\d{5}-of-\d{5}" + + if variant is not None: + # `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors` + variant_file_re = re.compile( + rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$" + ) + # `text_encoder/pytorch_model.bin.index.fp16.json` + variant_index_re = re.compile( + rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$" + ) + + # `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors` + non_variant_file_re = re.compile( + rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$" + ) + # `text_encoder/pytorch_model.bin.index.json` + non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json") + + if variant is not None: + variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None} + variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None} + variant_filenames = variant_weights | variant_indexes + else: + variant_filenames = set() + + non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None} + non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None} + non_variant_filenames = non_variant_weights | non_variant_indexes + + # all variant filenames will be used by default + usable_filenames = set(variant_filenames) + + def convert_to_variant(filename): + if "index" in filename: + variant_filename = filename.replace("index", f"index.{variant}") + elif re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None: + variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}" + else: + variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}" + return variant_filename + + for f in non_variant_filenames: + variant_filename = convert_to_variant(f) + if variant_filename not in usable_filenames: + usable_filenames.add(f) + + return usable_filenames, variant_filenames + + +@validate_hf_hub_args +def warn_deprecated_model_variant(pretrained_model_name_or_path, token, variant, revision, model_filenames): + info = model_info( + pretrained_model_name_or_path, + token=token, + revision=None, + ) + filenames = {sibling.rfilename for sibling in info.siblings} + comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision) + comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames] + + if set(model_filenames).issubset(set(comp_model_filenames)): + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.", + FutureWarning, + ) + else: + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. 
However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.", + FutureWarning, + ) + + +def _unwrap_model(model): + """Unwraps a model.""" + if is_compiled_module(model): + model = model._orig_mod + + if is_peft_available(): + from peft import PeftModel + + if isinstance(model, PeftModel): + model = model.base_model.model + + return model + + +def maybe_raise_or_warn( + library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module +): + """Simple helper method to raise or warn in case incorrect module has been passed""" + if not is_pipeline_module: + library = importlib.import_module(library_name) + class_obj = getattr(library, class_name) + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + expected_class_obj = None + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + expected_class_obj = class_candidate + + # Dynamo wraps the original model in a private class. + # I didn't find a public API to get the original class. + sub_model = passed_class_obj[name] + unwrapped_sub_model = _unwrap_model(sub_model) + model_cls = unwrapped_sub_model.__class__ + + if not issubclass(model_cls, expected_class_obj): + raise ValueError( + f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}" + ) + else: + logger.warning( + f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it" + " has the correct type" + ) + + +def get_class_obj_and_candidates( + library_name, class_name, importable_classes, pipelines, is_pipeline_module, component_name=None, cache_dir=None +): + """Simple helper method to retrieve class object of module as well as potential parent class objects""" + component_folder = os.path.join(cache_dir, component_name) + + if is_pipeline_module: + pipeline_module = getattr(pipelines, library_name) + + class_obj = getattr(pipeline_module, class_name) + class_candidates = {c: class_obj for c in importable_classes.keys()} + elif os.path.isfile(os.path.join(component_folder, library_name + ".py")): + # load custom component + class_obj = get_class_from_dynamic_module( + component_folder, module_file=library_name + ".py", class_name=class_name + ) + class_candidates = {c: class_obj for c in importable_classes.keys()} + else: + # else we just import it from the library. 
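+        # typically library_name is an installed package such as "transformers",
+        # e.g. ("transformers", "CLIPTextModel") for a text encoder component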
+ library = importlib.import_module(library_name) + + class_obj = getattr(library, class_name) + class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()} + + return class_obj, class_candidates + + +def _get_custom_pipeline_class( + custom_pipeline, + repo_id=None, + hub_revision=None, + class_name=None, + cache_dir=None, + revision=None, +): + if custom_pipeline.endswith(".py"): + path = Path(custom_pipeline) + # decompose into folder & file + file_name = path.name + custom_pipeline = path.parent.absolute() + elif repo_id is not None: + file_name = f"{custom_pipeline}.py" + custom_pipeline = repo_id + else: + file_name = CUSTOM_PIPELINE_FILE_NAME + + if repo_id is not None and hub_revision is not None: + # if we load the pipeline code from the Hub + # make sure to overwrite the `revision` + revision = hub_revision + + return get_class_from_dynamic_module( + custom_pipeline, + module_file=file_name, + class_name=class_name, + cache_dir=cache_dir, + revision=revision, + ) + + +def _get_pipeline_class( + class_obj, + config=None, + load_connected_pipeline=False, + custom_pipeline=None, + repo_id=None, + hub_revision=None, + class_name=None, + cache_dir=None, + revision=None, +): + if custom_pipeline is not None: + return _get_custom_pipeline_class( + custom_pipeline, + repo_id=repo_id, + hub_revision=hub_revision, + class_name=class_name, + cache_dir=cache_dir, + revision=revision, + ) + + if class_obj.__name__ != "DiffusionPipeline": + return class_obj + + diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0]) + class_name = class_name or config["_class_name"] + if not class_name: + raise ValueError( + "The class name could not be found in the configuration file. Please make sure to pass the correct `class_name`." + ) + + class_name = class_name[4:] if class_name.startswith("Flax") else class_name + + pipeline_cls = getattr(diffusers_module, class_name) + + if load_connected_pipeline: + from .auto_pipeline import _get_connected_pipeline + + connected_pipeline_cls = _get_connected_pipeline(pipeline_cls) + if connected_pipeline_cls is not None: + logger.info( + f"Loading connected pipeline {connected_pipeline_cls.__name__} instead of {pipeline_cls.__name__} as specified via `load_connected_pipeline=True`" + ) + else: + logger.info(f"{pipeline_cls.__name__} has no connected pipeline class. Loading {pipeline_cls.__name__}.") + + pipeline_cls = connected_pipeline_cls or pipeline_cls + + return pipeline_cls + + +def _load_empty_model( + library_name: str, + class_name: str, + importable_classes: List[Any], + pipelines: Any, + is_pipeline_module: bool, + name: str, + torch_dtype: Union[str, torch.dtype], + cached_folder: Union[str, os.PathLike], + **kwargs, +): + # retrieve class objects. + class_obj, _ = get_class_obj_and_candidates( + library_name, + class_name, + importable_classes, + pipelines, + is_pipeline_module, + component_name=name, + cache_dir=cached_folder, + ) + + if is_transformers_available(): + transformers_version = version.parse(version.parse(transformers.__version__).base_version) + else: + transformers_version = "N/A" + + # Determine library. 
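+    # A component is treated as a transformers model only when transformers >= 4.20.0 is installed and the
+    # class subclasses PreTrainedModel; diffusers components are detected separately via ModelMixin, and
+    # anything else is skipped (the returned model stays None).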
+ is_transformers_model = ( + is_transformers_available() + and issubclass(class_obj, PreTrainedModel) + and transformers_version >= version.parse("4.20.0") + ) + diffusers_module = importlib.import_module(__name__.split(".")[0]) + is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) + + model = None + config_path = cached_folder + user_agent = { + "diffusers": __version__, + "file_type": "model", + "framework": "pytorch", + } + + if is_diffusers_model: + # Load config and then the model on meta. + config, unused_kwargs, commit_hash = class_obj.load_config( + os.path.join(config_path, name), + cache_dir=cached_folder, + return_unused_kwargs=True, + return_commit_hash=True, + force_download=kwargs.pop("force_download", False), + proxies=kwargs.pop("proxies", None), + local_files_only=kwargs.pop("local_files_only", False), + token=kwargs.pop("token", None), + revision=kwargs.pop("revision", None), + subfolder=kwargs.pop("subfolder", None), + user_agent=user_agent, + ) + with accelerate.init_empty_weights(): + model = class_obj.from_config(config, **unused_kwargs) + elif is_transformers_model: + config_class = getattr(class_obj, "config_class", None) + if config_class is None: + raise ValueError("`config_class` cannot be None. Please double-check the model.") + + config = config_class.from_pretrained( + cached_folder, + subfolder=name, + force_download=kwargs.pop("force_download", False), + proxies=kwargs.pop("proxies", None), + local_files_only=kwargs.pop("local_files_only", False), + token=kwargs.pop("token", None), + revision=kwargs.pop("revision", None), + user_agent=user_agent, + ) + with accelerate.init_empty_weights(): + model = class_obj(config) + + if model is not None: + model = model.to(dtype=torch_dtype) + return model + + +def _assign_components_to_devices( + module_sizes: Dict[str, float], device_memory: Dict[str, float], device_mapping_strategy: str = "balanced" +): + device_ids = list(device_memory.keys()) + device_cycle = device_ids + device_ids[::-1] + device_memory = device_memory.copy() + + device_id_component_mapping = {} + current_device_index = 0 + for component in module_sizes: + device_id = device_cycle[current_device_index % len(device_cycle)] + component_memory = module_sizes[component] + curr_device_memory = device_memory[device_id] + + # If the GPU doesn't fit the current component offload to the CPU. + if component_memory > curr_device_memory: + device_id_component_mapping["cpu"] = [component] + else: + if device_id not in device_id_component_mapping: + device_id_component_mapping[device_id] = [component] + else: + device_id_component_mapping[device_id].append(component) + + # Update the device memory. + device_memory[device_id] -= component_memory + current_device_index += 1 + + return device_id_component_mapping + + +def _get_final_device_map(device_map, pipeline_class, passed_class_obj, init_dict, library, max_memory, **kwargs): + # To avoid circular import problem. + from diffusers import pipelines + + torch_dtype = kwargs.get("torch_dtype", torch.float32) + + # Load each module in the pipeline on a meta device so that we can derive the device map. 
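+    # Only parameter sizes are needed here, so newly loaded sub-models are instantiated under
+    # accelerate.init_empty_weights() and their weights are never materialized.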
+ init_empty_modules = {} + for name, (library_name, class_name) in init_dict.items(): + if class_name.startswith("Flax"): + raise ValueError("Flax pipelines are not supported with `device_map`.") + + # Define all importable classes + is_pipeline_module = hasattr(pipelines, library_name) + importable_classes = ALL_IMPORTABLE_CLASSES + loaded_sub_model = None + + # Use passed sub model or load class_name from library_name + if name in passed_class_obj: + # if the model is in a pipeline module, then we load it from the pipeline + # check that passed_class_obj has correct parent class + maybe_raise_or_warn( + library_name, + library, + class_name, + importable_classes, + passed_class_obj, + name, + is_pipeline_module, + ) + with accelerate.init_empty_weights(): + loaded_sub_model = passed_class_obj[name] + + else: + loaded_sub_model = _load_empty_model( + library_name=library_name, + class_name=class_name, + importable_classes=importable_classes, + pipelines=pipelines, + is_pipeline_module=is_pipeline_module, + pipeline_class=pipeline_class, + name=name, + torch_dtype=torch_dtype, + cached_folder=kwargs.get("cached_folder", None), + force_download=kwargs.get("force_download", None), + proxies=kwargs.get("proxies", None), + local_files_only=kwargs.get("local_files_only", None), + token=kwargs.get("token", None), + revision=kwargs.get("revision", None), + ) + + if loaded_sub_model is not None: + init_empty_modules[name] = loaded_sub_model + + # determine device map + # Obtain a sorted dictionary for mapping the model-level components + # to their sizes. + module_sizes = { + module_name: compute_module_sizes(module, dtype=torch_dtype)[""] + for module_name, module in init_empty_modules.items() + if isinstance(module, torch.nn.Module) + } + module_sizes = dict(sorted(module_sizes.items(), key=lambda item: item[1], reverse=True)) + + # Obtain maximum memory available per device (GPUs only). + max_memory = get_max_memory(max_memory) + max_memory = dict(sorted(max_memory.items(), key=lambda item: item[1], reverse=True)) + max_memory = {k: v for k, v in max_memory.items() if k != "cpu"} + + # Obtain a dictionary mapping the model-level components to the available + # devices based on the maximum memory and the model sizes. 
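+    # Components are visited from largest to smallest, the available GPUs are cycled forward and then in
+    # reverse, and a component that does not fit on its candidate GPU falls back to the CPU.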
+ final_device_map = None + if len(max_memory) > 0: + device_id_component_mapping = _assign_components_to_devices( + module_sizes, max_memory, device_mapping_strategy=device_map + ) + + # Obtain the final device map, e.g., `{"unet": 0, "text_encoder": 1, "vae": 1, ...}` + final_device_map = {} + for device_id, components in device_id_component_mapping.items(): + for component in components: + final_device_map[component] = device_id + + return final_device_map + + +def load_sub_model( + library_name: str, + class_name: str, + importable_classes: List[Any], + pipelines: Any, + is_pipeline_module: bool, + pipeline_class: Any, + torch_dtype: torch.dtype, + provider: Any, + sess_options: Any, + device_map: Optional[Union[Dict[str, torch.device], str]], + max_memory: Optional[Dict[Union[int, str], Union[int, str]]], + offload_folder: Optional[Union[str, os.PathLike]], + offload_state_dict: bool, + model_variants: Dict[str, str], + name: str, + from_flax: bool, + variant: str, + low_cpu_mem_usage: bool, + cached_folder: Union[str, os.PathLike], +): + """Helper method to load the module `name` from `library_name` and `class_name`""" + + # retrieve class candidates + + class_obj, class_candidates = get_class_obj_and_candidates( + library_name, + class_name, + importable_classes, + pipelines, + is_pipeline_module, + component_name=name, + cache_dir=cached_folder, + ) + + load_method_name = None + # retrieve load method name + for class_name, class_candidate in class_candidates.items(): + if class_candidate is not None and issubclass(class_obj, class_candidate): + load_method_name = importable_classes[class_name][1] + + # if load method name is None, then we have a dummy module -> raise Error + if load_method_name is None: + none_module = class_obj.__module__ + is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith( + TRANSFORMERS_DUMMY_MODULES_FOLDER + ) + if is_dummy_path and "dummy" in none_module: + # call class_obj for nice error message of missing requirements + class_obj() + + raise ValueError( + f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have" + f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}." + ) + + load_method = getattr(class_obj, load_method_name) + + # add kwargs to loading method + diffusers_module = importlib.import_module(__name__.split(".")[0]) + loading_kwargs = {} + if issubclass(class_obj, torch.nn.Module): + loading_kwargs["torch_dtype"] = torch_dtype + if issubclass(class_obj, diffusers_module.OnnxRuntimeModel): + loading_kwargs["provider"] = provider + loading_kwargs["sess_options"] = sess_options + + is_diffusers_model = issubclass(class_obj, diffusers_module.ModelMixin) + + if is_transformers_available(): + transformers_version = version.parse(version.parse(transformers.__version__).base_version) + else: + transformers_version = "N/A" + + is_transformers_model = ( + is_transformers_available() + and issubclass(class_obj, PreTrainedModel) + and transformers_version >= version.parse("4.20.0") + ) + + # When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers. + # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default. + # This makes sure that the weights won't be initialized which significantly speeds up loading. 
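+    # These kwargs are only forwarded for diffusers / transformers models; other components such as
+    # tokenizers, schedulers and feature extractors do not accept them.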
+ if is_diffusers_model or is_transformers_model: + loading_kwargs["device_map"] = device_map + loading_kwargs["max_memory"] = max_memory + loading_kwargs["offload_folder"] = offload_folder + loading_kwargs["offload_state_dict"] = offload_state_dict + loading_kwargs["variant"] = model_variants.pop(name, None) + + if from_flax: + loading_kwargs["from_flax"] = True + + # the following can be deleted once the minimum required `transformers` version + # is higher than 4.27 + if ( + is_transformers_model + and loading_kwargs["variant"] is not None + and transformers_version < version.parse("4.27.0") + ): + raise ImportError( + f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0" + ) + elif is_transformers_model and loading_kwargs["variant"] is None: + loading_kwargs.pop("variant") + + # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage` + if not (from_flax and is_transformers_model): + loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage + else: + loading_kwargs["low_cpu_mem_usage"] = False + + # check if the module is in a subdirectory + if os.path.isdir(os.path.join(cached_folder, name)): + loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs) + else: + # else load from the root directory + loaded_sub_model = load_method(cached_folder, **loading_kwargs) + + if isinstance(loaded_sub_model, torch.nn.Module) and isinstance(device_map, dict): + # remove hooks + remove_hook_from_module(loaded_sub_model, recurse=True) + needs_offloading_to_cpu = device_map[""] == "cpu" + + if needs_offloading_to_cpu: + dispatch_model( + loaded_sub_model, + state_dict=loaded_sub_model.state_dict(), + device_map=device_map, + force_hooks=True, + main_device=0, + ) + else: + dispatch_model(loaded_sub_model, device_map=device_map, force_hooks=True) + + return loaded_sub_model + + +def _fetch_class_library_tuple(module): + # import it here to avoid circular import + diffusers_module = importlib.import_module(__name__.split(".")[0]) + pipelines = getattr(diffusers_module, "pipelines") + + # register the config from the original module, not the dynamo compiled one + not_compiled_module = _unwrap_model(module) + library = not_compiled_module.__module__.split(".")[0] + + # check if the module is a pipeline module + module_path_items = not_compiled_module.__module__.split(".") + pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None + + path = not_compiled_module.__module__.split(".") + is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) + + # if library is not in LOADABLE_CLASSES, then it is a custom module. + # Or if it's a pipeline module, then the module is inside the pipeline + # folder so we set the library to module name. 
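+    # e.g. a UNet2DConditionModel resolves to ("diffusers", "UNet2DConditionModel"), while a component
+    # defined inside a pipeline folder such as diffusers.pipelines.stable_diffusion resolves to
+    # ("stable_diffusion", "<class name>")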
+ if is_pipeline_module: + library = pipeline_dir + elif library not in LOADABLE_CLASSES: + library = not_compiled_module.__module__ + + # retrieve class_name + class_name = not_compiled_module.__class__.__name__ + + return (library, class_name) + + +def _identify_model_variants(folder: str, variant: str, config: dict) -> dict: + model_variants = {} + if variant is not None: + for sub_folder in os.listdir(folder): + folder_path = os.path.join(folder, sub_folder) + is_folder = os.path.isdir(folder_path) and sub_folder in config + variant_exists = is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + if variant_exists: + model_variants[sub_folder] = variant + return model_variants + + +def _resolve_custom_pipeline_and_cls(folder, config, custom_pipeline): + custom_class_name = None + if os.path.isfile(os.path.join(folder, f"{custom_pipeline}.py")): + custom_pipeline = os.path.join(folder, f"{custom_pipeline}.py") + elif isinstance(config["_class_name"], (list, tuple)) and os.path.isfile( + os.path.join(folder, f"{config['_class_name'][0]}.py") + ): + custom_pipeline = os.path.join(folder, f"{config['_class_name'][0]}.py") + custom_class_name = config["_class_name"][1] + + return custom_pipeline, custom_class_name + + +def _maybe_raise_warning_for_inpainting(pipeline_class, pretrained_model_name_or_path: str, config: dict): + if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse( + version.parse(config["_diffusers_version"]).base_version + ) <= version.parse("0.5.1"): + from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy + + pipeline_class = StableDiffusionInpaintPipelineLegacy + + deprecation_message = ( + "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the" + f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For" + " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting" + " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your" + f" checkpoint {pretrained_model_name_or_path} to the format of" + " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain" + " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0." 
+ ) + deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False) + + +def _update_init_kwargs_with_connected_pipeline( + init_kwargs: dict, passed_pipe_kwargs: dict, passed_class_objs: dict, folder: str, **pipeline_loading_kwargs +) -> dict: + from .pipeline_utils import DiffusionPipeline + + modelcard = ModelCard.load(os.path.join(folder, "README.md")) + connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} + + # We don't scheduler argument to match the existing logic: + # https://github.com/huggingface/diffusers/blob/867e0c919e1aa7ef8b03c8eb1460f4f875a683ae/src/diffusers/pipelines/pipeline_utils.py#L906C13-L925C14 + pipeline_loading_kwargs_cp = pipeline_loading_kwargs.copy() + if pipeline_loading_kwargs_cp is not None and len(pipeline_loading_kwargs_cp) >= 1: + for k in pipeline_loading_kwargs: + if "scheduler" in k: + _ = pipeline_loading_kwargs_cp.pop(k) + + def get_connected_passed_kwargs(prefix): + connected_passed_class_obj = { + k.replace(f"{prefix}_", ""): w for k, w in passed_class_objs.items() if k.split("_")[0] == prefix + } + connected_passed_pipe_kwargs = { + k.replace(f"{prefix}_", ""): w for k, w in passed_pipe_kwargs.items() if k.split("_")[0] == prefix + } + + connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs} + return connected_passed_kwargs + + connected_pipes = { + prefix: DiffusionPipeline.from_pretrained( + repo_id, **pipeline_loading_kwargs_cp, **get_connected_passed_kwargs(prefix) + ) + for prefix, repo_id in connected_pipes.items() + if repo_id is not None + } + + for prefix, connected_pipe in connected_pipes.items(): + # add connected pipes to `init_kwargs` with _, e.g. "prior_text_encoder" + init_kwargs.update( + {"_".join([prefix, name]): component for name, component in connected_pipe.components.items()} + ) + + return init_kwargs diff --git a/diffusers3/pipelines/pipeline_utils.py b/diffusers3/pipelines/pipeline_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aa6da17edfe7570322b3f5c870841f20ce335332 --- /dev/null +++ b/diffusers3/pipelines/pipeline_utils.py @@ -0,0 +1,1932 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import fnmatch +import importlib +import inspect +import os +import re +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Callable, Dict, List, Optional, Union, get_args, get_origin + +import numpy as np +import PIL.Image +import requests +import torch +from huggingface_hub import ( + ModelCard, + create_repo, + hf_hub_download, + model_info, + snapshot_download, +) +from huggingface_hub.utils import OfflineModeIsEnabled, validate_hf_hub_args +from packaging import version +from requests.exceptions import HTTPError +from tqdm.auto import tqdm + +from .. 
import __version__ +from ..configuration_utils import ConfigMixin +from ..models import AutoencoderKL +from ..models.attention_processor import FusedAttnProcessor2_0 +from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, ModelMixin +from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME +from ..utils import ( + CONFIG_NAME, + DEPRECATED_REVISION_ARGS, + BaseOutput, + PushToHubMixin, + deprecate, + is_accelerate_available, + is_accelerate_version, + is_torch_npu_available, + is_torch_version, + logging, + numpy_to_pil, +) +from ..utils.hub_utils import load_or_create_model_card, populate_model_card +from ..utils.torch_utils import is_compiled_module + + +if is_torch_npu_available(): + import torch_npu # noqa: F401 + + +from .pipeline_loading_utils import ( + ALL_IMPORTABLE_CLASSES, + CONNECTED_PIPES_KEYS, + CUSTOM_PIPELINE_FILE_NAME, + LOADABLE_CLASSES, + _fetch_class_library_tuple, + _get_custom_pipeline_class, + _get_final_device_map, + _get_pipeline_class, + _identify_model_variants, + _maybe_raise_warning_for_inpainting, + _resolve_custom_pipeline_and_cls, + _unwrap_model, + _update_init_kwargs_with_connected_pipeline, + is_safetensors_compatible, + load_sub_model, + maybe_raise_or_warn, + variant_compatible_siblings, + warn_deprecated_model_variant, +) + + +if is_accelerate_available(): + import accelerate + + +LIBRARIES = [] +for library in LOADABLE_CLASSES: + LIBRARIES.append(library) + +SUPPORTED_DEVICE_MAP = ["balanced"] + +logger = logging.get_logger(__name__) + + +@dataclass +class ImagePipelineOutput(BaseOutput): + """ + Output class for image pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +@dataclass +class AudioPipelineOutput(BaseOutput): + """ + Output class for audio pipelines. + + Args: + audios (`np.ndarray`) + List of denoised audio samples of a NumPy array of shape `(batch_size, num_channels, sample_rate)`. + """ + + audios: np.ndarray + + +class DiffusionPipeline(ConfigMixin, PushToHubMixin): + r""" + Base class for all pipelines. + + [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and + provides methods for loading, downloading and saving models. It also includes methods to: + + - move all PyTorch modules to the device of your choice + - enable/disable the progress bar for the denoising iteration + + Class attributes: + + - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the + diffusion pipeline's components. + - **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the + pipeline to function (should be overridden by subclasses). 
+ """ + + config_name = "model_index.json" + model_cpu_offload_seq = None + hf_device_map = None + _optional_components = [] + _exclude_from_cpu_offload = [] + _load_connected_pipes = False + _is_onnx = False + + def register_modules(self, **kwargs): + for name, module in kwargs.items(): + # retrieve library + if module is None or isinstance(module, (tuple, list)) and module[0] is None: + register_dict = {name: (None, None)} + else: + library, class_name = _fetch_class_library_tuple(module) + register_dict = {name: (library, class_name)} + + # save model index config + self.register_to_config(**register_dict) + + # set models + setattr(self, name, module) + + def __setattr__(self, name: str, value: Any): + if name in self.__dict__ and hasattr(self.config, name): + # We need to overwrite the config if name exists in config + if isinstance(getattr(self.config, name), (tuple, list)): + if value is not None and self.config[name][0] is not None: + class_library_tuple = _fetch_class_library_tuple(value) + else: + class_library_tuple = (None, None) + + self.register_to_config(**{name: class_library_tuple}) + else: + self.register_to_config(**{name: value}) + + super().__setattr__(name, value) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + safe_serialization: bool = True, + variant: Optional[str] = None, + push_to_hub: bool = False, + **kwargs, + ): + """ + Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its + class implements both a save and loading method. The pipeline is easily reloaded using the + [`~DiffusionPipeline.from_pretrained`] class method. + + Arguments: + save_directory (`str` or `os.PathLike`): + Directory to save a pipeline to. Will be created if it doesn't exist. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`. + variant (`str`, *optional*): + If specified, weights are saved in the format `pytorch_model..bin`. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + model_index_dict = dict(self.config) + model_index_dict.pop("_class_name", None) + model_index_dict.pop("_diffusers_version", None) + model_index_dict.pop("_module", None) + model_index_dict.pop("_name_or_path", None) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + private = kwargs.pop("private", False) + create_pr = kwargs.pop("create_pr", False) + token = kwargs.pop("token", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id + + expected_modules, optional_kwargs = self._get_signature_keys(self) + + def is_saveable_module(name, value): + if name not in expected_modules: + return False + if name in self._optional_components and value[0] is None: + return False + return True + + model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)} + for pipeline_component_name in model_index_dict.keys(): + sub_model = getattr(self, pipeline_component_name) + model_cls = sub_model.__class__ + + # Dynamo wraps the original model in a private class. + # I didn't find a public API to get the original class. + if is_compiled_module(sub_model): + sub_model = _unwrap_model(sub_model) + model_cls = sub_model.__class__ + + save_method_name = None + # search for the model's base class in LOADABLE_CLASSES + for library_name, library_classes in LOADABLE_CLASSES.items(): + if library_name in sys.modules: + library = importlib.import_module(library_name) + else: + logger.info( + f"{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}" + ) + + for base_class, save_load_methods in library_classes.items(): + class_candidate = getattr(library, base_class, None) + if class_candidate is not None and issubclass(model_cls, class_candidate): + # if we found a suitable base class in LOADABLE_CLASSES then grab its save method + save_method_name = save_load_methods[0] + break + if save_method_name is not None: + break + + if save_method_name is None: + logger.warning( + f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved." + ) + # make sure that unsaveable components are not tried to be loaded afterward + self.register_to_config(**{pipeline_component_name: (None, None)}) + continue + + save_method = getattr(sub_model, save_method_name) + + # Call the save method with the argument safe_serialization only if it's supported + save_method_signature = inspect.signature(save_method) + save_method_accept_safe = "safe_serialization" in save_method_signature.parameters + save_method_accept_variant = "variant" in save_method_signature.parameters + + save_kwargs = {} + if save_method_accept_safe: + save_kwargs["safe_serialization"] = safe_serialization + if save_method_accept_variant: + save_kwargs["variant"] = variant + + save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs) + + # finally save the config + self.save_config(save_directory) + + if push_to_hub: + # Create a new empty model card and eventually tag it + model_card = load_or_create_model_card(repo_id, token=token, is_pipeline=True) + model_card = populate_model_card(model_card) + model_card.save(os.path.join(save_directory, "README.md")) + + self._upload_folder( + save_directory, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) + + def to(self, *args, **kwargs): + r""" + Performs Pipeline dtype and/or device conversion. 
A torch.dtype and torch.device are inferred from the + arguments of `self.to(*args, **kwargs).` + + + + If the pipeline already has the correct torch.dtype and torch.device, then it is returned as is. Otherwise, + the returned pipeline is a copy of self with the desired torch.dtype and torch.device. + + + + + Here are the ways to call `to`: + + - `to(dtype, silence_dtype_warnings=False) โ†’ DiffusionPipeline` to return a pipeline with the specified + [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) + - `to(device, silence_dtype_warnings=False) โ†’ DiffusionPipeline` to return a pipeline with the specified + [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) + - `to(device=None, dtype=None, silence_dtype_warnings=False) โ†’ DiffusionPipeline` to return a pipeline with the + specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) and + [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) + + Arguments: + dtype (`torch.dtype`, *optional*): + Returns a pipeline with the specified + [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype) + device (`torch.Device`, *optional*): + Returns a pipeline with the specified + [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) + silence_dtype_warnings (`str`, *optional*, defaults to `False`): + Whether to omit warnings if the target `dtype` is not compatible with the target `device`. + + Returns: + [`DiffusionPipeline`]: The pipeline converted to specified `dtype` and/or `dtype`. + """ + dtype = kwargs.pop("dtype", None) + device = kwargs.pop("device", None) + silence_dtype_warnings = kwargs.pop("silence_dtype_warnings", False) + + dtype_arg = None + device_arg = None + if len(args) == 1: + if isinstance(args[0], torch.dtype): + dtype_arg = args[0] + else: + device_arg = torch.device(args[0]) if args[0] is not None else None + elif len(args) == 2: + if isinstance(args[0], torch.dtype): + raise ValueError( + "When passing two arguments, make sure the first corresponds to `device` and the second to `dtype`." + ) + device_arg = torch.device(args[0]) if args[0] is not None else None + dtype_arg = args[1] + elif len(args) > 2: + raise ValueError("Please make sure to pass at most two arguments (`device` and `dtype`) `.to(...)`") + + if dtype is not None and dtype_arg is not None: + raise ValueError( + "You have passed `dtype` both as an argument and as a keyword argument. Please only pass one of the two." + ) + + dtype = dtype or dtype_arg + + if device is not None and device_arg is not None: + raise ValueError( + "You have passed `device` both as an argument and as a keyword argument. Please only pass one of the two." + ) + + device = device or device_arg + + # throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU. 
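+        # Sequential (per-layer) offloading is incompatible with a manual move to CUDA and raises below,
+        # while whole-model offloading only triggers a warning: the move succeeds but the memory savings
+        # from offloading are lost.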
+ def module_is_sequentially_offloaded(module): + if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"): + return False + + return hasattr(module, "_hf_hook") and ( + isinstance(module._hf_hook, accelerate.hooks.AlignDevicesHook) + or hasattr(module._hf_hook, "hooks") + and isinstance(module._hf_hook.hooks[0], accelerate.hooks.AlignDevicesHook) + ) + + def module_is_offloaded(module): + if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"): + return False + + return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload) + + # .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer + pipeline_is_sequentially_offloaded = any( + module_is_sequentially_offloaded(module) for _, module in self.components.items() + ) + if pipeline_is_sequentially_offloaded and device and torch.device(device).type == "cuda": + raise ValueError( + "It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading." + ) + + is_pipeline_device_mapped = self.hf_device_map is not None and len(self.hf_device_map) > 1 + if is_pipeline_device_mapped: + raise ValueError( + "It seems like you have activated a device mapping strategy on the pipeline which doesn't allow explicit device placement using `to()`. You can call `reset_device_map()` first and then call `to()`." + ) + + # Display a warning in this case (the operation succeeds but the benefits are lost) + pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items()) + if pipeline_is_offloaded and device and torch.device(device).type == "cuda": + logger.warning( + f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading." + ) + + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded + for module in modules: + is_loaded_in_8bit = hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit + + if is_loaded_in_8bit and dtype is not None: + logger.warning( + f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {dtype} is not yet supported. Module is still in 8bit precision." + ) + + if is_loaded_in_8bit and device is not None: + logger.warning( + f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {dtype} via `.to()` is not yet supported. Module is still on {module.device}." 
+ ) + else: + module.to(device, dtype) + + if ( + module.dtype == torch.float16 + and str(device) in ["cpu"] + and not silence_dtype_warnings + and not is_offloaded + ): + logger.warning( + "Pipelines loaded with `dtype=torch.float16` cannot run with `cpu` device. It" + " is not recommended to move them to `cpu` as running them will fail. Please make" + " sure to use an accelerator to run the pipeline in inference, due to the lack of" + " support for`float16` operations on this device in PyTorch. Please, remove the" + " `torch_dtype=torch.float16` argument, or use another device for inference." + ) + return self + + @property + def device(self) -> torch.device: + r""" + Returns: + `torch.device`: The torch device on which the pipeline is located. + """ + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + for module in modules: + return module.device + + return torch.device("cpu") + + @property + def dtype(self) -> torch.dtype: + r""" + Returns: + `torch.dtype`: The torch dtype on which the pipeline is located. + """ + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + for module in modules: + return module.dtype + + return torch.float32 + + @classmethod + @validate_hf_hub_args + def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): + r""" + Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights. + + The pipeline is set in evaluation mode (`model.eval()`) by default. + + If you get the error message below, you need to finetune the weights for your downstream task: + + ``` + Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: + - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated + You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. + ``` + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights + saved using + [`~DiffusionPipeline.save_pretrained`]. + torch_dtype (`str` or `torch.dtype`, *optional*): + Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the + dtype is automatically derived from the model's weights. + custom_pipeline (`str`, *optional*): + + + + ๐Ÿงช This is an experimental feature and may change in the future. + + + + Can be either: + + - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom + pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines + the custom pipeline. + - A string, the *file name* of a community pipeline hosted on GitHub under + [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file + names must match the file name and not the pipeline script (`clip_guided_stable_diffusion` + instead of `clip_guided_stable_diffusion.py`). 
Community pipelines are always loaded from the + current main branch of GitHub. + - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. The directory + must contain a file called `pipeline.py` that defines the custom pipeline. + + For more information on how to load and create custom pipelines, please have a look at [Loading and + Adding Custom + Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview) + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. Defaults to the latest stable ๐Ÿค— Diffusers + version. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if youโ€™re downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + A map that specifies where each submodule should go. It doesnโ€™t need to be defined for each + parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the + same device. + + Set `device_map="auto"` to have ๐Ÿค— Accelerate automatically compute the most optimized `device_map`. For + more information about each option see [designing a device + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + max_memory (`Dict`, *optional*): + A dictionary device identifier for the maximum memory. Will default to the maximum memory available for + each GPU and the available CPU RAM if unset. + offload_folder (`str` or `os.PathLike`, *optional*): + The path to offload weights if device_map contains the value `"disk"`. + offload_state_dict (`bool`, *optional*): + If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if + the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` + when there is some disk offload. 
+ low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): + Speed up model loading only loading the pretrained weights and not initializing the weights. This also + tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. + Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this + argument to `True` will raise an error. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + use_onnx (`bool`, *optional*, defaults to `None`): + If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights + will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is + `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending + with `.onnx` and `.pb`. + kwargs (remaining dictionary of keyword arguments, *optional*): + Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline + class). The overwritten components are passed directly to the pipelines `__init__` method. See example + below for more information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + + + + To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with + `huggingface-cli login`. + + + + Examples: + + ```py + >>> from diffusers import DiffusionPipeline + + >>> # Download pipeline from huggingface.co and cache. + >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") + + >>> # Download pipeline that requires an authorization token + >>> # For more information on access tokens, please refer to this section + >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens) + >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + + >>> # Use a different scheduler + >>> from diffusers import LMSDiscreteScheduler + + >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.scheduler = scheduler + ``` + """ + # Copy the kwargs to re-use during loading connected pipeline. 
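+        # (Every supported loading option is popped off `kwargs` below; this untouched
+        # copy is what gets forwarded later on when connected pipelines are loaded.)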
+ kwargs_copied = kwargs.copy() + + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + from_flax = kwargs.pop("from_flax", False) + torch_dtype = kwargs.pop("torch_dtype", None) + custom_pipeline = kwargs.pop("custom_pipeline", None) + custom_revision = kwargs.pop("custom_revision", None) + provider = kwargs.pop("provider", None) + sess_options = kwargs.pop("sess_options", None) + device_map = kwargs.pop("device_map", None) + max_memory = kwargs.pop("max_memory", None) + offload_folder = kwargs.pop("offload_folder", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT) + variant = kwargs.pop("variant", None) + use_safetensors = kwargs.pop("use_safetensors", None) + use_onnx = kwargs.pop("use_onnx", None) + load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) + + if low_cpu_mem_usage and not is_accelerate_available(): + low_cpu_mem_usage = False + logger.warning( + "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the" + " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install" + " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip" + " install accelerate\n```\n." + ) + + if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `low_cpu_mem_usage=False`." + ) + + if device_map is not None and not is_torch_version(">=", "1.9.0"): + raise NotImplementedError( + "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" + " `device_map=None`." + ) + + if device_map is not None and not is_accelerate_available(): + raise NotImplementedError( + "Using `device_map` requires the `accelerate` library. Please install it using: `pip install accelerate`." + ) + + if device_map is not None and not isinstance(device_map, str): + raise ValueError("`device_map` must be a string.") + + if device_map is not None and device_map not in SUPPORTED_DEVICE_MAP: + raise NotImplementedError( + f"{device_map} not supported. Supported strategies are: {', '.join(SUPPORTED_DEVICE_MAP)}" + ) + + if device_map is not None and device_map in SUPPORTED_DEVICE_MAP: + if is_accelerate_version("<", "0.28.0"): + raise NotImplementedError("Device placement requires `accelerate` version `0.28.0` or later.") + + if low_cpu_mem_usage is False and device_map is not None: + raise ValueError( + f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and" + " dispatching. Please make sure to set `low_cpu_mem_usage=True`." + ) + + # 1. Download the checkpoints and configs + # use snapshot download here to get it working from from_pretrained + if not os.path.isdir(pretrained_model_name_or_path): + if pretrained_model_name_or_path.count("/") > 1: + raise ValueError( + f'The provided pretrained_model_name_or_path "{pretrained_model_name_or_path}"' + " is neither a valid local path nor a valid repo id. Please check the parameter." 
+ ) + cached_folder = cls.download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + from_flax=from_flax, + use_safetensors=use_safetensors, + use_onnx=use_onnx, + custom_pipeline=custom_pipeline, + custom_revision=custom_revision, + variant=variant, + load_connected_pipeline=load_connected_pipeline, + **kwargs, + ) + else: + cached_folder = pretrained_model_name_or_path + + config_dict = cls.load_config(cached_folder) + + # pop out "_ignore_files" as it is only needed for download + config_dict.pop("_ignore_files", None) + + # 2. Define which model components should load variants + # We retrieve the information by matching whether variant model checkpoints exist in the subfolders. + # Example: `diffusion_pytorch_model.safetensors` -> `diffusion_pytorch_model.fp16.safetensors` + # with variant being `"fp16"`. + model_variants = _identify_model_variants(folder=cached_folder, variant=variant, config=config_dict) + + # 3. Load the pipeline class, if using custom module then load it from the hub + # if we load from explicit class, let's use it + custom_pipeline, custom_class_name = _resolve_custom_pipeline_and_cls( + folder=cached_folder, config=config_dict, custom_pipeline=custom_pipeline + ) + pipeline_class = _get_pipeline_class( + cls, + config=config_dict, + load_connected_pipeline=load_connected_pipeline, + custom_pipeline=custom_pipeline, + class_name=custom_class_name, + cache_dir=cache_dir, + revision=custom_revision, + ) + + if device_map is not None and pipeline_class._load_connected_pipes: + raise NotImplementedError("`device_map` is not yet supported for connected pipelines.") + + # DEPRECATED: To be removed in 1.0.0 + # we are deprecating the `StableDiffusionInpaintPipelineLegacy` pipeline which gets loaded + # when a user requests for a `StableDiffusionInpaintPipeline` with `diffusers` version being <= 0.5.1. + _maybe_raise_warning_for_inpainting( + pipeline_class=pipeline_class, + pretrained_model_name_or_path=pretrained_model_name_or_path, + config=config_dict, + ) + + # 4. 
Define expected modules given pipeline signature + # and define non-None initialized modules (=`init_kwargs`) + + # some modules can be passed directly to the init + # in this case they are already instantiated in `kwargs` + # extract them here + expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs) + + # define init kwargs and make sure that optional component modules are filtered out + init_kwargs = { + k: init_dict.pop(k) + for k in optional_kwargs + if k in init_dict and k not in pipeline_class._optional_components + } + init_kwargs = {**init_kwargs, **passed_pipe_kwargs} + + # remove `null` components + def load_module(name, value): + if value[0] is None: + return False + if name in passed_class_obj and passed_class_obj[name] is None: + return False + return True + + init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)} + + # Special case: safety_checker must be loaded separately when using `from_flax` + if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj: + raise NotImplementedError( + "The safety checker cannot be automatically loaded when loading weights `from_flax`." + " Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker" + " separately if you need it." + ) + + # 5. Throw nice warnings / errors for fast accelerate loading + if len(unused_kwargs) > 0: + logger.warning( + f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored." + ) + + # import it here to avoid circular import + from diffusers import pipelines + + # 6. device map delegation + final_device_map = None + if device_map is not None: + final_device_map = _get_final_device_map( + device_map=device_map, + pipeline_class=pipeline_class, + passed_class_obj=passed_class_obj, + init_dict=init_dict, + library=library, + max_memory=max_memory, + torch_dtype=torch_dtype, + cached_folder=cached_folder, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + ) + + # 7. 
Load each module in the pipeline + current_device_map = None + for name, (library_name, class_name) in logging.tqdm(init_dict.items(), desc="Loading pipeline components..."): + # 7.1 device_map shenanigans + if final_device_map is not None and len(final_device_map) > 0: + component_device = final_device_map.get(name, None) + if component_device is not None: + current_device_map = {"": component_device} + else: + current_device_map = None + + # 7.2 - now that JAX/Flax is an official framework of the library, we might load from Flax names + class_name = class_name[4:] if class_name.startswith("Flax") else class_name + + # 7.3 Define all importable classes + is_pipeline_module = hasattr(pipelines, library_name) + importable_classes = ALL_IMPORTABLE_CLASSES + loaded_sub_model = None + + # 7.4 Use passed sub model or load class_name from library_name + if name in passed_class_obj: + # if the model is in a pipeline module, then we load it from the pipeline + # check that passed_class_obj has correct parent class + maybe_raise_or_warn( + library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module + ) + + loaded_sub_model = passed_class_obj[name] + else: + # load sub model + loaded_sub_model = load_sub_model( + library_name=library_name, + class_name=class_name, + importable_classes=importable_classes, + pipelines=pipelines, + is_pipeline_module=is_pipeline_module, + pipeline_class=pipeline_class, + torch_dtype=torch_dtype, + provider=provider, + sess_options=sess_options, + device_map=current_device_map, + max_memory=max_memory, + offload_folder=offload_folder, + offload_state_dict=offload_state_dict, + model_variants=model_variants, + name=name, + from_flax=from_flax, + variant=variant, + low_cpu_mem_usage=low_cpu_mem_usage, + cached_folder=cached_folder, + ) + logger.info( + f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}." + ) + + init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...) + + # 8. Handle connected pipelines. + if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, "README.md")): + init_kwargs = _update_init_kwargs_with_connected_pipeline( + init_kwargs=init_kwargs, + passed_pipe_kwargs=passed_pipe_kwargs, + passed_class_objs=passed_class_obj, + folder=cached_folder, + **kwargs_copied, + ) + + # 9. Potentially add passed objects if expected + missing_modules = set(expected_modules) - set(init_kwargs.keys()) + passed_modules = list(passed_class_obj.keys()) + optional_modules = pipeline_class._optional_components + if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules): + for module in missing_modules: + init_kwargs[module] = passed_class_obj.get(module, None) + elif len(missing_modules) > 0: + passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs + raise ValueError( + f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed." + ) + + # 10. Instantiate the pipeline + model = pipeline_class(**init_kwargs) + + # 11. Save where the model was instantiated from + model.register_to_config(_name_or_path=pretrained_model_name_or_path) + if device_map is not None: + setattr(model, "hf_device_map", final_device_map) + return model + + @property + def name_or_path(self) -> str: + return getattr(self.config, "_name_or_path", None) + + @property + def _execution_device(self): + r""" + Returns the device on which the pipeline's models will be executed. 
After calling + [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from + Accelerate's module hooks. + """ + for name, model in self.components.items(): + if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload: + continue + + if not hasattr(model, "_hf_hook"): + return self.device + for module in model.modules(): + if ( + hasattr(module, "_hf_hook") + and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None + ): + return torch.device(module._hf_hook.execution_device) + return self.device + + def remove_all_hooks(self): + r""" + Removes all hooks that were added when using `enable_sequential_cpu_offload` or `enable_model_cpu_offload`. + """ + for _, model in self.components.items(): + if isinstance(model, torch.nn.Module) and hasattr(model, "_hf_hook"): + accelerate.hooks.remove_hook_from_module(model, recurse=True) + self._all_hooks = [] + + def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + + Arguments: + gpu_id (`int`, *optional*): + The ID of the accelerator that shall be used in inference. If not specified, it will default to 0. + device (`torch.Device` or `str`, *optional*, defaults to "cuda"): + The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will + default to "cuda". + """ + is_pipeline_device_mapped = self.hf_device_map is not None and len(self.hf_device_map) > 1 + if is_pipeline_device_mapped: + raise ValueError( + "It seems like you have activated a device mapping strategy on the pipeline so calling `enable_model_cpu_offload() isn't allowed. You can call `reset_device_map()` first and then call `enable_model_cpu_offload()`." + ) + + if self.model_cpu_offload_seq is None: + raise ValueError( + "Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set." + ) + + if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") + + self.remove_all_hooks() + + torch_device = torch.device(device) + device_index = torch_device.index + + if gpu_id is not None and device_index is not None: + raise ValueError( + f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" + f"Cannot pass both. 
Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" + ) + + # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0 + self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0) + + device_type = torch_device.type + device = torch.device(f"{device_type}:{self._offload_gpu_id}") + self._offload_device = device + + self.to("cpu", silence_dtype_warnings=True) + device_mod = getattr(torch, device.type, None) + if hasattr(device_mod, "empty_cache") and device_mod.is_available(): + device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)} + + self._all_hooks = [] + hook = None + for model_str in self.model_cpu_offload_seq.split("->"): + model = all_model_components.pop(model_str, None) + if not isinstance(model, torch.nn.Module): + continue + + _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook) + self._all_hooks.append(hook) + + # CPU offload models that are not in the seq chain unless they are explicitly excluded + # these models will stay on CPU until maybe_free_model_hooks is called + # some models cannot be in the seq chain because they are iteratively called, such as controlnet + for name, model in all_model_components.items(): + if not isinstance(model, torch.nn.Module): + continue + + if name in self._exclude_from_cpu_offload: + model.to(device) + else: + _, hook = cpu_offload_with_hook(model, device) + self._all_hooks.append(hook) + + def maybe_free_model_hooks(self): + r""" + Function that offloads all components, removes all model hooks that were added when using + `enable_model_cpu_offload` and then applies them again. In case the model has not been offloaded this function + is a no-op. Make sure to add this function to the end of the `__call__` function of your pipeline so that it + functions correctly when applying enable_model_cpu_offload. + """ + if not hasattr(self, "_all_hooks") or len(self._all_hooks) == 0: + # `enable_model_cpu_offload` has not be called, so silently do nothing + return + + # make sure the model is in the same state as before calling it + self.enable_model_cpu_offload(device=getattr(self, "_offload_device", "cuda")) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using ๐Ÿค— Accelerate, significantly reducing memory usage. When called, the state + dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU + and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` + method called. Offloading happens on a submodule basis. Memory savings are higher than with + `enable_model_cpu_offload`, but performance is lower. + + Arguments: + gpu_id (`int`, *optional*): + The ID of the accelerator that shall be used in inference. If not specified, it will default to 0. + device (`torch.Device` or `str`, *optional*, defaults to "cuda"): + The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will + default to "cuda". 
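+
+        Example (a minimal sketch; the checkpoint id is illustrative and any pipeline checkpoint
+        can be substituted):
+
+        ```py
+        >>> import torch
+        >>> from diffusers import DiffusionPipeline
+
+        >>> pipe = DiffusionPipeline.from_pretrained(
+        ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+        ... )
+        >>> pipe.enable_sequential_cpu_offload()
+        >>> # weights move to the accelerator only while each submodule's `forward` runs
+        >>> image = pipe("a photo of an astronaut riding a horse on mars").images[0]
+        ```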
+ """ + if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"): + from accelerate import cpu_offload + else: + raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher") + self.remove_all_hooks() + + is_pipeline_device_mapped = self.hf_device_map is not None and len(self.hf_device_map) > 1 + if is_pipeline_device_mapped: + raise ValueError( + "It seems like you have activated a device mapping strategy on the pipeline so calling `enable_sequential_cpu_offload() isn't allowed. You can call `reset_device_map()` first and then call `enable_sequential_cpu_offload()`." + ) + + torch_device = torch.device(device) + device_index = torch_device.index + + if gpu_id is not None and device_index is not None: + raise ValueError( + f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}" + f"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}" + ) + + # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0 + self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0) + + device_type = torch_device.type + device = torch.device(f"{device_type}:{self._offload_gpu_id}") + self._offload_device = device + + if self.device.type != "cpu": + self.to("cpu", silence_dtype_warnings=True) + device_mod = getattr(torch, self.device.type, None) + if hasattr(device_mod, "empty_cache") and device_mod.is_available(): + device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + for name, model in self.components.items(): + if not isinstance(model, torch.nn.Module): + continue + + if name in self._exclude_from_cpu_offload: + model.to(device) + else: + # make sure to offload buffers if not all high level weights + # are of type nn.Module + offload_buffers = len(model._parameters) > 0 + cpu_offload(model, device, offload_buffers=offload_buffers) + + def reset_device_map(self): + r""" + Resets the device maps (if any) to None. + """ + if self.hf_device_map is None: + return + else: + self.remove_all_hooks() + for name, component in self.components.items(): + if isinstance(component, torch.nn.Module): + component.to("cpu") + self.hf_device_map = None + + @classmethod + @validate_hf_hub_args + def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: + r""" + Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights. + + Parameters: + pretrained_model_name (`str` or `os.PathLike`, *optional*): + A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline + hosted on the Hub. + custom_pipeline (`str`, *optional*): + Can be either: + + - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained + pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines + the custom pipeline. + + - A string, the *file name* of a community pipeline hosted on GitHub under + [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file + names must match the file name and not the pipeline script (`clip_guided_stable_diffusion` + instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the + current `main` branch of GitHub. + + - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. 
The directory + must contain a file called `pipeline.py` that defines the custom pipeline. + + + + ๐Ÿงช This is an experimental feature and may change in the future. + + + + For more information on how to load and create custom pipelines, take a look at [How to contribute a + community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline). + + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + custom_revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id similar to + `revision` when loading a custom pipeline from the Hub. It can be a ๐Ÿค— Diffusers version when loading a + custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. + mirror (`str`, *optional*): + Mirror source to resolve accessibility issues if you're downloading a model in China. We do not + guarantee the timeliness or safety of the source, and you should refer to the mirror site for more + information. + variant (`str`, *optional*): + Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when + loading `from_flax`. + use_safetensors (`bool`, *optional*, defaults to `None`): + If set to `None`, the safetensors weights are downloaded if they're available **and** if the + safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors + weights. If set to `False`, safetensors weights are not loaded. + use_onnx (`bool`, *optional*, defaults to `False`): + If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights + will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is + `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending + with `.onnx` and `.pb`. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom pipelines and components defined on the Hub in their own files. This + option should only be set to `True` for repositories you trust and in which you have read the code, as + it will execute code present on the Hub on your local machine. + + Returns: + `os.PathLike`: + A path to the downloaded pipeline. 
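+
+        Example (a minimal sketch; the repo id is the same illustrative one used elsewhere in
+        this docstring):
+
+        ```py
+        >>> from diffusers import DiffusionPipeline
+
+        >>> # fetch (or reuse) the cached snapshot without instantiating any models
+        >>> folder = DiffusionPipeline.download("CompVis/ldm-text2im-large-256")
+        >>> pipe = DiffusionPipeline.from_pretrained(folder)
+        ```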
+ + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. + + + + """ + cache_dir = kwargs.pop("cache_dir", None) + force_download = kwargs.pop("force_download", False) + proxies = kwargs.pop("proxies", None) + local_files_only = kwargs.pop("local_files_only", None) + token = kwargs.pop("token", None) + revision = kwargs.pop("revision", None) + from_flax = kwargs.pop("from_flax", False) + custom_pipeline = kwargs.pop("custom_pipeline", None) + custom_revision = kwargs.pop("custom_revision", None) + variant = kwargs.pop("variant", None) + use_safetensors = kwargs.pop("use_safetensors", None) + use_onnx = kwargs.pop("use_onnx", None) + load_connected_pipeline = kwargs.pop("load_connected_pipeline", False) + trust_remote_code = kwargs.pop("trust_remote_code", False) + + allow_pickle = False + if use_safetensors is None: + use_safetensors = True + allow_pickle = True + + allow_patterns = None + ignore_patterns = None + + model_info_call_error: Optional[Exception] = None + if not local_files_only: + try: + info = model_info(pretrained_model_name, token=token, revision=revision) + except (HTTPError, OfflineModeIsEnabled, requests.ConnectionError) as e: + logger.warning(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.") + local_files_only = True + model_info_call_error = e # save error to reraise it if model is not cached locally + + if not local_files_only: + config_file = hf_hub_download( + pretrained_model_name, + cls.config_name, + cache_dir=cache_dir, + revision=revision, + proxies=proxies, + force_download=force_download, + token=token, + ) + + config_dict = cls._dict_from_json_file(config_file) + ignore_filenames = config_dict.pop("_ignore_files", []) + + # retrieve all folder_names that contain relevant files + folder_names = [k for k, v in config_dict.items() if isinstance(v, list) and k != "_class_name"] + + filenames = {sibling.rfilename for sibling in info.siblings} + model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant) + + diffusers_module = importlib.import_module(__name__.split(".")[0]) + pipelines = getattr(diffusers_module, "pipelines") + + # optionally create a custom component <> custom file mapping + custom_components = {} + for component in folder_names: + module_candidate = config_dict[component][0] + + if module_candidate is None or not isinstance(module_candidate, str): + continue + + # We compute candidate file path on the Hub. Do not use `os.path.join`. + candidate_file = f"{component}/{module_candidate}.py" + + if candidate_file in filenames: + custom_components[component] = module_candidate + elif module_candidate not in LOADABLE_CLASSES and not hasattr(pipelines, module_candidate): + raise ValueError( + f"{candidate_file} as defined in `model_index.json` does not exist in {pretrained_model_name} and is not a module in 'diffusers/pipelines'." + ) + + if len(variant_filenames) == 0 and variant is not None: + deprecation_message = ( + f"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available." + f"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`" + "if such variant modeling files are not available. Doing so will lead to an error in v0.24.0 as defaulting to non-variant" + "modeling files is deprecated." 
+ ) + deprecate("no variant default", "0.24.0", deprecation_message, standard_warn=False) + + # remove ignored filenames + model_filenames = set(model_filenames) - set(ignore_filenames) + variant_filenames = set(variant_filenames) - set(ignore_filenames) + + # if the whole pipeline is cached we don't have to ping the Hub + if revision in DEPRECATED_REVISION_ARGS and version.parse( + version.parse(__version__).base_version + ) >= version.parse("0.22.0"): + warn_deprecated_model_variant(pretrained_model_name, token, variant, revision, model_filenames) + + model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names} + + custom_class_name = None + if custom_pipeline is None and isinstance(config_dict["_class_name"], (list, tuple)): + custom_pipeline = config_dict["_class_name"][0] + custom_class_name = config_dict["_class_name"][1] + + # all filenames compatible with variant will be added + allow_patterns = list(model_filenames) + + # allow all patterns from non-model folders + # this enables downloading schedulers, tokenizers, ... + allow_patterns += [f"{k}/*" for k in folder_names if k not in model_folder_names] + # add custom component files + allow_patterns += [f"{k}/{f}.py" for k, f in custom_components.items()] + # add custom pipeline file + allow_patterns += [f"{custom_pipeline}.py"] if f"{custom_pipeline}.py" in filenames else [] + # also allow downloading config.json files with the model + allow_patterns += [os.path.join(k, "config.json") for k in model_folder_names] + + allow_patterns += [ + SCHEDULER_CONFIG_NAME, + CONFIG_NAME, + cls.config_name, + CUSTOM_PIPELINE_FILE_NAME, + ] + + load_pipe_from_hub = custom_pipeline is not None and f"{custom_pipeline}.py" in filenames + load_components_from_hub = len(custom_components) > 0 + + if load_pipe_from_hub and not trust_remote_code: + raise ValueError( + f"The repository for {pretrained_model_name} contains custom code in {custom_pipeline}.py which must be executed to correctly " + f"load the model. You can inspect the repository content at https://hf.co/{pretrained_model_name}/blob/main/{custom_pipeline}.py.\n" + f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." + ) + + if load_components_from_hub and not trust_remote_code: + raise ValueError( + f"The repository for {pretrained_model_name} contains custom code in {'.py, '.join([os.path.join(k, v) for k,v in custom_components.items()])} which must be executed to correctly " + f"load the model. You can inspect the repository content at {', '.join([f'https://hf.co/{pretrained_model_name}/{k}/{v}.py' for k,v in custom_components.items()])}.\n" + f"Please pass the argument `trust_remote_code=True` to allow custom code to be run." 
+ ) + + # retrieve passed components that should not be downloaded + pipeline_class = _get_pipeline_class( + cls, + config_dict, + load_connected_pipeline=load_connected_pipeline, + custom_pipeline=custom_pipeline, + repo_id=pretrained_model_name if load_pipe_from_hub else None, + hub_revision=revision, + class_name=custom_class_name, + cache_dir=cache_dir, + revision=custom_revision, + ) + expected_components, _ = cls._get_signature_keys(pipeline_class) + passed_components = [k for k in expected_components if k in kwargs] + + if ( + use_safetensors + and not allow_pickle + and not is_safetensors_compatible( + model_filenames, passed_components=passed_components, folder_names=model_folder_names + ) + ): + raise EnvironmentError( + f"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})" + ) + if from_flax: + ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"] + elif use_safetensors and is_safetensors_compatible( + model_filenames, passed_components=passed_components, folder_names=model_folder_names + ): + ignore_patterns = ["*.bin", "*.msgpack"] + + use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx + if not use_onnx: + ignore_patterns += ["*.onnx", "*.pb"] + + safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")} + safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")} + if ( + len(safetensors_variant_filenames) > 0 + and safetensors_model_filenames != safetensors_variant_filenames + ): + logger.warning( + f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." + ) + else: + ignore_patterns = ["*.safetensors", "*.msgpack"] + + use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx + if not use_onnx: + ignore_patterns += ["*.onnx", "*.pb"] + + bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")} + bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")} + if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames: + logger.warning( + f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\nIf this behavior is not expected, please check your folder structure." 
+ ) + + # Don't download any objects that are passed + allow_patterns = [ + p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components) + ] + + if pipeline_class._load_connected_pipes: + allow_patterns.append("README.md") + + # Don't download index files of forbidden patterns either + ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns] + re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns] + re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns] + + expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)] + expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)] + + snapshot_folder = Path(config_file).parent + pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files) + + if pipeline_is_cached and not force_download: + # if the pipeline is cached, we can directly return it + # else call snapshot_download + return snapshot_folder + + user_agent = {"pipeline_class": cls.__name__} + if custom_pipeline is not None and not custom_pipeline.endswith(".py"): + user_agent["custom_pipeline"] = custom_pipeline + + # download all allow_patterns - ignore_patterns + try: + cached_folder = snapshot_download( + pretrained_model_name, + cache_dir=cache_dir, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, + ) + + # retrieve pipeline class from local file + cls_name = cls.load_config(os.path.join(cached_folder, "model_index.json")).get("_class_name", None) + cls_name = cls_name[4:] if isinstance(cls_name, str) and cls_name.startswith("Flax") else cls_name + + diffusers_module = importlib.import_module(__name__.split(".")[0]) + pipeline_class = getattr(diffusers_module, cls_name, None) if isinstance(cls_name, str) else None + + if pipeline_class is not None and pipeline_class._load_connected_pipes: + modelcard = ModelCard.load(os.path.join(cached_folder, "README.md")) + connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], []) + for connected_pipe_repo_id in connected_pipes: + download_kwargs = { + "cache_dir": cache_dir, + "force_download": force_download, + "proxies": proxies, + "local_files_only": local_files_only, + "token": token, + "variant": variant, + "use_safetensors": use_safetensors, + } + DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs) + + return cached_folder + + except FileNotFoundError: + # Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache. + # This can happen in two cases: + # 1. If the user passed `local_files_only=True` => we raise the error directly + # 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error + if model_info_call_error is None: + # 1. user passed `local_files_only=True` + raise + else: + # 2. we forced `local_files_only=True` when `model_info` failed + raise EnvironmentError( + f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred" + " while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace" + " above." 
+ ) from model_info_call_error + + @classmethod + def _get_signature_keys(cls, obj): + parameters = inspect.signature(obj.__init__).parameters + required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} + optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) + expected_modules = set(required_parameters.keys()) - {"self"} + + optional_names = list(optional_parameters) + for name in optional_names: + if name in cls._optional_components: + expected_modules.add(name) + optional_parameters.remove(name) + + return expected_modules, optional_parameters + + @classmethod + def _get_signature_types(cls): + signature_types = {} + for k, v in inspect.signature(cls.__init__).parameters.items(): + if inspect.isclass(v.annotation): + signature_types[k] = (v.annotation,) + elif get_origin(v.annotation) == Union: + signature_types[k] = get_args(v.annotation) + else: + logger.warning(f"cannot get type annotation for Parameter {k} of {cls}.") + return signature_types + + @property + def components(self) -> Dict[str, Any]: + r""" + The `self.components` property can be useful to run different pipelines with the same weights and + configurations without reallocating additional memory. + + Returns (`dict`): + A dictionary containing all the modules needed to initialize the pipeline. + + Examples: + + ```py + >>> from diffusers import ( + ... StableDiffusionPipeline, + ... StableDiffusionImg2ImgPipeline, + ... StableDiffusionInpaintPipeline, + ... ) + + >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components) + >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components) + ``` + """ + expected_modules, optional_parameters = self._get_signature_keys(self) + components = { + k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters + } + + if set(components.keys()) != expected_modules: + raise ValueError( + f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected" + f" {expected_modules} to be defined, but {components.keys()} are defined." + ) + + return components + + @staticmethod + def numpy_to_pil(images): + """ + Convert a NumPy image or a batch of images to a PIL image. + """ + return numpy_to_pil(images) + + def progress_bar(self, iterable=None, total=None): + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + elif not isinstance(self._progress_bar_config, dict): + raise ValueError( + f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." + ) + + if iterable is not None: + return tqdm(iterable, **self._progress_bar_config) + elif total is not None: + return tqdm(total=total, **self._progress_bar_config) + else: + raise ValueError("Either `total` or `iterable` has to be defined.") + + def set_progress_bar_config(self, **kwargs): + self._progress_bar_config = kwargs + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + r""" + Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this + option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. Speed + up during training is not guaranteed. + + + + โš ๏ธ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes + precedent. 
+ + + + Parameters: + attention_op (`Callable`, *optional*): + Override the default `None` operator for use as `op` argument to the + [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) + function of xFormers. + + Examples: + + ```py + >>> import torch + >>> from diffusers import DiffusionPipeline + >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp + + >>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) + >>> # Workaround for not accepting attention shape using VAE for Flash Attention + >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None) + ``` + """ + self.set_use_memory_efficient_attention_xformers(True, attention_op) + + def disable_xformers_memory_efficient_attention(self): + r""" + Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). + """ + self.set_use_memory_efficient_attention_xformers(False) + + def set_use_memory_efficient_attention_xformers( + self, valid: bool, attention_op: Optional[Callable] = None + ) -> None: + # Recursively walk through all the children. + # Any children which exposes the set_use_memory_efficient_attention_xformers method + # gets the message + def fn_recursive_set_mem_eff(module: torch.nn.Module): + if hasattr(module, "set_use_memory_efficient_attention_xformers"): + module.set_use_memory_efficient_attention_xformers(valid, attention_op) + + for child in module.children(): + fn_recursive_set_mem_eff(child) + + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module)] + + for module in modules: + fn_recursive_set_mem_eff(module) + + def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): + r""" + Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor + in slices to compute attention in several steps. For more than one attention head, the computation is performed + sequentially over each head. This is useful to save some memory in exchange for a small speed decrease. + + + + โš ๏ธ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch + 2.0 or xFormers. These attention computations are already very memory efficient so you won't need to enable + this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs! + + + + Args: + slice_size (`str` or `int`, *optional*, defaults to `"auto"`): + When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If + `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is + provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` + must be a multiple of `slice_size`. + + Examples: + + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", + ... torch_dtype=torch.float16, + ... use_safetensors=True, + ... 
) + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> pipe.enable_attention_slicing() + >>> image = pipe(prompt).images[0] + ``` + """ + self.set_attention_slice(slice_size) + + def disable_attention_slicing(self): + r""" + Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is + computed in one step. + """ + # set slice_size = `None` to disable `attention slicing` + self.enable_attention_slicing(None) + + def set_attention_slice(self, slice_size: Optional[int]): + module_names, _ = self._get_signature_keys(self) + modules = [getattr(self, n, None) for n in module_names] + modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, "set_attention_slice")] + + for module in modules: + module.set_attention_slice(slice_size) + + @classmethod + def from_pipe(cls, pipeline, **kwargs): + r""" + Create a new pipeline from a given pipeline. This method is useful to create a new pipeline from the existing + pipeline components without reallocating additional memory. + + Arguments: + pipeline (`DiffusionPipeline`): + The pipeline from which to create a new pipeline. + + Returns: + `DiffusionPipeline`: + A new pipeline with the same weights and configurations as `pipeline`. + + Examples: + + ```py + >>> from diffusers import StableDiffusionPipeline, StableDiffusionSAGPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") + >>> new_pipe = StableDiffusionSAGPipeline.from_pipe(pipe) + ``` + """ + + original_config = dict(pipeline.config) + torch_dtype = kwargs.pop("torch_dtype", None) + + # derive the pipeline class to instantiate + custom_pipeline = kwargs.pop("custom_pipeline", None) + custom_revision = kwargs.pop("custom_revision", None) + + if custom_pipeline is not None: + pipeline_class = _get_custom_pipeline_class(custom_pipeline, revision=custom_revision) + else: + pipeline_class = cls + + expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class) + # true_optional_modules are optional components with default value in signature so it is ok not to pass them to `__init__` + # e.g. `image_encoder` for StableDiffusionPipeline + parameters = inspect.signature(cls.__init__).parameters + true_optional_modules = set( + {k for k, v in parameters.items() if v.default != inspect._empty and k in expected_modules} + ) + + # get the class of each component based on its type hint + # e.g. {"unet": UNet2DConditionModel, "text_encoder": CLIPTextMode} + component_types = pipeline_class._get_signature_types() + + pretrained_model_name_or_path = original_config.pop("_name_or_path", None) + # allow users pass modules in `kwargs` to override the original pipeline's components + passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} + + original_class_obj = {} + for name, component in pipeline.components.items(): + if name in expected_modules and name not in passed_class_obj: + # for model components, we will not switch over if the class does not matches the type hint in the new pipeline's signature + if ( + not isinstance(component, ModelMixin) + or type(component) in component_types[name] + or (component is None and name in cls._optional_components) + ): + original_class_obj[name] = component + else: + logger.warning( + f"component {name} is not switched over to new pipeline because type does not match the expected." + f" {name} is {type(component)} while the new pipeline expect {component_types[name]}." 
+ f" please pass the component of the correct type to the new pipeline. `from_pipe(..., {name}={name})`" + ) + + # allow users pass optional kwargs to override the original pipelines config attribute + passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} + original_pipe_kwargs = { + k: original_config[k] + for k in original_config.keys() + if k in optional_kwargs and k not in passed_pipe_kwargs + } + + # config attribute that were not expected by pipeline is stored as its private attribute + # (i.e. when the original pipeline was also instantiated with `from_pipe` from another pipeline that has this config) + # in this case, we will pass them as optional arguments if they can be accepted by the new pipeline + additional_pipe_kwargs = [ + k[1:] + for k in original_config.keys() + if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs + ] + for k in additional_pipe_kwargs: + original_pipe_kwargs[k] = original_config.pop(f"_{k}") + + pipeline_kwargs = { + **passed_class_obj, + **original_class_obj, + **passed_pipe_kwargs, + **original_pipe_kwargs, + **kwargs, + } + + # store unused config as private attribute in the new pipeline + unused_original_config = { + f"{'' if k.startswith('_') else '_'}{k}": v for k, v in original_config.items() if k not in pipeline_kwargs + } + + missing_modules = ( + set(expected_modules) + - set(pipeline._optional_components) + - set(pipeline_kwargs.keys()) + - set(true_optional_modules) + ) + + if len(missing_modules) > 0: + raise ValueError( + f"Pipeline {pipeline_class} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" + ) + + new_pipeline = pipeline_class(**pipeline_kwargs) + if pretrained_model_name_or_path is not None: + new_pipeline.register_to_config(_name_or_path=pretrained_model_name_or_path) + new_pipeline.register_to_config(**unused_original_config) + + if torch_dtype is not None: + new_pipeline.to(dtype=torch_dtype) + + return new_pipeline + + +class StableDiffusionMixin: + r""" + Helper for DiffusionPipeline with vae and unet.(mainly for LDM such as stable diffusion) + """ + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): + r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497. + + The suffixes after the scaling factors represent the stages where they are being applied. 
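+
+        For example, a call of the form `pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)` is all
+        that is needed to switch the mechanism on; the exact numbers here are illustrative only, and
+        the repository referenced below lists combinations known to work for specific model families.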
+ + Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values + that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. + + Args: + s1 (`float`): + Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to + mitigate "oversmoothing effect" in the enhanced denoising process. + s2 (`float`): + Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to + mitigate "oversmoothing effect" in the enhanced denoising process. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. + """ + if not hasattr(self, "unet"): + raise ValueError("The pipeline must have `unet` for using FreeU.") + self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2) + + def disable_freeu(self): + """Disables the FreeU mechanism if enabled.""" + self.unet.disable_freeu() + + def fuse_qkv_projections(self, unet: bool = True, vae: bool = True): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) + are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is ๐Ÿงช experimental. + + + + Args: + unet (`bool`, defaults to `True`): To apply fusion on the UNet. + vae (`bool`, defaults to `True`): To apply fusion on the VAE. + """ + self.fusing_unet = False + self.fusing_vae = False + + if unet: + self.fusing_unet = True + self.unet.fuse_qkv_projections() + self.unet.set_attn_processor(FusedAttnProcessor2_0()) + + if vae: + if not isinstance(self.vae, AutoencoderKL): + raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.") + + self.fusing_vae = True + self.vae.fuse_qkv_projections() + self.vae.set_attn_processor(FusedAttnProcessor2_0()) + + def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True): + """Disable QKV projection fusion if enabled. + + + + This API is ๐Ÿงช experimental. + + + + Args: + unet (`bool`, defaults to `True`): To apply fusion on the UNet. + vae (`bool`, defaults to `True`): To apply fusion on the VAE. + + """ + if unet: + if not self.fusing_unet: + logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.") + else: + self.unet.unfuse_qkv_projections() + self.fusing_unet = False + + if vae: + if not self.fusing_vae: + logger.warning("The VAE was not initially fused for QKV projections. 
Doing nothing.") + else: + self.vae.unfuse_qkv_projections() + self.fusing_vae = False diff --git a/diffusers3/pipelines/pixart_alpha/__init__.py b/diffusers3/pipelines/pixart_alpha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7cb870fc8589aeff366429c182736b2ddd6ce215 --- /dev/null +++ b/diffusers3/pipelines/pixart_alpha/__init__.py @@ -0,0 +1,55 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_pixart_alpha"] = ["PixArtAlphaPipeline"] + _import_structure["pipeline_pixart_sigma"] = ["PixArtSigmaPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_pixart_alpha import ( + ASPECT_RATIO_256_BIN, + ASPECT_RATIO_512_BIN, + ASPECT_RATIO_1024_BIN, + PixArtAlphaPipeline, + ) + from .pipeline_pixart_sigma import ASPECT_RATIO_2048_BIN, PixArtSigmaPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/diffusers3/pipelines/pixart_alpha/pipeline_pixart_alpha.py new file mode 100644 index 0000000000000000000000000000000000000000..5b220df8058b260813b2ee61769755ffa505784d --- /dev/null +++ b/diffusers3/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -0,0 +1,969 @@ +# Copyright 2024 PixArt-Alpha Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import html +import inspect +import re +import urllib.parse as ul +from typing import Callable, List, Optional, Tuple, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...image_processor import PixArtImageProcessor +from ...models import AutoencoderKL, PixArtTransformer2DModel +from ...schedulers import DPMSolverMultistepScheduler +from ...utils import ( + BACKENDS_MAPPING, + deprecate, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import PixArtAlphaPipeline + + >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-XL-2-512x512" too. + >>> pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16) + >>> # Enable memory optimizations. + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "A small cactus with a happy face in the Sahara desert." + >>> image = pipe(prompt).images[0] + ``` +""" + +ASPECT_RATIO_1024_BIN = { + "0.25": [512.0, 2048.0], + "0.28": [512.0, 1856.0], + "0.32": [576.0, 1792.0], + "0.33": [576.0, 1728.0], + "0.35": [576.0, 1664.0], + "0.4": [640.0, 1600.0], + "0.42": [640.0, 1536.0], + "0.48": [704.0, 1472.0], + "0.5": [704.0, 1408.0], + "0.52": [704.0, 1344.0], + "0.57": [768.0, 1344.0], + "0.6": [768.0, 1280.0], + "0.68": [832.0, 1216.0], + "0.72": [832.0, 1152.0], + "0.78": [896.0, 1152.0], + "0.82": [896.0, 1088.0], + "0.88": [960.0, 1088.0], + "0.94": [960.0, 1024.0], + "1.0": [1024.0, 1024.0], + "1.07": [1024.0, 960.0], + "1.13": [1088.0, 960.0], + "1.21": [1088.0, 896.0], + "1.29": [1152.0, 896.0], + "1.38": [1152.0, 832.0], + "1.46": [1216.0, 832.0], + "1.67": [1280.0, 768.0], + "1.75": [1344.0, 768.0], + "2.0": [1408.0, 704.0], + "2.09": [1472.0, 704.0], + "2.4": [1536.0, 640.0], + "2.5": [1600.0, 640.0], + "3.0": [1728.0, 576.0], + "4.0": [2048.0, 512.0], +} + +ASPECT_RATIO_512_BIN = { + "0.25": [256.0, 1024.0], + "0.28": [256.0, 928.0], + "0.32": [288.0, 896.0], + "0.33": [288.0, 864.0], + "0.35": [288.0, 832.0], + "0.4": [320.0, 800.0], + "0.42": [320.0, 768.0], + "0.48": [352.0, 736.0], + "0.5": [352.0, 704.0], + "0.52": [352.0, 672.0], + "0.57": [384.0, 672.0], + "0.6": [384.0, 640.0], + "0.68": [416.0, 608.0], + "0.72": [416.0, 576.0], + "0.78": [448.0, 576.0], + "0.82": [448.0, 544.0], + "0.88": [480.0, 544.0], + "0.94": [480.0, 512.0], + "1.0": [512.0, 512.0], + "1.07": [512.0, 480.0], + "1.13": [544.0, 480.0], + "1.21": [544.0, 448.0], + "1.29": [576.0, 448.0], + "1.38": [576.0, 416.0], + "1.46": [608.0, 416.0], + "1.67": [640.0, 384.0], + "1.75": [672.0, 384.0], + "2.0": [704.0, 352.0], + "2.09": [736.0, 352.0], + "2.4": [768.0, 320.0], + "2.5": [800.0, 320.0], + "3.0": [864.0, 288.0], + "4.0": [1024.0, 256.0], +} + +ASPECT_RATIO_256_BIN = { + "0.25": [128.0, 512.0], + "0.28": [128.0, 464.0], + "0.32": [144.0, 448.0], + "0.33": [144.0, 432.0], + "0.35": [144.0, 416.0], + "0.4": [160.0, 400.0], + "0.42": [160.0, 384.0], + "0.48": [176.0, 368.0], + "0.5": [176.0, 352.0], + "0.52": [176.0, 336.0], + "0.57": [192.0, 336.0], + "0.6": [192.0, 320.0], + "0.68": [208.0, 304.0], + "0.72": [208.0, 288.0], + "0.78": [224.0, 288.0], + "0.82": [224.0, 272.0], + 
"0.88": [240.0, 272.0], + "0.94": [240.0, 256.0], + "1.0": [256.0, 256.0], + "1.07": [256.0, 240.0], + "1.13": [272.0, 240.0], + "1.21": [272.0, 224.0], + "1.29": [288.0, 224.0], + "1.38": [288.0, 208.0], + "1.46": [304.0, 208.0], + "1.67": [320.0, 192.0], + "1.75": [336.0, 192.0], + "2.0": [352.0, 176.0], + "2.09": [368.0, 176.0], + "2.4": [384.0, 160.0], + "2.5": [400.0, 160.0], + "3.0": [432.0, 144.0], + "4.0": [512.0, 128.0], +} + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class PixArtAlphaPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using PixArt-Alpha. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. PixArt-Alpha uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. + tokenizer (`T5Tokenizer`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + transformer ([`PixArtTransformer2DModel`]): + A text conditioned `PixArtTransformer2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + """ + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder"] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKL, + transformer: PixArtTransformer2DModel, + scheduler: DPMSolverMultistepScheduler, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: str = "", + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + clean_caption: bool = False, + max_sequence_length: int = 120, + **kwargs, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + PixArt-Alpha, this should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. 
For PixArt-Alpha, it's should be the embeddings of the "" + string. + clean_caption (`bool`, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + max_sequence_length (`int`, defaults to 120): Maximum sequence length to use for the prompt. + """ + + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # See Section 3.1. of the paper. + max_length = max_sequence_length + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because T5 can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask + prompt_attention_mask = prompt_attention_mask.to(device) + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + negative_prompt_attention_mask = uncond_input.attention_mask + negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if 
do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + else: + negative_prompt_embeds = None + negative_prompt_attention_mask = None + + return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: + raise ValueError( + "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" + f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" + f" {negative_prompt_attention_mask.shape}." + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + 
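+ # the character class below matches the Unicode hyphen/dash variants (e.g. U+2010-U+2015,
+ # U+2E3A/U+2E3B, fullwidth U+FF0D) and collapses any run of them into a single "-"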
r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." + caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: str = "", + num_inference_steps: int = 20, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 4.5, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = None, + width: Optional[int] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + use_resolution_binning: bool = True, + max_sequence_length: int = 120, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 4.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Alpha this negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + use_resolution_binning (`bool` defaults to `True`): + If set to `True`, the requested height and width are first mapped to the closest resolutions using + `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to + the requested resolution. Useful for generating non-square images. + max_sequence_length (`int` defaults to 120): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + # 1. Check inputs. 
Raise error if not correct + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + if use_resolution_binning: + if self.transformer.config.sample_size == 128: + aspect_ratio_bin = ASPECT_RATIO_1024_BIN + elif self.transformer.config.sample_size == 64: + aspect_ratio_bin = ASPECT_RATIO_512_BIN + elif self.transformer.config.sample_size == 32: + aspect_ratio_bin = ASPECT_RATIO_256_BIN + else: + raise ValueError("Invalid sample size") + orig_height, orig_width = height, width + height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) + + self.check_inputs( + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) + + # 2. Default height and width to transformer + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + clean_caption=clean_caption, + max_sequence_length=max_sequence_length, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Prepare micro-conditions. + added_cond_kwargs = {"resolution": None, "aspect_ratio": None} + if self.transformer.config.sample_size == 128: + resolution = torch.tensor([height, width]).repeat(batch_size * num_images_per_prompt, 1) + aspect_ratio = torch.tensor([float(height / width)]).repeat(batch_size * num_images_per_prompt, 1) + resolution = resolution.to(dtype=prompt_embeds.dtype, device=device) + aspect_ratio = aspect_ratio.to(dtype=prompt_embeds.dtype, device=device) + + if do_classifier_free_guidance: + resolution = torch.cat([resolution, resolution], dim=0) + aspect_ratio = torch.cat([aspect_ratio, aspect_ratio], dim=0) + + added_cond_kwargs = {"resolution": resolution, "aspect_ratio": aspect_ratio} + + # 7. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=prompt_attention_mask, + timestep=current_timestep, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # learned sigma + if self.transformer.config.out_channels // 2 == latent_channels: + noise_pred = noise_pred.chunk(2, dim=1)[0] + else: + noise_pred = noise_pred + + # compute previous image: x_t -> x_t-1 + if num_inference_steps == 1: + # For DMD one step sampling: https://arxiv.org/abs/2311.18828 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).pred_original_sample + else: + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + if use_resolution_binning: + image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/pixart_alpha/pipeline_pixart_sigma.py b/diffusers3/pipelines/pixart_alpha/pipeline_pixart_sigma.py new file mode 100644 index 0000000000000000000000000000000000000000..69f02891477400dfe7f2e9645d602bbc02aa98c1 --- /dev/null +++ b/diffusers3/pipelines/pixart_alpha/pipeline_pixart_sigma.py @@ -0,0 +1,880 @@ +# Copyright 2024 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import html +import inspect +import re +import urllib.parse as ul +from typing import Callable, List, Optional, Tuple, Union + +import torch +from transformers import T5EncoderModel, T5Tokenizer + +from ...image_processor import PixArtImageProcessor +from ...models import AutoencoderKL, PixArtTransformer2DModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + BACKENDS_MAPPING, + deprecate, + is_bs4_available, + is_ftfy_available, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .pipeline_pixart_alpha import ( + ASPECT_RATIO_256_BIN, + ASPECT_RATIO_512_BIN, + ASPECT_RATIO_1024_BIN, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_bs4_available(): + from bs4 import BeautifulSoup + +if is_ftfy_available(): + import ftfy + + +ASPECT_RATIO_2048_BIN = { + "0.25": [1024.0, 4096.0], + "0.26": [1024.0, 3968.0], + "0.27": [1024.0, 3840.0], + "0.28": [1024.0, 3712.0], + "0.32": [1152.0, 3584.0], + "0.33": [1152.0, 3456.0], + "0.35": [1152.0, 3328.0], + "0.4": [1280.0, 3200.0], + "0.42": [1280.0, 3072.0], + "0.48": [1408.0, 2944.0], + "0.5": [1408.0, 2816.0], + "0.52": [1408.0, 2688.0], + "0.57": [1536.0, 2688.0], + "0.6": [1536.0, 2560.0], + "0.68": [1664.0, 2432.0], + "0.72": [1664.0, 2304.0], + "0.78": [1792.0, 2304.0], + "0.82": [1792.0, 2176.0], + "0.88": [1920.0, 2176.0], + "0.94": [1920.0, 2048.0], + "1.0": [2048.0, 2048.0], + "1.07": [2048.0, 1920.0], + "1.13": [2176.0, 1920.0], + "1.21": [2176.0, 1792.0], + "1.29": [2304.0, 1792.0], + "1.38": [2304.0, 1664.0], + "1.46": [2432.0, 1664.0], + "1.67": [2560.0, 1536.0], + "1.75": [2688.0, 1536.0], + "2.0": [2816.0, 1408.0], + "2.09": [2944.0, 1408.0], + "2.4": [3072.0, 1280.0], + "2.5": [3200.0, 1280.0], + "2.89": [3328.0, 1152.0], + "3.0": [3456.0, 1152.0], + "3.11": [3584.0, 1152.0], + "3.62": [3712.0, 1024.0], + "3.75": [3840.0, 1024.0], + "3.88": [3968.0, 1024.0], + "4.0": [4096.0, 1024.0], +} + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import PixArtSigmaPipeline + + >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-Sigma-XL-2-512-MS" too. + >>> pipe = PixArtSigmaPipeline.from_pretrained( + ... "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", torch_dtype=torch.float16 + ... ) + >>> # Enable memory optimizations. + >>> # pipe.enable_model_cpu_offload() + + >>> prompt = "A small cactus with a happy face in the Sahara desert." 
+ >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class PixArtSigmaPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using PixArt-Sigma. 
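+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)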
+ """ + + bad_punct_regex = re.compile( + r"[" + + "#ยฎโ€ขยฉโ„ข&@ยทยบยฝยพยฟยกยง~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = ["tokenizer", "text_encoder"] + model_cpu_offload_seq = "text_encoder->transformer->vae" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKL, + transformer: PixArtTransformer2DModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.encode_prompt with 120->300 + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: str = "", + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + clean_caption: bool = False, + max_sequence_length: int = 300, + **kwargs, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + PixArt-Alpha, this should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Alpha, it's should be the embeddings of the "" + string. + clean_caption (`bool`, defaults to `False`): + If `True`, the function will preprocess and clean the provided caption before encoding. + max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. + """ + + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # See Section 3.1. of the paper. 
+ max_length = max_sequence_length + + if prompt_embeds is None: + prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because T5 can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask + prompt_attention_mask = prompt_attention_mask.to(device) + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) + prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt + uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption) + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + negative_prompt_attention_mask = uncond_input.attention_mask + negative_prompt_attention_mask = negative_prompt_attention_mask.to(device) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) + else: + negative_prompt_embeds = None + negative_prompt_attention_mask = None + + return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask + + # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") + + if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: + raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: + raise ValueError( + "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" + f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" + f" {negative_prompt_attention_mask.shape}." + ) + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing + def _text_preprocessing(self, text, clean_caption=False): + if clean_caption and not is_bs4_available(): + logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if clean_caption and not is_ftfy_available(): + logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) + logger.warning("Setting `clean_caption` to False...") + clean_caption = False + + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + if clean_caption: + text = self._clean_caption(text) + text = self._clean_caption(text) + else: + text = text.lower().strip() + return text + + return [process(t) for t in text] + + # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption + def _clean_caption(self, caption): + caption = str(caption) + caption = ul.unquote_plus(caption) + caption = caption.strip().lower() + caption = re.sub("", "person", caption) + # urls: + caption = re.sub( + r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + caption = re.sub( + r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa + "", + caption, + ) # regex for urls + # html: + caption = BeautifulSoup(caption, features="html.parser").text + + # @ + caption = re.sub(r"@[\w\d]+\b", "", caption) + + # 31C0โ€”31EF CJK Strokes + # 31F0โ€”31FF Katakana Phonetic Extensions + # 3200โ€”32FF Enclosed CJK Letters and Months + # 3300โ€”33FF CJK Compatibility + # 3400โ€”4DBF CJK Unified Ideographs Extension A + # 4DC0โ€”4DFF Yijing Hexagram Symbols + # 4E00โ€”9FFF CJK Unified Ideographs + caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) + caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) + caption = re.sub(r"[\u3200-\u32ff]+", "", caption) + caption = re.sub(r"[\u3300-\u33ff]+", "", caption) + caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) + caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) + caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) + ####################################################### + + # ะฒัะต ะฒะธะดั‹ ั‚ะธั€ะต / all types of dash --> "-" + caption = re.sub( + r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa + "-", + caption, + ) + + # ะบะฐะฒั‹ั‡ะบะธ ะบ ะพะดะฝะพะผัƒ ัั‚ะฐะฝะดะฐั€ั‚ัƒ + caption = re.sub(r"[`ยดยซยปโ€œโ€ยจ]", '"', caption) + caption = re.sub(r"[โ€˜โ€™]", "'", caption) + + # " + caption = re.sub(r""?", "", caption) + # & + caption = re.sub(r"&", "", caption) + + # ip adresses: + caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) + + # article ids: + caption = re.sub(r"\d:\d\d\s+$", "", caption) + + # \n + caption = re.sub(r"\\n", " ", caption) + + # "#123" + caption = re.sub(r"#\d{1,3}\b", "", caption) + # "#12345.." + caption = re.sub(r"#\d{5,}\b", "", caption) + # "123456.." 
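+ # (drops standalone runs of six or more digits, e.g. long numeric ids)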
+ caption = re.sub(r"\b\d{6,}\b", "", caption) + # filenames: + caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) + + # + caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" + caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" + + caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT + caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " + + # this-is-my-cute-cat / this_is_my_cute_cat + regex2 = re.compile(r"(?:\-|\_)") + if len(re.findall(regex2, caption)) > 3: + caption = re.sub(regex2, " ", caption) + + caption = ftfy.fix_text(caption) + caption = html.unescape(html.unescape(caption)) + + caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 + caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc + caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 + + caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) + caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) + caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) + caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) + caption = re.sub(r"\bpage\s+\d+\b", "", caption) + + caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... + + caption = re.sub(r"\b\d+\.?\d*[xั…ร—]\d+\.?\d*\b", "", caption) + + caption = re.sub(r"\b\s+\:\s+", r": ", caption) + caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) + caption = re.sub(r"\s+", " ", caption) + + caption.strip() + + caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) + caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) + caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) + caption = re.sub(r"^\.\S+$", "", caption) + + return caption.strip() + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + negative_prompt: str = "", + num_inference_steps: int = 20, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 4.5, + num_images_per_prompt: Optional[int] = 1, + height: Optional[int] = None, + width: Optional[int] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_attention_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + clean_caption: bool = True, + use_resolution_binning: bool = True, + max_sequence_length: int = 300, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 4.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. 
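An aside on the `guidance_scale` argument documented above: in the denoising loop further down, the unconditional and prompt-conditional noise predictions are combined exactly as in classifier-free guidance. A minimal sketch with stand-in tensors (not part of this diff):

```py
import torch

guidance_scale = 4.5
noise_pred = torch.randn(2, 4, 128, 128)                  # stand-in for the doubled [uncond, text] batch
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)  # split the two halves
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
```

With `guidance_scale == 1.0` the expression collapses to the conditional prediction alone, which is why guidance counts as disabled at 1 or below.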
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + negative_prompt_attention_mask (`torch.Tensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + clean_caption (`bool`, *optional*, defaults to `True`): + Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to + be installed. If the dependencies are not installed, the embeddings will be created from the raw + prompt. + use_resolution_binning (`bool` defaults to `True`): + If set to `True`, the requested height and width are first mapped to the closest resolutions using + `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to + the requested resolution. Useful for generating non-square images. + max_sequence_length (`int` defaults to 300): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + # 1. Check inputs. 
Raise error if not correct + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + if use_resolution_binning: + if self.transformer.config.sample_size == 256: + aspect_ratio_bin = ASPECT_RATIO_2048_BIN + elif self.transformer.config.sample_size == 128: + aspect_ratio_bin = ASPECT_RATIO_1024_BIN + elif self.transformer.config.sample_size == 64: + aspect_ratio_bin = ASPECT_RATIO_512_BIN + elif self.transformer.config.sample_size == 32: + aspect_ratio_bin = ASPECT_RATIO_256_BIN + else: + raise ValueError("Invalid sample size") + orig_height, orig_width = height, width + height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) + + self.check_inputs( + prompt, + height, + width, + negative_prompt, + callback_steps, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) + + # 2. Default height and width to transformer + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + clean_caption=clean_caption, + max_sequence_length=max_sequence_length, + ) + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latents. + latent_channels = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + latent_channels, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Prepare micro-conditions. + added_cond_kwargs = {"resolution": None, "aspect_ratio": None} + + # 7. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to(latent_model_input.device) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand(latent_model_input.shape[0]) + + # predict noise model_output + noise_pred = self.transformer( + latent_model_input, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=prompt_attention_mask, + timestep=current_timestep, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # learned sigma + if self.transformer.config.out_channels // 2 == latent_channels: + noise_pred = noise_pred.chunk(2, dim=1)[0] + else: + noise_pred = noise_pred + + # compute previous image: x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + if use_resolution_binning: + image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/semantic_stable_diffusion/__init__.py b/diffusers3/pipelines/semantic_stable_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..70f5b1a547c4b90e28109843ae3be2fca2e98c88 --- /dev/null +++ b/diffusers3/pipelines/semantic_stable_diffusion/__init__.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_output"] = ["SemanticStableDiffusionPipelineOutput"] + _import_structure["pipeline_semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise 
OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/semantic_stable_diffusion/pipeline_output.py b/diffusers3/pipelines/semantic_stable_diffusion/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..34991299398115f439537b77e1f1fc8a83e0d431 --- /dev/null +++ b/diffusers3/pipelines/semantic_stable_diffusion/pipeline_output.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class SemanticStableDiffusionPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains โ€œnot-safe-for-workโ€ (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] diff --git a/diffusers3/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py b/diffusers3/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..6f83071f3e85edb7c1e1eaf61d46086b239e6651 --- /dev/null +++ b/diffusers3/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py @@ -0,0 +1,722 @@ +import inspect +from itertools import repeat +from typing import Callable, List, Optional, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, UNet2DConditionModel +from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import SemanticStableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion with latent editing. + + This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass + documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular + device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
+ tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`Q16SafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + editing_prompt: Optional[Union[str, List[str]]] = None, + editing_prompt_embeddings: Optional[torch.Tensor] = None, + reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, + edit_guidance_scale: Optional[Union[float, List[float]]] = 5, + edit_warmup_steps: Optional[Union[int, List[int]]] = 10, + edit_cooldown_steps: Optional[Union[int, List[int]]] = None, + edit_threshold: Optional[Union[float, List[float]]] = 0.9, + edit_momentum_scale: Optional[float] = 0.1, + edit_mom_beta: Optional[float] = 0.4, + edit_weights: Optional[List[float]] = None, + sem_guidance: Optional[List[torch.Tensor]] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. 
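As a side note to the `latents` argument just described and the `prepare_latents` helper above: the initial noise is drawn with the user-supplied `generator` and then scaled by the scheduler's `init_noise_sigma`. A standalone sketch with plain tensors (the sizes and the 8x VAE downscale factor are illustrative):

```py
import torch

batch_size, num_channels_latents, height, width = 1, 4, 512, 512
vae_scale_factor = 8                                  # Stable Diffusion VAEs downsample by 8
generator = torch.Generator().manual_seed(0)          # fixed seed -> reproducible noise

shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
latents = torch.randn(shape, generator=generator)     # randn_tensor(...) inside the pipeline
latents = latents * 1.0                               # scheduler.init_noise_sigma (1.0 for DDIM/PNDM-style schedulers)
print(latents.shape)                                  # torch.Size([1, 4, 64, 64])
```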
+ output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + editing_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to use for semantic guidance. Semantic guidance is disabled by setting + `editing_prompt = None`. Guidance direction of prompt should be specified via + `reverse_editing_direction`. + editing_prompt_embeddings (`torch.Tensor`, *optional*): + Pre-computed embeddings to use for semantic guidance. Guidance direction of embedding should be + specified via `reverse_editing_direction`. + reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): + Whether the corresponding prompt in `editing_prompt` should be increased or decreased. + edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): + Guidance scale for semantic guidance. If provided as a list, values should correspond to + `editing_prompt`. + edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): + Number of diffusion steps (for each prompt) for which semantic guidance is not applied. Momentum is + calculated for those steps and applied once all warmup periods are over. + edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): + Number of diffusion steps (for each prompt) after which semantic guidance is longer applied. + edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): + Threshold of semantic guidance. + edit_momentum_scale (`float`, *optional*, defaults to 0.1): + Scale of the momentum to be added to the semantic guidance at each diffusion step. If set to 0.0, + momentum is disabled. Momentum is already built up during warmup (for diffusion steps smaller than + `sld_warmup_steps`). Momentum is only added to latent guidance once all warmup periods are finished. + edit_mom_beta (`float`, *optional*, defaults to 0.4): + Defines how semantic guidance momentum builds up. `edit_mom_beta` indicates how much of the previous + momentum is kept. Momentum is already built up during warmup (for diffusion steps smaller than + `edit_warmup_steps`). + edit_weights (`List[float]`, *optional*, defaults to `None`): + Indicates how much each individual concept should influence the overall guidance. If no weights are + provided all concepts are applied equally. + sem_guidance (`List[torch.Tensor]`, *optional*): + List of pre-generated guidance vectors to be applied at generation. Length of the list has to + correspond to `num_inference_steps`. + + Examples: + + ```py + >>> import torch + >>> from diffusers import SemanticStableDiffusionPipeline + + >>> pipe = SemanticStableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> out = pipe( + ... prompt="a photo of the face of a woman", + ... num_images_per_prompt=1, + ... guidance_scale=7, + ... editing_prompt=[ + ... 
"smiling, smile", # Concepts to apply + ... "glasses, wearing glasses", + ... "curls, wavy hair, curly hair", + ... "beard, full beard, mustache", + ... ], + ... reverse_editing_direction=[ + ... False, + ... False, + ... False, + ... False, + ... ], # Direction of guidance i.e. increase all concepts + ... edit_warmup_steps=[10, 10, 10, 10], # Warmup period for each concept + ... edit_guidance_scale=[4, 5, 5, 5.4], # Guidance scale for each concept + ... edit_threshold=[ + ... 0.99, + ... 0.975, + ... 0.925, + ... 0.96, + ... ], # Threshold for each concept. Threshold equals the percentile of the latent space that will be discarded. I.e. threshold=0.99 uses 1% of the latent dimensions + ... edit_momentum_scale=0.3, # Momentum scale that will be added to the latent guidance + ... edit_mom_beta=0.6, # Momentum beta + ... edit_weights=[1, 1, 1, 1, 1], # Weights of the individual concepts against each other + ... ) + >>> image = out.images[0] + ``` + + Returns: + [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, + [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images and the second element + is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" + (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + + if editing_prompt: + enable_edit_guidance = True + if isinstance(editing_prompt, str): + editing_prompt = [editing_prompt] + enabled_editing_prompts = len(editing_prompt) + elif editing_prompt_embeddings is not None: + enable_edit_guidance = True + enabled_editing_prompts = editing_prompt_embeddings.shape[0] + else: + enabled_editing_prompts = 0 + enable_edit_guidance = False + + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + if text_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + text_embeddings = self.text_encoder(text_input_ids.to(device))[0] + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = text_embeddings.shape + text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) + text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if enable_edit_guidance: + # get safety text embeddings + if editing_prompt_embeddings is None: + edit_concepts_input = self.tokenizer( + [x for item in editing_prompt for x in repeat(item, batch_size)], + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + + 
edit_concepts_input_ids = edit_concepts_input.input_ids + + if edit_concepts_input_ids.shape[-1] > self.tokenizer.model_max_length: + removed_text = self.tokenizer.batch_decode( + edit_concepts_input_ids[:, self.tokenizer.model_max_length :] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + edit_concepts_input_ids = edit_concepts_input_ids[:, : self.tokenizer.model_max_length] + edit_concepts = self.text_encoder(edit_concepts_input_ids.to(device))[0] + else: + edit_concepts = editing_prompt_embeddings.to(device).repeat(batch_size, 1, 1) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed_edit, seq_len_edit, _ = edit_concepts.shape + edit_concepts = edit_concepts.repeat(1, num_images_per_prompt, 1) + edit_concepts = edit_concepts.view(bs_embed_edit * num_images_per_prompt, seq_len_edit, -1) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + # get unconditional embeddings for classifier free guidance + + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0] + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = uncond_embeddings.shape[1] + uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) + uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if enable_edit_guidance: + text_embeddings = torch.cat([uncond_embeddings, text_embeddings, edit_concepts]) + else: + text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) + # get the initial random noise unless the user supplied it + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
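Before the denoising loop that follows: the semantic-guidance branch keeps, per concept, only the largest-magnitude fraction `(1 - edit_threshold)` of the edit direction and smooths it with a momentum term. A reduced sketch on hypothetical tensors, omitting the per-concept weighting and warm-up/cool-down bookkeeping:

```py
import torch

edit_guidance_scale, edit_threshold = 5.0, 0.9
edit_momentum_scale, edit_mom_beta = 0.1, 0.4

noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_edit_concept = torch.randn(1, 4, 64, 64)
edit_momentum = torch.zeros(1, 4, 64, 64)

guidance_edit = (noise_pred_edit_concept - noise_pred_uncond) * edit_guidance_scale
cutoff = torch.quantile(guidance_edit.abs().flatten(start_dim=2), edit_threshold, dim=2)
guidance_edit = torch.where(guidance_edit.abs() >= cutoff[:, :, None, None],
                            guidance_edit, torch.zeros_like(guidance_edit))   # keep only the strongest 10%

guidance_edit = guidance_edit + edit_momentum_scale * edit_momentum           # add built-up momentum
edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * guidance_edit
```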
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # Initialize edit_momentum to None + edit_momentum = None + + self.uncond_estimates = None + self.text_estimates = None + self.edit_estimates = None + self.sem_guidance = None + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * (2 + enabled_editing_prompts)) if do_classifier_free_guidance else latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_out = noise_pred.chunk(2 + enabled_editing_prompts) # [b,4, 64, 64] + noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] + noise_pred_edit_concepts = noise_pred_out[2:] + + # default text guidance + noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond) + # noise_guidance = (noise_pred_text - noise_pred_edit_concepts[0]) + + if self.uncond_estimates is None: + self.uncond_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_uncond.shape)) + self.uncond_estimates[i] = noise_pred_uncond.detach().cpu() + + if self.text_estimates is None: + self.text_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) + self.text_estimates[i] = noise_pred_text.detach().cpu() + + if self.edit_estimates is None and enable_edit_guidance: + self.edit_estimates = torch.zeros( + (num_inference_steps + 1, len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) + ) + + if self.sem_guidance is None: + self.sem_guidance = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) + + if edit_momentum is None: + edit_momentum = torch.zeros_like(noise_guidance) + + if enable_edit_guidance: + concept_weights = torch.zeros( + (len(noise_pred_edit_concepts), noise_guidance.shape[0]), + device=device, + dtype=noise_guidance.dtype, + ) + noise_guidance_edit = torch.zeros( + (len(noise_pred_edit_concepts), *noise_guidance.shape), + device=device, + dtype=noise_guidance.dtype, + ) + # noise_guidance_edit = torch.zeros_like(noise_guidance) + warmup_inds = [] + for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): + self.edit_estimates[i, c] = noise_pred_edit_concept + if isinstance(edit_guidance_scale, list): + edit_guidance_scale_c = edit_guidance_scale[c] + else: + edit_guidance_scale_c = edit_guidance_scale + + if isinstance(edit_threshold, list): + edit_threshold_c = edit_threshold[c] + else: + edit_threshold_c = edit_threshold + if isinstance(reverse_editing_direction, list): + reverse_editing_direction_c = reverse_editing_direction[c] + else: + reverse_editing_direction_c = reverse_editing_direction + if edit_weights: + edit_weight_c = edit_weights[c] + else: + edit_weight_c = 1.0 + if isinstance(edit_warmup_steps, list): + edit_warmup_steps_c = edit_warmup_steps[c] + else: + edit_warmup_steps_c = edit_warmup_steps + + if isinstance(edit_cooldown_steps, list): + edit_cooldown_steps_c = edit_cooldown_steps[c] + elif edit_cooldown_steps is None: + edit_cooldown_steps_c = i + 1 + else: + edit_cooldown_steps_c = edit_cooldown_steps + if i >= edit_warmup_steps_c: + warmup_inds.append(c) + if i >= edit_cooldown_steps_c: + noise_guidance_edit[c, :, :, :, :] = torch.zeros_like(noise_pred_edit_concept) + continue + + noise_guidance_edit_tmp = noise_pred_edit_concept - 
noise_pred_uncond + # tmp_weights = (noise_pred_text - noise_pred_edit_concept).sum(dim=(1, 2, 3)) + tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2, 3)) + + tmp_weights = torch.full_like(tmp_weights, edit_weight_c) # * (1 / enabled_editing_prompts) + if reverse_editing_direction_c: + noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 + concept_weights[c, :] = tmp_weights + + noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c + + # torch.quantile function expects float32 + if noise_guidance_edit_tmp.dtype == torch.float32: + tmp = torch.quantile( + torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2), + edit_threshold_c, + dim=2, + keepdim=False, + ) + else: + tmp = torch.quantile( + torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32), + edit_threshold_c, + dim=2, + keepdim=False, + ).to(noise_guidance_edit_tmp.dtype) + + noise_guidance_edit_tmp = torch.where( + torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None, None], + noise_guidance_edit_tmp, + torch.zeros_like(noise_guidance_edit_tmp), + ) + noise_guidance_edit[c, :, :, :, :] = noise_guidance_edit_tmp + + # noise_guidance_edit = noise_guidance_edit + noise_guidance_edit_tmp + + warmup_inds = torch.tensor(warmup_inds).to(device) + if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0: + concept_weights = concept_weights.to("cpu") # Offload to cpu + noise_guidance_edit = noise_guidance_edit.to("cpu") + + concept_weights_tmp = torch.index_select(concept_weights.to(device), 0, warmup_inds) + concept_weights_tmp = torch.where( + concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp + ) + concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0) + # concept_weights_tmp = torch.nan_to_num(concept_weights_tmp) + + noise_guidance_edit_tmp = torch.index_select(noise_guidance_edit.to(device), 0, warmup_inds) + noise_guidance_edit_tmp = torch.einsum( + "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp + ) + noise_guidance = noise_guidance + noise_guidance_edit_tmp + + self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu() + + del noise_guidance_edit_tmp + del concept_weights_tmp + concept_weights = concept_weights.to(device) + noise_guidance_edit = noise_guidance_edit.to(device) + + concept_weights = torch.where( + concept_weights < 0, torch.zeros_like(concept_weights), concept_weights + ) + + concept_weights = torch.nan_to_num(concept_weights) + + noise_guidance_edit = torch.einsum("cb,cbijk->bijk", concept_weights, noise_guidance_edit) + noise_guidance_edit = noise_guidance_edit.to(edit_momentum.device) + + noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum + + edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit + + if warmup_inds.shape[0] == len(noise_pred_edit_concepts): + noise_guidance = noise_guidance + noise_guidance_edit + self.sem_guidance[i] = noise_guidance_edit.detach().cpu() + + if sem_guidance is not None: + edit_guidance = sem_guidance[i].to(device) + noise_guidance = noise_guidance + edit_guidance + + noise_pred = noise_pred_uncond + noise_guidance + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if not return_dict: + return (image, has_nsfw_concept) + + return SemanticStableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/shap_e/__init__.py b/diffusers3/pipelines/shap_e/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ed563c4a51f6e627c06711b60fe3a0709ff22f7 --- /dev/null +++ b/diffusers3/pipelines/shap_e/__init__.py @@ -0,0 +1,71 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["camera"] = ["create_pan_cameras"] + _import_structure["pipeline_shap_e"] = ["ShapEPipeline"] + _import_structure["pipeline_shap_e_img2img"] = ["ShapEImg2ImgPipeline"] + _import_structure["renderer"] = [ + "BoundingBoxVolume", + "ImportanceRaySampler", + "MLPNeRFModelOutput", + "MLPNeRSTFModel", + "ShapEParamsProjModel", + "ShapERenderer", + "StratifiedRaySampler", + "VoidNeRFModel", + ] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .camera import create_pan_cameras + from .pipeline_shap_e import ShapEPipeline + from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline + from .renderer import ( + BoundingBoxVolume, + ImportanceRaySampler, + MLPNeRFModelOutput, + MLPNeRSTFModel, + ShapEParamsProjModel, + ShapERenderer, + StratifiedRaySampler, + VoidNeRFModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/shap_e/camera.py b/diffusers3/pipelines/shap_e/camera.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b94c3000d845dedf1dc3e45fa217b9071c5f38 --- /dev/null +++ b/diffusers3/pipelines/shap_e/camera.py @@ -0,0 +1,147 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Tuple + +import numpy as np +import torch + + +@dataclass +class DifferentiableProjectiveCamera: + """ + Implements a batch, differentiable, standard pinhole camera + """ + + origin: torch.Tensor # [batch_size x 3] + x: torch.Tensor # [batch_size x 3] + y: torch.Tensor # [batch_size x 3] + z: torch.Tensor # [batch_size x 3] + width: int + height: int + x_fov: float + y_fov: float + shape: Tuple[int] + + def __post_init__(self): + assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] + assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 + assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2 + + def resolution(self): + return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32)) + + def fov(self): + return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32)) + + def get_image_coords(self) -> torch.Tensor: + """ + :return: coords of shape (width * height, 2) + """ + pixel_indices = torch.arange(self.height * self.width) + coords = torch.stack( + [ + pixel_indices % self.width, + torch.div(pixel_indices, self.width, rounding_mode="trunc"), + ], + axis=1, + ) + return coords + + @property + def camera_rays(self): + batch_size, *inner_shape = self.shape + inner_batch_size = int(np.prod(inner_shape)) + + coords = self.get_image_coords() + coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape]) + rays = self.get_camera_rays(coords) + + rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3) + + return rays + + def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor: + batch_size, *shape, n_coords = coords.shape + assert n_coords == 2 + assert batch_size == self.origin.shape[0] + + flat = coords.view(batch_size, -1, 2) + + res = self.resolution() + fov = self.fov() + + fracs = (flat.float() / (res - 1)) * 2 - 1 + fracs = fracs * torch.tan(fov / 2) + + fracs = fracs.view(batch_size, -1, 2) + directions = ( + self.z.view(batch_size, 1, 3) + + self.x.view(batch_size, 1, 3) * fracs[:, :, :1] + + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:] + ) + directions = directions / directions.norm(dim=-1, keepdim=True) + rays = torch.stack( + [ + torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]), + directions, + ], + dim=2, + ) + return rays.view(batch_size, *shape, 2, 3) + + def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera": + """ + Creates a new camera for the resized view assuming the aspect ratio does not change. + """ + assert width * self.height == height * self.width, "The aspect ratio should not change." 
+ return DifferentiableProjectiveCamera( + origin=self.origin, + x=self.x, + y=self.y, + z=self.z, + width=width, + height=height, + x_fov=self.x_fov, + y_fov=self.y_fov, + ) + + +def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera: + origins = [] + xs = [] + ys = [] + zs = [] + for theta in np.linspace(0, 2 * np.pi, num=20): + z = np.array([np.sin(theta), np.cos(theta), -0.5]) + z /= np.sqrt(np.sum(z**2)) + origin = -z * 4 + x = np.array([np.cos(theta), -np.sin(theta), 0.0]) + y = np.cross(z, x) + origins.append(origin) + xs.append(x) + ys.append(y) + zs.append(z) + return DifferentiableProjectiveCamera( + origin=torch.from_numpy(np.stack(origins, axis=0)).float(), + x=torch.from_numpy(np.stack(xs, axis=0)).float(), + y=torch.from_numpy(np.stack(ys, axis=0)).float(), + z=torch.from_numpy(np.stack(zs, axis=0)).float(), + width=size, + height=size, + x_fov=0.7, + y_fov=0.7, + shape=(1, len(xs)), + ) diff --git a/diffusers3/pipelines/shap_e/pipeline_shap_e.py b/diffusers3/pipelines/shap_e/pipeline_shap_e.py new file mode 100644 index 0000000000000000000000000000000000000000..f87f28e06c4ab9c490f42207b9441398857d3614 --- /dev/null +++ b/diffusers3/pipelines/shap_e/pipeline_shap_e.py @@ -0,0 +1,334 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + +from ...models import PriorTransformer +from ...schedulers import HeunDiscreteScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .renderer import ShapERenderer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import DiffusionPipeline + >>> from diffusers.utils import export_to_gif + + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + >>> repo = "openai/shap-e" + >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> guidance_scale = 15.0 + >>> prompt = "a shark" + + >>> images = pipe( + ... prompt, + ... guidance_scale=guidance_scale, + ... num_inference_steps=64, + ... frame_size=256, + ... ).images + + >>> gif_path = export_to_gif(images[0], "shark_3d.gif") + ``` +""" + + +@dataclass +class ShapEPipelineOutput(BaseOutput): + """ + Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`]. + + Args: + images (`torch.Tensor`) + A list of images for 3D rendering. + """ + + images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]] + + +class ShapEPipeline(DiffusionPipeline): + """ + Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method. 
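As an aside on the `create_pan_cameras` helper defined just above, a sketch of what it produces, assuming this repo's `diffusers3` package is importable:

```py
from diffusers3.pipelines.shap_e.camera import create_pan_cameras

cameras = create_pan_cameras(64)      # twenty 64x64 views orbiting the origin at radius 4
print(cameras.shape)                  # (1, 20): one batch of twenty camera poses
rays = cameras.camera_rays            # per-pixel (origin, direction) pairs
print(rays.shape)                     # torch.Size([1, 81920, 2, 3]) == (1, 20 * 64 * 64, 2, 3)
```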
+ + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + text_encoder ([`~transformers.CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + scheduler ([`HeunDiscreteScheduler`]): + A scheduler to be used in combination with the `prior` model to generate image embedding. + shap_e_renderer ([`ShapERenderer`]): + Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF + rendering method. + """ + + model_cpu_offload_seq = "text_encoder->prior" + _exclude_from_cpu_offload = ["shap_e_renderer"] + + def __init__( + self, + prior: PriorTransformer, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + scheduler: HeunDiscreteScheduler, + shap_e_renderer: ShapERenderer, + ): + super().__init__() + + self.register_modules( + prior=prior, + text_encoder=text_encoder, + tokenizer=tokenizer, + scheduler=scheduler, + shap_e_renderer=shap_e_renderer, + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + ): + len(prompt) if isinstance(prompt, list) else 1 + + # YiYi Notes: set pad_token_id to be 0, not sure why I can't set in the config file + self.tokenizer.pad_token_id = 0 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + prompt_embeds = text_encoder_output.text_embeds + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + # in Shap-E it normalize the prompt_embeds and then later rescale it + prompt_embeds = prompt_embeds / torch.linalg.norm(prompt_embeds, dim=-1, keepdim=True) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + + # For classifier free guidance, we need to do two forward passes. 
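+            # (one conditioned on the prompt embedding and one on an all-zero "negative" embedding).
+            # Illustrative shapes, assuming a single prompt and a 768-dimensional CLIP text
+            # projection: prompt_embeds and negative_prompt_embeds are each [1, 768], and the
+            # concatenated batch is [2, 768] with row 0 unconditional and row 1 conditional, so the
+            # prior evaluates both branches in a single batched forward pass.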
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # Rescale the features to have unit variance + prompt_embeds = math.sqrt(prompt_embeds.shape[1]) * prompt_embeds + + return prompt_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: str, + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + guidance_scale: float = 4.0, + frame_size: int = 64, + output_type: Optional[str] = "pil", # pil, np, latent, mesh + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + frame_size (`int`, *optional*, default to 64): + The width and height of each image frame of the generated 3D output. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain + tuple. + + Examples: + + Returns: + [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. 
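+
+        A note on guidance (as implemented in the denoising loop of this pipeline): when
+        `guidance_scale > 1`, the prior is evaluated on a conditional and an unconditional batch and
+        the two predictions are combined as
+        `noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`
+        before each scheduler step.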
+ """ + + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + prompt_embeds = self._encode_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) + + # prior + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + num_embeddings = self.prior.config.num_embeddings + embedding_dim = self.prior.config.embedding_dim + + latents = self.prepare_latents( + (batch_size, num_embeddings * embedding_dim), + prompt_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim + latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim) + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + noise_pred = self.prior( + scaled_model_input, + timestep=t, + proj_embedding=prompt_embeds, + ).predicted_image_embedding + + # remove the variance + noise_pred, _ = noise_pred.split( + scaled_model_input.shape[2], dim=2 + ) # batch_size, num_embeddings, embedding_dim + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + timestep=t, + sample=latents, + ).prev_sample + + # Offload all models + self.maybe_free_model_hooks() + + if output_type not in ["np", "pil", "latent", "mesh"]: + raise ValueError( + f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}" + ) + + if output_type == "latent": + return ShapEPipelineOutput(images=latents) + + images = [] + if output_type == "mesh": + for i, latent in enumerate(latents): + mesh = self.shap_e_renderer.decode_to_mesh( + latent[None, :], + device, + ) + images.append(mesh) + + else: + # np, pil + for i, latent in enumerate(latents): + image = self.shap_e_renderer.decode_to_image( + latent[None, :], + device, + size=frame_size, + ) + images.append(image) + + images = torch.stack(images) + + images = images.cpu().numpy() + + if output_type == "pil": + images = [self.numpy_to_pil(image) for image in images] + + if not return_dict: + return (images,) + + return ShapEPipelineOutput(images=images) diff --git a/diffusers3/pipelines/shap_e/pipeline_shap_e_img2img.py b/diffusers3/pipelines/shap_e/pipeline_shap_e_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..7cc145e4c3e294c785c1787dca3811c549f0fedd --- /dev/null +++ b/diffusers3/pipelines/shap_e/pipeline_shap_e_img2img.py @@ -0,0 +1,321 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPVisionModel + +from ...models import PriorTransformer +from ...schedulers import HeunDiscreteScheduler +from ...utils import ( + BaseOutput, + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .renderer import ShapERenderer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from PIL import Image + >>> import torch + >>> from diffusers import DiffusionPipeline + >>> from diffusers.utils import export_to_gif, load_image + + >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + >>> repo = "openai/shap-e-img2img" + >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> guidance_scale = 3.0 + >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png" + >>> image = load_image(image_url).convert("RGB") + + >>> images = pipe( + ... image, + ... guidance_scale=guidance_scale, + ... num_inference_steps=64, + ... frame_size=256, + ... ).images + + >>> gif_path = export_to_gif(images[0], "corgi_3d.gif") + ``` +""" + + +@dataclass +class ShapEPipelineOutput(BaseOutput): + """ + Output class for [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`]. + + Args: + images (`torch.Tensor`) + A list of images for 3D rendering. + """ + + images: Union[PIL.Image.Image, np.ndarray] + + +class ShapEImg2ImgPipeline(DiffusionPipeline): + """ + Pipeline for generating latent representation of a 3D asset and rendering with the NeRF method from an image. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + image_encoder ([`~transformers.CLIPVisionModel`]): + Frozen image-encoder. + image_processor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to process images. + scheduler ([`HeunDiscreteScheduler`]): + A scheduler to be used in combination with the `prior` model to generate image embedding. + shap_e_renderer ([`ShapERenderer`]): + Shap-E renderer projects the generated latents into parameters of a MLP to create 3D objects with the NeRF + rendering method. 
+ """ + + model_cpu_offload_seq = "image_encoder->prior" + _exclude_from_cpu_offload = ["shap_e_renderer"] + + def __init__( + self, + prior: PriorTransformer, + image_encoder: CLIPVisionModel, + image_processor: CLIPImageProcessor, + scheduler: HeunDiscreteScheduler, + shap_e_renderer: ShapERenderer, + ): + super().__init__() + + self.register_modules( + prior=prior, + image_encoder=image_encoder, + image_processor=image_processor, + scheduler=scheduler, + shap_e_renderer=shap_e_renderer, + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_image( + self, + image, + device, + num_images_per_prompt, + do_classifier_free_guidance, + ): + if isinstance(image, List) and isinstance(image[0], torch.Tensor): + image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0) + + if not isinstance(image, torch.Tensor): + image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0) + + image = image.to(dtype=self.image_encoder.dtype, device=device) + + image_embeds = self.image_encoder(image)["last_hidden_state"] + image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 + + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + negative_image_embeds = torch.zeros_like(image_embeds) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + return image_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image]], + num_images_per_prompt: int = 1, + num_inference_steps: int = 25, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + guidance_scale: float = 4.0, + frame_size: int = 64, + output_type: Optional[str] = "pil", # pil, np, latent, mesh + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be used as the starting point. Can also accept image + latents as image, but if passing latents directly it is not encoded again. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + frame_size (`int`, *optional*, default to 64): + The width and height of each image frame of the generated 3D output. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`), `"latent"` (`torch.Tensor`), or mesh ([`MeshDecoderOutput`]). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] instead of a plain + tuple. + + Examples: + + Returns: + [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, torch.Tensor): + batch_size = image.shape[0] + elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)): + batch_size = len(image) + else: + raise ValueError( + f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}" + ) + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = guidance_scale > 1.0 + image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) + + # prior + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + num_embeddings = self.prior.config.num_embeddings + embedding_dim = self.prior.config.embedding_dim + if latents is None: + latents = self.prepare_latents( + (batch_size, num_embeddings * embedding_dim), + image_embeds.dtype, + device, + generator, + latents, + self.scheduler, + ) + + # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim + latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim) + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + noise_pred = self.prior( + scaled_model_input, + timestep=t, + proj_embedding=image_embeds, + ).predicted_image_embedding + + # remove the variance + noise_pred, _ = noise_pred.split( + scaled_model_input.shape[2], dim=2 + ) # batch_size, num_embeddings, embedding_dim + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) + + latents = self.scheduler.step( + noise_pred, + timestep=t, + sample=latents, + ).prev_sample + + if output_type not in ["np", 
"pil", "latent", "mesh"]: + raise ValueError( + f"Only the output types `pil`, `np`, `latent` and `mesh` are supported not output_type={output_type}" + ) + + # Offload all models + self.maybe_free_model_hooks() + + if output_type == "latent": + return ShapEPipelineOutput(images=latents) + + images = [] + if output_type == "mesh": + for i, latent in enumerate(latents): + mesh = self.shap_e_renderer.decode_to_mesh( + latent[None, :], + device, + ) + images.append(mesh) + + else: + # np, pil + for i, latent in enumerate(latents): + image = self.shap_e_renderer.decode_to_image( + latent[None, :], + device, + size=frame_size, + ) + images.append(image) + + images = torch.stack(images) + + images = images.cpu().numpy() + + if output_type == "pil": + images = [self.numpy_to_pil(image) for image in images] + + if not return_dict: + return (images,) + + return ShapEPipelineOutput(images=images) diff --git a/diffusers3/pipelines/shap_e/renderer.py b/diffusers3/pipelines/shap_e/renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..9d9f9d9b2ab1e923a79c72f21f2d0c5dd7456515 --- /dev/null +++ b/diffusers3/pipelines/shap_e/renderer.py @@ -0,0 +1,1050 @@ +# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Dict, Optional, Tuple + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin +from ...utils import BaseOutput +from .camera import create_pan_cameras + + +def sample_pmf(pmf: torch.Tensor, n_samples: int) -> torch.Tensor: + r""" + Sample from the given discrete probability distribution with replacement. + + The i-th bin is assumed to have mass pmf[i]. + + Args: + pmf: [batch_size, *shape, n_samples, 1] where (pmf.sum(dim=-2) == 1).all() + n_samples: number of samples + + Return: + indices sampled with replacement + """ + + *shape, support_size, last_dim = pmf.shape + assert last_dim == 1 + + cdf = torch.cumsum(pmf.view(-1, support_size), dim=1) + inds = torch.searchsorted(cdf, torch.rand(cdf.shape[0], n_samples, device=cdf.device)) + + return inds.view(*shape, n_samples, 1).clamp(0, support_size - 1) + + +def posenc_nerf(x: torch.Tensor, min_deg: int = 0, max_deg: int = 15) -> torch.Tensor: + """ + Concatenate x and its positional encodings, following NeRF. 
+ + Reference: https://arxiv.org/pdf/2210.04628.pdf + """ + if min_deg == max_deg: + return x + + scales = 2.0 ** torch.arange(min_deg, max_deg, dtype=x.dtype, device=x.device) + *shape, dim = x.shape + xb = (x.reshape(-1, 1, dim) * scales.view(1, -1, 1)).reshape(*shape, -1) + assert xb.shape[-1] == dim * (max_deg - min_deg) + emb = torch.cat([xb, xb + math.pi / 2.0], axis=-1).sin() + return torch.cat([x, emb], dim=-1) + + +def encode_position(position): + return posenc_nerf(position, min_deg=0, max_deg=15) + + +def encode_direction(position, direction=None): + if direction is None: + return torch.zeros_like(posenc_nerf(position, min_deg=0, max_deg=8)) + else: + return posenc_nerf(direction, min_deg=0, max_deg=8) + + +def _sanitize_name(x: str) -> str: + return x.replace(".", "__") + + +def integrate_samples(volume_range, ts, density, channels): + r""" + Function integrating the model output. + + Args: + volume_range: Specifies the integral range [t0, t1] + ts: timesteps + density: torch.Tensor [batch_size, *shape, n_samples, 1] + channels: torch.Tensor [batch_size, *shape, n_samples, n_channels] + returns: + channels: integrated rgb output weights: torch.Tensor [batch_size, *shape, n_samples, 1] (density + *transmittance)[i] weight for each rgb output at [..., i, :]. transmittance: transmittance of this volume + ) + """ + + # 1. Calculate the weights + _, _, dt = volume_range.partition(ts) + ddensity = density * dt + + mass = torch.cumsum(ddensity, dim=-2) + transmittance = torch.exp(-mass[..., -1, :]) + + alphas = 1.0 - torch.exp(-ddensity) + Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2)) + # This is the probability of light hitting and reflecting off of + # something at depth [..., i, :]. + weights = alphas * Ts + + # 2. Integrate channels + channels = torch.sum(channels * weights, dim=-2) + + return channels, weights, transmittance + + +def volume_query_points(volume, grid_size): + indices = torch.arange(grid_size**3, device=volume.bbox_min.device) + zs = indices % grid_size + ys = torch.div(indices, grid_size, rounding_mode="trunc") % grid_size + xs = torch.div(indices, grid_size**2, rounding_mode="trunc") % grid_size + combined = torch.stack([xs, ys, zs], dim=1) + return (combined.float() / (grid_size - 1)) * (volume.bbox_max - volume.bbox_min) + volume.bbox_min + + +def _convert_srgb_to_linear(u: torch.Tensor): + return torch.where(u <= 0.04045, u / 12.92, ((u + 0.055) / 1.055) ** 2.4) + + +def _create_flat_edge_indices( + flat_cube_indices: torch.Tensor, + grid_size: Tuple[int, int, int], +): + num_xs = (grid_size[0] - 1) * grid_size[1] * grid_size[2] + y_offset = num_xs + num_ys = grid_size[0] * (grid_size[1] - 1) * grid_size[2] + z_offset = num_xs + num_ys + return torch.stack( + [ + # Edges spanning x-axis. + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2], + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + (flat_cube_indices[:, 1] + 1) * grid_size[2] + + flat_cube_indices[:, 2], + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + + 1, + flat_cube_indices[:, 0] * grid_size[1] * grid_size[2] + + (flat_cube_indices[:, 1] + 1) * grid_size[2] + + flat_cube_indices[:, 2] + + 1, + # Edges spanning y-axis. 
+ ( + y_offset + + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + ), + ( + y_offset + + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + ), + ( + y_offset + + flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + + 1 + ), + ( + y_offset + + (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2] + + flat_cube_indices[:, 1] * grid_size[2] + + flat_cube_indices[:, 2] + + 1 + ), + # Edges spanning z-axis. + ( + z_offset + + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1) + + flat_cube_indices[:, 1] * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ( + z_offset + + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1) + + flat_cube_indices[:, 1] * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ( + z_offset + + flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1) + + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ( + z_offset + + (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1) + + (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1) + + flat_cube_indices[:, 2] + ), + ], + dim=-1, + ) + + +class VoidNeRFModel(nn.Module): + """ + Implements the default empty space model where all queries are rendered as background. + """ + + def __init__(self, background, channel_scale=255.0): + super().__init__() + background = nn.Parameter(torch.from_numpy(np.array(background)).to(dtype=torch.float32) / channel_scale) + + self.register_buffer("background", background) + + def forward(self, position): + background = self.background[None].to(position.device) + + shape = position.shape[:-1] + ones = [1] * (len(shape) - 1) + n_channels = background.shape[-1] + background = torch.broadcast_to(background.view(background.shape[0], *ones, n_channels), [*shape, n_channels]) + + return background + + +@dataclass +class VolumeRange: + t0: torch.Tensor + t1: torch.Tensor + intersected: torch.Tensor + + def __post_init__(self): + assert self.t0.shape == self.t1.shape == self.intersected.shape + + def partition(self, ts): + """ + Partitions t0 and t1 into n_samples intervals. + + Args: + ts: [batch_size, *shape, n_samples, 1] + + Return: + + lower: [batch_size, *shape, n_samples, 1] upper: [batch_size, *shape, n_samples, 1] delta: [batch_size, + *shape, n_samples, 1] + + where + ts \\in [lower, upper] deltas = upper - lower + """ + + mids = (ts[..., 1:, :] + ts[..., :-1, :]) * 0.5 + lower = torch.cat([self.t0[..., None, :], mids], dim=-2) + upper = torch.cat([mids, self.t1[..., None, :]], dim=-2) + delta = upper - lower + assert lower.shape == upper.shape == delta.shape == ts.shape + return lower, upper, delta + + +class BoundingBoxVolume(nn.Module): + """ + Axis-aligned bounding box defined by the two opposite corners. + """ + + def __init__( + self, + *, + bbox_min, + bbox_max, + min_dist: float = 0.0, + min_t_range: float = 1e-3, + ): + """ + Args: + bbox_min: the left/bottommost corner of the bounding box + bbox_max: the other corner of the bounding box + min_dist: all rays should start at least this distance away from the origin. 
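+            min_t_range: minimum length of a valid [t0, t1] interval; in `intersect` below, rays
+                whose entry and exit ts are closer together than this are treated as misses.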
+ """ + super().__init__() + + self.min_dist = min_dist + self.min_t_range = min_t_range + + self.bbox_min = torch.tensor(bbox_min) + self.bbox_max = torch.tensor(bbox_max) + self.bbox = torch.stack([self.bbox_min, self.bbox_max]) + assert self.bbox.shape == (2, 3) + assert min_dist >= 0.0 + assert min_t_range > 0.0 + + def intersect( + self, + origin: torch.Tensor, + direction: torch.Tensor, + t0_lower: Optional[torch.Tensor] = None, + epsilon=1e-6, + ): + """ + Args: + origin: [batch_size, *shape, 3] + direction: [batch_size, *shape, 3] + t0_lower: Optional [batch_size, *shape, 1] lower bound of t0 when intersecting this volume. + params: Optional meta parameters in case Volume is parametric + epsilon: to stabilize calculations + + Return: + A tuple of (t0, t1, intersected) where each has a shape [batch_size, *shape, 1]. If a ray intersects with + the volume, `o + td` is in the volume for all t in [t0, t1]. If the volume is bounded, t1 is guaranteed to + be on the boundary of the volume. + """ + + batch_size, *shape, _ = origin.shape + ones = [1] * len(shape) + bbox = self.bbox.view(1, *ones, 2, 3).to(origin.device) + + def _safe_divide(a, b, epsilon=1e-6): + return a / torch.where(b < 0, b - epsilon, b + epsilon) + + ts = _safe_divide(bbox - origin[..., None, :], direction[..., None, :], epsilon=epsilon) + + # Cases to think about: + # + # 1. t1 <= t0: the ray does not pass through the AABB. + # 2. t0 < t1 <= 0: the ray intersects but the BB is behind the origin. + # 3. t0 <= 0 <= t1: the ray starts from inside the BB + # 4. 0 <= t0 < t1: the ray is not inside and intersects with the BB twice. + # + # 1 and 4 are clearly handled from t0 < t1 below. + # Making t0 at least min_dist (>= 0) takes care of 2 and 3. + t0 = ts.min(dim=-2).values.max(dim=-1, keepdim=True).values.clamp(self.min_dist) + t1 = ts.max(dim=-2).values.min(dim=-1, keepdim=True).values + assert t0.shape == t1.shape == (batch_size, *shape, 1) + if t0_lower is not None: + assert t0.shape == t0_lower.shape + t0 = torch.maximum(t0, t0_lower) + + intersected = t0 + self.min_t_range < t1 + t0 = torch.where(intersected, t0, torch.zeros_like(t0)) + t1 = torch.where(intersected, t1, torch.ones_like(t1)) + + return VolumeRange(t0=t0, t1=t1, intersected=intersected) + + +class StratifiedRaySampler(nn.Module): + """ + Instead of fixed intervals, a sample is drawn uniformly at random from each interval. + """ + + def __init__(self, depth_mode: str = "linear"): + """ + :param depth_mode: linear samples ts linearly in depth. harmonic ensures + closer points are sampled more densely. 
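+        In every mode the sampler lays down `n_samples` anchor points between t0 and t1 and then
+        draws one uniform sample per resulting bin (stratified sampling). The anchors interpolate
+        t0*(1-s) + t1*s for "linear", exp((1-s)*log(t0) + s*log(t1)) for "geometric", and
+        1 / ((1-s)/t0 + s/t1) for "harmonic", with s running from 0 to 1.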
+ """ + self.depth_mode = depth_mode + assert self.depth_mode in ("linear", "geometric", "harmonic") + + def sample( + self, + t0: torch.Tensor, + t1: torch.Tensor, + n_samples: int, + epsilon: float = 1e-3, + ) -> torch.Tensor: + """ + Args: + t0: start time has shape [batch_size, *shape, 1] + t1: finish time has shape [batch_size, *shape, 1] + n_samples: number of ts to sample + Return: + sampled ts of shape [batch_size, *shape, n_samples, 1] + """ + ones = [1] * (len(t0.shape) - 1) + ts = torch.linspace(0, 1, n_samples).view(*ones, n_samples).to(t0.dtype).to(t0.device) + + if self.depth_mode == "linear": + ts = t0 * (1.0 - ts) + t1 * ts + elif self.depth_mode == "geometric": + ts = (t0.clamp(epsilon).log() * (1.0 - ts) + t1.clamp(epsilon).log() * ts).exp() + elif self.depth_mode == "harmonic": + # The original NeRF recommends this interpolation scheme for + # spherical scenes, but there could be some weird edge cases when + # the observer crosses from the inner to outer volume. + ts = 1.0 / (1.0 / t0.clamp(epsilon) * (1.0 - ts) + 1.0 / t1.clamp(epsilon) * ts) + + mids = 0.5 * (ts[..., 1:] + ts[..., :-1]) + upper = torch.cat([mids, t1], dim=-1) + lower = torch.cat([t0, mids], dim=-1) + # yiyi notes: add a random seed here for testing, don't forget to remove + torch.manual_seed(0) + t_rand = torch.rand_like(ts) + + ts = lower + (upper - lower) * t_rand + return ts.unsqueeze(-1) + + +class ImportanceRaySampler(nn.Module): + """ + Given the initial estimate of densities, this samples more from regions/bins expected to have objects. + """ + + def __init__( + self, + volume_range: VolumeRange, + ts: torch.Tensor, + weights: torch.Tensor, + blur_pool: bool = False, + alpha: float = 1e-5, + ): + """ + Args: + volume_range: the range in which a ray intersects the given volume. + ts: earlier samples from the coarse rendering step + weights: discretized version of density * transmittance + blur_pool: if true, use 2-tap max + 2-tap blur filter from mip-NeRF. + alpha: small value to add to weights. + """ + self.volume_range = volume_range + self.ts = ts.clone().detach() + self.weights = weights.clone().detach() + self.blur_pool = blur_pool + self.alpha = alpha + + @torch.no_grad() + def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor: + """ + Args: + t0: start time has shape [batch_size, *shape, 1] + t1: finish time has shape [batch_size, *shape, 1] + n_samples: number of ts to sample + Return: + sampled ts of shape [batch_size, *shape, n_samples, 1] + """ + lower, upper, _ = self.volume_range.partition(self.ts) + + batch_size, *shape, n_coarse_samples, _ = self.ts.shape + + weights = self.weights + if self.blur_pool: + padded = torch.cat([weights[..., :1, :], weights, weights[..., -1:, :]], dim=-2) + maxes = torch.maximum(padded[..., :-1, :], padded[..., 1:, :]) + weights = 0.5 * (maxes[..., :-1, :] + maxes[..., 1:, :]) + weights = weights + self.alpha + pmf = weights / weights.sum(dim=-2, keepdim=True) + inds = sample_pmf(pmf, n_samples) + assert inds.shape == (batch_size, *shape, n_samples, 1) + assert (inds >= 0).all() and (inds < n_coarse_samples).all() + + t_rand = torch.rand(inds.shape, device=inds.device) + lower_ = torch.gather(lower, -2, inds) + upper_ = torch.gather(upper, -2, inds) + + ts = lower_ + (upper_ - lower_) * t_rand + ts = torch.sort(ts, dim=-2).values + return ts + + +@dataclass +class MeshDecoderOutput(BaseOutput): + """ + A 3D triangle mesh with optional data at the vertices and faces. 
+ + Args: + verts (`torch.Tensor` of shape `(N, 3)`): + array of vertext coordinates + faces (`torch.Tensor` of shape `(N, 3)`): + array of triangles, pointing to indices in verts. + vertext_channels (Dict): + vertext coordinates for each color channel + """ + + verts: torch.Tensor + faces: torch.Tensor + vertex_channels: Dict[str, torch.Tensor] + + +class MeshDecoder(nn.Module): + """ + Construct meshes from Signed distance functions (SDFs) using marching cubes method + """ + + def __init__(self): + super().__init__() + cases = torch.zeros(256, 5, 3, dtype=torch.long) + masks = torch.zeros(256, 5, dtype=torch.bool) + + self.register_buffer("cases", cases) + self.register_buffer("masks", masks) + + def forward(self, field: torch.Tensor, min_point: torch.Tensor, size: torch.Tensor): + """ + For a signed distance field, produce a mesh using marching cubes. + + :param field: a 3D tensor of field values, where negative values correspond + to the outside of the shape. The dimensions correspond to the x, y, and z directions, respectively. + :param min_point: a tensor of shape [3] containing the point corresponding + to (0, 0, 0) in the field. + :param size: a tensor of shape [3] containing the per-axis distance from the + (0, 0, 0) field corner and the (-1, -1, -1) field corner. + """ + assert len(field.shape) == 3, "input must be a 3D scalar field" + dev = field.device + + cases = self.cases.to(dev) + masks = self.masks.to(dev) + + min_point = min_point.to(dev) + size = size.to(dev) + + grid_size = field.shape + grid_size_tensor = torch.tensor(grid_size).to(size) + + # Create bitmasks between 0 and 255 (inclusive) indicating the state + # of the eight corners of each cube. + bitmasks = (field > 0).to(torch.uint8) + bitmasks = bitmasks[:-1, :, :] | (bitmasks[1:, :, :] << 1) + bitmasks = bitmasks[:, :-1, :] | (bitmasks[:, 1:, :] << 2) + bitmasks = bitmasks[:, :, :-1] | (bitmasks[:, :, 1:] << 4) + + # Compute corner coordinates across the entire grid. + corner_coords = torch.empty(*grid_size, 3, device=dev, dtype=field.dtype) + corner_coords[range(grid_size[0]), :, :, 0] = torch.arange(grid_size[0], device=dev, dtype=field.dtype)[ + :, None, None + ] + corner_coords[:, range(grid_size[1]), :, 1] = torch.arange(grid_size[1], device=dev, dtype=field.dtype)[ + :, None + ] + corner_coords[:, :, range(grid_size[2]), 2] = torch.arange(grid_size[2], device=dev, dtype=field.dtype) + + # Compute all vertices across all edges in the grid, even though we will + # throw some out later. We have (X-1)*Y*Z + X*(Y-1)*Z + X*Y*(Z-1) vertices. + # These are all midpoints, and don't account for interpolation (which is + # done later based on the used edge midpoints). + edge_midpoints = torch.cat( + [ + ((corner_coords[:-1] + corner_coords[1:]) / 2).reshape(-1, 3), + ((corner_coords[:, :-1] + corner_coords[:, 1:]) / 2).reshape(-1, 3), + ((corner_coords[:, :, :-1] + corner_coords[:, :, 1:]) / 2).reshape(-1, 3), + ], + dim=0, + ) + + # Create a flat array of [X, Y, Z] indices for each cube. 
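+        # A grid of (X, Y, Z) sample points defines (X-1) * (Y-1) * (Z-1) cubes; with the defaults
+        # used in `decode_to_mesh` (grid_size=128, padded to 130 per axis) that is 129**3 cubes,
+        # each contributing at most 5 triangles according to the 256-entry `cases` lookup table.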
+ cube_indices = torch.zeros( + grid_size[0] - 1, grid_size[1] - 1, grid_size[2] - 1, 3, device=dev, dtype=torch.long + ) + cube_indices[range(grid_size[0] - 1), :, :, 0] = torch.arange(grid_size[0] - 1, device=dev)[:, None, None] + cube_indices[:, range(grid_size[1] - 1), :, 1] = torch.arange(grid_size[1] - 1, device=dev)[:, None] + cube_indices[:, :, range(grid_size[2] - 1), 2] = torch.arange(grid_size[2] - 1, device=dev) + flat_cube_indices = cube_indices.reshape(-1, 3) + + # Create a flat array mapping each cube to 12 global edge indices. + edge_indices = _create_flat_edge_indices(flat_cube_indices, grid_size) + + # Apply the LUT to figure out the triangles. + flat_bitmasks = bitmasks.reshape(-1).long() # must cast to long for indexing to believe this not a mask + local_tris = cases[flat_bitmasks] + local_masks = masks[flat_bitmasks] + # Compute the global edge indices for the triangles. + global_tris = torch.gather(edge_indices, 1, local_tris.reshape(local_tris.shape[0], -1)).reshape( + local_tris.shape + ) + # Select the used triangles for each cube. + selected_tris = global_tris.reshape(-1, 3)[local_masks.reshape(-1)] + + # Now we have a bunch of indices into the full list of possible vertices, + # but we want to reduce this list to only the used vertices. + used_vertex_indices = torch.unique(selected_tris.view(-1)) + used_edge_midpoints = edge_midpoints[used_vertex_indices] + old_index_to_new_index = torch.zeros(len(edge_midpoints), device=dev, dtype=torch.long) + old_index_to_new_index[used_vertex_indices] = torch.arange( + len(used_vertex_indices), device=dev, dtype=torch.long + ) + + # Rewrite the triangles to use the new indices + faces = torch.gather(old_index_to_new_index, 0, selected_tris.view(-1)).reshape(selected_tris.shape) + + # Compute the actual interpolated coordinates corresponding to edge midpoints. + v1 = torch.floor(used_edge_midpoints).to(torch.long) + v2 = torch.ceil(used_edge_midpoints).to(torch.long) + s1 = field[v1[:, 0], v1[:, 1], v1[:, 2]] + s2 = field[v2[:, 0], v2[:, 1], v2[:, 2]] + p1 = (v1.float() / (grid_size_tensor - 1)) * size + min_point + p2 = (v2.float() / (grid_size_tensor - 1)) * size + min_point + # The signs of s1 and s2 should be different. We want to find + # t such that t*s2 + (1-t)*s1 = 0. 
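+        # Solving t*s2 + (1-t)*s1 = 0 for t gives t = s1 / (s1 - s2); the zero crossing of the SDF
+        # along the edge is then the interpolated point p = t*p2 + (1-t)*p1 computed just below.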
+ t = (s1 / (s1 - s2))[:, None] + verts = t * p2 + (1 - t) * p1 + + return MeshDecoderOutput(verts=verts, faces=faces, vertex_channels=None) + + +@dataclass +class MLPNeRFModelOutput(BaseOutput): + density: torch.Tensor + signed_distance: torch.Tensor + channels: torch.Tensor + ts: torch.Tensor + + +class MLPNeRSTFModel(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + d_hidden: int = 256, + n_output: int = 12, + n_hidden_layers: int = 6, + act_fn: str = "swish", + insert_direction_at: int = 4, + ): + super().__init__() + + # Instantiate the MLP + + # Find out the dimension of encoded position and direction + dummy = torch.eye(1, 3) + d_posenc_pos = encode_position(position=dummy).shape[-1] + d_posenc_dir = encode_direction(position=dummy).shape[-1] + + mlp_widths = [d_hidden] * n_hidden_layers + input_widths = [d_posenc_pos] + mlp_widths + output_widths = mlp_widths + [n_output] + + if insert_direction_at is not None: + input_widths[insert_direction_at] += d_posenc_dir + + self.mlp = nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in zip(input_widths, output_widths)]) + + if act_fn == "swish": + # self.activation = swish + # yiyi testing: + self.activation = lambda x: F.silu(x) + else: + raise ValueError(f"Unsupported activation function {act_fn}") + + self.sdf_activation = torch.tanh + self.density_activation = torch.nn.functional.relu + self.channel_activation = torch.sigmoid + + def map_indices_to_keys(self, output): + h_map = { + "sdf": (0, 1), + "density_coarse": (1, 2), + "density_fine": (2, 3), + "stf": (3, 6), + "nerf_coarse": (6, 9), + "nerf_fine": (9, 12), + } + + mapped_output = {k: output[..., start:end] for k, (start, end) in h_map.items()} + + return mapped_output + + def forward(self, *, position, direction, ts, nerf_level="coarse", rendering_mode="nerf"): + h = encode_position(position) + + h_preact = h + h_directionless = None + for i, layer in enumerate(self.mlp): + if i == self.config.insert_direction_at: # 4 in the config + h_directionless = h_preact + h_direction = encode_direction(position, direction=direction) + h = torch.cat([h, h_direction], dim=-1) + + h = layer(h) + + h_preact = h + + if i < len(self.mlp) - 1: + h = self.activation(h) + + h_final = h + if h_directionless is None: + h_directionless = h_preact + + activation = self.map_indices_to_keys(h_final) + + if nerf_level == "coarse": + h_density = activation["density_coarse"] + else: + h_density = activation["density_fine"] + + if rendering_mode == "nerf": + if nerf_level == "coarse": + h_channels = activation["nerf_coarse"] + else: + h_channels = activation["nerf_fine"] + + elif rendering_mode == "stf": + h_channels = activation["stf"] + + density = self.density_activation(h_density) + signed_distance = self.sdf_activation(activation["sdf"]) + channels = self.channel_activation(h_channels) + + # yiyi notes: I think signed_distance is not used + return MLPNeRFModelOutput(density=density, signed_distance=signed_distance, channels=channels, ts=ts) + + +class ChannelsProj(nn.Module): + def __init__( + self, + *, + vectors: int, + channels: int, + d_latent: int, + ): + super().__init__() + self.proj = nn.Linear(d_latent, vectors * channels) + self.norm = nn.LayerNorm(channels) + self.d_latent = d_latent + self.vectors = vectors + self.channels = channels + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_bvd = x + w_vcd = self.proj.weight.view(self.vectors, self.channels, self.d_latent) + b_vc = self.proj.bias.view(1, self.vectors, self.channels) + h = 
torch.einsum("bvd,vcd->bvc", x_bvd, w_vcd) + h = self.norm(h) + + h = h + b_vc + return h + + +class ShapEParamsProjModel(ModelMixin, ConfigMixin): + """ + project the latent representation of a 3D asset to obtain weights of a multi-layer perceptron (MLP). + + For more details, see the original paper: + """ + + @register_to_config + def __init__( + self, + *, + param_names: Tuple[str] = ( + "nerstf.mlp.0.weight", + "nerstf.mlp.1.weight", + "nerstf.mlp.2.weight", + "nerstf.mlp.3.weight", + ), + param_shapes: Tuple[Tuple[int]] = ( + (256, 93), + (256, 256), + (256, 256), + (256, 256), + ), + d_latent: int = 1024, + ): + super().__init__() + + # check inputs + if len(param_names) != len(param_shapes): + raise ValueError("Must provide same number of `param_names` as `param_shapes`") + self.projections = nn.ModuleDict({}) + for k, (vectors, channels) in zip(param_names, param_shapes): + self.projections[_sanitize_name(k)] = ChannelsProj( + vectors=vectors, + channels=channels, + d_latent=d_latent, + ) + + def forward(self, x: torch.Tensor): + out = {} + start = 0 + for k, shape in zip(self.config.param_names, self.config.param_shapes): + vectors, _ = shape + end = start + vectors + x_bvd = x[:, start:end] + out[k] = self.projections[_sanitize_name(k)](x_bvd).reshape(len(x), *shape) + start = end + return out + + +class ShapERenderer(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + *, + param_names: Tuple[str] = ( + "nerstf.mlp.0.weight", + "nerstf.mlp.1.weight", + "nerstf.mlp.2.weight", + "nerstf.mlp.3.weight", + ), + param_shapes: Tuple[Tuple[int]] = ( + (256, 93), + (256, 256), + (256, 256), + (256, 256), + ), + d_latent: int = 1024, + d_hidden: int = 256, + n_output: int = 12, + n_hidden_layers: int = 6, + act_fn: str = "swish", + insert_direction_at: int = 4, + background: Tuple[float] = ( + 255.0, + 255.0, + 255.0, + ), + ): + super().__init__() + + self.params_proj = ShapEParamsProjModel( + param_names=param_names, + param_shapes=param_shapes, + d_latent=d_latent, + ) + self.mlp = MLPNeRSTFModel(d_hidden, n_output, n_hidden_layers, act_fn, insert_direction_at) + self.void = VoidNeRFModel(background=background, channel_scale=255.0) + self.volume = BoundingBoxVolume(bbox_max=[1.0, 1.0, 1.0], bbox_min=[-1.0, -1.0, -1.0]) + self.mesh_decoder = MeshDecoder() + + @torch.no_grad() + def render_rays(self, rays, sampler, n_samples, prev_model_out=None, render_with_direction=False): + """ + Perform volumetric rendering over a partition of possible t's in the union of rendering volumes (written below + with some abuse of notations) + + C(r) := sum( + transmittance(t[i]) * integrate( + lambda t: density(t) * channels(t) * transmittance(t), [t[i], t[i + 1]], + ) for i in range(len(parts)) + ) + transmittance(t[-1]) * void_model(t[-1]).channels + + where + + 1) transmittance(s) := exp(-integrate(density, [t[0], s])) calculates the probability of light passing through + the volume specified by [t[0], s]. (transmittance of 1 means light can pass freely) 2) density and channels are + obtained by evaluating the appropriate part.model at time t. 3) [t[i], t[i + 1]] is defined as the range of t + where the ray intersects (parts[i].volume \\ union(part.volume for part in parts[:i])) at the surface of the + shell (if bounded). If the ray does not intersect, the integral over this segment is evaluated as 0 and + transmittance(t[i + 1]) := transmittance(t[i]). 4) The last term is integration to infinity (e.g. [t[-1], + math.inf]) that is evaluated by the void_model (i.e. 
we consider this space to be empty). + + Args: + rays: [batch_size x ... x 2 x 3] origin and direction. sampler: disjoint volume integrals. n_samples: + number of ts to sample. prev_model_outputs: model outputs from the previous rendering step, including + + :return: A tuple of + - `channels` + - A importance samplers for additional fine-grained rendering + - raw model output + """ + origin, direction = rays[..., 0, :], rays[..., 1, :] + + # Integrate over [t[i], t[i + 1]] + + # 1 Intersect the rays with the current volume and sample ts to integrate along. + vrange = self.volume.intersect(origin, direction, t0_lower=None) + ts = sampler.sample(vrange.t0, vrange.t1, n_samples) + ts = ts.to(rays.dtype) + + if prev_model_out is not None: + # Append the previous ts now before fprop because previous + # rendering used a different model and we can't reuse the output. + ts = torch.sort(torch.cat([ts, prev_model_out.ts], dim=-2), dim=-2).values + + batch_size, *_shape, _t0_dim = vrange.t0.shape + _, *ts_shape, _ts_dim = ts.shape + + # 2. Get the points along the ray and query the model + directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3]) + positions = origin.unsqueeze(-2) + ts * directions + + directions = directions.to(self.mlp.dtype) + positions = positions.to(self.mlp.dtype) + + optional_directions = directions if render_with_direction else None + + model_out = self.mlp( + position=positions, + direction=optional_directions, + ts=ts, + nerf_level="coarse" if prev_model_out is None else "fine", + ) + + # 3. Integrate the model results + channels, weights, transmittance = integrate_samples( + vrange, model_out.ts, model_out.density, model_out.channels + ) + + # 4. Clean up results that do not intersect with the volume. + transmittance = torch.where(vrange.intersected, transmittance, torch.ones_like(transmittance)) + channels = torch.where(vrange.intersected, channels, torch.zeros_like(channels)) + # 5. integration to infinity (e.g. [t[-1], math.inf]) that is evaluated by the void_model (i.e. we consider this space to be empty). + channels = channels + transmittance * self.void(origin) + + weighted_sampler = ImportanceRaySampler(vrange, ts=model_out.ts, weights=weights) + + return channels, weighted_sampler, model_out + + @torch.no_grad() + def decode_to_image( + self, + latents, + device, + size: int = 64, + ray_batch_size: int = 4096, + n_coarse_samples=64, + n_fine_samples=128, + ): + # project the parameters from the generated latents + projected_params = self.params_proj(latents) + + # update the mlp layers of the renderer + for name, param in self.mlp.state_dict().items(): + if f"nerstf.{name}" in projected_params.keys(): + param.copy_(projected_params[f"nerstf.{name}"].squeeze(0)) + + # create cameras object + camera = create_pan_cameras(size) + rays = camera.camera_rays + rays = rays.to(device) + n_batches = rays.shape[1] // ray_batch_size + + coarse_sampler = StratifiedRaySampler() + + images = [] + + for idx in range(n_batches): + rays_batch = rays[:, idx * ray_batch_size : (idx + 1) * ray_batch_size] + + # render rays with coarse, stratified samples. + _, fine_sampler, coarse_model_out = self.render_rays(rays_batch, coarse_sampler, n_coarse_samples) + # Then, render with additional importance-weighted ray samples. 
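+            # With the defaults (n_coarse_samples=64, n_fine_samples=128) the fine pass queries the
+            # MLP at 128 importance-sampled ts merged with the 64 coarse ts (192 samples per ray),
+            # because render_rays concatenates prev_model_out.ts before evaluating the model.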
+ channels, _, _ = self.render_rays( + rays_batch, fine_sampler, n_fine_samples, prev_model_out=coarse_model_out + ) + + images.append(channels) + + images = torch.cat(images, dim=1) + images = images.view(*camera.shape, camera.height, camera.width, -1).squeeze(0) + + return images + + @torch.no_grad() + def decode_to_mesh( + self, + latents, + device, + grid_size: int = 128, + query_batch_size: int = 4096, + texture_channels: Tuple = ("R", "G", "B"), + ): + # 1. project the parameters from the generated latents + projected_params = self.params_proj(latents) + + # 2. update the mlp layers of the renderer + for name, param in self.mlp.state_dict().items(): + if f"nerstf.{name}" in projected_params.keys(): + param.copy_(projected_params[f"nerstf.{name}"].squeeze(0)) + + # 3. decoding with STF rendering + # 3.1 query the SDF values at vertices along a regular 128**3 grid + + query_points = volume_query_points(self.volume, grid_size) + query_positions = query_points[None].repeat(1, 1, 1).to(device=device, dtype=self.mlp.dtype) + + fields = [] + + for idx in range(0, query_positions.shape[1], query_batch_size): + query_batch = query_positions[:, idx : idx + query_batch_size] + + model_out = self.mlp( + position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf" + ) + fields.append(model_out.signed_distance) + + # predicted SDF values + fields = torch.cat(fields, dim=1) + fields = fields.float() + + assert ( + len(fields.shape) == 3 and fields.shape[-1] == 1 + ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" + + fields = fields.reshape(1, *([grid_size] * 3)) + + # create grid 128 x 128 x 128 + # - force a negative border around the SDFs to close off all the models. + full_grid = torch.zeros( + 1, + grid_size + 2, + grid_size + 2, + grid_size + 2, + device=fields.device, + dtype=fields.dtype, + ) + full_grid.fill_(-1.0) + full_grid[:, 1:-1, 1:-1, 1:-1] = fields + fields = full_grid + + # apply a differentiable implementation of Marching Cubes to construct meshs + raw_meshes = [] + mesh_mask = [] + + for field in fields: + raw_mesh = self.mesh_decoder(field, self.volume.bbox_min, self.volume.bbox_max - self.volume.bbox_min) + mesh_mask.append(True) + raw_meshes.append(raw_mesh) + + mesh_mask = torch.tensor(mesh_mask, device=fields.device) + max_vertices = max(len(m.verts) for m in raw_meshes) + + # 3.2. query the texture color head at each vertex of the resulting mesh. 
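+        # Vertex lists of different lengths are padded up to max_vertices by wrapping indices
+        # modulo each mesh's own vertex count so they can be stacked into one batch; the padded
+        # rows are discarded again when the textures are sliced back to len(m.verts) below.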
+ texture_query_positions = torch.stack( + [m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes], + dim=0, + ) + texture_query_positions = texture_query_positions.to(device=device, dtype=self.mlp.dtype) + + textures = [] + + for idx in range(0, texture_query_positions.shape[1], query_batch_size): + query_batch = texture_query_positions[:, idx : idx + query_batch_size] + + texture_model_out = self.mlp( + position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf" + ) + textures.append(texture_model_out.channels) + + # predict texture color + textures = torch.cat(textures, dim=1) + + textures = _convert_srgb_to_linear(textures) + textures = textures.float() + + # 3.3 augument the mesh with texture data + assert len(textures.shape) == 3 and textures.shape[-1] == len( + texture_channels + ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" + + for m, texture in zip(raw_meshes, textures): + texture = texture[: len(m.verts)] + m.vertex_channels = dict(zip(texture_channels, texture.unbind(-1))) + + return raw_meshes[0] diff --git a/diffusers3/pipelines/stable_audio/__init__.py b/diffusers3/pipelines/stable_audio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dfdd419ae9914e64c7fdcf7c152ac308b75d75d2 --- /dev/null +++ b/diffusers3/pipelines/stable_audio/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_stable_audio"] = ["StableAudioProjectionModel"] + _import_structure["pipeline_stable_audio"] = ["StableAudioPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .modeling_stable_audio import StableAudioProjectionModel + from .pipeline_stable_audio import StableAudioPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_audio/modeling_stable_audio.py b/diffusers3/pipelines/stable_audio/modeling_stable_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..b8f8a705de21ae4486318f21da87cb44f0737368 --- /dev/null +++ b/diffusers3/pipelines/stable_audio/modeling_stable_audio.py @@ -0,0 +1,158 @@ +# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from math import pi +from typing import Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin +from ...utils import BaseOutput, logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class StableAudioPositionalEmbedding(nn.Module): + """Used for continuous time""" + + def __init__(self, dim: int): + super().__init__() + assert (dim % 2) == 0 + half_dim = dim // 2 + self.weights = nn.Parameter(torch.randn(half_dim)) + + def forward(self, times: torch.Tensor) -> torch.Tensor: + times = times[..., None] + freqs = times * self.weights[None] * 2 * pi + fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1) + fouriered = torch.cat((times, fouriered), dim=-1) + return fouriered + + +@dataclass +class StableAudioProjectionModelOutput(BaseOutput): + """ + Args: + Class for StableAudio projection layer's outputs. + text_hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states obtained by linearly projecting the hidden-states for the text encoder. + seconds_start_hidden_states (`torch.Tensor` of shape `(batch_size, 1, hidden_size)`, *optional*): + Sequence of hidden-states obtained by linearly projecting the audio start hidden states. + seconds_end_hidden_states (`torch.Tensor` of shape `(batch_size, 1, hidden_size)`, *optional*): + Sequence of hidden-states obtained by linearly projecting the audio end hidden states. + """ + + text_hidden_states: Optional[torch.Tensor] = None + seconds_start_hidden_states: Optional[torch.Tensor] = None + seconds_end_hidden_states: Optional[torch.Tensor] = None + + +class StableAudioNumberConditioner(nn.Module): + """ + A simple linear projection model to map numbers to a latent space. + + Args: + number_embedding_dim (`int`): + Dimensionality of the number embeddings. + min_value (`int`): + The minimum value of the seconds number conditioning modules. + max_value (`int`): + The maximum value of the seconds number conditioning modules + internal_dim (`int`): + Dimensionality of the intermediate number hidden states. 
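+
+    Example (illustrative): a conditioner built with `number_embedding_dim=768`, `min_value=0` and
+        `max_value=512` maps a batch of second values such as `torch.tensor([0.0, 47.55])` to an
+        embedding of shape `(2, 1, 768)`.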
+ """ + + def __init__( + self, + number_embedding_dim, + min_value, + max_value, + internal_dim: Optional[int] = 256, + ): + super().__init__() + self.time_positional_embedding = nn.Sequential( + StableAudioPositionalEmbedding(internal_dim), + nn.Linear(in_features=internal_dim + 1, out_features=number_embedding_dim), + ) + + self.number_embedding_dim = number_embedding_dim + self.min_value = min_value + self.max_value = max_value + + def forward( + self, + floats: torch.Tensor, + ): + floats = floats.clamp(self.min_value, self.max_value) + + normalized_floats = (floats - self.min_value) / (self.max_value - self.min_value) + + # Cast floats to same type as embedder + embedder_dtype = next(self.time_positional_embedding.parameters()).dtype + normalized_floats = normalized_floats.to(embedder_dtype) + + embedding = self.time_positional_embedding(normalized_floats) + float_embeds = embedding.view(-1, 1, self.number_embedding_dim) + + return float_embeds + + +class StableAudioProjectionModel(ModelMixin, ConfigMixin): + """ + A simple linear projection model to map the conditioning values to a shared latent space. + + Args: + text_encoder_dim (`int`): + Dimensionality of the text embeddings from the text encoder (T5). + conditioning_dim (`int`): + Dimensionality of the output conditioning tensors. + min_value (`int`): + The minimum value of the seconds number conditioning modules. + max_value (`int`): + The maximum value of the seconds number conditioning modules + """ + + @register_to_config + def __init__(self, text_encoder_dim, conditioning_dim, min_value, max_value): + super().__init__() + self.text_projection = ( + nn.Identity() if conditioning_dim == text_encoder_dim else nn.Linear(text_encoder_dim, conditioning_dim) + ) + self.start_number_conditioner = StableAudioNumberConditioner(conditioning_dim, min_value, max_value) + self.end_number_conditioner = StableAudioNumberConditioner(conditioning_dim, min_value, max_value) + + def forward( + self, + text_hidden_states: Optional[torch.Tensor] = None, + start_seconds: Optional[torch.Tensor] = None, + end_seconds: Optional[torch.Tensor] = None, + ): + text_hidden_states = ( + text_hidden_states if text_hidden_states is None else self.text_projection(text_hidden_states) + ) + seconds_start_hidden_states = ( + start_seconds if start_seconds is None else self.start_number_conditioner(start_seconds) + ) + seconds_end_hidden_states = end_seconds if end_seconds is None else self.end_number_conditioner(end_seconds) + + return StableAudioProjectionModelOutput( + text_hidden_states=text_hidden_states, + seconds_start_hidden_states=seconds_start_hidden_states, + seconds_end_hidden_states=seconds_end_hidden_states, + ) diff --git a/diffusers3/pipelines/stable_audio/pipeline_stable_audio.py b/diffusers3/pipelines/stable_audio/pipeline_stable_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..4fe082d88957dc4cf29c2f11c9a2d01fbba3252b --- /dev/null +++ b/diffusers3/pipelines/stable_audio/pipeline_stable_audio.py @@ -0,0 +1,745 @@ +# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import torch +from transformers import ( + T5EncoderModel, + T5Tokenizer, + T5TokenizerFast, +) + +from ...models import AutoencoderOobleck, StableAudioDiTModel +from ...models.embeddings import get_1d_rotary_pos_embed +from ...schedulers import EDMDPMSolverMultistepScheduler +from ...utils import ( + logging, + replace_example_docstring, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline +from .modeling_stable_audio import StableAudioProjectionModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import scipy + >>> import torch + >>> import soundfile as sf + >>> from diffusers import StableAudioPipeline + + >>> repo_id = "stabilityai/stable-audio-open-1.0" + >>> pipe = StableAudioPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> # define the prompts + >>> prompt = "The sound of a hammer hitting a wooden surface." + >>> negative_prompt = "Low quality." + + >>> # set the seed for generator + >>> generator = torch.Generator("cuda").manual_seed(0) + + >>> # run the generation + >>> audio = pipe( + ... prompt, + ... negative_prompt=negative_prompt, + ... num_inference_steps=200, + ... audio_end_in_s=10.0, + ... num_waveforms_per_prompt=3, + ... generator=generator, + ... ).audios + + >>> output = audio[0].T.float().cpu().numpy() + >>> sf.write("hammer.wav", output, pipe.vae.sampling_rate) + ``` +""" + + +class StableAudioPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-audio generation using StableAudio. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderOobleck`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.T5EncoderModel`]): + Frozen text-encoder. StableAudio uses the encoder of + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [google-t5/t5-base](https://huggingface.co/google-t5/t5-base) variant. + projection_model ([`StableAudioProjectionModel`]): + A trained model used to linearly project the hidden-states from the text encoder model and the start and + end seconds. The projected hidden-states from the encoder and the conditional seconds are concatenated to + give the input to the transformer model. + tokenizer ([`~transformers.T5Tokenizer`]): + Tokenizer to tokenize text for the frozen text-encoder. + transformer ([`StableAudioDiTModel`]): + A `StableAudioDiTModel` to denoise the encoded audio latents. + scheduler ([`EDMDPMSolverMultistepScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded audio latents. 
+ """ + + model_cpu_offload_seq = "text_encoder->projection_model->transformer->vae" + + def __init__( + self, + vae: AutoencoderOobleck, + text_encoder: T5EncoderModel, + projection_model: StableAudioProjectionModel, + tokenizer: Union[T5Tokenizer, T5TokenizerFast], + transformer: StableAudioDiTModel, + scheduler: EDMDPMSolverMultistepScheduler, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + projection_model=projection_model, + tokenizer=tokenizer, + transformer=transformer, + scheduler=scheduler, + ) + self.rotary_embed_dim = self.transformer.config.attention_head_dim // 2 + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def encode_prompt( + self, + prompt, + device, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + negative_attention_mask: Optional[torch.LongTensor] = None, + ): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # 1. Tokenize text + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + f"The following part of your input was truncated because {self.text_encoder.config.model_type} can " + f"only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_input_ids = text_input_ids.to(device) + attention_mask = attention_mask.to(device) + + # 2. Text encoder forward + self.text_encoder.eval() + prompt_embeds = self.text_encoder( + text_input_ids, + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + if do_classifier_free_guidance and negative_prompt is not None: + uncond_tokens: List[str] + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # 1. Tokenize text + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + uncond_input_ids = uncond_input.input_ids.to(device) + negative_attention_mask = uncond_input.attention_mask.to(device) + + # 2. Text encoder forward + self.text_encoder.eval() + negative_prompt_embeds = self.text_encoder( + uncond_input_ids, + attention_mask=negative_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if negative_attention_mask is not None: + # set the masked tokens to the null embed + negative_prompt_embeds = torch.where( + negative_attention_mask.to(torch.bool).unsqueeze(2), negative_prompt_embeds, 0.0 + ) + + # 3. Project prompt_embeds and negative_prompt_embeds + if do_classifier_free_guidance and negative_prompt_embeds is not None: + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the negative and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + if attention_mask is not None and negative_attention_mask is None: + negative_attention_mask = torch.ones_like(attention_mask) + elif attention_mask is None and negative_attention_mask is not None: + attention_mask = torch.ones_like(negative_attention_mask) + + if attention_mask is not None: + attention_mask = torch.cat([negative_attention_mask, attention_mask]) + + prompt_embeds = self.projection_model( + text_hidden_states=prompt_embeds, + ).text_hidden_states + if attention_mask is not None: + prompt_embeds = prompt_embeds * attention_mask.unsqueeze(-1).to(prompt_embeds.dtype) + prompt_embeds = prompt_embeds * attention_mask.unsqueeze(-1).to(prompt_embeds.dtype) + + return prompt_embeds + + def encode_duration( + self, + audio_start_in_s, + audio_end_in_s, + device, + do_classifier_free_guidance, + batch_size, + ): + audio_start_in_s = audio_start_in_s if isinstance(audio_start_in_s, list) else [audio_start_in_s] + audio_end_in_s = audio_end_in_s if isinstance(audio_end_in_s, list) else [audio_end_in_s] + + if len(audio_start_in_s) == 1: + audio_start_in_s = audio_start_in_s * batch_size + if len(audio_end_in_s) == 1: + audio_end_in_s = audio_end_in_s * batch_size + + # Cast the inputs to floats + audio_start_in_s = [float(x) for x in audio_start_in_s] + audio_start_in_s = torch.tensor(audio_start_in_s).to(device) + + audio_end_in_s = [float(x) for x in audio_end_in_s] + audio_end_in_s = torch.tensor(audio_end_in_s).to(device) + + projection_output = self.projection_model( + start_seconds=audio_start_in_s, + end_seconds=audio_end_in_s, + ) + seconds_start_hidden_states = projection_output.seconds_start_hidden_states + seconds_end_hidden_states = projection_output.seconds_end_hidden_states + + # For classifier free guidance, we need to do two forward passes. 
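+        # The start/end-seconds conditioning is identical for the conditional and unconditional branches.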
+ # Here we repeat the audio hidden states to avoid doing two forward passes + if do_classifier_free_guidance: + seconds_start_hidden_states = torch.cat([seconds_start_hidden_states, seconds_start_hidden_states], dim=0) + seconds_end_hidden_states = torch.cat([seconds_end_hidden_states, seconds_end_hidden_states], dim=0) + + return seconds_start_hidden_states, seconds_end_hidden_states + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + audio_start_in_s, + audio_end_in_s, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + attention_mask=None, + negative_attention_mask=None, + initial_audio_waveforms=None, + initial_audio_sampling_rate=None, + ): + if audio_end_in_s < audio_start_in_s: + raise ValueError( + f"`audio_end_in_s={audio_end_in_s}' must be higher than 'audio_start_in_s={audio_start_in_s}` but " + ) + + if ( + audio_start_in_s < self.projection_model.config.min_value + or audio_start_in_s > self.projection_model.config.max_value + ): + raise ValueError( + f"`audio_start_in_s` must be greater than or equal to {self.projection_model.config.min_value}, and lower than or equal to {self.projection_model.config.max_value} but " + f"is {audio_start_in_s}." + ) + + if ( + audio_end_in_s < self.projection_model.config.min_value + or audio_end_in_s > self.projection_model.config.max_value + ): + raise ValueError( + f"`audio_end_in_s` must be greater than or equal to {self.projection_model.config.min_value}, and lower than or equal to {self.projection_model.config.max_value} but " + f"is {audio_end_in_s}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and (prompt_embeds is None): + raise ValueError( + "Provide either `prompt`, or `prompt_embeds`. Cannot leave" + "`prompt` undefined without specifying `prompt_embeds`." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. 
Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if attention_mask is not None and attention_mask.shape != prompt_embeds.shape[:2]: + raise ValueError( + "`attention_mask should have the same batch size and sequence length as `prompt_embeds`, but got:" + f"`attention_mask: {attention_mask.shape} != `prompt_embeds` {prompt_embeds.shape}" + ) + + if initial_audio_sampling_rate is None and initial_audio_waveforms is not None: + raise ValueError( + "`initial_audio_waveforms' is provided but the sampling rate is not. Make sure to pass `initial_audio_sampling_rate`." + ) + + if initial_audio_sampling_rate is not None and initial_audio_sampling_rate != self.vae.sampling_rate: + raise ValueError( + f"`initial_audio_sampling_rate` must be {self.vae.hop_length}' but is `{initial_audio_sampling_rate}`." + "Make sure to resample the `initial_audio_waveforms` and to correct the sampling rate. " + ) + + def prepare_latents( + self, + batch_size, + num_channels_vae, + sample_size, + dtype, + device, + generator, + latents=None, + initial_audio_waveforms=None, + num_waveforms_per_prompt=None, + audio_channels=None, + ): + shape = (batch_size, num_channels_vae, sample_size) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + # encode the initial audio for use by the model + if initial_audio_waveforms is not None: + # check dimension + if initial_audio_waveforms.ndim == 2: + initial_audio_waveforms = initial_audio_waveforms.unsqueeze(1) + elif initial_audio_waveforms.ndim != 3: + raise ValueError( + f"`initial_audio_waveforms` must be of shape `(batch_size, num_channels, audio_length)` or `(batch_size, audio_length)` but has `{initial_audio_waveforms.ndim}` dimensions" + ) + + audio_vae_length = self.transformer.config.sample_size * self.vae.hop_length + audio_shape = (batch_size // num_waveforms_per_prompt, audio_channels, audio_vae_length) + + # check num_channels + if initial_audio_waveforms.shape[1] == 1 and audio_channels == 2: + initial_audio_waveforms = initial_audio_waveforms.repeat(1, 2, 1) + elif initial_audio_waveforms.shape[1] == 2 and audio_channels == 1: + initial_audio_waveforms = initial_audio_waveforms.mean(1, keepdim=True) + + if initial_audio_waveforms.shape[:2] != audio_shape[:2]: + raise ValueError( + f"`initial_audio_waveforms` must be of shape `(batch_size, num_channels, audio_length)` or `(batch_size, audio_length)` but is of shape `{initial_audio_waveforms.shape}`" + ) + + # crop or pad + audio_length = initial_audio_waveforms.shape[-1] + if audio_length < audio_vae_length: + logger.warning( + f"The provided input waveform is shorter ({audio_length}) than the required audio length ({audio_vae_length}) of the model and will thus be padded." + ) + elif audio_length > audio_vae_length: + logger.warning( + f"The provided input waveform is longer ({audio_length}) than the required audio length ({audio_vae_length}) of the model and will thus be cropped." + ) + + audio = initial_audio_waveforms.new_zeros(audio_shape) + audio[:, :, : min(audio_length, audio_vae_length)] = initial_audio_waveforms[:, :, :audio_vae_length] + + encoded_audio = self.vae.encode(audio).latent_dist.sample(generator) + encoded_audio = encoded_audio.repeat((num_waveforms_per_prompt, 1, 1)) + latents = encoded_audio + latents + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + audio_end_in_s: Optional[float] = None, + audio_start_in_s: Optional[float] = 0.0, + num_inference_steps: int = 100, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_waveforms_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + initial_audio_waveforms: Optional[torch.Tensor] = None, + initial_audio_sampling_rate: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + negative_attention_mask: Optional[torch.LongTensor] = None, + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + output_type: Optional[str] = "pt", + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide audio generation. 
If not defined, you need to pass `prompt_embeds`. + audio_end_in_s (`float`, *optional*, defaults to 47.55): + Audio end index in seconds. + audio_start_in_s (`float`, *optional*, defaults to 0): + Audio start index in seconds. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality audio at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.0): + A higher guidance scale value encourages the model to generate audio that is closely linked to the text + `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in audio generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_waveforms_per_prompt (`int`, *optional*, defaults to 1): + The number of waveforms to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for audio + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + initial_audio_waveforms (`torch.Tensor`, *optional*): + Optional initial audio waveforms to use as the initial audio waveform for generation. Must be of shape + `(batch_size, num_channels, audio_length)` or `(batch_size, audio_length)`, where `batch_size` + corresponds to the number of prompts passed to the model. + initial_audio_sampling_rate (`int`, *optional*): + Sampling rate of the `initial_audio_waveforms`, if they are provided. Must be the same as the model. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-computed text embeddings from the text encoder model. Can be used to easily tweak text inputs, + *e.g.* prompt weighting. If not provided, text embeddings will be computed from `prompt` input + argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-computed negative text embeddings from the text encoder model. Can be used to easily tweak text + inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be computed from + `negative_prompt` input argument. + attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `prompt_embeds`. If not provided, attention mask will + be computed from `prompt` input argument. + negative_attention_mask (`torch.LongTensor`, *optional*): + Pre-computed attention mask to be applied to the `negative_text_audio_duration_embeds`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. 
The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + output_type (`str`, *optional*, defaults to `"pt"`): + The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or + `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion + model (LDM) output. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated audio. + """ + # 0. Convert audio input length from seconds to latent length + downsample_ratio = self.vae.hop_length + + max_audio_length_in_s = self.transformer.config.sample_size * downsample_ratio / self.vae.config.sampling_rate + if audio_end_in_s is None: + audio_end_in_s = max_audio_length_in_s + + if audio_end_in_s - audio_start_in_s > max_audio_length_in_s: + raise ValueError( + f"The total audio length requested ({audio_end_in_s-audio_start_in_s}s) is longer than the model maximum possible length ({max_audio_length_in_s}). Make sure that 'audio_end_in_s-audio_start_in_s<={max_audio_length_in_s}'." + ) + + waveform_start = int(audio_start_in_s * self.vae.config.sampling_rate) + waveform_end = int(audio_end_in_s * self.vae.config.sampling_rate) + waveform_length = int(self.transformer.config.sample_size) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + audio_start_in_s, + audio_end_in_s, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + attention_mask, + negative_attention_mask, + initial_audio_waveforms, + initial_audio_sampling_rate, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + prompt_embeds = self.encode_prompt( + prompt, + device, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + attention_mask, + negative_attention_mask, + ) + + # Encode duration + seconds_start_hidden_states, seconds_end_hidden_states = self.encode_duration( + audio_start_in_s, + audio_end_in_s, + device, + do_classifier_free_guidance and (negative_prompt is not None or negative_prompt_embeds is not None), + batch_size, + ) + + # Create text_audio_duration_embeds and audio_duration_embeds + text_audio_duration_embeds = torch.cat( + [prompt_embeds, seconds_start_hidden_states, seconds_end_hidden_states], dim=1 + ) + + audio_duration_embeds = torch.cat([seconds_start_hidden_states, seconds_end_hidden_states], dim=2) + + # In case of classifier free guidance without negative prompt, we need to create unconditional embeddings and + # to concatenate it to the embeddings + if do_classifier_free_guidance and negative_prompt_embeds is None and negative_prompt is None: + negative_text_audio_duration_embeds = torch.zeros_like( + text_audio_duration_embeds, device=text_audio_duration_embeds.device + ) + text_audio_duration_embeds = torch.cat( + [negative_text_audio_duration_embeds, text_audio_duration_embeds], dim=0 + ) + audio_duration_embeds = torch.cat([audio_duration_embeds, audio_duration_embeds], dim=0) + + bs_embed, seq_len, hidden_size = text_audio_duration_embeds.shape + # duplicate audio_duration_embeds and text_audio_duration_embeds for each generation per prompt, using mps friendly method + text_audio_duration_embeds = text_audio_duration_embeds.repeat(1, num_waveforms_per_prompt, 1) + text_audio_duration_embeds = text_audio_duration_embeds.view( + bs_embed * num_waveforms_per_prompt, seq_len, hidden_size + ) + + audio_duration_embeds = audio_duration_embeds.repeat(1, num_waveforms_per_prompt, 1) + audio_duration_embeds = audio_duration_embeds.view( + bs_embed * num_waveforms_per_prompt, -1, audio_duration_embeds.shape[-1] + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_vae = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_waveforms_per_prompt, + num_channels_vae, + waveform_length, + text_audio_duration_embeds.dtype, + device, + generator, + latents, + initial_audio_waveforms, + num_waveforms_per_prompt, + audio_channels=self.vae.config.audio_channels, + ) + + # 6. Prepare extra step kwargs + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare rotary positional embedding + rotary_embedding = get_1d_rotary_pos_embed( + self.rotary_embed_dim, + latents.shape[2] + audio_duration_embeds.shape[1], + use_real=True, + repeat_interleave_real=False, + ) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.transformer( + latent_model_input, + t.unsqueeze(0), + encoder_hidden_states=text_audio_duration_embeds, + global_hidden_states=audio_duration_embeds, + rotary_embedding=rotary_embedding, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. Post-processing + if not output_type == "latent": + audio = self.vae.decode(latents).sample + else: + return AudioPipelineOutput(audios=latents) + + audio = audio[:, :, waveform_start:waveform_end] + + if output_type == "np": + audio = audio.cpu().float().numpy() + + self.maybe_free_model_hooks() + + if not return_dict: + return (audio,) + + return AudioPipelineOutput(audios=audio) diff --git a/diffusers3/pipelines/stable_cascade/__init__.py b/diffusers3/pipelines/stable_cascade/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5270cb94af01fd94ed6c8e76c243a86cad8ec348 --- /dev/null +++ b/diffusers3/pipelines/stable_cascade/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_cascade"] = ["StableCascadeDecoderPipeline"] + _import_structure["pipeline_stable_cascade_combined"] = ["StableCascadeCombinedPipeline"] + _import_structure["pipeline_stable_cascade_prior"] = ["StableCascadePriorPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_cascade import StableCascadeDecoderPipeline + from .pipeline_stable_cascade_combined import StableCascadeCombinedPipeline + from .pipeline_stable_cascade_prior import StableCascadePriorPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, 
value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade.py b/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade.py new file mode 100644 index 0000000000000000000000000000000000000000..111ccc40c5a551997f7ca9f83f44d68e2c606717 --- /dev/null +++ b/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -0,0 +1,528 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...models import StableCascadeUNet +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import is_torch_version, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline + + >>> prior_pipe = StableCascadePriorPipeline.from_pretrained( + ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16 + ... ).to("cuda") + >>> gen_pipe = StableCascadeDecoderPipeline.from_pretrain( + ... "stabilityai/stable-cascade", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> prior_output = pipe(prompt) + >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt) + ``` +""" + + +class StableCascadeDecoderPipeline(DiffusionPipeline): + """ + Pipeline for generating images from the Stable Cascade model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The CLIP tokenizer. + text_encoder (`CLIPTextModel`): + The CLIP text encoder. + decoder ([`StableCascadeUNet`]): + The Stable Cascade decoder unet. + vqgan ([`PaellaVQModel`]): + The VQGAN model. + scheduler ([`DDPMWuerstchenScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + latent_dim_scale (float, `optional`, defaults to 10.67): + Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are + height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and + width=int(24*10.67)=256 in order to match the training conditions. 
+ """ + + unet_name = "decoder" + text_encoder_name = "text_encoder" + model_cpu_offload_seq = "text_encoder->decoder->vqgan" + _callback_tensor_inputs = [ + "latents", + "prompt_embeds_pooled", + "negative_prompt_embeds", + "image_embeddings", + ] + + def __init__( + self, + decoder: StableCascadeUNet, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + latent_dim_scale: float = 10.67, + ) -> None: + super().__init__() + self.register_modules( + decoder=decoder, + tokenizer=tokenizer, + text_encoder=text_encoder, + scheduler=scheduler, + vqgan=vqgan, + ) + self.register_to_config(latent_dim_scale=latent_dim_scale) + + def prepare_latents( + self, batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, scheduler + ): + _, channels, height, width = image_embeddings.shape + latents_shape = ( + batch_size * num_images_per_prompt, + 4, + int(height * self.config.latent_dim_scale), + int(width * self.config.latent_dim_scale), + ) + + if latents is None: + latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + device, + batch_size, + num_images_per_prompt, + do_classifier_free_guidance, + prompt=None, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_pooled: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_pooled: Optional[torch.Tensor] = None, + ): + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True + ) + prompt_embeds = text_encoder_output.hidden_states[-1] + if prompt_embeds_pooled is None: + prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) + + if negative_prompt_embeds is None and do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size 
+ elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=uncond_input.attention_mask.to(device), + output_hidden_states=True, + ) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] + negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + seq_len = negative_prompt_embeds_pooled.shape[1] + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to( + dtype=self.text_encoder.dtype, device=device + ) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + # done duplicates + + return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled + + def check_inputs( + self, + prompt, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + def get_timestep_ratio_conditioning(self, t, alphas_cumprod): + s = torch.tensor([0.008]) + clamp_range = [0, 1] + min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2 + var = alphas_cumprod[t] + var = var.clamp(*clamp_range) + s, min_var = s.to(var.device), min_var.to(var.device) + ratio = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s + return ratio + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image_embeddings: Union[torch.Tensor, List[torch.Tensor]], + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 10, + guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_pooled: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_pooled: Optional[torch.Tensor] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embedding (`torch.Tensor` or `List[torch.Tensor]`): + Image Embeddings either extracted from an image or generated by a Prior Model. + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + num_inference_steps (`int`, *optional*, defaults to 12): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_embeds_pooled (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
+ If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_prompt_embeds_pooled (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` + input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image + embeddings. + """ + + # 0. Define commonly used variables + device = self._execution_device + dtype = self.decoder.dtype + self._guidance_scale = guidance_scale + if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16: + raise ValueError("`StableCascadeDecoderPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype.") + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + if isinstance(image_embeddings, list): + image_embeddings = torch.cat(image_embeddings, dim=0) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Compute the effective number of images per prompt + # We must account for the fact that the image embeddings from the prior can be generated with num_images_per_prompt > 1 + # This results in a case where a single prompt is associated with multiple image embeddings + # Divide the number of image embeddings by the batch size to determine if this is the case. + num_images_per_prompt = num_images_per_prompt * (image_embeddings.shape[0] // batch_size) + + # 2. Encode caption + if prompt_embeds is None and negative_prompt_embeds is None: + _, prompt_embeds_pooled, _, negative_prompt_embeds_pooled = self.encode_prompt( + prompt=prompt, + device=device, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + ) + + # The pooled embeds from the prior are pooled again before being passed to the decoder + prompt_embeds_pooled = ( + torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled]) + if self.do_classifier_free_guidance + else prompt_embeds_pooled + ) + effnet = ( + torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) + if self.do_classifier_free_guidance + else image_embeddings + ) + + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents( + batch_size, image_embeddings, num_images_per_prompt, dtype, device, generator, latents, self.scheduler + ) + + if isinstance(self.scheduler, DDPMWuerstchenScheduler): + timesteps = timesteps[:-1] + else: + if hasattr(self.scheduler.config, "clip_sample") and self.scheduler.config.clip_sample: + self.scheduler.config.clip_sample = False # disample sample clipping + logger.warning(" set `clip_sample` to be False") + + # 6. Run denoising loop + if hasattr(self.scheduler, "betas"): + alphas = 1.0 - self.scheduler.betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + else: + alphas_cumprod = [] + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + if not isinstance(self.scheduler, DDPMWuerstchenScheduler): + if len(alphas_cumprod) > 0: + timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod) + timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device) + else: + timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype) + else: + timestep_ratio = t.expand(latents.size(0)).to(dtype) + + # 7. 
Denoise latents + predicted_latents = self.decoder( + sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio, + clip_text_pooled=prompt_embeds_pooled, + effnet=effnet, + return_dict=False, + )[0] + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) + predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) + + # 9. Renoise latents to next timestep + if not isinstance(self.scheduler, DDPMWuerstchenScheduler): + timestep_ratio = t + latents = self.scheduler.step( + model_output=predicted_latents, + timestep=timestep_ratio, + sample=latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + # 10. Scale and decode the image latents with vq-vae + latents = self.vqgan.config.scale_factor * latents + images = self.vqgan.decode(latents).sample.clamp(0, 1) + if output_type == "np": + images = images.permute(0, 2, 3, 1).cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + elif output_type == "pil": + images = images.permute(0, 2, 3, 1).cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + images = self.numpy_to_pil(images) + else: + images = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return images + return ImagePipelineOutput(images) diff --git a/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..6724b60cc424aa3b4557d522b08af4302dec2da0 --- /dev/null +++ b/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -0,0 +1,315 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
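+
+# NOTE: a minimal usage sketch, not upstream diffusers code. The combined pipeline defined in this
+# module simply chains `StableCascadePriorPipeline` and `StableCascadeDecoderPipeline`; its
+# `__call__` forwards the prior's `image_embeddings` to the decoder. Running the two stages by hand
+# looks roughly like the following, using the "stabilityai/stable-cascade-prior" and
+# "stabilityai/stable-cascade" checkpoints named in the doc strings below (torch>=2.2 is required
+# for bfloat16 here; the step counts and guidance scales are illustrative, not prescribed values):
+#
+#     import torch
+#     from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline
+#
+#     prior = StableCascadePriorPipeline.from_pretrained(
+#         "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16
+#     ).to("cuda")
+#     decoder = StableCascadeDecoderPipeline.from_pretrained(
+#         "stabilityai/stable-cascade", torch_dtype=torch.bfloat16
+#     ).to("cuda")
+#
+#     prompt = "an image of a shiba inu, donning a spacesuit and helmet"
+#     prior_output = prior(prompt=prompt, num_inference_steps=20, guidance_scale=4.0)
+#     image = decoder(
+#         image_embeddings=prior_output.image_embeddings,
+#         prompt=prompt,
+#         num_inference_steps=10,
+#         guidance_scale=0.0,
+#     ).images[0]
+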
+from typing import Callable, Dict, List, Optional, Union + +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import StableCascadeUNet +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import is_torch_version, replace_example_docstring +from ..pipeline_utils import DiffusionPipeline +from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel +from .pipeline_stable_cascade import StableCascadeDecoderPipeline +from .pipeline_stable_cascade_prior import StableCascadePriorPipeline + + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableCascadeCombinedPipeline + + >>> pipe = StableCascadeCombinedPipeline.from_pretrained( + ... "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16 + ... ) + >>> pipe.enable_model_cpu_offload() + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> images = pipe(prompt=prompt) + ``` +""" + + +class StableCascadeCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Stable Cascade. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The decoder tokenizer to be used for text inputs. + text_encoder (`CLIPTextModel`): + The decoder text encoder to be used for text inputs. + decoder (`StableCascadeUNet`): + The decoder model to be used for decoder image generation pipeline. + scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for decoder image generation pipeline. + vqgan (`PaellaVQModel`): + The VQGAN model to be used for decoder image generation pipeline. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `image_encoder`. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + prior_prior (`StableCascadeUNet`): + The prior model to be used for prior pipeline. + prior_scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for prior pipeline. 
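+        prior_text_encoder (`CLIPTextModel`):
+            The prior text encoder to be used for text inputs.
+        prior_tokenizer (`CLIPTokenizer`):
+            The prior tokenizer to be used for text inputs.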
+ """ + + _load_connected_pipes = True + _optional_components = ["prior_feature_extractor", "prior_image_encoder"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + decoder: StableCascadeUNet, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + prior_prior: StableCascadeUNet, + prior_text_encoder: CLIPTextModel, + prior_tokenizer: CLIPTokenizer, + prior_scheduler: DDPMWuerstchenScheduler, + prior_feature_extractor: Optional[CLIPImageProcessor] = None, + prior_image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_prior=prior_prior, + prior_scheduler=prior_scheduler, + prior_feature_extractor=prior_feature_extractor, + prior_image_encoder=prior_image_encoder, + ) + self.prior_pipe = StableCascadePriorPipeline( + prior=prior_prior, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + image_encoder=prior_image_encoder, + feature_extractor=prior_feature_extractor, + ) + self.decoder_pipe = StableCascadeDecoderPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using ๐Ÿค— + Accelerate, significantly reducing memory usage. Models are moved to a `torch.device('meta')` and loaded on a + GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. + Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. 
+ """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None, + height: int = 512, + width: int = 512, + prior_num_inference_steps: int = 60, + prior_guidance_scale: float = 4.0, + num_inference_steps: int = 12, + decoder_guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_pooled: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_pooled: Optional[torch.Tensor] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation for the prior and decoder. + images (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, *optional*): + The images to guide the image generation for the prior. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, text embeddings will be generated from `prompt` input argument. + prompt_embeds_pooled (`torch.Tensor`, *optional*): + Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` + input argument. + negative_prompt_embeds_pooled (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` + input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. 
+ width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `prior_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `prior_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked + to the text `prompt`, usually at the expense of lower image quality. + prior_num_inference_steps (`Union[int, Dict[float, int]]`, *optional*, defaults to 60): + The number of prior denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. For more specific timestep spacing, you can pass customized + `prior_timesteps` + num_inference_steps (`int`, *optional*, defaults to 12): + The number of decoder denoising steps. More denoising steps usually lead to a higher quality image at + the expense of slower inference. For more specific timestep spacing, you can pass customized + `timesteps` + decoder_guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: + int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + dtype = self.decoder_pipe.decoder.dtype + if is_torch_version("<", "2.2.0") and dtype == torch.bfloat16: + raise ValueError( + "`StableCascadeCombinedPipeline` requires torch>=2.2.0 when using `torch.bfloat16` dtype." + ) + + prior_outputs = self.prior_pipe( + prompt=prompt if prompt_embeds is None else None, + images=images, + height=height, + width=width, + num_inference_steps=prior_num_inference_steps, + guidance_scale=prior_guidance_scale, + negative_prompt=negative_prompt if negative_prompt_embeds is None else None, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + output_type="pt", + return_dict=True, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + ) + image_embeddings = prior_outputs.image_embeddings + prompt_embeds = prior_outputs.get("prompt_embeds", None) + prompt_embeds_pooled = prior_outputs.get("prompt_embeds_pooled", None) + negative_prompt_embeds = prior_outputs.get("negative_prompt_embeds", None) + negative_prompt_embeds_pooled = prior_outputs.get("negative_prompt_embeds_pooled", None) + + outputs = self.decoder_pipe( + image_embeddings=image_embeddings, + prompt=prompt if prompt_embeds is None else None, + num_inference_steps=num_inference_steps, + guidance_scale=decoder_guidance_scale, + negative_prompt=negative_prompt if negative_prompt_embeds is None else None, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + generator=generator, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + return outputs diff --git a/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade_prior.py b/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..058dbf6b0797fce082528a2eae32e3221cb37c3c --- /dev/null +++ b/diffusers3/pipelines/stable_cascade/pipeline_stable_cascade_prior.py @@ -0,0 +1,639 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from math import ceil +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL +import torch +from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...models import StableCascadeUNet +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import BaseOutput, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:] + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableCascadePriorPipeline + + >>> prior_pipe = StableCascadePriorPipeline.from_pretrained( + ... "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16 + ... ).to("cuda") + + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> prior_output = pipe(prompt) + ``` +""" + + +@dataclass +class StableCascadePriorPipelineOutput(BaseOutput): + """ + Output class for WuerstchenPriorPipeline. + + Args: + image_embeddings (`torch.Tensor` or `np.ndarray`) + Prior image embeddings for text prompt + prompt_embeds (`torch.Tensor`): + Text embeddings for the prompt. + negative_prompt_embeds (`torch.Tensor`): + Text embeddings for the negative prompt. + """ + + image_embeddings: Union[torch.Tensor, np.ndarray] + prompt_embeds: Union[torch.Tensor, np.ndarray] + prompt_embeds_pooled: Union[torch.Tensor, np.ndarray] + negative_prompt_embeds: Union[torch.Tensor, np.ndarray] + negative_prompt_embeds_pooled: Union[torch.Tensor, np.ndarray] + + +class StableCascadePriorPipeline(DiffusionPipeline): + """ + Pipeline for generating image prior for Stable Cascade. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + prior ([`StableCascadeUNet`]): + The Stable Cascade prior to approximate the image embedding from the text and/or image embedding. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder + ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)). + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `image_encoder`. + image_encoder ([`CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`DDPMWuerstchenScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. 
+ resolution_multiple ('float', *optional*, defaults to 42.67): + Default resolution for multiple images generated. + """ + + unet_name = "prior" + text_encoder_name = "text_encoder" + model_cpu_offload_seq = "image_encoder->text_encoder->prior" + _optional_components = ["image_encoder", "feature_extractor"] + _callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + prior: StableCascadeUNet, + scheduler: DDPMWuerstchenScheduler, + resolution_multiple: float = 42.67, + feature_extractor: Optional[CLIPImageProcessor] = None, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + ) -> None: + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + prior=prior, + scheduler=scheduler, + ) + self.register_to_config(resolution_multiple=resolution_multiple) + + def prepare_latents( + self, batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, scheduler + ): + latent_shape = ( + num_images_per_prompt * batch_size, + self.prior.config.in_channels, + ceil(height / self.config.resolution_multiple), + ceil(width / self.config.resolution_multiple), + ) + + if latents is None: + latents = randn_tensor(latent_shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != latent_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latent_shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + device, + batch_size, + num_images_per_prompt, + do_classifier_free_guidance, + prompt=None, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_pooled: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_pooled: Optional[torch.Tensor] = None, + ): + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True + ) + prompt_embeds = text_encoder_output.hidden_states[-1] + if prompt_embeds_pooled is None: + prompt_embeds_pooled = text_encoder_output.text_embeds.unsqueeze(1) + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds_pooled = prompt_embeds_pooled.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds = 
prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + prompt_embeds_pooled = prompt_embeds_pooled.repeat_interleave(num_images_per_prompt, dim=0) + + if negative_prompt_embeds is None and do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=uncond_input.attention_mask.to(device), + output_hidden_states=True, + ) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.hidden_states[-1] + negative_prompt_embeds_pooled = negative_prompt_embeds_text_encoder_output.text_embeds.unsqueeze(1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + seq_len = negative_prompt_embeds_pooled.shape[1] + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.to( + dtype=self.text_encoder.dtype, device=device + ) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds_pooled = negative_prompt_embeds_pooled.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + # done duplicates + + return prompt_embeds, prompt_embeds_pooled, negative_prompt_embeds, negative_prompt_embeds_pooled + + def encode_image(self, images, device, dtype, batch_size, num_images_per_prompt): + image_embeds = [] + for image in images: + image = self.feature_extractor(image, return_tensors="pt").pixel_values + image = image.to(device=device, dtype=dtype) + image_embed = self.image_encoder(image).image_embeds.unsqueeze(1) + image_embeds.append(image_embed) + image_embeds = torch.cat(image_embeds, dim=1) + + image_embeds = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1) + negative_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, negative_image_embeds + + def check_inputs( + self, + prompt, + images=None, + image_embeds=None, + negative_prompt=None, + prompt_embeds=None, + prompt_embeds_pooled=None, + negative_prompt_embeds=None, + negative_prompt_embeds_pooled=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in 
{self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and prompt_embeds_pooled is None: + raise ValueError( + "If `prompt_embeds` are provided, `prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`" + ) + + if negative_prompt_embeds is not None and negative_prompt_embeds_pooled is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_pooled` must also be provided. Make sure to generate `prompt_embeds_pooled` from the same text encoder that was used to generate `prompt_embeds`" + ) + + if prompt_embeds_pooled is not None and negative_prompt_embeds_pooled is not None: + if prompt_embeds_pooled.shape != negative_prompt_embeds_pooled.shape: + raise ValueError( + "`prompt_embeds_pooled` and `negative_prompt_embeds_pooled` must have the same shape when passed" + f"directly, but got: `prompt_embeds_pooled` {prompt_embeds_pooled.shape} !=" + f"`negative_prompt_embeds_pooled` {negative_prompt_embeds_pooled.shape}." + ) + + if image_embeds is not None and images is not None: + raise ValueError( + f"Cannot forward both `images`: {images} and `image_embeds`: {image_embeds}. Please make sure to" + " only forward one of the two." + ) + + if images: + for i, image in enumerate(images): + if not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image): + raise TypeError( + f"'images' must contain images of type 'torch.Tensor' or 'PIL.Image.Image, but got" + f"{type(image)} for image number {i}." 
+ ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + def get_timestep_ratio_conditioning(self, t, alphas_cumprod): + s = torch.tensor([0.008]) + clamp_range = [0, 1] + min_var = torch.cos(s / (1 + s) * torch.pi * 0.5) ** 2 + var = alphas_cumprod[t] + var = var.clamp(*clamp_range) + s, min_var = s.to(var.device), min_var.to(var.device) + ratio = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s + return ratio + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + images: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]] = None, + height: int = 1024, + width: int = 1024, + num_inference_steps: int = 20, + timesteps: List[float] = None, + guidance_scale: float = 4.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + prompt_embeds_pooled: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds_pooled: Optional[torch.Tensor] = None, + image_embeds: Optional[torch.Tensor] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pt", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to 1024): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 1024): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 60): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 8.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_embeds_pooled (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_prompt_embeds_pooled (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds_pooled will be generated from `negative_prompt` + input argument. + image_embeds (`torch.Tensor`, *optional*): + Pre-generated image embeddings. Can be used to easily tweak image inputs, *e.g.* prompt weighting. If + not provided, image embeddings will be generated from `image` input argument if existing. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`StableCascadePriorPipelineOutput`] or `tuple` [`StableCascadePriorPipelineOutput`] if `return_dict` is + True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image + embeddings. + """ + + # 0. Define commonly used variables + device = self._execution_device + dtype = next(self.prior.parameters()).dtype + self._guidance_scale = guidance_scale + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + images=images, + image_embeds=image_embeds, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + # 2. 
Encode caption + images + ( + prompt_embeds, + prompt_embeds_pooled, + negative_prompt_embeds, + negative_prompt_embeds_pooled, + ) = self.encode_prompt( + prompt=prompt, + device=device, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + ) + + if images is not None: + image_embeds_pooled, uncond_image_embeds_pooled = self.encode_image( + images=images, + device=device, + dtype=dtype, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + ) + elif image_embeds is not None: + image_embeds_pooled = image_embeds.repeat(batch_size * num_images_per_prompt, 1, 1) + uncond_image_embeds_pooled = torch.zeros_like(image_embeds_pooled) + else: + image_embeds_pooled = torch.zeros( + batch_size * num_images_per_prompt, + 1, + self.prior.config.clip_image_in_channels, + device=device, + dtype=dtype, + ) + uncond_image_embeds_pooled = torch.zeros( + batch_size * num_images_per_prompt, + 1, + self.prior.config.clip_image_in_channels, + device=device, + dtype=dtype, + ) + + if self.do_classifier_free_guidance: + image_embeds = torch.cat([image_embeds_pooled, uncond_image_embeds_pooled], dim=0) + else: + image_embeds = image_embeds_pooled + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_encoder_hidden_states = ( + torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds + ) + text_encoder_pooled = ( + torch.cat([prompt_embeds_pooled, negative_prompt_embeds_pooled]) + if negative_prompt_embeds is not None + else prompt_embeds_pooled + ) + + # 4. Prepare and set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents( + batch_size, height, width, num_images_per_prompt, dtype, device, generator, latents, self.scheduler + ) + + if isinstance(self.scheduler, DDPMWuerstchenScheduler): + timesteps = timesteps[:-1] + else: + if hasattr(self.scheduler.config, "clip_sample") and self.scheduler.config.clip_sample: + self.scheduler.config.clip_sample = False # disample sample clipping + logger.warning(" set `clip_sample` to be False") + # 6. Run denoising loop + if hasattr(self.scheduler, "betas"): + alphas = 1.0 - self.scheduler.betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + else: + alphas_cumprod = [] + + self._num_timesteps = len(timesteps) + for i, t in enumerate(self.progress_bar(timesteps)): + if not isinstance(self.scheduler, DDPMWuerstchenScheduler): + if len(alphas_cumprod) > 0: + timestep_ratio = self.get_timestep_ratio_conditioning(t.long().cpu(), alphas_cumprod) + timestep_ratio = timestep_ratio.expand(latents.size(0)).to(dtype).to(device) + else: + timestep_ratio = t.float().div(self.scheduler.timesteps[-1]).expand(latents.size(0)).to(dtype) + else: + timestep_ratio = t.expand(latents.size(0)).to(dtype) + # 7. 
Denoise image embeddings + predicted_image_embedding = self.prior( + sample=torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + timestep_ratio=torch.cat([timestep_ratio] * 2) if self.do_classifier_free_guidance else timestep_ratio, + clip_text_pooled=text_encoder_pooled, + clip_text=text_encoder_hidden_states, + clip_img=image_embeds, + return_dict=False, + )[0] + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_image_embedding_text, predicted_image_embedding_uncond = predicted_image_embedding.chunk(2) + predicted_image_embedding = torch.lerp( + predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale + ) + + # 9. Renoise latents to next timestep + if not isinstance(self.scheduler, DDPMWuerstchenScheduler): + timestep_ratio = t + latents = self.scheduler.step( + model_output=predicted_image_embedding, timestep=timestep_ratio, sample=latents, generator=generator + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # Offload all models + self.maybe_free_model_hooks() + + if output_type == "np": + latents = latents.cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + prompt_embeds = prompt_embeds.cpu().float().numpy() # float() as bfloat16-> numpy doesnt work + negative_prompt_embeds = ( + negative_prompt_embeds.cpu().float().numpy() if negative_prompt_embeds is not None else None + ) # float() as bfloat16-> numpy doesnt work + + if not return_dict: + return ( + latents, + prompt_embeds, + prompt_embeds_pooled, + negative_prompt_embeds, + negative_prompt_embeds_pooled, + ) + + return StableCascadePriorPipelineOutput( + image_embeddings=latents, + prompt_embeds=prompt_embeds, + prompt_embeds_pooled=prompt_embeds_pooled, + negative_prompt_embeds=negative_prompt_embeds, + negative_prompt_embeds_pooled=negative_prompt_embeds_pooled, + ) diff --git a/diffusers3/pipelines/stable_diffusion/README.md b/diffusers3/pipelines/stable_diffusion/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5b6424308f020ef901aab854b901abaf59b23e37 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/README.md @@ -0,0 +1,176 @@ +# Stable Diffusion + +## Overview + +Stable Diffusion was proposed in [Stable Diffusion Announcement](https://stability.ai/blog/stable-diffusion-announcement) by Patrick Esser and Robin Rombach and the Stability AI team. + +The summary of the model is the following: + +*Stable Diffusion is a text-to-image model that will empower billions of people to create stunning art within seconds. It is a breakthrough in speed and quality meaning that it can run on consumer GPUs. You can see some of the amazing output that has been created by this model without pre or post-processing on this page. The model itself builds upon the work of the team at CompVis and Runway in their widely used latent diffusion model combined with insights from the conditional diffusion models by our lead generative AI developer Katherine Crowson, Dall-E 2 by Open AI, Imagen by Google Brain and many others. 
We are delighted that AI media generation is a cooperative field and hope it can continue this way to bring the gift of creativity to all.* + +## Tips: + +- Stable Diffusion has the same architecture as [Latent Diffusion](https://arxiv.org/abs/2112.10752) but uses a frozen CLIP Text Encoder instead of training the text encoder jointly with the diffusion model. +- An in-detail explanation of the Stable Diffusion model can be found under [Stable Diffusion with ๐Ÿงจ Diffusers](https://huggingface.co/blog/stable_diffusion). +- If you don't want to rely on the Hugging Face Hub and having to pass a authentication token, you can +download the weights with `git lfs install; git clone https://huggingface.co/runwayml/stable-diffusion-v1-5` and instead pass the local path to the cloned folder to `from_pretrained` as shown below. +- Stable Diffusion can work with a variety of different samplers as is shown below. + +## Available Pipelines: + +| Pipeline | Tasks | Colab +|---|---|:---:| +| [pipeline_stable_diffusion.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py) | *Text-to-Image Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) +| [pipeline_stable_diffusion_img2img](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py) | *Image-to-Image Text-Guided Generation* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb) +| [pipeline_stable_diffusion_inpaint](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py) | *Text-Guided Image Inpainting* | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb) + +## Examples: + +### Using Stable Diffusion without being logged into the Hub. + +If you want to download the model weights using a single Python line, you need to be logged in via `huggingface-cli login`. + +```python +from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") +``` + +This however can make it difficult to build applications on top of `diffusers` as you will always have to pass the token around. 
A potential way to solve this issue is by downloading the weights to a local path `"./stable-diffusion-v1-5"`: + +``` +git lfs install +git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 +``` + +and simply passing the local path to `from_pretrained`: + +```python +from diffusers import StableDiffusionPipeline + +pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") +``` + +### Text-to-Image with default PLMS scheduler + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline + +pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") +pipe = pipe.to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### Text-to-Image with DDIM scheduler + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline, DDIMScheduler + +scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") + +pipe = StableDiffusionPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + scheduler=scheduler, +).to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### Text-to-Image with K-LMS scheduler + +```python +# make sure you're logged in with `huggingface-cli login` +from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler + +lms = LMSDiscreteScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") + +pipe = StableDiffusionPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + scheduler=lms, +).to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] + +image.save("astronaut_rides_horse.png") +``` + +### CycleDiffusion using Stable Diffusion and DDIM scheduler + +```python +import requests +import torch +from PIL import Image +from io import BytesIO + +from diffusers import CycleDiffusionPipeline, DDIMScheduler + + +# load the scheduler. CycleDiffusion only supports stochastic schedulers. 
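+# (DDIM behaves stochastically when eta > 0 -- the call below passes eta=0.1 --
+# which is the kind of scheduler CycleDiffusion relies on.)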
+ +# load the pipeline +# make sure you're logged in with `huggingface-cli login` +model_id_or_path = "CompVis/stable-diffusion-v1-4" +scheduler = DDIMScheduler.from_pretrained(model_id_or_path, subfolder="scheduler") +pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path, scheduler=scheduler).to("cuda") + +# let's download an initial image +url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/An%20astronaut%20riding%20a%20horse.png" +response = requests.get(url) +init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = init_image.resize((512, 512)) +init_image.save("horse.png") + +# let's specify a prompt +source_prompt = "An astronaut riding a horse" +prompt = "An astronaut riding an elephant" + +# call the pipeline +image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.8, + guidance_scale=2, + source_guidance_scale=1, +).images[0] + +image.save("horse_to_elephant.png") + +# let's try another example +# See more samples at the original repo: https://github.com/ChenWu98/cycle-diffusion +url = "https://raw.githubusercontent.com/ChenWu98/cycle-diffusion/main/data/dalle2/A%20black%20colored%20car.png" +response = requests.get(url) +init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = init_image.resize((512, 512)) +init_image.save("black.png") + +source_prompt = "A black colored car" +prompt = "A blue colored car" + +# call the pipeline +torch.manual_seed(0) +image = pipe( + prompt=prompt, + source_prompt=source_prompt, + image=init_image, + num_inference_steps=100, + eta=0.1, + strength=0.85, + guidance_scale=3, + source_guidance_scale=1, +).images[0] + +image.save("black_to_blue.png") +``` diff --git a/diffusers3/pipelines/stable_diffusion/__init__.py b/diffusers3/pipelines/stable_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce08aec66ca688e65623e40bdbaf459cc7283b2 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/__init__.py @@ -0,0 +1,202 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_k_diffusion_available, + is_k_diffusion_version, + is_onnx_available, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["StableDiffusionPipelineOutput"]} + +if is_transformers_available() and is_flax_available(): + _import_structure["pipeline_output"].extend(["FlaxStableDiffusionPipelineOutput"]) +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["clip_image_project_model"] = ["CLIPImageProjection"] + _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion"] = ["StableDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + _import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"] + _import_structure["pipeline_stable_diffusion_gligen_text_image"] = 
["StableDiffusionGLIGENTextImagePipeline"] + _import_structure["pipeline_stable_diffusion_img2img"] = ["StableDiffusionImg2ImgPipeline"] + _import_structure["pipeline_stable_diffusion_inpaint"] = ["StableDiffusionInpaintPipeline"] + _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_stable_diffusion_instruct_pix2pix"] = ["StableDiffusionInstructPix2PixPipeline"] + _import_structure["pipeline_stable_diffusion_latent_upscale"] = ["StableDiffusionLatentUpscalePipeline"] + _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"] + _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"] + _import_structure["pipeline_stable_diffusion_upscale"] = ["StableDiffusionUpscalePipeline"] + _import_structure["pipeline_stable_unclip"] = ["StableUnCLIPPipeline"] + _import_structure["pipeline_stable_unclip_img2img"] = ["StableUnCLIPImg2ImgPipeline"] + _import_structure["safety_checker"] = ["StableDiffusionSafetyChecker"] + _import_structure["stable_unclip_image_normalizer"] = ["StableUnCLIPImageNormalizer"] +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + StableDiffusionImageVariationPipeline, + ) + + _dummy_objects.update({"StableDiffusionImageVariationPipeline": StableDiffusionImageVariationPipeline}) +else: + _import_structure["pipeline_stable_diffusion_image_variation"] = ["StableDiffusionImageVariationPipeline"] +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + StableDiffusionDepth2ImgPipeline, + ) + + _dummy_objects.update( + { + "StableDiffusionDepth2ImgPipeline": StableDiffusionDepth2ImgPipeline, + } + ) +else: + _import_structure["pipeline_stable_diffusion_depth2img"] = ["StableDiffusionDepth2ImgPipeline"] + +try: + if not (is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_onnx_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_onnx_objects)) +else: + _import_structure["pipeline_onnx_stable_diffusion"] = [ + "OnnxStableDiffusionPipeline", + "StableDiffusionOnnxPipeline", + ] + _import_structure["pipeline_onnx_stable_diffusion_img2img"] = ["OnnxStableDiffusionImg2ImgPipeline"] + _import_structure["pipeline_onnx_stable_diffusion_inpaint"] = ["OnnxStableDiffusionInpaintPipeline"] + _import_structure["pipeline_onnx_stable_diffusion_inpaint_legacy"] = ["OnnxStableDiffusionInpaintPipelineLegacy"] + _import_structure["pipeline_onnx_stable_diffusion_upscale"] = ["OnnxStableDiffusionUpscalePipeline"] + +if is_transformers_available() and is_flax_available(): + from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState + + _additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState}) + _import_structure["pipeline_flax_stable_diffusion"] = ["FlaxStableDiffusionPipeline"] + _import_structure["pipeline_flax_stable_diffusion_img2img"] = ["FlaxStableDiffusionImg2ImgPipeline"] + _import_structure["pipeline_flax_stable_diffusion_inpaint"] = ["FlaxStableDiffusionInpaintPipeline"] + 
_import_structure["safety_checker_flax"] = ["FlaxStableDiffusionSafetyChecker"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + + else: + from .clip_image_project_model import CLIPImageProjection + from .pipeline_stable_diffusion import ( + StableDiffusionPipeline, + StableDiffusionPipelineOutput, + ) + from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline + from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline + from .pipeline_stable_diffusion_instruct_pix2pix import ( + StableDiffusionInstructPix2PixPipeline, + ) + from .pipeline_stable_diffusion_latent_upscale import ( + StableDiffusionLatentUpscalePipeline, + ) + from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline + from .pipeline_stable_unclip import StableUnCLIPPipeline + from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline + from .safety_checker import StableDiffusionSafetyChecker + from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + StableDiffusionImageVariationPipeline, + ) + else: + from .pipeline_stable_diffusion_image_variation import ( + StableDiffusionImageVariationPipeline, + ) + + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import StableDiffusionDepth2ImgPipeline + else: + from .pipeline_stable_diffusion_depth2img import ( + StableDiffusionDepth2ImgPipeline, + ) + + try: + if not (is_transformers_available() and is_onnx_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_onnx_objects import * + else: + from .pipeline_onnx_stable_diffusion import ( + OnnxStableDiffusionPipeline, + StableDiffusionOnnxPipeline, + ) + from .pipeline_onnx_stable_diffusion_img2img import ( + OnnxStableDiffusionImg2ImgPipeline, + ) + from .pipeline_onnx_stable_diffusion_inpaint import ( + OnnxStableDiffusionInpaintPipeline, + ) + from .pipeline_onnx_stable_diffusion_upscale import ( + OnnxStableDiffusionUpscalePipeline, + ) + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_objects import * + else: + from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline + from .pipeline_flax_stable_diffusion_img2img import ( + FlaxStableDiffusionImg2ImgPipeline, + ) + from .pipeline_flax_stable_diffusion_inpaint import ( + FlaxStableDiffusionInpaintPipeline, + ) + from .pipeline_output import FlaxStableDiffusionPipelineOutput + from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in 
_additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion/__pycache__/__init__.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de17caac121ae014e4f8425c22be187143b82c96 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion/__pycache__/pipeline_output.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion/__pycache__/pipeline_output.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7025ef55b736eb424d2b1c195ce36622220e8ab7 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion/__pycache__/pipeline_output.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..818694d8875ed5c754357b3344cbf0a497df508d Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion/__pycache__/pipeline_stable_diffusion.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion/__pycache__/safety_checker.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion/__pycache__/safety_checker.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b34a2f801a6287d60f1b2690077c79aa0d09fa0 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion/__pycache__/safety_checker.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion/clip_image_project_model.py b/diffusers3/pipelines/stable_diffusion/clip_image_project_model.py new file mode 100644 index 0000000000000000000000000000000000000000..71f9d9714e6b8b7772888d959ae1ae24fae37f77 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/clip_image_project_model.py @@ -0,0 +1,29 @@ +# Copyright 2024 The GLIGEN Authors and HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin + + +class CLIPImageProjection(ModelMixin, ConfigMixin): + @register_to_config + def __init__(self, hidden_size: int = 768): + super().__init__() + self.hidden_size = hidden_size + self.project = nn.Linear(self.hidden_size, self.hidden_size, bias=False) + + def forward(self, x): + return self.project(x) diff --git a/diffusers3/pipelines/stable_diffusion/convert_from_ckpt.py b/diffusers3/pipelines/stable_diffusion/convert_from_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..53dc98aea69898f53847265a3e0a06e31173f100 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/convert_from_ckpt.py @@ -0,0 +1,1869 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Conversion script for the Stable Diffusion checkpoints.""" + +import re +from contextlib import nullcontext +from io import BytesIO +from typing import Dict, Optional, Union + +import requests +import torch +import yaml +from transformers import ( + AutoFeatureExtractor, + BertTokenizerFast, + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from ...models import ( + AutoencoderKL, + ControlNetModel, + PriorTransformer, + UNet2DConditionModel, +) +from ...schedulers import ( + DDIMScheduler, + DDPMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + UnCLIPScheduler, +) +from ...utils import is_accelerate_available, logging +from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel +from ..paint_by_example import PaintByExampleImageEncoder +from ..pipeline_utils import DiffusionPipeline +from .safety_checker import StableDiffusionSafetyChecker +from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + +if is_accelerate_available(): + from accelerate import init_empty_weights + from accelerate.utils import set_module_tensor_to_device + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def shave_segments(path, n_shave_prefix_segments=1): + """ + Removes segments. Positive values shave the first segments, negative shave the last segments. 
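+
+    Illustrative example (the key name is just a sample LDM parameter path): calling
+    `shave_segments("model.diffusion_model.input_blocks.0.0.weight", 2)` returns `"input_blocks.0.0.weight"`,
+    while passing `n_shave_prefix_segments=-1` would instead drop the trailing `"weight"` segment.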
+ """ + if n_shave_prefix_segments >= 0: + return ".".join(path.split(".")[n_shave_prefix_segments:]) + else: + return ".".join(path.split(".")[:n_shave_prefix_segments]) + + +def renew_resnet_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside resnets to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item.replace("in_layers.0", "norm1") + new_item = new_item.replace("in_layers.2", "conv1") + + new_item = new_item.replace("out_layers.0", "norm2") + new_item = new_item.replace("out_layers.3", "conv2") + + new_item = new_item.replace("emb_layers.1", "time_emb_proj") + new_item = new_item.replace("skip_connection", "conv_shortcut") + + new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside resnets to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item + + new_item = new_item.replace("nin_shortcut", "conv_shortcut") + new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def renew_attention_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside attentions to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item + + # new_item = new_item.replace('norm.weight', 'group_norm.weight') + # new_item = new_item.replace('norm.bias', 'group_norm.bias') + + # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') + # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') + + # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): + """ + Updates paths inside attentions to the new naming scheme (local renaming) + """ + mapping = [] + for old_item in old_list: + new_item = old_item + + new_item = new_item.replace("norm.weight", "group_norm.weight") + new_item = new_item.replace("norm.bias", "group_norm.bias") + + new_item = new_item.replace("q.weight", "to_q.weight") + new_item = new_item.replace("q.bias", "to_q.bias") + + new_item = new_item.replace("k.weight", "to_k.weight") + new_item = new_item.replace("k.bias", "to_k.bias") + + new_item = new_item.replace("v.weight", "to_v.weight") + new_item = new_item.replace("v.bias", "to_v.bias") + + new_item = new_item.replace("proj_out.weight", "to_out.0.weight") + new_item = new_item.replace("proj_out.bias", "to_out.0.bias") + + new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) + + mapping.append({"old": old_item, "new": new_item}) + + return mapping + + +def assign_to_checkpoint( + paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None +): + """ + This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits + attention layers, and takes into account additional replacements that may arise. + + Assigns the weights to the new checkpoint. + """ + assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." + + # Splits the attention layers into three variables. 
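+    # Note: in the original checkpoint the attention projection is stored as one fused qkv tensor; below it is
+    # reshaped per attention head, split along dim=1 into separate query/key/value tensors, and then reshaped
+    # to the target parameter shapes expected by diffusers.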
+ if attention_paths_to_split is not None: + for path, path_map in attention_paths_to_split.items(): + old_tensor = old_checkpoint[path] + channels = old_tensor.shape[0] // 3 + + target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) + + num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 + + old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) + query, key, value = old_tensor.split(channels // num_heads, dim=1) + + checkpoint[path_map["query"]] = query.reshape(target_shape) + checkpoint[path_map["key"]] = key.reshape(target_shape) + checkpoint[path_map["value"]] = value.reshape(target_shape) + + for path in paths: + new_path = path["new"] + + # These have already been assigned + if attention_paths_to_split is not None and new_path in attention_paths_to_split: + continue + + # Global renaming happens here + new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") + new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") + new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") + + if additional_replacements is not None: + for replacement in additional_replacements: + new_path = new_path.replace(replacement["old"], replacement["new"]) + + # proj_attn.weight has to be converted from conv 1D to linear + is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path) + shape = old_checkpoint[path["old"]].shape + if is_attn_weight and len(shape) == 3: + checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] + elif is_attn_weight and len(shape) == 4: + checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0] + else: + checkpoint[new_path] = old_checkpoint[path["old"]] + + +def conv_attn_to_linear(checkpoint): + keys = list(checkpoint.keys()) + attn_keys = ["query.weight", "key.weight", "value.weight"] + for key in keys: + if ".".join(key.split(".")[-2:]) in attn_keys: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0, 0] + elif "proj_attn.weight" in key: + if checkpoint[key].ndim > 2: + checkpoint[key] = checkpoint[key][:, :, 0] + + +def create_unet_diffusers_config(original_config, image_size: int, controlnet=False): + """ + Creates a config for the diffusers based on the config of the LDM model. 
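+
+    The returned dict is intended to be passed as keyword arguments to `UNet2DConditionModel` (see
+    `download_from_original_stable_diffusion_ckpt` below) or, when `controlnet=True`, to `ControlNetModel`.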
+ """ + if controlnet: + unet_params = original_config["model"]["params"]["control_stage_config"]["params"] + else: + if ( + "unet_config" in original_config["model"]["params"] + and original_config["model"]["params"]["unet_config"] is not None + ): + unet_params = original_config["model"]["params"]["unet_config"]["params"] + else: + unet_params = original_config["model"]["params"]["network_config"]["params"] + + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + + block_out_channels = [unet_params["model_channels"] * mult for mult in unet_params["channel_mult"]] + + down_block_types = [] + resolution = 1 + for i in range(len(block_out_channels)): + block_type = "CrossAttnDownBlock2D" if resolution in unet_params["attention_resolutions"] else "DownBlock2D" + down_block_types.append(block_type) + if i != len(block_out_channels) - 1: + resolution *= 2 + + up_block_types = [] + for i in range(len(block_out_channels)): + block_type = "CrossAttnUpBlock2D" if resolution in unet_params["attention_resolutions"] else "UpBlock2D" + up_block_types.append(block_type) + resolution //= 2 + + if unet_params["transformer_depth"] is not None: + transformer_layers_per_block = ( + unet_params["transformer_depth"] + if isinstance(unet_params["transformer_depth"], int) + else list(unet_params["transformer_depth"]) + ) + else: + transformer_layers_per_block = 1 + + vae_scale_factor = 2 ** (len(vae_params["ch_mult"]) - 1) + + head_dim = unet_params["num_heads"] if "num_heads" in unet_params else None + use_linear_projection = ( + unet_params["use_linear_in_transformer"] if "use_linear_in_transformer" in unet_params else False + ) + if use_linear_projection: + # stable diffusion 2-base-512 and 2-768 + if head_dim is None: + head_dim_mult = unet_params["model_channels"] // unet_params["num_head_channels"] + head_dim = [head_dim_mult * c for c in list(unet_params["channel_mult"])] + + class_embed_type = None + addition_embed_type = None + addition_time_embed_dim = None + projection_class_embeddings_input_dim = None + context_dim = None + + if unet_params["context_dim"] is not None: + context_dim = ( + unet_params["context_dim"] + if isinstance(unet_params["context_dim"], int) + else unet_params["context_dim"][0] + ) + + if "num_classes" in unet_params: + if unet_params["num_classes"] == "sequential": + if context_dim in [2048, 1280]: + # SDXL + addition_embed_type = "text_time" + addition_time_embed_dim = 256 + else: + class_embed_type = "projection" + assert "adm_in_channels" in unet_params + projection_class_embeddings_input_dim = unet_params["adm_in_channels"] + + config = { + "sample_size": image_size // vae_scale_factor, + "in_channels": unet_params["in_channels"], + "down_block_types": tuple(down_block_types), + "block_out_channels": tuple(block_out_channels), + "layers_per_block": unet_params["num_res_blocks"], + "cross_attention_dim": context_dim, + "attention_head_dim": head_dim, + "use_linear_projection": use_linear_projection, + "class_embed_type": class_embed_type, + "addition_embed_type": addition_embed_type, + "addition_time_embed_dim": addition_time_embed_dim, + "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, + "transformer_layers_per_block": transformer_layers_per_block, + } + + if "disable_self_attentions" in unet_params: + config["only_cross_attention"] = unet_params["disable_self_attentions"] + + if "num_classes" in unet_params and isinstance(unet_params["num_classes"], int): + config["num_class_embeds"] = 
unet_params["num_classes"] + + if controlnet: + config["conditioning_channels"] = unet_params["hint_channels"] + else: + config["out_channels"] = unet_params["out_channels"] + config["up_block_types"] = tuple(up_block_types) + + return config + + +def create_vae_diffusers_config(original_config, image_size: int): + """ + Creates a config for the diffusers based on the config of the LDM model. + """ + vae_params = original_config["model"]["params"]["first_stage_config"]["params"]["ddconfig"] + _ = original_config["model"]["params"]["first_stage_config"]["params"]["embed_dim"] + + block_out_channels = [vae_params["ch"] * mult for mult in vae_params["ch_mult"]] + down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) + up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) + + config = { + "sample_size": image_size, + "in_channels": vae_params["in_channels"], + "out_channels": vae_params["out_ch"], + "down_block_types": tuple(down_block_types), + "up_block_types": tuple(up_block_types), + "block_out_channels": tuple(block_out_channels), + "latent_channels": vae_params["z_channels"], + "layers_per_block": vae_params["num_res_blocks"], + } + return config + + +def create_diffusers_schedular(original_config): + schedular = DDIMScheduler( + num_train_timesteps=original_config["model"]["params"]["timesteps"], + beta_start=original_config["model"]["params"]["linear_start"], + beta_end=original_config["model"]["params"]["linear_end"], + beta_schedule="scaled_linear", + ) + return schedular + + +def create_ldm_bert_config(original_config): + bert_params = original_config["model"]["params"]["cond_stage_config"]["params"] + config = LDMBertConfig( + d_model=bert_params.n_embed, + encoder_layers=bert_params.n_layer, + encoder_ffn_dim=bert_params.n_embed * 4, + ) + return config + + +def convert_ldm_unet_checkpoint( + checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False +): + """ + Takes a state dict and a config, and returns a converted checkpoint. + """ + + if skip_extract_state_dict: + unet_state_dict = checkpoint + else: + # extract state_dict for UNet + unet_state_dict = {} + keys = list(checkpoint.keys()) + + if controlnet: + unet_key = "control_model." + else: + unet_key = "model.diffusion_model." + + # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA + if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: + logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.") + logger.warning( + "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" + " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." + ) + for key in keys: + if key.startswith("model.diffusion_model"): + flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) + else: + if sum(k.startswith("model_ema") for k in keys) > 100: + logger.warning( + "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" + " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
+ ) + + for key in keys: + if key.startswith(unet_key): + unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) + + new_checkpoint = {} + + new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] + new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] + new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] + new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] + + if config["class_embed_type"] is None: + # No parameters to port + ... + elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": + new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] + new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] + new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] + new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] + else: + raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") + + if config["addition_embed_type"] == "text_time": + new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] + new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] + new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] + new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] + + # Relevant to StableDiffusionUpscalePipeline + if "num_class_embeds" in config: + if (config["num_class_embeds"] is not None) and ("label_emb.weight" in unet_state_dict): + new_checkpoint["class_embedding.weight"] = unet_state_dict["label_emb.weight"] + + new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] + new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] + + if not controlnet: + new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] + new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] + new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] + new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] + + # Retrieves the keys for the input blocks only + num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) + input_blocks = { + layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] + for layer_id in range(num_input_blocks) + } + + # Retrieves the keys for the middle blocks only + num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) + middle_blocks = { + layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] + for layer_id in range(num_middle_blocks) + } + + # Retrieves the keys for the output blocks only + num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) + output_blocks = { + layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] + for layer_id in range(num_output_blocks) + } + + for i in range(1, num_input_blocks): + block_id = (i - 1) // (config["layers_per_block"] + 1) + layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) + + resnets = [ + key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key + ] + attentions = [key for key in 
input_blocks[i] if f"input_blocks.{i}.1" in key] + + if f"input_blocks.{i}.0.op.weight" in unet_state_dict: + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( + f"input_blocks.{i}.0.op.weight" + ) + new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( + f"input_blocks.{i}.0.op.bias" + ) + + paths = renew_resnet_paths(resnets) + meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + if len(attentions): + paths = renew_attention_paths(attentions) + + meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + resnet_0 = middle_blocks[0] + attentions = middle_blocks[1] + resnet_1 = middle_blocks[2] + + resnet_0_paths = renew_resnet_paths(resnet_0) + assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) + + resnet_1_paths = renew_resnet_paths(resnet_1) + assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) + + attentions_paths = renew_attention_paths(attentions) + meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} + assign_to_checkpoint( + attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + for i in range(num_output_blocks): + block_id = i // (config["layers_per_block"] + 1) + layer_in_block_id = i % (config["layers_per_block"] + 1) + output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] + output_block_list = {} + + for layer in output_block_layers: + layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) + if layer_id in output_block_list: + output_block_list[layer_id].append(layer_name) + else: + output_block_list[layer_id] = [layer_name] + + if len(output_block_list) > 1: + resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] + attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] + + resnet_0_paths = renew_resnet_paths(resnets) + paths = renew_resnet_paths(resnets) + + meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + + output_block_list = {k: sorted(v) for k, v in sorted(output_block_list.items())} + if ["conv.bias", "conv.weight"] in output_block_list.values(): + index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ + f"output_blocks.{i}.{index}.conv.weight" + ] + new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ + f"output_blocks.{i}.{index}.conv.bias" + ] + + # Clear attentions as they have been attributed above. 
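+                # (When the block has no attention layer, the two keys gathered above are just the
+                # upsampler's conv.weight / conv.bias, which were copied right above.)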
+ if len(attentions) == 2: + attentions = [] + + if len(attentions): + paths = renew_attention_paths(attentions) + meta_path = { + "old": f"output_blocks.{i}.1", + "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", + } + assign_to_checkpoint( + paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config + ) + else: + resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) + for path in resnet_0_paths: + old_path = ".".join(["output_blocks", str(i), path["old"]]) + new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) + + new_checkpoint[new_path] = unet_state_dict[old_path] + + if controlnet: + # conditioning embedding + + orig_index = 0 + + new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.weight" + ) + new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.bias" + ) + + orig_index += 2 + + diffusers_index = 0 + + while diffusers_index < 6: + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.weight" + ) + new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.bias" + ) + diffusers_index += 1 + orig_index += 2 + + new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.weight" + ) + new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop( + f"input_hint_block.{orig_index}.bias" + ) + + # down blocks + for i in range(num_input_blocks): + new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight") + new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias") + + # mid block + new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight") + new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias") + + return new_checkpoint + + +def convert_ldm_vae_checkpoint(checkpoint, config): + # extract state dict for VAE + vae_state_dict = {} + keys = list(checkpoint.keys()) + vae_key = "first_stage_model." 
if any(k.startswith("first_stage_model.") for k in keys) else "" + for key in keys: + if key.startswith(vae_key): + vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) + + new_checkpoint = {} + + new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] + new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] + new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] + new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] + new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] + new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] + + new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] + new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] + new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] + new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] + new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] + new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] + + new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] + new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] + new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] + new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] + + # Retrieves the keys for the encoder down blocks only + num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) + down_blocks = { + layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) + } + + # Retrieves the keys for the decoder up blocks only + num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) + up_blocks = { + layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) + } + + for i in range(num_down_blocks): + resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] + + if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( + f"encoder.down.{i}.downsample.conv.weight" + ) + new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( + f"encoder.down.{i}.downsample.conv.bias" + ) + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] + paths = renew_vae_attention_paths(mid_attentions) + meta_path = {"old": "mid.attn_1", "new": 
"mid_block.attentions.0"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + conv_attn_to_linear(new_checkpoint) + + for i in range(num_up_blocks): + block_id = num_up_blocks - 1 - i + resnets = [ + key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key + ] + + if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.weight" + ] + new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ + f"decoder.up.{block_id}.upsample.conv.bias" + ] + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] + num_mid_res_blocks = 2 + for i in range(1, num_mid_res_blocks + 1): + resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] + + paths = renew_vae_resnet_paths(resnets) + meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + + mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] + paths = renew_vae_attention_paths(mid_attentions) + meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} + assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) + conv_attn_to_linear(new_checkpoint) + return new_checkpoint + + +def convert_ldm_bert_checkpoint(checkpoint, config): + def _copy_attn_layer(hf_attn_layer, pt_attn_layer): + hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight + hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight + hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight + + hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight + hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias + + def _copy_linear(hf_linear, pt_linear): + hf_linear.weight = pt_linear.weight + hf_linear.bias = pt_linear.bias + + def _copy_layer(hf_layer, pt_layer): + # copy layer norms + _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0]) + _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0]) + + # copy attn + _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1]) + + # copy MLP + pt_mlp = pt_layer[1][1] + _copy_linear(hf_layer.fc1, pt_mlp.net[0][0]) + _copy_linear(hf_layer.fc2, pt_mlp.net[2]) + + def _copy_layers(hf_layers, pt_layers): + for i, hf_layer in enumerate(hf_layers): + if i != 0: + i += i + pt_layer = pt_layers[i : i + 2] + _copy_layer(hf_layer, pt_layer) + + hf_model = LDMBertModel(config).eval() + + # copy embeds + hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight + hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight + + # copy layer norm + _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm) + + # copy hidden layers + _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers) + + _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits) + + return hf_model + + +def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None): + if text_encoder is None: + config_name = 
"openai/clip-vit-large-patch14" + try: + config = CLIPTextConfig.from_pretrained(config_name, local_files_only=local_files_only) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: 'openai/clip-vit-large-patch14'." + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + text_model = CLIPTextModel(config) + else: + text_model = text_encoder + + keys = list(checkpoint.keys()) + + text_model_dict = {} + + remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"] + + for key in keys: + for prefix in remove_prefixes: + if key.startswith(prefix): + text_model_dict[key[len(prefix + ".") :]] = checkpoint[key] + + if is_accelerate_available(): + for param_name, param in text_model_dict.items(): + set_module_tensor_to_device(text_model, param_name, "cpu", value=param) + else: + if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): + text_model_dict.pop("text_model.embeddings.position_ids", None) + + text_model.load_state_dict(text_model_dict) + + return text_model + + +textenc_conversion_lst = [ + ("positional_embedding", "text_model.embeddings.position_embedding.weight"), + ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"), + ("ln_final.weight", "text_model.final_layer_norm.weight"), + ("ln_final.bias", "text_model.final_layer_norm.bias"), + ("text_projection", "text_projection.weight"), +] +textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst} + +textenc_transformer_conversion_lst = [ + # (stable-diffusion, HF Diffusers) + ("resblocks.", "text_model.encoder.layers."), + ("ln_1", "layer_norm1"), + ("ln_2", "layer_norm2"), + (".c_fc.", ".fc1."), + (".c_proj.", ".fc2."), + (".attn", ".self_attn"), + ("ln_final.", "transformer.text_model.final_layer_norm."), + ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), + ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), +] +protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst} +textenc_pattern = re.compile("|".join(protected.keys())) + + +def convert_paint_by_example_checkpoint(checkpoint, local_files_only=False): + config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only) + model = PaintByExampleImageEncoder(config) + + keys = list(checkpoint.keys()) + + text_model_dict = {} + + for key in keys: + if key.startswith("cond_stage_model.transformer"): + text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key] + + # load clip vision + model.model.load_state_dict(text_model_dict) + + # load mapper + keys_mapper = { + k[len("cond_stage_model.mapper.res") :]: v + for k, v in checkpoint.items() + if k.startswith("cond_stage_model.mapper") + } + + MAPPING = { + "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"], + "attn.c_proj": ["attn1.to_out.0"], + "ln_1": ["norm1"], + "ln_2": ["norm3"], + "mlp.c_fc": ["ff.net.0.proj"], + "mlp.c_proj": ["ff.net.2"], + } + + mapped_weights = {} + for key, value in keys_mapper.items(): + prefix = key[: len("blocks.i")] + suffix = key.split(prefix)[-1].split(".")[-1] + name = key.split(prefix)[-1].split(suffix)[0][1:-1] + mapped_names = MAPPING[name] + + num_splits = len(mapped_names) + for i, mapped_name in enumerate(mapped_names): + new_name = ".".join([prefix, mapped_name, suffix]) + shape = 
value.shape[0] // num_splits + mapped_weights[new_name] = value[i * shape : (i + 1) * shape] + + model.mapper.load_state_dict(mapped_weights) + + # load final layer norm + model.final_layer_norm.load_state_dict( + { + "bias": checkpoint["cond_stage_model.final_ln.bias"], + "weight": checkpoint["cond_stage_model.final_ln.weight"], + } + ) + + # load final proj + model.proj_out.load_state_dict( + { + "bias": checkpoint["proj_out.bias"], + "weight": checkpoint["proj_out.weight"], + } + ) + + # load uncond vector + model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"]) + return model + + +def convert_open_clip_checkpoint( + checkpoint, + config_name, + prefix="cond_stage_model.model.", + has_projection=False, + local_files_only=False, + **config_kwargs, +): + # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder") + # text_model = CLIPTextModelWithProjection.from_pretrained( + # "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280 + # ) + try: + config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs, local_files_only=local_files_only) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the configuration in the following path: '{config_name}'." + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config) + + keys = list(checkpoint.keys()) + + keys_to_ignore = [] + if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23: + # make sure to remove all keys > 22 + keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")] + keys_to_ignore += ["cond_stage_model.model.text_projection"] + + text_model_dict = {} + + if prefix + "text_projection" in checkpoint: + d_model = int(checkpoint[prefix + "text_projection"].shape[0]) + else: + d_model = 1024 + + text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids") + + for key in keys: + if key in keys_to_ignore: + continue + if key[len(prefix) :] in textenc_conversion_map: + if key.endswith("text_projection"): + value = checkpoint[key].T.contiguous() + else: + value = checkpoint[key] + + text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value + + if key.startswith(prefix + "transformer."): + new_key = key[len(prefix + "transformer.") :] + if new_key.endswith(".in_proj_weight"): + new_key = new_key[: -len(".in_proj_weight")] + new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :] + text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :] + text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :] + elif new_key.endswith(".in_proj_bias"): + new_key = new_key[: -len(".in_proj_bias")] + new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model] + text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2] + text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :] + else: + new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) + + text_model_dict[new_key] = checkpoint[key] + + if is_accelerate_available(): + for 
param_name, param in text_model_dict.items():
+            set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
+    else:
+        if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
+            text_model_dict.pop("text_model.embeddings.position_ids", None)
+
+        text_model.load_state_dict(text_model_dict)
+
+    return text_model
+
+
+def stable_unclip_image_encoder(original_config, local_files_only=False):
+    """
+    Returns the image processor and clip image encoder for the img2img unclip pipeline.
+
+    We currently know of two types of stable unclip models which separately use the clip and the openclip image
+    encoders.
+    """
+
+    image_embedder_config = original_config["model"]["params"]["embedder_config"]
+
+    sd_clip_image_embedder_class = image_embedder_config["target"]
+    sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1]
+
+    if sd_clip_image_embedder_class == "ClipImageEmbedder":
+        clip_model_name = image_embedder_config.params.model
+
+        if clip_model_name == "ViT-L/14":
+            feature_extractor = CLIPImageProcessor()
+            image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+                "openai/clip-vit-large-patch14", local_files_only=local_files_only
+            )
+        else:
+            raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}")
+
+    elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder":
+        feature_extractor = CLIPImageProcessor()
+        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+            "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", local_files_only=local_files_only
+        )
+    else:
+        raise NotImplementedError(
+            f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}"
+        )
+
+    return feature_extractor, image_encoder
+
+
+def stable_unclip_image_noising_components(
+    original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None
+):
+    """
+    Returns the noising components for the img2img and txt2img unclip pipelines.
+
+    Converts the stability noise augmentor into
+    1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats
+    2. a `DDPMScheduler` for holding the noise schedule
+
+    If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided.
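+
+    A minimal usage sketch (the clip-stats path below is only a placeholder):
+
+    ```py
+    image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components(
+        original_config, clip_stats_path="./clip_stats.pt", device="cpu"
+    )
+    ```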
+ """ + noise_aug_config = original_config["model"]["params"]["noise_aug_config"] + noise_aug_class = noise_aug_config["target"] + noise_aug_class = noise_aug_class.split(".")[-1] + + if noise_aug_class == "CLIPEmbeddingNoiseAugmentation": + noise_aug_config = noise_aug_config.params + embedding_dim = noise_aug_config.timestep_dim + max_noise_level = noise_aug_config.noise_schedule_config.timesteps + beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule + + image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim) + image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule) + + if "clip_stats_path" in noise_aug_config: + if clip_stats_path is None: + raise ValueError("This stable unclip config requires a `clip_stats_path`") + + clip_mean, clip_std = torch.load(clip_stats_path, map_location=device) + clip_mean = clip_mean[None, :] + clip_std = clip_std[None, :] + + clip_stats_state_dict = { + "mean": clip_mean, + "std": clip_std, + } + + image_normalizer.load_state_dict(clip_stats_state_dict) + else: + raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}") + + return image_normalizer, image_noising_scheduler + + +def convert_controlnet_checkpoint( + checkpoint, + original_config, + checkpoint_path, + image_size, + upcast_attention, + extract_ema, + use_linear_projection=None, + cross_attention_dim=None, +): + ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) + ctrlnet_config["upcast_attention"] = upcast_attention + + ctrlnet_config.pop("sample_size") + + if use_linear_projection is not None: + ctrlnet_config["use_linear_projection"] = use_linear_projection + + if cross_attention_dim is not None: + ctrlnet_config["cross_attention_dim"] = cross_attention_dim + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + controlnet = ControlNetModel(**ctrlnet_config) + + # Some controlnet ckpt files are distributed independently from the rest of the + # model components i.e. 
https://huggingface.co/thibaud/controlnet-sd21/ + if "time_embed.0.weight" in checkpoint: + skip_extract_state_dict = True + else: + skip_extract_state_dict = False + + converted_ctrl_checkpoint = convert_ldm_unet_checkpoint( + checkpoint, + ctrlnet_config, + path=checkpoint_path, + extract_ema=extract_ema, + controlnet=True, + skip_extract_state_dict=skip_extract_state_dict, + ) + + if is_accelerate_available(): + for param_name, param in converted_ctrl_checkpoint.items(): + set_module_tensor_to_device(controlnet, param_name, "cpu", value=param) + else: + controlnet.load_state_dict(converted_ctrl_checkpoint) + + return controlnet + + +def download_from_original_stable_diffusion_ckpt( + checkpoint_path_or_dict: Union[str, Dict[str, torch.Tensor]], + original_config_file: str = None, + image_size: Optional[int] = None, + prediction_type: str = None, + model_type: str = None, + extract_ema: bool = False, + scheduler_type: str = "pndm", + num_in_channels: Optional[int] = None, + upcast_attention: Optional[bool] = None, + device: str = None, + from_safetensors: bool = False, + stable_unclip: Optional[str] = None, + stable_unclip_prior: Optional[str] = None, + clip_stats_path: Optional[str] = None, + controlnet: Optional[bool] = None, + adapter: Optional[bool] = None, + load_safety_checker: bool = True, + safety_checker: Optional[StableDiffusionSafetyChecker] = None, + feature_extractor: Optional[AutoFeatureExtractor] = None, + pipeline_class: DiffusionPipeline = None, + local_files_only=False, + vae_path=None, + vae=None, + text_encoder=None, + text_encoder_2=None, + tokenizer=None, + tokenizer_2=None, + config_files=None, +) -> DiffusionPipeline: + """ + Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml` + config file. + + Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the + global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is + recommended that you override the default values and/or supply an `original_config_file` wherever possible. + + Args: + checkpoint_path_or_dict (`str` or `dict`): Path to `.ckpt` file, or the state dict. + original_config_file (`str`): + Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically + inferred by looking for a key that only exists in SD2.0 models. + image_size (`int`, *optional*, defaults to 512): + The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2 + Base. Use 768 for Stable Diffusion v2. + prediction_type (`str`, *optional*): + The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable + Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2. + num_in_channels (`int`, *optional*, defaults to None): + The number of input channels. If `None`, it will be automatically inferred. + scheduler_type (`str`, *optional*, defaults to 'pndm'): + Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", + "ddim"]`. + model_type (`str`, *optional*, defaults to `None`): + The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder", + "FrozenCLIPEmbedder", "PaintByExample"]`. + is_img2img (`bool`, *optional*, defaults to `False`): + Whether the model should be loaded as an img2img pipeline. 
+ extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for + checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to + `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for + inference. Non-EMA weights are usually better to continue fine-tuning. + upcast_attention (`bool`, *optional*, defaults to `None`): + Whether the attention computation should always be upcasted. This is necessary when running stable + diffusion 2.1. + device (`str`, *optional*, defaults to `None`): + The device to use. Pass `None` to determine automatically. + from_safetensors (`str`, *optional*, defaults to `False`): + If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch. + load_safety_checker (`bool`, *optional*, defaults to `True`): + Whether to load the safety checker or not. Defaults to `True`. + safety_checker (`StableDiffusionSafetyChecker`, *optional*, defaults to `None`): + Safety checker to use. If this parameter is `None`, the function will load a new instance of + [StableDiffusionSafetyChecker] by itself, if needed. + feature_extractor (`AutoFeatureExtractor`, *optional*, defaults to `None`): + Feature extractor to use. If this parameter is `None`, the function will load a new instance of + [AutoFeatureExtractor] by itself, if needed. + pipeline_class (`str`, *optional*, defaults to `None`): + The pipeline class to use. Pass `None` to determine automatically. + local_files_only (`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (i.e., do not try to download the model). + vae (`AutoencoderKL`, *optional*, defaults to `None`): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If + this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed. + text_encoder (`CLIPTextModel`, *optional*, defaults to `None`): + An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel) + to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) + variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed. + tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`): + An instance of + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer) + to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if + needed. + config_files (`Dict[str, str]`, *optional*, defaults to `None`): + A dictionary mapping from config file names to their contents. If this parameter is `None`, the function + will load the config files by itself, if needed. Valid keys are: + - `v1`: Config file for Stable Diffusion v1 + - `v2`: Config file for Stable Diffusion v2 + - `xl`: Config file for Stable Diffusion XL + - `xl_refiner`: Config file for Stable Diffusion XL Refiner + return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file. 
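+
+    Example (a minimal sketch; the checkpoint and config paths below are placeholders for local files):
+
+    ```py
+    pipe = download_from_original_stable_diffusion_ckpt(
+        "./v1-5-pruned-emaonly.ckpt",
+        original_config_file="./v1-inference.yaml",
+        extract_ema=True,
+    )
+    pipe.save_pretrained("./converted-pipeline")
+    ```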
+ """ + + # import pipelines here to avoid circular import error when using from_single_file method + from diffusers import ( + LDMTextToImagePipeline, + PaintByExamplePipeline, + StableDiffusionControlNetPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionPipeline, + StableDiffusionUpscalePipeline, + StableDiffusionXLControlNetInpaintPipeline, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLPipeline, + StableUnCLIPImg2ImgPipeline, + StableUnCLIPPipeline, + ) + + if prediction_type == "v-prediction": + prediction_type = "v_prediction" + + if isinstance(checkpoint_path_or_dict, str): + if from_safetensors: + from safetensors.torch import load_file as safe_load + + checkpoint = safe_load(checkpoint_path_or_dict, device="cpu") + else: + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) + else: + checkpoint = torch.load(checkpoint_path_or_dict, map_location=device) + elif isinstance(checkpoint_path_or_dict, dict): + checkpoint = checkpoint_path_or_dict + + # Sometimes models don't have the global_step item + if "global_step" in checkpoint: + global_step = checkpoint["global_step"] + else: + logger.debug("global_step key not found in model") + global_step = None + + # NOTE: this while loop isn't great but this controlnet checkpoint has one additional + # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 + while "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + if original_config_file is None: + key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" + key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias" + key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias" + is_upscale = pipeline_class == StableDiffusionUpscalePipeline + + config_url = None + + # model_type = "v1" + if config_files is not None and "v1" in config_files: + original_config_file = config_files["v1"] + else: + config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" + + if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024: + # model_type = "v2" + if config_files is not None and "v2" in config_files: + original_config_file = config_files["v2"] + else: + config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" + if global_step == 110000: + # v2.1 needs to upcast attention + upcast_attention = True + elif key_name_sd_xl_base in checkpoint: + # only base xl has two text embedders + if config_files is not None and "xl" in config_files: + original_config_file = config_files["xl"] + else: + config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" + elif key_name_sd_xl_refiner in checkpoint: + # only refiner xl has embedder and one text embedders + if config_files is not None and "xl_refiner" in config_files: + original_config_file = config_files["xl_refiner"] + else: + config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml" + + if is_upscale: + config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml" + + if config_url is not None: + original_config_file = 
BytesIO(requests.get(config_url).content) + else: + with open(original_config_file, "r") as f: + original_config_file = f.read() + else: + with open(original_config_file, "r") as f: + original_config_file = f.read() + + original_config = yaml.safe_load(original_config_file) + + # Convert the text model. + if ( + model_type is None + and "cond_stage_config" in original_config["model"]["params"] + and original_config["model"]["params"]["cond_stage_config"] is not None + ): + model_type = original_config["model"]["params"]["cond_stage_config"]["target"].split(".")[-1] + logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}") + elif model_type is None and original_config["model"]["params"]["network_config"] is not None: + if original_config["model"]["params"]["network_config"]["params"]["context_dim"] == 2048: + model_type = "SDXL" + else: + model_type = "SDXL-Refiner" + if image_size is None: + image_size = 1024 + + if pipeline_class is None: + # Check if we have a SDXL or SD model and initialize default pipeline + if model_type not in ["SDXL", "SDXL-Refiner"]: + pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline + else: + pipeline_class = StableDiffusionXLPipeline if model_type == "SDXL" else StableDiffusionXLImg2ImgPipeline + + if num_in_channels is None and pipeline_class in [ + StableDiffusionInpaintPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLControlNetInpaintPipeline, + ]: + num_in_channels = 9 + if num_in_channels is None and pipeline_class == StableDiffusionUpscalePipeline: + num_in_channels = 7 + elif num_in_channels is None: + num_in_channels = 4 + + if "unet_config" in original_config["model"]["params"]: + original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels + elif "network_config" in original_config["model"]["params"]: + original_config["model"]["params"]["network_config"]["params"]["in_channels"] = num_in_channels + + if ( + "parameterization" in original_config["model"]["params"] + and original_config["model"]["params"]["parameterization"] == "v" + ): + if prediction_type is None: + # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"` + # as it relies on a brittle global step parameter here + prediction_type = "epsilon" if global_step == 875000 else "v_prediction" + if image_size is None: + # NOTE: For stable diffusion 2 base one has to pass `image_size==512` + # as it relies on a brittle global step parameter here + image_size = 512 if global_step == 875000 else 768 + else: + if prediction_type is None: + prediction_type = "epsilon" + if image_size is None: + image_size = 512 + + if controlnet is None and "control_stage_config" in original_config["model"]["params"]: + path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else "" + controlnet = convert_controlnet_checkpoint( + checkpoint, original_config, path, image_size, upcast_attention, extract_ema + ) + + if "timesteps" in original_config["model"]["params"]: + num_train_timesteps = original_config["model"]["params"]["timesteps"] + else: + num_train_timesteps = 1000 + + if model_type in ["SDXL", "SDXL-Refiner"]: + scheduler_dict = { + "beta_schedule": "scaled_linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "interpolation_type": "linear", + "num_train_timesteps": num_train_timesteps, + "prediction_type": "epsilon", + "sample_max_value": 1.0, + "set_alpha_to_one": False, + "skip_prk_steps": True, + "steps_offset": 1, + 
"timestep_spacing": "leading", + } + scheduler = EulerDiscreteScheduler.from_config(scheduler_dict) + scheduler_type = "euler" + else: + if "linear_start" in original_config["model"]["params"]: + beta_start = original_config["model"]["params"]["linear_start"] + else: + beta_start = 0.02 + + if "linear_end" in original_config["model"]["params"]: + beta_end = original_config["model"]["params"]["linear_end"] + else: + beta_end = 0.085 + scheduler = DDIMScheduler( + beta_end=beta_end, + beta_schedule="scaled_linear", + beta_start=beta_start, + num_train_timesteps=num_train_timesteps, + steps_offset=1, + clip_sample=False, + set_alpha_to_one=False, + prediction_type=prediction_type, + ) + # make sure scheduler works correctly with DDIM + scheduler.register_to_config(clip_sample=False) + + if scheduler_type == "pndm": + config = dict(scheduler.config) + config["skip_prk_steps"] = True + scheduler = PNDMScheduler.from_config(config) + elif scheduler_type == "lms": + scheduler = LMSDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "heun": + scheduler = HeunDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "euler": + scheduler = EulerDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "euler-ancestral": + scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config) + elif scheduler_type == "dpm": + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + elif scheduler_type == "ddim": + scheduler = scheduler + else: + raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") + + if pipeline_class == StableDiffusionUpscalePipeline: + image_size = original_config["model"]["params"]["unet_config"]["params"]["image_size"] + + # Convert the UNet2DConditionModel model. + unet_config = create_unet_diffusers_config(original_config, image_size=image_size) + unet_config["upcast_attention"] = upcast_attention + + path = checkpoint_path_or_dict if isinstance(checkpoint_path_or_dict, str) else "" + converted_unet_checkpoint = convert_ldm_unet_checkpoint( + checkpoint, unet_config, path=path, extract_ema=extract_ema + ) + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + unet = UNet2DConditionModel(**unet_config) + + if is_accelerate_available(): + if model_type not in ["SDXL", "SDXL-Refiner"]: # SBM Delay this. + for param_name, param in converted_unet_checkpoint.items(): + set_module_tensor_to_device(unet, param_name, "cpu", value=param) + else: + unet.load_state_dict(converted_unet_checkpoint) + + # Convert the VAE model. 
+ if vae_path is None and vae is None: + vae_config = create_vae_diffusers_config(original_config, image_size=image_size) + converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) + + if ( + "model" in original_config + and "params" in original_config["model"] + and "scale_factor" in original_config["model"]["params"] + ): + vae_scaling_factor = original_config["model"]["params"]["scale_factor"] + else: + vae_scaling_factor = 0.18215 # default SD scaling factor + + vae_config["scaling_factor"] = vae_scaling_factor + + ctx = init_empty_weights if is_accelerate_available() else nullcontext + with ctx(): + vae = AutoencoderKL(**vae_config) + + if is_accelerate_available(): + for param_name, param in converted_vae_checkpoint.items(): + set_module_tensor_to_device(vae, param_name, "cpu", value=param) + else: + vae.load_state_dict(converted_vae_checkpoint) + elif vae is None: + vae = AutoencoderKL.from_pretrained(vae_path, local_files_only=local_files_only) + + if model_type == "FrozenOpenCLIPEmbedder": + config_name = "stabilityai/stable-diffusion-2" + config_kwargs = {"subfolder": "text_encoder"} + + if text_encoder is None: + text_model = convert_open_clip_checkpoint( + checkpoint, config_name, local_files_only=local_files_only, **config_kwargs + ) + else: + text_model = text_encoder + + try: + tokenizer = CLIPTokenizer.from_pretrained( + "stabilityai/stable-diffusion-2", subfolder="tokenizer", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'stabilityai/stable-diffusion-2'." + ) + + if stable_unclip is None: + if controlnet: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + controlnet=controlnet, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + if hasattr(pipe, "requires_safety_checker"): + pipe.requires_safety_checker = False + + elif pipeline_class == StableDiffusionUpscalePipeline: + scheduler = DDIMScheduler.from_pretrained( + "stabilityai/stable-diffusion-x4-upscaler", subfolder="scheduler" + ) + low_res_scheduler = DDPMScheduler.from_pretrained( + "stabilityai/stable-diffusion-x4-upscaler", subfolder="low_res_scheduler" + ) + + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + low_res_scheduler=low_res_scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + + else: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + if hasattr(pipe, "requires_safety_checker"): + pipe.requires_safety_checker = False + + else: + image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components( + original_config, clip_stats_path=clip_stats_path, device=device + ) + + if stable_unclip == "img2img": + feature_extractor, image_encoder = stable_unclip_image_encoder(original_config) + + pipe = StableUnCLIPImg2ImgPipeline( + # image encoding components + feature_extractor=feature_extractor, + image_encoder=image_encoder, + # image noising components + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + # regular denoising components + tokenizer=tokenizer, + text_encoder=text_model, + unet=unet, + scheduler=scheduler, + # vae + vae=vae, + ) + 
elif stable_unclip == "txt2img": + if stable_unclip_prior is None or stable_unclip_prior == "karlo": + karlo_model = "kakaobrain/karlo-v1-alpha" + prior = PriorTransformer.from_pretrained( + karlo_model, subfolder="prior", local_files_only=local_files_only + ) + + try: + prior_tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + prior_text_model = CLIPTextModelWithProjection.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + + prior_scheduler = UnCLIPScheduler.from_pretrained( + karlo_model, subfolder="prior_scheduler", local_files_only=local_files_only + ) + prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config) + else: + raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}") + + pipe = StableUnCLIPPipeline( + # prior components + prior_tokenizer=prior_tokenizer, + prior_text_encoder=prior_text_model, + prior=prior, + prior_scheduler=prior_scheduler, + # image noising components + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + # regular denoising components + tokenizer=tokenizer, + text_encoder=text_model, + unet=unet, + scheduler=scheduler, + # vae + vae=vae, + ) + else: + raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}") + elif model_type == "PaintByExample": + vision_model = convert_paint_by_example_checkpoint(checkpoint) + try: + tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + try: + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the feature_extractor in the following path: 'CompVis/stable-diffusion-safety-checker'." + ) + pipe = PaintByExamplePipeline( + vae=vae, + image_encoder=vision_model, + unet=unet, + scheduler=scheduler, + safety_checker=None, + feature_extractor=feature_extractor, + ) + elif model_type == "FrozenCLIPEmbedder": + text_model = convert_ldm_clip_checkpoint( + checkpoint, local_files_only=local_files_only, text_encoder=text_encoder + ) + try: + tokenizer = ( + CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", local_files_only=local_files_only) + if tokenizer is None + else tokenizer + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." 
+ ) + + if load_safety_checker: + safety_checker = StableDiffusionSafetyChecker.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + feature_extractor = AutoFeatureExtractor.from_pretrained( + "CompVis/stable-diffusion-safety-checker", local_files_only=local_files_only + ) + + if controlnet: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + else: + pipe = pipeline_class( + vae=vae, + text_encoder=text_model, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + elif model_type in ["SDXL", "SDXL-Refiner"]: + is_refiner = model_type == "SDXL-Refiner" + + if (is_refiner is False) and (tokenizer is None): + try: + tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-large-patch14", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'openai/clip-vit-large-patch14'." + ) + + if (is_refiner is False) and (text_encoder is None): + text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only) + + if tokenizer_2 is None: + try: + tokenizer_2 = CLIPTokenizer.from_pretrained( + "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!", local_files_only=local_files_only + ) + except Exception: + raise ValueError( + f"With local_files_only set to {local_files_only}, you must first locally save the tokenizer in the following path: 'laion/CLIP-ViT-bigG-14-laion2B-39B-b160k' with `pad_token` set to '!'." + ) + + if text_encoder_2 is None: + config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" + config_kwargs = {"projection_dim": 1280} + prefix = "conditioner.embedders.0.model." if is_refiner else "conditioner.embedders.1.model." + + text_encoder_2 = convert_open_clip_checkpoint( + checkpoint, + config_name, + prefix=prefix, + has_projection=True, + local_files_only=local_files_only, + **config_kwargs, + ) + + if is_accelerate_available(): # SBM Now move model to cpu. 
+ for param_name, param in converted_unet_checkpoint.items(): + set_module_tensor_to_device(unet, param_name, "cpu", value=param) + + if controlnet: + pipe = pipeline_class( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_encoder_2=text_encoder_2, + tokenizer_2=tokenizer_2, + unet=unet, + controlnet=controlnet, + scheduler=scheduler, + force_zeros_for_empty_prompt=True, + ) + elif adapter: + pipe = pipeline_class( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_encoder_2=text_encoder_2, + tokenizer_2=tokenizer_2, + unet=unet, + adapter=adapter, + scheduler=scheduler, + force_zeros_for_empty_prompt=True, + ) + + else: + pipeline_kwargs = { + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "unet": unet, + "scheduler": scheduler, + } + + if (pipeline_class == StableDiffusionXLImg2ImgPipeline) or ( + pipeline_class == StableDiffusionXLInpaintPipeline + ): + pipeline_kwargs.update({"requires_aesthetics_score": is_refiner}) + + if is_refiner: + pipeline_kwargs.update({"force_zeros_for_empty_prompt": False}) + + pipe = pipeline_class(**pipeline_kwargs) + else: + text_config = create_ldm_bert_config(original_config) + text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) + tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased", local_files_only=local_files_only) + pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler) + + return pipe + + +def download_controlnet_from_original_ckpt( + checkpoint_path: str, + original_config_file: str, + image_size: int = 512, + extract_ema: bool = False, + num_in_channels: Optional[int] = None, + upcast_attention: Optional[bool] = None, + device: str = None, + from_safetensors: bool = False, + use_linear_projection: Optional[bool] = None, + cross_attention_dim: Optional[bool] = None, +) -> DiffusionPipeline: + if from_safetensors: + from safetensors import safe_open + + checkpoint = {} + with safe_open(checkpoint_path, framework="pt", device="cpu") as f: + for key in f.keys(): + checkpoint[key] = f.get_tensor(key) + else: + if device is None: + device = "cuda" if torch.cuda.is_available() else "cpu" + checkpoint = torch.load(checkpoint_path, map_location=device) + else: + checkpoint = torch.load(checkpoint_path, map_location=device) + + # NOTE: this while loop isn't great but this controlnet checkpoint has one additional + # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 + while "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + with open(original_config_file, "r") as f: + original_config_file = f.read() + original_config = yaml.safe_load(original_config_file) + + if num_in_channels is not None: + original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels + + if "control_stage_config" not in original_config["model"]["params"]: + raise ValueError("`control_stage_config` not present in original config") + + controlnet = convert_controlnet_checkpoint( + checkpoint, + original_config, + checkpoint_path, + image_size, + upcast_attention, + extract_ema, + use_linear_projection=use_linear_projection, + cross_attention_dim=cross_attention_dim, + ) + + return controlnet diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py b/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py new file mode 100644 index 
0000000000000000000000000000000000000000..5d6ffd463cc31387ccc5d45bb4b49446a571cef0 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py @@ -0,0 +1,473 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from packaging import version +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import deprecate, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionPipelineOutput +from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + + >>> from diffusers import FlaxStableDiffusionPipeline + + >>> pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", variant="bf16", dtype=jax.numpy.bfloat16 + ... ) + + >>> prompt = "a photo of an astronaut riding a horse on mars" + + >>> prng_seed = jax.random.PRNGKey(0) + >>> num_inference_steps = 50 + + >>> num_samples = jax.device_count() + >>> prompt = num_samples * [prompt] + >>> prompt_ids = pipeline.prepare_inputs(prompt) + # shard inputs and rng + + >>> params = replicate(params) + >>> prng_seed = jax.random.split(prng_seed, jax.device_count()) + >>> prompt_ids = shard(prompt_ids) + + >>> images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images + >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) + ``` +""" + + +class FlaxStableDiffusionPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs(self, prompt: Union[str, List[str]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + return text_input.input_ids + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) + has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." 
+ ) + + return images, has_nsfw_concepts + + def _generate( + self, + prompt_ids: jnp.array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + latents: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + # Ensure model output will be `float32` before going into the scheduler + guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) + + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + height: Optional[int] = None, + width: Optional[int] = None, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + latents: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + return_dict: bool = True, + jit: bool = False, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + latents (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + array is generated by sampling using the supplied random `generator`. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. 
+ + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. + guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + if jit: + images = _p_generate( + self, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + else: + images = self._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.asarray(images).copy() + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i, 0] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0), + static_broadcasted_argnums=(0, 4, 5, 6), +) +def _p_generate( + pipe, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, +): + return pipe._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... 
-> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py b/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..7792bc0975955ab6131e1ba34c6953149e412208 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py @@ -0,0 +1,532 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionPipelineOutput +from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> import jax.numpy as jnp + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + >>> import requests + >>> from io import BytesIO + >>> from PIL import Image + >>> from diffusers import FlaxStableDiffusionImg2ImgPipeline + + + >>> def create_key(seed=0): + ... return jax.random.PRNGKey(seed) + + + >>> rng = create_key(0) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> response = requests.get(url) + >>> init_img = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_img = init_img.resize((768, 512)) + + >>> prompts = "A fantasy landscape, trending on artstation" + + >>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", + ... revision="flax", + ... dtype=jnp.bfloat16, + ... ) + + >>> num_samples = jax.device_count() + >>> rng = jax.random.split(rng, jax.device_count()) + >>> prompt_ids, processed_image = pipeline.prepare_inputs( + ... prompt=[prompts] * num_samples, image=[init_img] * num_samples + ... ) + >>> p_params = replicate(params) + >>> prompt_ids = shard(prompt_ids) + >>> processed_image = shard(processed_image) + + >>> output = pipeline( + ... 
prompt_ids=prompt_ids, + ... image=processed_image, + ... params=p_params, + ... prng_seed=rng, + ... strength=0.75, + ... num_inference_steps=50, + ... jit=True, + ... height=512, + ... width=768, + ... ).images + + >>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) + ``` +""" + + +class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-guided image-to-image generation using Stable Diffusion. + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if not isinstance(image, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(image, Image.Image): + image = [image] + + processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + return text_input.input_ids, processed_images + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) + has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." 
+ ) + + return images, has_nsfw_concepts + + def get_timestep_start(self, num_inference_steps, strength): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + + return t_start + + def _generate( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + start_timestep: int, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + noise: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if noise is None: + noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if noise.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {noise.shape}, expected {latents_shape}") + + # Create init_latents + init_latent_dist = self.vae.apply({"params": params["vae"]}, image, method=self.vae.encode).latent_dist + init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2)) + init_latents = self.vae.config.scaling_factor * init_latents + + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape + ) + + latent_timestep = scheduler_state.timesteps[start_timestep : start_timestep + 1].repeat(batch_size) + + latents = self.scheduler.add_noise(params["scheduler"], init_latents, noise, latent_timestep) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(start_timestep, num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state)) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.ndarray, + image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + strength: float = 0.8, + num_inference_steps: int = 50, + height: Optional[int] = None, + width: Optional[int] = None, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + noise: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + return_dict: bool = True, + jit: bool = False, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt_ids (`jnp.ndarray`): + The prompt or prompts to guide image generation. + image (`jnp.ndarray`): + Array representing an image batch to be used as the starting point. + params (`Dict` or `FrozenDict`): + Dictionary containing the model parameters/weights. + prng_seed (`jax.Array` or `jax.Array`): + Array containing random number generator key. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + noise (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. The array is generated by + sampling using the supplied random `generator`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. + + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. 
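For orientation, the `strength` argument documented above is converted into a starting index into the timestep schedule by `get_timestep_start` a few lines below. The snippet here is a standalone restatement of that mapping with illustrative numbers; it is a sketch, not code from this file.

```python
# Standalone sketch of the strength -> start_timestep mapping used by
# FlaxStableDiffusionImg2ImgPipeline.get_timestep_start; values are illustrative.
def timestep_start(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return max(num_inference_steps - init_timestep, 0)

assert timestep_start(50, 0.75) == 13  # skip the first 13 steps, denoise the rest
assert timestep_start(50, 1.0) == 0    # strength 1.0 runs the full schedule; the input image is ignored
```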
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + start_timestep = self.get_timestep_start(num_inference_steps, strength) + + if jit: + images = _p_generate( + self, + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, + ) + else: + images = self._generate( + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.asarray(images) + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, start_timestep, num_inference_steps, height, width. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0), + static_broadcasted_argnums=(0, 5, 6, 7, 8), +) +def _p_generate( + pipe, + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, +): + return pipe._generate( + prompt_ids, + image, + params, + prng_seed, + start_timestep, + num_inference_steps, + height, + width, + guidance_scale, + noise, + neg_prompt_ids, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... -> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) + + +def preprocess(image, dtype): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = jnp.array(image).astype(dtype) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return 2.0 * image - 1.0 diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py b/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..f6bb0ac299b3bc382fd0a79b1d0a78e907ebd790 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py @@ -0,0 +1,589 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict +from flax.jax_utils import unreplicate +from flax.training.common_utils import shard +from packaging import version +from PIL import Image +from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ...utils import PIL_INTERPOLATION, deprecate, logging, replace_example_docstring +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionPipelineOutput +from .safety_checker_flax import FlaxStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import jax + >>> import numpy as np + >>> from flax.jax_utils import replicate + >>> from flax.training.common_utils import shard + >>> import PIL + >>> import requests + >>> from io import BytesIO + >>> from diffusers import FlaxStableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained( + ... "xvjiarui/stable-diffusion-2-inpainting" + ... ) + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> prng_seed = jax.random.PRNGKey(0) + >>> num_inference_steps = 50 + + >>> num_samples = jax.device_count() + >>> prompt = num_samples * [prompt] + >>> init_image = num_samples * [init_image] + >>> mask_image = num_samples * [mask_image] + >>> prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs( + ... prompt, init_image, mask_image + ... ) + # shard inputs and rng + + >>> params = replicate(params) + >>> prng_seed = jax.random.split(prng_seed, jax.device_count()) + >>> prompt_ids = shard(prompt_ids) + >>> processed_masked_images = shard(processed_masked_images) + >>> processed_masks = shard(processed_masks) + + >>> images = pipeline( + ... prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True + ... 
).images + >>> images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) + ``` +""" + + +class FlaxStableDiffusionInpaintPipeline(FlaxDiffusionPipeline): + r""" + Flax-based pipeline for text-guided image inpainting using Stable Diffusion. + + + + ๐Ÿงช This is an experimental feature! + + + + This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`FlaxAutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.FlaxCLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`FlaxUNet2DConditionModel`]): + A `FlaxUNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or + [`FlaxDPMSolverMultistepScheduler`]. + safety_checker ([`FlaxStableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: FlaxAutoencoderKL, + text_encoder: FlaxCLIPTextModel, + tokenizer: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + safety_checker: FlaxStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + if safety_checker is None: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
+ ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs( + self, + prompt: Union[str, List[str]], + image: Union[Image.Image, List[Image.Image]], + mask: Union[Image.Image, List[Image.Image]], + ): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if not isinstance(image, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(image, Image.Image): + image = [image] + + if not isinstance(mask, (Image.Image, list)): + raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") + + if isinstance(mask, Image.Image): + mask = [mask] + + processed_images = jnp.concatenate([preprocess_image(img, jnp.float32) for img in image]) + processed_masks = jnp.concatenate([preprocess_mask(m, jnp.float32) for m in mask]) + # processed_masks[processed_masks < 0.5] = 0 + processed_masks = processed_masks.at[processed_masks < 0.5].set(0) + # processed_masks[processed_masks >= 0.5] = 1 + processed_masks = processed_masks.at[processed_masks >= 0.5].set(1) + + processed_masked_images = processed_images * (processed_masks < 0.5) + + text_input = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + return text_input.input_ids, processed_masked_images, processed_masks + + def _get_has_nsfw_concepts(self, features, params): + has_nsfw_concepts = self.safety_checker(features, params) + return has_nsfw_concepts + + def _run_safety_checker(self, images, safety_model_params, jit=False): + # safety_model_params should already be replicated when jit is True + pil_images = [Image.fromarray(image) for image in images] + features = self.feature_extractor(pil_images, return_tensors="np").pixel_values + + if jit: + features = shard(features) 
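            # Illustrative shape note (device and batch counts are assumptions, not
            # values fixed by this pipeline): `shard` from flax.training.common_utils
            # splits the leading batch axis for `pmap`, e.g. features of shape
            # (8, 3, 224, 224) on an 8-device host become (8, 1, 3, 224, 224);
            # `unshard` below merges the device axis back after the pmapped call.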
+ has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) + has_nsfw_concepts = unshard(has_nsfw_concepts) + safety_model_params = unreplicate(safety_model_params) + else: + has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) + + images_was_copied = False + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if not images_was_copied: + images_was_copied = True + images = images.copy() + + images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image + + if any(has_nsfw_concepts): + warnings.warn( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead. Try again with a different prompt and/or seed." + ) + + return images, has_nsfw_concepts + + def _generate( + self, + prompt_ids: jnp.ndarray, + mask: jnp.ndarray, + masked_image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + latents: Optional[jnp.ndarray] = None, + neg_prompt_ids: Optional[jnp.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # get prompt text embeddings + prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] + + # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` + # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` + batch_size = prompt_ids.shape[0] + + max_length = prompt_ids.shape[-1] + + if neg_prompt_ids is None: + uncond_input = self.tokenizer( + [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" + ).input_ids + else: + uncond_input = neg_prompt_ids + negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] + context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) + + latents_shape = ( + batch_size, + self.vae.config.latent_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=self.dtype) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + prng_seed, mask_prng_seed = jax.random.split(prng_seed) + + masked_image_latent_dist = self.vae.apply( + {"params": params["vae"]}, masked_image, method=self.vae.encode + ).latent_dist + masked_image_latents = masked_image_latent_dist.sample(key=mask_prng_seed).transpose((0, 3, 1, 2)) + masked_image_latents = self.vae.config.scaling_factor * masked_image_latents + del mask_prng_seed + + mask = jax.image.resize(mask, (*mask.shape[:-2], *masked_image_latents.shape[-2:]), method="nearest") + + # 8. Check that sizes of mask, masked image and latents match + num_channels_latents = self.vae.config.latent_channels + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + def loop_body(step, args): + latents, mask, masked_image_latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + mask_input = jnp.concatenate([mask] * 2) + masked_image_latents_input = jnp.concatenate([masked_image_latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + # concat latents, mask, masked_image_latents in the channel dimension + latents_input = jnp.concatenate([latents_input, mask_input, masked_image_latents_input], axis=1) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=context, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, mask, masked_image_latents, scheduler_state + + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * params["scheduler"].init_noise_sigma + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, mask, masked_image_latents, scheduler_state = loop_body( + i, (latents, mask, masked_image_latents, scheduler_state) + ) + else: + latents, _, _, _ = jax.lax.fori_loop( + 0, num_inference_steps, loop_body, (latents, mask, masked_image_latents, scheduler_state) + ) + + # scale and decode the image latents with vae + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt_ids: jnp.ndarray, + mask: jnp.ndarray, + masked_image: jnp.ndarray, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + height: Optional[int] = None, + width: Optional[int] = None, + guidance_scale: Union[float, jnp.ndarray] = 7.5, + latents: jnp.ndarray = None, + neg_prompt_ids: jnp.ndarray = None, + return_dict: bool = True, + jit: bool = False, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. 
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + latents (`jnp.ndarray`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + array is generated by sampling using the supplied random `generator`. + jit (`bool`, defaults to `False`): + Whether to run `pmap` versions of the generation and safety scoring functions. + + + + This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a + future release. + + + + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of + a plain tuple. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated images + and the second element is a list of `bool`s indicating whether the corresponding generated image + contains "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + masked_image = jax.image.resize(masked_image, (*masked_image.shape[:-2], height, width), method="bicubic") + mask = jax.image.resize(mask, (*mask.shape[:-2], height, width), method="nearest") + + if isinstance(guidance_scale, float): + # Convert to a tensor so each device gets a copy. Follow the prompt_ids for + # shape information, as they may be sharded (when `jit` is `True`), or not. 
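            # Illustrative sketch (batch and device counts are assumptions): for an
            # unsharded call, prompt_ids has shape (batch, 77) and guidance_scale
            # becomes a (batch,) array; with jit=True the inputs are sharded to
            # (num_devices, batch, 77), so the extra [:, None] below produces a
            # (num_devices, 1) array that pmap can map over its first axis.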
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + if len(prompt_ids.shape) > 2: + # Assume sharded + guidance_scale = guidance_scale[:, None] + + if jit: + images = _p_generate( + self, + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + else: + images = self._generate( + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + if self.safety_checker is not None: + safety_params = params["safety_checker"] + images_uint8_casted = (images * 255).round().astype("uint8") + num_devices, batch_size = images.shape[:2] + + images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) + images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) + images = np.asarray(images) + + # block images + if any(has_nsfw_concept): + for i, is_nsfw in enumerate(has_nsfw_concept): + if is_nsfw: + images[i] = np.asarray(images_uint8_casted[i]) + + images = images.reshape(num_devices, batch_size, height, width, 3) + else: + images = np.asarray(images) + has_nsfw_concept = False + + if not return_dict: + return (images, has_nsfw_concept) + + return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) + + +# Static argnums are pipe, num_inference_steps, height, width. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, 0, 0, None, None, None, 0, 0, 0), + static_broadcasted_argnums=(0, 6, 7, 8), +) +def _p_generate( + pipe, + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, +): + return pipe._generate( + prompt_ids, + mask, + masked_image, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + ) + + +@partial(jax.pmap, static_broadcasted_argnums=(0,)) +def _p_get_has_nsfw_concepts(pipe, features, params): + return pipe._get_has_nsfw_concepts(features, params) + + +def unshard(x: jnp.ndarray): + # einops.rearrange(x, 'd b ... -> (d b) ...') + num_devices, batch_size = x.shape[:2] + rest = x.shape[2:] + return x.reshape(num_devices * batch_size, *rest) + + +def preprocess_image(image, dtype): + w, h = image.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) + image = jnp.array(image).astype(dtype) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + return 2.0 * image - 1.0 + + +def preprocess_mask(mask, dtype): + w, h = mask.size + w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 + mask = mask.resize((w, h)) + mask = jnp.array(mask.convert("L")).astype(dtype) / 255.0 + mask = jnp.expand_dims(mask, axis=(0, 1)) + + return mask diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..2e34dcb83c010c344f5b8463a939d1932ed5f769 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py @@ -0,0 +1,487 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) + + +class OnnxStableDiffusionPipeline(DiffusionPipeline): + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def check_inputs( + self, + prompt: Union[str, List[str]], + height: Optional[int], + width: Optional[int], + callback_steps: int, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = 512, + width: Optional[int] = 512, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[np.random.RandomState] = None, + latents: Optional[np.ndarray] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`PIL.Image.Image` or List[`PIL.Image.Image`] or `torch.Tensor`): + `Image`, or tensor representing an image batch which will be upscaled. * + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + One or a list of [numpy generator(s)](TODO) to make generation deterministic. 
+ latents (`np.ndarray`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if generator is None: + generator = np.random + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
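        # Concrete sketch: with the default guidance_scale = 7.5 the unconditional and
        # text branches are run in one batched forward pass and recombined at each step as
        #   noise_pred = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
        # whereas guidance_scale <= 1.0 skips the unconditional branch entirely.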
+ do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # get the initial random noise unless the user supplied it + latents_dtype = prompt_embeds.dtype + latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8) + if latents is None: + latents = generator.randn(*latents_shape).astype(latents_dtype) + elif latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + latents = latents * np.float64(self.scheduler.init_noise_sigma) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) + latent_model_input = latent_model_input.cpu().numpy() + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds) + noise_pred = noise_pred[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ) + latents = scheduler_output.prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + 
else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + +class StableDiffusionOnnxPipeline(OnnxStableDiffusionPipeline): + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + ): + deprecation_message = "Please use `OnnxStableDiffusionPipeline` instead of `StableDiffusionOnnxPipeline`." + deprecate("StableDiffusionOnnxPipeline", "1.0.0", deprecation_message) + super().__init__( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..c39409886bd9d27faf71d6e343b5e45fa8b6a51c --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py @@ -0,0 +1,549 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import PIL_INTERPOLATION, deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess with 8->64 +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline): + r""" + Pipeline for text-guided image to image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. 
Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. 
Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def check_inputs( + self, + prompt: Union[str, List[str]], + callback_steps: int, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[np.ndarray, PIL.Image.Image] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[np.random.RandomState] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`np.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + strength (`float`, *optional*, defaults to 0.8): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. 
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter will be modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. 
Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if generator is None: + generator = np.random + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + image = preprocess(image).cpu().numpy() + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + latents_dtype = prompt_embeds.dtype + image = image.astype(latents_dtype) + # encode the init image into latents and scale the latents + init_latents = self.vae_encoder(sample=image)[0] + init_latents = 0.18215 * init_latents + + if isinstance(prompt, str): + prompt = [prompt] + if len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {len(prompt)} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = len(prompt) // init_latents.shape[0] + init_latents = np.concatenate([init_latents] * additional_image_per_prompt * num_images_per_prompt, axis=0) + elif len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {len(prompt)} text prompts." + ) + else: + init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) + + # get the original timestep using init_timestep + offset = self.scheduler.config.get("steps_offset", 0) + init_timestep = int(num_inference_steps * strength) + offset + init_timestep = min(init_timestep, num_inference_steps) + + timesteps = self.scheduler.timesteps.numpy()[-init_timestep] + timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) + + # add noise to latents using the timesteps + noise = generator.randn(*init_latents.shape).astype(latents_dtype) + init_latents = self.scheduler.add_noise( + torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps) + ) + init_latents = init_latents.numpy() + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + latents = init_latents + + t_start = max(num_inference_steps - init_timestep + offset, 0) + timesteps = self.scheduler.timesteps[t_start:].numpy() + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) + latent_model_input = latent_model_input.cpu().numpy() + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[ + 0 + ] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ) + latents = scheduler_output.prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + # safety_checker does not support batched inputs yet + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..18d8050826cc3b755f7d42dd1903b5c80f2c6672 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py @@ -0,0 +1,563 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ...utils import PIL_INTERPOLATION, deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +NUM_UNET_INPUT_CHANNELS = 9 +NUM_LATENT_CHANNELS = 4 + + +def prepare_mask_and_masked_image(image, mask, latents_shape): + image = np.array(image.convert("RGB").resize((latents_shape[1] * 8, latents_shape[0] * 8))) + image = image[None].transpose(0, 3, 1, 2) + image = image.astype(np.float32) / 127.5 - 1.0 + + image_mask = np.array(mask.convert("L").resize((latents_shape[1] * 8, latents_shape[0] * 8))) + masked_image = image * (image_mask < 127.5) + + mask = mask.resize((latents_shape[1], latents_shape[0]), PIL_INTERPOLATION["nearest"]) + mask = np.array(mask.convert("L")) + mask = mask.astype(np.float32) / 255.0 + mask = mask[None, None] + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + return mask, masked_image + + +class OnnxStableDiffusionInpaintPipeline(DiffusionPipeline): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. 
+ feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + vae_encoder: OnnxRuntimeModel + vae_decoder: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae_encoder: OnnxRuntimeModel, + vae_decoder: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: CLIPTokenizer, + unet: OnnxRuntimeModel, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + safety_checker: OnnxRuntimeModel, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + logger.info("`OnnxStableDiffusionInpaintPipeline` is experimental and will very likely change in the future.") + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae_encoder=vae_encoder, + vae_decoder=vae_decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt: Union[str, List[str]], + height: Optional[int], + width: Optional[int], + callback_steps: int, + negative_prompt: Optional[str] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + image: PIL.Image.Image, + mask_image: PIL.Image.Image, + height: Optional[int] = 512, + width: Optional[int] = 512, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[np.random.RandomState] = None, + latents: Optional[np.ndarray] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: int = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + latents (`np.ndarray`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if generator is None: + generator = np.random + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
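+ # Descriptive note (added): when guidance is enabled, `_encode_prompt` returns the negative and
+ # positive prompt embeddings concatenated into a single batch, the latents are duplicated before
+ # each UNet call, and the two halves of the prediction are recombined in the denoising loop as
+ # `noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`.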
+ do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + num_channels_latents = NUM_LATENT_CHANNELS + latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8) + latents_dtype = prompt_embeds.dtype + if latents is None: + latents = generator.randn(*latents_shape).astype(latents_dtype) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + # prepare mask and masked_image + mask, masked_image = prepare_mask_and_masked_image(image, mask_image, latents_shape[-2:]) + mask = mask.astype(latents.dtype) + masked_image = masked_image.astype(latents.dtype) + + masked_image_latents = self.vae_encoder(sample=masked_image)[0] + masked_image_latents = 0.18215 * masked_image_latents + + # duplicate mask and masked_image_latents for each generation per prompt + mask = mask.repeat(batch_size * num_images_per_prompt, 0) + masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 0) + + mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + np.concatenate([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + + unet_input_channels = NUM_UNET_INPUT_CHANNELS + if num_channels_latents + num_channels_mask + num_channels_masked_image != unet_input_channels: + raise ValueError( + "Incorrect configuration settings! The config of `pipeline.unet` expects" + f" {unet_input_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + + # set timesteps + self.scheduler.set_timesteps(num_inference_steps) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * np.float64(self.scheduler.init_noise_sigma) + + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + # concat latents, mask, masked_image_latnets in the channel dimension + latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) + latent_model_input = latent_model_input.cpu().numpy() + latent_model_input = np.concatenate([latent_model_input, mask, masked_image_latents], axis=1) + + # predict the noise residual + timestep = np.array([t], dtype=timestep_dtype) + noise_pred = self.unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds)[ + 0 + ] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + scheduler_output = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ) + latents = scheduler_output.prev_sample.numpy() + + # call the callback, if provided + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + latents = 1 / 0.18215 * latents + # image = self.vae_decoder(latent_sample=latents)[0] + # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 + image = np.concatenate( + [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] + ) + + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + # safety_checker does not support batched inputs yet + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py new file mode 100644 index 0000000000000000000000000000000000000000..cd9ec57fb8792a56f7382ff6beef2a925ae897ef --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_upscale.py @@ -0,0 +1,586 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ..onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel +from ..pipeline_utils import DiffusionPipeline +from . import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) + + +def preprocess(image): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 32 + + image = [np.array(i.resize((w, h)))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + + return image + + +class OnnxStableDiffusionUpscalePipeline(DiffusionPipeline): + vae: OnnxRuntimeModel + text_encoder: OnnxRuntimeModel + tokenizer: CLIPTokenizer + unet: OnnxRuntimeModel + low_res_scheduler: DDPMScheduler + scheduler: KarrasDiffusionSchedulers + safety_checker: OnnxRuntimeModel + feature_extractor: CLIPImageProcessor + + _optional_components = ["safety_checker", "feature_extractor"] + _is_onnx = True + + def __init__( + self, + vae: OnnxRuntimeModel, + text_encoder: OnnxRuntimeModel, + tokenizer: Any, + unet: OnnxRuntimeModel, + low_res_scheduler: DDPMScheduler, + scheduler: KarrasDiffusionSchedulers, + safety_checker: Optional[OnnxRuntimeModel] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + max_noise_level: int = 350, + num_latent_channels=4, + num_unet_input_channels=7, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + low_res_scheduler=low_res_scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config( + max_noise_level=max_noise_level, + num_latent_channels=num_latent_channels, + num_unet_input_channels=num_unet_input_channels, + ) + + def check_inputs( + self, + prompt: Union[str, List[str]], + image, + noise_level, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, np.ndarray) + and not isinstance(image, list) + ): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}" + ) + + # verify batch size of prompt and image are same if image is a list or tensor or numpy array + if isinstance(image, (list, np.ndarray)): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if isinstance(image, list): + image_batch_size = len(image) + else: + image_batch_size = image.shape[0] + if batch_size != image_batch_size: + raise ValueError( + f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." + " Please make sure that passed `prompt` matches the batch size of `image`." + ) + + # check noise level + if noise_level > self.config.max_noise_level: + raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): + shape = (batch_size, num_channels_latents, height, width) + if latents is None: + latents = generator.randn(*shape).astype(dtype) + elif latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + + return latents + + def decode_latents(self, latents): + latents = 1 / 0.08333 * latents + image = self.vae(latent_sample=latents)[0] + image = np.clip(image / 2 + 0.5, 0, 1) + image = image.transpose((0, 2, 3, 1)) + return image + + def _encode_prompt( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: Optional[int], + do_classifier_free_guidance: bool, + negative_prompt: Optional[str], + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. 
Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids + + if not np.array_equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] + + prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] * batch_size + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] + + if do_classifier_free_guidance: + negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def __call__( + self, + prompt: Union[str, List[str]], + image: Union[np.ndarray, PIL.Image.Image, List[PIL.Image.Image]], + num_inference_steps: int = 75, + guidance_scale: float = 9.0, + noise_level: int = 20, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[np.random.RandomState, List[np.random.RandomState]]] = None, + latents: Optional[np.ndarray] = None, + prompt_embeds: Optional[np.ndarray] = None, + negative_prompt_embeds: Optional[np.ndarray] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, np.ndarray], None]] = None, + callback_steps: Optional[int] = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`np.ndarray` or `PIL.Image.Image`): + `Image`, or tensor representing an image batch, that will be used as the starting point for the + process. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter will be modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + noise_level (`float`, defaults to 0.2): + Deteremines the amount of noise to add to the initial image before performing upscaling. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`np.random.RandomState`, *optional*): + A np.random.RandomState to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`np.ndarray`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`np.ndarray`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + image, + noise_level, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if generator is None: + generator = np.random + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + prompt_embeds = self._encode_prompt( + prompt, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + latents_dtype = prompt_embeds.dtype + image = preprocess(image).cpu().numpy() + height, width = image.shape[2:] + + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + self.config.num_latent_channels, + height, + width, + latents_dtype, + generator, + ) + image = image.astype(latents_dtype) + + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # Scale the initial noise by the standard deviation required by the scheduler + latents = latents * np.float64(self.scheduler.init_noise_sigma) + + # 5. Add noise to image + noise_level = np.array([noise_level]).astype(np.int64) + noise = generator.randn(*image.shape).astype(latents_dtype) + + image = self.low_res_scheduler.add_noise( + torch.from_numpy(image), torch.from_numpy(noise), torch.from_numpy(noise_level) + ) + image = image.numpy() + + batch_multiplier = 2 if do_classifier_free_guidance else 1 + image = np.concatenate([image] * batch_multiplier * num_images_per_prompt) + noise_level = np.concatenate([noise_level] * image.shape[0]) + + # 7. 
Check that sizes of image and latents match + num_channels_image = image.shape[1] + if self.config.num_latent_channels + num_channels_image != self.config.num_unet_input_channels: + raise ValueError( + "Incorrect configuration settings! The config of `pipeline.unet` expects" + f" {self.config.num_unet_input_channels} but received `num_channels_latents`: {self.config.num_latent_channels} +" + f" `num_channels_image`: {num_channels_image} " + f" = {self.config.num_latent_channels + num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + timestep_dtype = next( + (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" + ) + timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = np.concatenate([latent_model_input, image], axis=1) + + # timestep to tensor + timestep = np.array([t], dtype=timestep_dtype) + + # predict the noise residual + noise_pred = self.unet( + sample=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + class_labels=noise_level, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs + ).prev_sample + latents = latents.numpy() + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 10. 
Post-processing + image = self.decode_latents(latents) + + if self.safety_checker is not None: + safety_checker_input = self.feature_extractor( + self.numpy_to_pil(image), return_tensors="np" + ).pixel_values.astype(image.dtype) + + images, has_nsfw_concept = [], [] + for i in range(image.shape[0]): + image_i, has_nsfw_concept_i = self.safety_checker( + clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] + ) + images.append(image_i) + has_nsfw_concept.append(has_nsfw_concept_i[0]) + image = np.concatenate(images) + else: + has_nsfw_concept = None + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_output.py b/diffusers3/pipelines/stable_diffusion/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb9b1a1412d96b69144a4c2e960dcc8b75a615c --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_output.py @@ -0,0 +1,45 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput, is_flax_available + + +@dataclass +class StableDiffusionPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +if is_flax_available(): + import flax + + @flax.struct.dataclass + class FlaxStableDiffusionPipelineOutput(BaseOutput): + """ + Output class for Flax-based Stable Diffusion pipelines. + + Args: + images (`np.ndarray`): + Denoised images of array shape of `(batch_size, height, width, num_channels)`. + nsfw_content_detected (`List[bool]`): + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content + or `None` if safety checking could not be performed. + """ + + images: np.ndarray + nsfw_content_detected: List[bool] diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..a2bbec7b3c3f3428ddf60d7b0ec8ce2d490a3047 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion.py @@ -0,0 +1,1072 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
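+ # Module overview (added): this file implements the PyTorch `StableDiffusionPipeline` for
+ # text-to-image generation, along with the `rescale_noise_cfg` helper (guidance rescaling from
+ # "Common Diffusion Noise Schedules and Sample Steps are Flawed") and the `retrieve_timesteps`
+ # utility for custom timestep/sigma schedules.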
+import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPipeline + + >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. 
+ sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. 
Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. + prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
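+    # In the denoising loop below this reduces to the standard CFG update,
+    # roughly:
+    #
+    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+    #
+    # so at `guidance_scale = 1` the update collapses to the text-conditioned
+    # prediction alone, which is why `do_classifier_free_guidance` below only
+    # turns on the unconditional/conditional batching for scales greater than 1.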
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. 
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) + else None + ) + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py new file mode 100644 index 0000000000000000000000000000000000000000..7801b0d01dffe59712da18866eb579d5d9818a1e --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py @@ -0,0 +1,875 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
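+# A minimal usage sketch for the depth-conditioned pipeline defined in this
+# module (the checkpoint id, file names, and prompt are assumptions for
+# illustration only):
+#
+#     import torch
+#     from PIL import Image
+#
+#     pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
+#         "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
+#     ).to("cuda")
+#     init_image = Image.open("room.png").convert("RGB")
+#     result = pipe(prompt="a cozy wooden cabin interior", image=init_image, strength=0.7)
+#     result.images[0].save("cabin.png")
+#
+# `strength` must lie in [0.0, 1.0] (see `check_inputs` below); if no
+# `depth_map` is supplied, the registered DPT depth estimator produces one
+# from `image`.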
+ +import contextlib +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPTextModel, CLIPTokenizer, DPTForDepthEstimation, DPTImageProcessor + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionDepth2ImgPipeline(DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin): + r""" + Pipeline for text-guided depth-based image-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "depth_mask"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + depth_estimator: DPTForDepthEstimation, + feature_extractor: DPTImageProcessor, + ): + super().__init__() + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + depth_estimator=depth_estimator, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
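+ # The checker screens the decoded images using the extracted CLIP inputs and returns
+ # the images together with a per-image `has_nsfw_concept` flag.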
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + def prepare_depth_map(self, image, depth_map, batch_size, do_classifier_free_guidance, dtype, device): + if isinstance(image, PIL.Image.Image): + image = [image] + else: + image = list(image) + + if isinstance(image[0], PIL.Image.Image): + width, height = image[0].size + elif isinstance(image[0], np.ndarray): + width, height = image[0].shape[:-1] + else: + height, width = image[0].shape[-2:] + + if depth_map is None: + pixel_values = self.feature_extractor(images=image, return_tensors="pt").pixel_values + pixel_values = pixel_values.to(device=device, dtype=dtype) + # The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16. + # So we use `torch.autocast` here for half precision inference. + if torch.backends.mps.is_available(): + autocast_ctx = contextlib.nullcontext() + logger.warning( + "The DPT-Hybrid model uses batch-norm layers which are not compatible with fp16, but autocast is not yet supported on MPS." + ) + else: + autocast_ctx = torch.autocast(device.type, dtype=dtype) + + with autocast_ctx: + depth_map = self.depth_estimator(pixel_values).predicted_depth + else: + depth_map = depth_map.to(device=device, dtype=dtype) + + depth_map = torch.nn.functional.interpolate( + depth_map.unsqueeze(1), + size=(height // self.vae_scale_factor, width // self.vae_scale_factor), + mode="bicubic", + align_corners=False, + ) + + depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True) + depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True) + depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0 + depth_map = depth_map.to(dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if depth_map.shape[0] < batch_size: + repeat_by = batch_size // depth_map.shape[0] + depth_map = depth_map.repeat(repeat_by, 1, 1, 1) + + depth_map = torch.cat([depth_map] * 2) if do_classifier_free_guidance else depth_map + return depth_map + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
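+ # Concretely, the denoising loop combines the two noise predictions as
+ #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ # so `guidance_scale = 1` reduces to the text-conditional prediction alone, which is why
+ # the property below only enables guidance for values greater than 1.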
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + depth_map: Optional[torch.Tensor] = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be used as the starting point. Can accept image + latents as `image` only if `depth_map` is not `None`. + depth_map (`torch.Tensor`, *optional*): + Depth prediction to be used as additional conditioning for the image generation process. If not + defined, it automatically predicts the depth with `self.depth_estimator`. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + ```py + >>> import torch + >>> import requests + >>> from PIL import Image + + >>> from diffusers import StableDiffusionDepth2ImgPipeline + + >>> pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-2-depth", + ... torch_dtype=torch.float16, + ... ) + >>> pipe.to("cuda") + + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> init_image = Image.open(requests.get(url, stream=True).raw) + >>> prompt = "two tigers" + >>> n_prompt = "bad, deformed, ugly, bad anotomy" + >>> image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. 
+ """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 1. Check inputs + self.check_inputs( + prompt, + strength, + callback_steps, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare depth mask + depth_mask = self.prepare_depth_map( + image, + depth_map, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + prompt_embeds.dtype, + device, + ) + + # 5. Preprocess image + image = self.image_processor.preprocess(image) + + # 6. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 7. Prepare latent variables + latents = self.prepare_latents( + image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, depth_mask], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=self.cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + depth_mask = callback_outputs.pop("depth_mask", depth_mask) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py new file mode 100644 index 0000000000000000000000000000000000000000..93a8bd160318400cddbbdec46465fcccdc506029 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py @@ -0,0 +1,425 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
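+
+# This pipeline conditions generation on an input image instead of a text prompt: the image
+# is encoded with a CLIP vision encoder in `_encode_image`, the embeddings are duplicated per
+# requested output, and zero embeddings act as the unconditional branch when classifier-free
+# guidance is enabled (guidance_scale > 1). Random latents are then denoised by the UNet
+# conditioned on these image embeddings.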
+ +import inspect +from typing import Callable, List, Optional, Union + +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import VaeImageProcessor +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class StableDiffusionImageVariationPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline to generate image variations from an input image using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + # TODO: feature_extractor is required to encode images (if they are in PIL format), + # we should give a descriptive message if the pipeline doesn't have one. + _optional_components = ["safety_checker"] + model_cpu_offload_seq = "image_encoder->unet->vae" + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + image_encoder: CLIPVisionModelWithProjection, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + image_encoder=image_encoder, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + image_embeddings = image_embeddings.unsqueeze(1) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(image_embeddings) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) + + return image_embeddings + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs(self, image, height, width, callback_steps): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.Tensor`): + Image or images to guide image generation. If you provide a tensor, it needs to be compatible with + [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + + Examples: + + ```py + from diffusers import StableDiffusionImageVariationPipeline + from PIL import Image + from io import BytesIO + import requests + + pipe = StableDiffusionImageVariationPipeline.from_pretrained( + "lambdalabs/sd-image-variations-diffusers", revision="v2.0" + ) + pipe = pipe.to("cuda") + + url = "https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200" + + response = requests.get(url) + image = Image.open(BytesIO(response.content)).convert("RGB") + + out = pipe(image, num_images_per_prompt=3, guidance_scale=15) + out["images"][0].save("result.jpg") + ``` + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(image, height, width, callback_steps) + + # 2. Define call parameters + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input image + image_embeddings = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + self.maybe_free_model_hooks() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..424f0e3c56e2c1a4e73bc4e31dc34cdf4c4f78e9 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py @@ -0,0 +1,1145 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
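+
+# Note on `strength` (illustrative arithmetic from this pipeline's `get_timesteps`): the init
+# image is encoded to VAE latents, noised to an intermediate timestep, and only the remaining
+# steps are denoised. With num_inference_steps=50 and strength=0.8, int(50 * 0.8) = 40 steps
+# are run and the first 10 are skipped (assuming a first-order scheduler), so lower strength
+# preserves more of the input image.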
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionImg2ImgPipeline + + >>> device = "cuda" + >>> model_id_or_path = "runwayml/stable-diffusion-v1-5" + >>> pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> prompt = "A fantasy landscape, trending on artstation" + + >>> images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images + >>> images[0].save("fantasy_landscape.png") + ``` +""" + + +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) 
instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." 
+ ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
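+ # Concretely, the denoising loop below applies guidance as + # noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond), + # so guidance is effectively disabled when guidance_scale <= 1; when the UNet was distilled with + # time_cond_proj_dim set, the guidance weight is instead passed to the UNet via timestep_cond.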
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + strength: float = 0.8, + num_inference_steps: Optional[int] = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: Optional[float] = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: Optional[float] = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a + list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image + latents as `image`, but if passing latents directly it is not encoded again. + strength (`float`, *optional*, defaults to 0.8): + Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a + starting point and more noise is added the higher the `strength`. The number of denoising steps depends + on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising + process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 + essentially ignores `image`. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. This parameter is modulated by `strength`. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. 
If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. 
+ callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. set timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. 
Prepare latent variables + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return 
StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c5b11d34cfd39fb62b438022525f5ce41c8066 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py @@ -0,0 +1,1338 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AsymmetricAutoencoderKL, AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. 
+ num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image inpainting using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). 
+ tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "mask", "masked_image_latents"] + + def __init__( + self, + vae: Union[AutoencoderKL, AsymmetricAutoencoderKL], + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration" + " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" + " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" + " Hub, it would be very nice if you could open a Pull request for the" + " `scheduler/scheduler_config.json` file" + ) + deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["skip_prk_steps"] = True + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4 + if unet.config.in_channels != 9: + logger.info(f"You have loaded a UNet with {unet.config.in_channels} input channels which.") + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, generator, eta):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
+        # and should be between [0, 1]
+
+        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        extra_step_kwargs = {}
+        if accepts_eta:
+            extra_step_kwargs["eta"] = eta
+
+        # check if the scheduler accepts generator
+        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        if accepts_generator:
+            extra_step_kwargs["generator"] = generator
+        return extra_step_kwargs
+
+    def check_inputs(
+        self,
+        prompt,
+        image,
+        mask_image,
+        height,
+        width,
+        strength,
+        callback_steps,
+        output_type,
+        negative_prompt=None,
+        prompt_embeds=None,
+        negative_prompt_embeds=None,
+        ip_adapter_image=None,
+        ip_adapter_image_embeds=None,
+        callback_on_step_end_tensor_inputs=None,
+        padding_mask_crop=None,
+    ):
+        if strength < 0 or strength > 1:
+            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
+
+        if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0:
+            raise ValueError(
+                f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}."
+            )
+
+        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
+            raise ValueError(
+                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+                f" {type(callback_steps)}."
+            )
+
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+
+        if prompt is not None and prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                " only forward one of the two."
+            )
+        elif prompt is None and prompt_embeds is None:
+            raise ValueError(
+                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+            )
+        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+        if negative_prompt is not None and negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+
+        if prompt_embeds is not None and negative_prompt_embeds is not None:
+            if prompt_embeds.shape != negative_prompt_embeds.shape:
+                raise ValueError(
+                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+                    f" {negative_prompt_embeds.shape}."
+                )
+        if padding_mask_crop is not None:
+            if not isinstance(image, PIL.Image.Image):
+                raise ValueError(
+                    f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}."
+ ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + else: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
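+    # Concretely, the denoising loop below combines the two noise predictions as
+    #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond),
+    # so `guidance_scale = 1` reduces to the text-conditioned prediction alone.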
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: torch.Tensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 1.0, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be inpainted (which parts of the image to + be masked out with `mask_image` and repainted according to `prompt`). For both numpy array and pytorch + tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the + expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the + expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but + if passing latents directly it is not encoded again. + mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask + are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a + single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one + color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B, + H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W, + 1)`, or `(H, W)`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. 
+            padding_mask_crop (`int`, *optional*, defaults to `None`):
+                The size of the margin in the crop to be applied to the image and masking. If `None`, no crop is
+                applied to the image and mask_image. If `padding_mask_crop` is not `None`, it will first find a
+                rectangular region with the same aspect ratio as the image that contains all of the masked area,
+                and then expand that area based on `padding_mask_crop`. The image and mask_image will then be
+                cropped based on the expanded area before resizing to the original image size for inpainting. This
+                is useful when the masked area is small while the image is large and contains information
+                irrelevant to inpainting, such as the background.
+            strength (`float`, *optional*, defaults to 1.0):
+                Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as
+                a starting point and more noise is added the higher the `strength`. The number of denoising steps
+                depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the
+                denoising process runs for the full number of iterations specified in `num_inference_steps`. A value
+                of 1 essentially ignores `image`.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference. This parameter is modulated by `strength`.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps`
+                argument in their `set_timesteps` method. If not defined, the default behavior when
+                `num_inference_steps` is passed will be used. Must be in descending order.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used.
+            guidance_scale (`float`, *optional*, defaults to 7.5):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only
+                applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                latents tensor is generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInpaintPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = download_image(img_url).resize((512, 512)) + >>> mask_image = download_image(mask_url).resize((512, 512)) + + >>> pipe = StableDiffusionInpaintPipeline.from_pretrained( + ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs + self.check_inputs( + prompt, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. 
set timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps=num_inference_steps, strength=strength, device=device + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. Preprocess mask and image + + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask_condition = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is None: + masked_image = init_image * (mask_condition < 0.5) + else: + masked_image = masked_image_latents + + mask, masked_image_latents = self.prepare_mask_latents( + mask_condition, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." 
+ ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 9.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 10. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + mask = callback_outputs.pop("mask", mask) + masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, 
"order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + condition_kwargs = {} + if isinstance(self.vae, AsymmetricAutoencoderKL): + init_image = init_image.to(device=device, dtype=masked_image_latents.dtype) + init_image_condition = init_image.clone() + init_image = self._encode_vae_image(init_image, generator=generator) + mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype) + condition_kwargs = {"image": init_image_condition, "mask": mask_condition} + image = self.vae.decode( + latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs + )[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py new file mode 100644 index 0000000000000000000000000000000000000000..fd89b195c77885aed4831d67bfe8a8383e0593ec --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py @@ -0,0 +1,906 @@ +# Copyright 2024 The InstructPix2Pix Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import PIL_INTERPOLATION, deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . 
import StableDiffusionPipelineOutput +from .safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class StableDiffusionInstructPix2PixPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for pixel-level image editing by following text instructions (based on Stable Diffusion). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "image_latents"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 100, + guidance_scale: float = 7.5, + image_guidance_scale: float = 1.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept + image latents as `image`, but if passing latents directly it is not encoded again. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + image_guidance_scale (`float`, *optional*, defaults to 1.5): + Push the generated image towards the initial `image`. Image guidance scale is enabled by setting + `image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely + linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a + value of at least `1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionInstructPix2PixPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" + + >>> image = download_image(img_url).resize((512, 512)) + + >>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + ... "timbrooks/instruct-pix2pix", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> prompt = "make the mountains snowy" + >>> image = pipe(prompt=prompt, image=image).images[0] + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Check inputs + self.check_inputs( + prompt, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + self._guidance_scale = guidance_scale + self._image_guidance_scale = image_guidance_scale + + device = self._execution_device + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 1. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 2. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + # 3. Preprocess image + image = self.image_processor.preprocess(image) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare Image latents + image_latents = self.prepare_image_latents( + image, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + self.do_classifier_free_guidance, + ) + + height, width = image_latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Check that shapes of latents and image match the UNet channels + num_channels_image = image_latents.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Expand the latents if we are doing classifier free guidance. + # The latents are expanded 3 times because for pix2pix the guidance\ + # is applied for both the text and the input image. + latent_model_input = torch.cat([latents] * 3) if self.do_classifier_free_guidance else latents + + # concat latents, image_latents in the channel dimension + scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) + + # predict the noise residual + noise_pred = self.unet( + scaled_latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) + noise_pred = ( + noise_pred_uncond + + self.guidance_scale * (noise_pred_text - noise_pred_image) + + self.image_guidance_scale * (noise_pred_image - noise_pred_uncond) + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + image_latents = callback_outputs.pop("image_latents", image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + 
return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_ prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + else: + prompt_embeds_dtype = self.unet.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and 
negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance, we need to do two forward passes. 
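+            # (descriptive note: in this pix2pix-style pipeline the effective batch is tripled rather than doubled, matching the torch.cat([latents] * 3) expansion in __call__)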
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + # pix2pix has two negative embeddings, and unlike in other pipelines latents are ordered [prompt_embeds, negative_prompt_embeds, negative_prompt_embeds] + prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat( + [single_image_embeds, single_negative_image_embeds, single_negative_image_embeds] + ) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + repeat_dims = [1] + image_embeds = [] + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + ( + single_image_embeds, + single_negative_image_embeds, + single_negative_image_embeds, + ) = single_image_embeds.chunk(3) + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + single_negative_image_embeds = single_negative_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) + ) + single_image_embeds = torch.cat( + [single_image_embeds, single_negative_image_embeds, single_negative_image_embeds] + ) + else: + single_image_embeds = single_image_embeds.repeat( + num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) + ) + image_embeds.append(single_image_embeds) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents( + self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + image_latents = image + else: + image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax") + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + return image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def image_guidance_scale(self): + return self._image_guidance_scale + + @property + def num_timesteps(self): + return self._num_timesteps + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self.guidance_scale > 1.0 and self.image_guidance_scale >= 1.0 diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py new file mode 100644 index 0000000000000000000000000000000000000000..ffe02ae679e5d60f52ff6d46cbd983488096285d --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py @@ -0,0 +1,655 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from transformers import CLIPTextModel, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...schedulers import EulerDiscreteScheduler +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.preprocess +def preprocess(image): + warnings.warn( + "The preprocess method is deprecated and will be removed in a future version. 
Please" + " use VaeImageProcessor.preprocess instead", + FutureWarning, + ) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + + image = [np.array(i.resize((w, h)))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionLatentUpscalePipeline(DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin): + r""" + Pipeline for upscaling Stable Diffusion output image resolution by a factor of 2. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A [`EulerDiscreteScheduler`] to be used in combination with `unet` to denoise the encoded image latents. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: EulerDiscreteScheduler, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample="bicubic") + + def _encode_prompt( + self, + prompt, + device, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + device=device, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + **kwargs, + ) + + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds]) + + return prompt_embeds, pooled_prompt_embeds + + def encode_prompt( + self, + prompt, + device, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `list(int)`): + prompt to be encoded + device: (`torch.device`): + torch device + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. 
+ """ + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None or pooled_prompt_embeds is None: + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_length=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + text_encoder_out = self.text_encoder( + text_input_ids.to(device), + output_hidden_states=True, + ) + prompt_embeds = text_encoder_out.hidden_states[-1] + pooled_prompt_embeds = text_encoder_out.pooler_output + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + if negative_prompt_embeds is None or negative_pooled_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_length=True, + return_tensors="pt", + ) + + uncond_encoder_out = self.text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + + negative_prompt_embeds = uncond_encoder_out.hidden_states[-1] + negative_pooled_prompt_embeds = uncond_encoder_out.pooler_output + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + image, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ): + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and not isinstance(prompt, str) and not isinstance(prompt, list): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, np.ndarray) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or `list` but is {type(image)}" + ) + + # verify batch size of prompt and image are same if image is a list or tensor + if isinstance(image, (list, torch.Tensor)): + if prompt is not None: + if isinstance(prompt, str): + batch_size = 1 + else: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if isinstance(image, list): + image_batch_size = len(image) + else: + image_batch_size = image.shape[0] if image.ndim == 4 else 1 + if batch_size != image_batch_size: + raise ValueError( + f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." + " Please make sure that passed `prompt` matches the batch size of `image`." 
+ ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height, width) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 75, + guidance_scale: float = 9.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image upscaling. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be upscaled. If it's a tensor, it can be either a + latent output from a Stable Diffusion model or an image tensor in the range `[-1, 1]`. It is considered + a `latent` if `image.shape[1]` is `4`; otherwise, it is considered to be an image representation and + encoded using this pipeline's `vae` encoder. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Examples: + ```py + >>> from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline + >>> import torch + + + >>> pipeline = StableDiffusionPipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 + ... ) + >>> pipeline.to("cuda") + + >>> model_id = "stabilityai/sd-x2-latent-upscaler" + >>> upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16) + >>> upscaler.to("cuda") + + >>> prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" + >>> generator = torch.manual_seed(33) + + >>> low_res_latents = pipeline(prompt, generator=generator, output_type="latent").images + + >>> with torch.no_grad(): + ... image = pipeline.decode_latents(low_res_latents) + >>> image = pipeline.numpy_to_pil(image)[0] + + >>> image.save("../images/a1.png") + + >>> upscaled_image = upscaler( + ... prompt=prompt, + ... image=low_res_latents, + ... num_inference_steps=20, + ... guidance_scale=0, + ... generator=generator, + ... ).images[0] + + >>> upscaled_image.save("../images/a2.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + image, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None: + batch_size = 1 if isinstance(prompt, str) else len(prompt) + else: + batch_size = prompt_embeds.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + if guidance_scale == 0: + prompt = [""] * batch_size + + # 3. 
Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt, + device, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + image = image.to(dtype=prompt_embeds.dtype, device=device) + if image.shape[1] == 3: + # encode image if not in latent-space yet + image = retrieve_latents(self.vae.encode(image), generator=generator) * self.vae.config.scaling_factor + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + batch_multiplier = 2 if do_classifier_free_guidance else 1 + image = image[None, :] if image.ndim == 3 else image + image = torch.cat([image] * batch_multiplier) + + # 5. Add noise to image (set to be 0): + # (see below notes from the author): + # "the This step theoretically can make the model work better on out-of-distribution inputs, but mostly just seems to make it match the input less, so it's turned off by default." + noise_level = torch.tensor([0.0], dtype=torch.float32, device=device) + noise_level = torch.cat([noise_level] * image.shape[0]) + inv_noise_level = (noise_level**2 + 1) ** (-0.5) + + image_cond = F.interpolate(image, scale_factor=2, mode="nearest") * inv_noise_level[:, None, None, None] + image_cond = image_cond.to(prompt_embeds.dtype) + + noise_level_embed = torch.cat( + [ + torch.ones(pooled_prompt_embeds.shape[0], 64, dtype=pooled_prompt_embeds.dtype, device=device), + torch.zeros(pooled_prompt_embeds.shape[0], 64, dtype=pooled_prompt_embeds.dtype, device=device), + ], + dim=1, + ) + + timestep_condition = torch.cat([noise_level_embed, pooled_prompt_embeds], dim=1) + + # 6. Prepare latent variables + height, width = image.shape[2:] + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size, + num_channels_latents, + height * 2, # 2x upscale + width * 2, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Check that sizes of image and latents match + num_channels_image = image.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 9. Denoising loop + num_warmup_steps = 0 + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + sigma = self.scheduler.sigmas[i] + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + scaled_model_input = torch.cat([scaled_model_input, image_cond], dim=1) + # preconditioning parameter based on Karras et al. 
(2022) (table 1) + timestep = torch.log(sigma) * 0.25 + + noise_pred = self.unet( + scaled_model_input, + timestep, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_condition, + ).sample + + # in original repo, the output contains a variance channel that's not used + noise_pred = noise_pred[:, :-1] + + # apply preconditioning, based on table 1 in Karras et al. (2022) + inv_sigma = 1 / (sigma**2 + 1) + noise_pred = inv_sigma * latent_model_input + self.scheduler.scale_model_input(sigma, t) * noise_pred + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py new file mode 100644 index 0000000000000000000000000000000000000000..4cbbe17531ef7565a99ff2ce201193b3068443c8 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py @@ -0,0 +1,809 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDPMScheduler, KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . 
import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def preprocess(image): + warnings.warn( + "The preprocess method is deprecated and will be removed in a future version. Please" + " use VaeImageProcessor.preprocess instead", + FutureWarning, + ) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 64 for x in (w, h)) # resize to integer multiple of 64 + + image = [np.array(i.resize((w, h)))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +class StableDiffusionUpscalePipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-guided image super-resolution using Stable Diffusion 2. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + low_res_scheduler ([`SchedulerMixin`]): + A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of + [`DDPMScheduler`]. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["watermarker", "safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + low_res_scheduler: DDPMScheduler, + scheduler: KarrasDiffusionSchedulers, + safety_checker: Optional[Any] = None, + feature_extractor: Optional[CLIPImageProcessor] = None, + watermarker: Optional[Any] = None, + max_noise_level: int = 350, + ): + super().__init__() + + if hasattr( + vae, "config" + ): # check if vae has a config attribute `scaling_factor` and if it is set to 0.08333, else set it to 0.08333 and deprecate + is_vae_scaling_factor_set_to_0_08333 = ( + hasattr(vae.config, "scaling_factor") and vae.config.scaling_factor == 0.08333 + ) + if not is_vae_scaling_factor_set_to_0_08333: + deprecation_message = ( + "The configuration file of the vae does not contain `scaling_factor` or it is set to" + f" {vae.config.scaling_factor}, which seems highly unlikely. If your checkpoint is a fine-tuned" + " version of `stabilityai/stable-diffusion-x4-upscaler` you should change 'scaling_factor' to" + " 0.08333 Please make sure to update the config accordingly, as not doing so might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging" + " Face Hub, it would be very nice if you could open a Pull Request for the `vae/config.json` file" + ) + deprecate("wrong scaling_factor", "1.0.0", deprecation_message, standard_warn=False) + vae.register_to_config(scaling_factor=0.08333) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + safety_checker=safety_checker, + watermarker=watermarker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, resample="bicubic") + self.register_to_config(max_noise_level=max_noise_level) + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is not None: + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, nsfw_detected, watermark_detected = self.safety_checker( + images=image, + clip_input=safety_checker_input.pixel_values.to(dtype=dtype), + ) + else: + nsfw_detected = None + watermark_detected = None + + if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None: + self.unet_offload_hook.offload() + + return image, nsfw_detected, watermark_detected + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + image, + noise_level, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, np.ndarray) + and not isinstance(image, list) + ): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}" + ) + + # verify batch size of prompt and image are same if image is a list or tensor or numpy array + if isinstance(image, (list, np.ndarray, torch.Tensor)): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if isinstance(image, list): + image_batch_size = len(image) + else: + image_batch_size = image.shape[0] + if batch_size != image_batch_size: + raise ValueError( + f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}." + " Please make sure that passed `prompt` matches the batch size of `image`." + ) + + # check noise level + if noise_level > self.config.max_noise_level: + raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = (batch_size, num_channels_latents, height, width) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: PipelineImageInput = None, + num_inference_steps: int = 75, + guidance_scale: float = 9.0, + noise_level: int = 20, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. 
If not defined, you need to pass `prompt_embeds`. + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image` or tensor representing an image batch to be upscaled. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ Examples: + ```py + >>> import requests + >>> from PIL import Image + >>> from io import BytesIO + >>> from diffusers import StableDiffusionUpscalePipeline + >>> import torch + + >>> # load model and scheduler + >>> model_id = "stabilityai/stable-diffusion-x4-upscaler" + >>> pipeline = StableDiffusionUpscalePipeline.from_pretrained( + ... model_id, variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipeline = pipeline.to("cuda") + + >>> # let's download an image + >>> url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png" + >>> response = requests.get(url) + >>> low_res_img = Image.open(BytesIO(response.content)).convert("RGB") + >>> low_res_img = low_res_img.resize((128, 128)) + >>> prompt = "a white cat" + + >>> upscaled_image = pipeline(prompt=prompt, image=low_res_img).images[0] + >>> upscaled_image.save("upsampled_cat.png") + ``` + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + image, + noise_level, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + image = image.to(dtype=prompt_embeds.dtype, device=device) + + # 5. set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Add noise to image + noise_level = torch.tensor([noise_level], dtype=torch.long, device=device) + noise = randn_tensor(image.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) + image = self.low_res_scheduler.add_noise(image, noise, noise_level) + + batch_multiplier = 2 if do_classifier_free_guidance else 1 + image = torch.cat([image] * batch_multiplier * num_images_per_prompt) + noise_level = torch.cat([noise_level] * image.shape[0]) + + # 6. Prepare latent variables + height, width = image.shape[2:] + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 7. Check that sizes of image and latents match + num_channels_image = image.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents+num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + latent_model_input = torch.cat([latent_model_input, image], dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=noise_level, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + + # Ensure latents are always the same type as the VAE + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + image, has_nsfw_concept, _ = 
self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # 11. Apply watermark + if output_type == "pil" and self.watermarker is not None: + image = self.watermarker.apply_watermark(image) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_unclip.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_unclip.py new file mode 100644 index 0000000000000000000000000000000000000000..41811f8f2c0ee5ee538c14a174f9e42143ede5e6 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_unclip.py @@ -0,0 +1,940 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer +from transformers.models.clip.modeling_clip import CLIPTextModelOutput + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel +from ...models.embeddings import get_timestep_embedding +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin +from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableUnCLIPPipeline + + >>> pipe = StableUnCLIPPipeline.from_pretrained( + ... "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16 + ... ) # TODO update model path + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> images = pipe(prompt).images + >>> images[0].save("astronaut_horse.png") + ``` +""" + + +class StableUnCLIPPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + """ + Pipeline for text-to-image generation using stable unCLIP. + + This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + prior_tokenizer ([`CLIPTokenizer`]): + A [`CLIPTokenizer`]. + prior_text_encoder ([`CLIPTextModelWithProjection`]): + Frozen [`CLIPTextModelWithProjection`] text-encoder. + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + prior_scheduler ([`KarrasDiffusionSchedulers`]): + Scheduler used in the prior denoising process. + image_normalizer ([`StableUnCLIPImageNormalizer`]): + Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image + embeddings after the noise has been applied. + image_noising_scheduler ([`KarrasDiffusionSchedulers`]): + Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined + by the `noise_level`. + tokenizer ([`CLIPTokenizer`]): + A [`CLIPTokenizer`]. + text_encoder ([`CLIPTextModel`]): + Frozen [`CLIPTextModel`] text-encoder. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] to denoise the encoded image latents. + scheduler ([`KarrasDiffusionSchedulers`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. 
+ """ + + _exclude_from_cpu_offload = ["prior", "image_normalizer"] + model_cpu_offload_seq = "text_encoder->prior_text_encoder->unet->vae" + + # prior components + prior_tokenizer: CLIPTokenizer + prior_text_encoder: CLIPTextModelWithProjection + prior: PriorTransformer + prior_scheduler: KarrasDiffusionSchedulers + + # image noising components + image_normalizer: StableUnCLIPImageNormalizer + image_noising_scheduler: KarrasDiffusionSchedulers + + # regular denoising components + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModel + unet: UNet2DConditionModel + scheduler: KarrasDiffusionSchedulers + + vae: AutoencoderKL + + def __init__( + self, + # prior components + prior_tokenizer: CLIPTokenizer, + prior_text_encoder: CLIPTextModelWithProjection, + prior: PriorTransformer, + prior_scheduler: KarrasDiffusionSchedulers, + # image noising components + image_normalizer: StableUnCLIPImageNormalizer, + image_noising_scheduler: KarrasDiffusionSchedulers, + # regular denoising components + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModelWithProjection, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + # vae + vae: AutoencoderKL, + ): + super().__init__() + + self.register_modules( + prior_tokenizer=prior_tokenizer, + prior_text_encoder=prior_text_encoder, + prior=prior, + prior_scheduler=prior_scheduler, + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + vae=vae, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder + def _encode_prior_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + ): + if text_model_output is None: + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.prior_tokenizer( + prompt, + padding="max_length", + max_length=self.prior_tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.prior_tokenizer.batch_decode( + untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.prior_tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length] + + prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) + + prompt_embeds = prior_text_encoder_output.text_embeds + text_enc_hid_states = prior_text_encoder_output.last_hidden_state + + else: + batch_size = text_model_output[0].shape[0] + prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] + text_mask = text_attention_mask + + prompt_embeds = 
prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + uncond_input = self.prior_tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.prior_tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder( + uncond_input.input_ids.to(device) + ) + + negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds + uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_enc_hid_states.shape[1] + uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) + uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_enc_hid_states, text_mask + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs with prepare_extra_step_kwargs->prepare_prior_extra_step_kwargs, scheduler->prior_scheduler + def prepare_prior_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the prior_scheduler step, since not all prior_schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other prior_schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the prior_scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + noise_level, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." + ) + + if prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + + if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." + ) + + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: + raise ValueError( + f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def noise_image_embeddings( + self, + image_embeds: torch.Tensor, + noise_level: int, + noise: Optional[torch.Tensor] = None, + generator: Optional[torch.Generator] = None, + ): + """ + Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher + `noise_level` increases the variance in the final un-noised images. + + The noise is applied in two ways: + 1. A noise schedule is applied directly to the embeddings. + 2. A vector of sinusoidal time embeddings are appended to the output. + + In both cases, the amount of noise is controlled by the same `noise_level`. + + The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. + """ + if noise is None: + noise = randn_tensor( + image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype + ) + + noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) + + self.image_normalizer.to(image_embeds.device) + image_embeds = self.image_normalizer.scale(image_embeds) + + image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) + + image_embeds = self.image_normalizer.unscale(image_embeds) + + noise_level = get_timestep_embedding( + timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 + ) + + # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, + # but we might actually be running in fp16. so we need to cast here. + # there might be better ways to encapsulate this. 
+ noise_level = noise_level.to(image_embeds.dtype) + + image_embeds = torch.cat((image_embeds, noise_level), 1) + + return image_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + # regular denoising process args + prompt: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 20, + guidance_scale: float = 10.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 0, + # prior args + prior_num_inference_steps: int = 25, + prior_guidance_scale: float = 4.0, + prior_latents: Optional[torch.Tensor] = None, + clip_skip: Optional[int] = None, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 20): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. 
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to `0`): + The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in + the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. + prior_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps in the prior denoising process. More denoising steps usually lead to a + higher quality image at the expense of slower inference. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + prior_latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + embedding generation in the prior denoising process. Can be used to tweak the same generation with + different prompts. If not provided, a latents tensor is generated by sampling using the supplied random + `generator`. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning + a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + height=height, + width=width, + callback_steps=callback_steps, + noise_level=noise_level, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + prior_do_classifier_free_guidance = prior_guidance_scale > 1.0 + + # 3. Encode input prompt + prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=prior_do_classifier_free_guidance, + ) + + # 4. Prepare prior timesteps + self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) + prior_timesteps_tensor = self.prior_scheduler.timesteps + + # 5. Prepare prior latent variables + embedding_dim = self.prior.config.embedding_dim + prior_latents = self.prepare_latents( + (batch_size, embedding_dim), + prior_prompt_embeds.dtype, + device, + generator, + prior_latents, + self.prior_scheduler, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta) + + # 7. Prior denoising loop + for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents + latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t) + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prior_prompt_embeds, + encoder_hidden_states=prior_text_encoder_hidden_states, + attention_mask=prior_text_mask, + ).predicted_image_embedding + + if prior_do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + prior_latents = self.prior_scheduler.step( + predicted_image_embedding, + timestep=t, + sample=prior_latents, + **prior_extra_step_kwargs, + return_dict=False, + )[0] + + if callback is not None and i % callback_steps == 0: + callback(i, t, prior_latents) + + prior_latents = self.prior.post_process_latents(prior_latents) + + image_embeds = prior_latents + + # done prior + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 8. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 9. Prepare image embeddings + image_embeds = self.noise_image_embeddings( + image_embeds=image_embeds, + noise_level=noise_level, + generator=generator, + ) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(image_embeds) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) + + # 10. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 11. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + latents = self.prepare_latents( + shape=shape, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=latents, + scheduler=self.scheduler, + ) + + # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 13. 
Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=image_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py b/diffusers3/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..2556d5e57b6de1fa726708c86eaefe4efdc90f37 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py @@ -0,0 +1,846 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.embeddings import get_timestep_embedding +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin +from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from io import BytesIO + + >>> from diffusers import StableUnCLIPImg2ImgPipeline + + >>> pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-2-1-unclip-small", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + + >>> response = requests.get(url) + >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") + >>> init_image = init_image.resize((768, 512)) + + >>> prompt = "A fantasy landscape, trending on artstation" + + >>> images = pipe(init_image, prompt).images + >>> images[0].save("fantasy_landscape.png") + ``` +""" + + +class StableUnCLIPImg2ImgPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + """ + Pipeline for text-guided image-to-image generation using stable unCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + feature_extractor ([`CLIPImageProcessor`]): + Feature extractor for image pre-processing before being encoded. + image_encoder ([`CLIPVisionModelWithProjection`]): + CLIP vision model for encoding images. + image_normalizer ([`StableUnCLIPImageNormalizer`]): + Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image + embeddings after the noise has been applied. + image_noising_scheduler ([`KarrasDiffusionSchedulers`]): + Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined + by the `noise_level`. + tokenizer (`~transformers.CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`)]. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen [`~transformers.CLIPTextModel`] text-encoder. + unet ([`UNet2DConditionModel`]): + A [`UNet2DConditionModel`] to denoise the encoded image latents. 
+ scheduler ([`KarrasDiffusionSchedulers`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + """ + + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" + _exclude_from_cpu_offload = ["image_normalizer"] + + # image encoding components + feature_extractor: CLIPImageProcessor + image_encoder: CLIPVisionModelWithProjection + + # image noising components + image_normalizer: StableUnCLIPImageNormalizer + image_noising_scheduler: KarrasDiffusionSchedulers + + # regular denoising components + tokenizer: CLIPTokenizer + text_encoder: CLIPTextModel + unet: UNet2DConditionModel + scheduler: KarrasDiffusionSchedulers + + vae: AutoencoderKL + + def __init__( + self, + # image encoding components + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection, + # image noising components + image_normalizer: StableUnCLIPImageNormalizer, + image_noising_scheduler: KarrasDiffusionSchedulers, + # regular denoising components + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + # vae + vae: AutoencoderKL, + ): + super().__init__() + + self.register_modules( + feature_extractor=feature_extractor, + image_encoder=image_encoder, + image_normalizer=image_normalizer, + image_noising_scheduler=image_noising_scheduler, + tokenizer=tokenizer, + text_encoder=text_encoder, + unet=unet, + scheduler=scheduler, + vae=vae, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + def _encode_image( + self, + image, + device, + batch_size, + num_images_per_prompt, + do_classifier_free_guidance, + noise_level, + generator, + image_embeds, + ): + dtype = next(self.image_encoder.parameters()).dtype + + if isinstance(image, PIL.Image.Image): + # the image embedding should repeated so it matches the total batch size of the prompt + repeat_by = batch_size + else: + # assume the image input is already properly batched and just needs to be repeated so + # it matches the num_images_per_prompt. + # + # NOTE(will) this is probably missing a few number of side cases. 
I.e. batched/non-batched + # `image_embeds`. If those happen to be common use cases, let's think harder about + # what the expected dimensions of inputs should be and how we handle the encoding. + repeat_by = num_images_per_prompt + + if image_embeds is None: + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeds = self.image_encoder(image).image_embeds + + image_embeds = self.noise_image_embeddings( + image_embeds=image_embeds, + noise_level=noise_level, + generator=generator, + ) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + image_embeds = image_embeds.unsqueeze(1) + bs_embed, seq_len, _ = image_embeds.shape + image_embeds = image_embeds.repeat(1, repeat_by, 1) + image_embeds = image_embeds.view(bs_embed * repeat_by, seq_len, -1) + image_embeds = image_embeds.squeeze(1) + + if do_classifier_free_guidance: + negative_prompt_embeds = torch.zeros_like(image_embeds) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) + + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
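+ # For example, `clip_skip=1` selects `hidden_states[-2]`, the output of the
+ # second-to-last encoder layer; applying the final LayerNorm below keeps the
+ # result in the same space as the default (`clip_skip=None`) path.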
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + image, + height, + width, + callback_steps, + noise_level, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + image_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." + ) + + if prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + + if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." + ) + + if prompt is not None and negative_prompt is not None: + if type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: + raise ValueError( + f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." + ) + + if image is not None and image_embeds is not None: + raise ValueError( + "Provide either `image` or `image_embeds`. Please make sure to define only one of the two." + ) + + if image is None and image_embeds is None: + raise ValueError( + "Provide either `image` or `image_embeds`. Cannot leave both `image` and `image_embeds` undefined." + ) + + if image is not None: + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_unclip.StableUnCLIPPipeline.noise_image_embeddings + def noise_image_embeddings( + self, + image_embeds: torch.Tensor, + noise_level: int, + noise: Optional[torch.Tensor] = None, + generator: Optional[torch.Generator] = None, + ): + """ + Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher + `noise_level` increases the variance in the final un-noised images. + + The noise is applied in two ways: + 1. A noise schedule is applied directly to the embeddings. + 2. A vector of sinusoidal time embeddings are appended to the output. + + In both cases, the amount of noise is controlled by the same `noise_level`. + + The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. + """ + if noise is None: + noise = randn_tensor( + image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype + ) + + noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) + + self.image_normalizer.to(image_embeds.device) + image_embeds = self.image_normalizer.scale(image_embeds) + + image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) + + image_embeds = self.image_normalizer.unscale(image_embeds) + + noise_level = get_timestep_embedding( + timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 + ) + + # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, + # but we might actually be running in fp16. so we need to cast here. 
+ # there might be better ways to encapsulate this. + noise_level = noise_level.to(image_embeds.dtype) + + image_embeds = torch.cat((image_embeds, noise_level), 1) + + return image_embeds + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[torch.Tensor, PIL.Image.Image] = None, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 20, + guidance_scale: float = 10, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[torch.Generator] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + noise_level: int = 0, + image_embeds: Optional[torch.Tensor] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, either `prompt_embeds` will be + used or prompt is initialized to `""`. + image (`torch.Tensor` or `PIL.Image.Image`): + `Image` or tensor representing an image batch. The image is encoded to its CLIP embedding which the + `unet` is conditioned on. The image is _not_ encoded by the `vae` and then used as the latents in the + denoising process like it is in the standard Stable Diffusion text-guided image variation process. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 20): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 10.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + noise_level (`int`, *optional*, defaults to `0`): + The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in + the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. + image_embeds (`torch.Tensor`, *optional*): + Pre-generated CLIP embeddings to condition the `unet` on. These latents are not used in the denoising + process. If you want to provide pre-generated latents, pass them to `__call__` as `latents`. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning + a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if prompt is None and prompt_embeds is None: + prompt = len(image) * [""] if isinstance(image, list) else "" + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt=prompt, + image=image, + height=height, + width=width, + callback_steps=callback_steps, + noise_level=noise_level, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + image_embeds=image_embeds, + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + batch_size = batch_size * num_images_per_prompt + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Encoder input image + noise_level = torch.tensor([noise_level], device=device) + image_embeds = self._encode_image( + image=image, + device=device, + batch_size=batch_size, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + noise_level=noise_level, + generator=generator, + image_embeds=image_embeds, + ) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + if latents is None: + latents = self.prepare_latents( + batch_size=batch_size, + num_channels_latents=num_channels_latents, + height=height, + width=width, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=latents, + ) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. Denoising loop + for i, t in enumerate(self.progress_bar(timesteps)): + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + class_labels=image_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion/safety_checker.py b/diffusers3/pipelines/stable_diffusion/safety_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0e86409e4a008b8cc3e91c8541aa8ad0fadb5a --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/safety_checker.py @@ -0,0 +1,126 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import torch +import torch.nn as nn +from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +def cosine_distance(image_embeds, text_embeds): + normalized_image_embeds = nn.functional.normalize(image_embeds) + normalized_text_embeds = nn.functional.normalize(text_embeds) + return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) + + +class StableDiffusionSafetyChecker(PreTrainedModel): + config_class = CLIPConfig + main_input_name = "clip_input" + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + self.vision_model = CLIPVisionModel(config.vision_config) + self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) + + self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) + self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) + + self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) + self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) + + @torch.no_grad() + def forward(self, clip_input, images): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() + cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() + + result = [] + batch_size = image_embeds.shape[0] + for i in range(batch_size): + result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} + + # increase this value to create a stronger `nfsw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + for concept_idx in range(len(special_cos_dist[0])): + concept_cos = special_cos_dist[i][concept_idx] + concept_threshold = 
self.special_care_embeds_weights[concept_idx].item() + result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["special_scores"][concept_idx] > 0: + result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) + adjustment = 0.01 + + for concept_idx in range(len(cos_dist[0])): + concept_cos = cos_dist[i][concept_idx] + concept_threshold = self.concept_embeds_weights[concept_idx].item() + result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["concept_scores"][concept_idx] > 0: + result_img["bad_concepts"].append(concept_idx) + + result.append(result_img) + + has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] + + for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): + if has_nsfw_concept: + if torch.is_tensor(images) or torch.is_tensor(images[0]): + images[idx] = torch.zeros_like(images[idx]) # black image + else: + images[idx] = np.zeros(images[idx].shape) # black image + + if any(has_nsfw_concepts): + logger.warning( + "Potential NSFW content was detected in one or more images. A black image will be returned instead." + " Try again with a different prompt and/or seed." + ) + + return images, has_nsfw_concepts + + @torch.no_grad() + def forward_onnx(self, clip_input: torch.Tensor, images: torch.Tensor): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + # increase this value to create a stronger `nsfw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + # special_scores = special_scores.round(decimals=3) + special_care = torch.any(special_scores > 0, dim=1) + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) + + concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment + # concept_scores = concept_scores.round(decimals=3) + has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) + + images[has_nsfw_concepts] = 0.0 # black image + + return images, has_nsfw_concepts diff --git a/diffusers3/pipelines/stable_diffusion/safety_checker_flax.py b/diffusers3/pipelines/stable_diffusion/safety_checker_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..571a4f2d771015784926bcbf240d1f367b4fc293 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/safety_checker_flax.py @@ -0,0 +1,112 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
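The thresholding in `StableDiffusionSafetyChecker.forward` above reduces to comparing CLIP-space cosine similarities against per-concept thresholds, with a slightly stricter margin once any "special care" concept is hit. A minimal, self-contained sketch of that logic follows; the embeddings and thresholds are random placeholders (the real ones ship with the checker's weights), so only the arithmetic is meaningful:

```py
# Standalone illustration of the safety-checker scoring; all concept data below is fake.
import torch
import torch.nn.functional as F


def cosine_distance(image_embeds, text_embeds):
    # same normalization + matmul as in safety_checker.py
    return torch.mm(F.normalize(image_embeds), F.normalize(text_embeds).t())


projection_dim = 768
image_embeds = torch.randn(2, projection_dim)          # batch of 2 projected CLIP image embeddings
special_care_embeds = torch.randn(3, projection_dim)   # stand-ins for the 3 "special care" concepts
concept_embeds = torch.randn(17, projection_dim)       # stand-ins for the 17 flagged concepts
special_thresholds = torch.rand(3)
concept_thresholds = torch.rand(17)

special_scores = cosine_distance(image_embeds, special_care_embeds) - special_thresholds
# Images that trip any "special care" concept get a 0.01 adjustment, i.e. a stricter threshold overall.
special_adjustment = torch.any(special_scores > 0, dim=1, keepdim=True).float() * 0.01
concept_scores = cosine_distance(image_embeds, concept_embeds) - concept_thresholds + special_adjustment
has_nsfw = torch.any(concept_scores > 0, dim=1)
print(has_nsfw)  # images flagged here are the ones the pipeline would black out
```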
+ +from typing import Optional, Tuple + +import jax +import jax.numpy as jnp +from flax import linen as nn +from flax.core.frozen_dict import FrozenDict +from transformers import CLIPConfig, FlaxPreTrainedModel +from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule + + +def jax_cosine_distance(emb_1, emb_2, eps=1e-12): + norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T + norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T + return jnp.matmul(norm_emb_1, norm_emb_2.T) + + +class FlaxStableDiffusionSafetyCheckerModule(nn.Module): + config: CLIPConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.vision_model = FlaxCLIPVisionModule(self.config.vision_config) + self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype) + + self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)) + self.special_care_embeds = self.param( + "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim) + ) + + self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,)) + self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,)) + + def __call__(self, clip_input): + pooled_output = self.vision_model(clip_input)[1] + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds) + cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds) + + # increase this value to create a stronger `nfsw` filter + # at the cost of increasing the possibility of filtering benign image inputs + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment + special_scores = jnp.round(special_scores, 3) + is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True) + # Use a lower threshold if an image has any special care concept + special_adjustment = is_special_care * 0.01 + + concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment + concept_scores = jnp.round(concept_scores, 3) + has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1) + + return has_nsfw_concepts + + +class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel): + config_class = CLIPConfig + main_input_name = "clip_input" + module_class = FlaxStableDiffusionSafetyCheckerModule + + def __init__( + self, + config: CLIPConfig, + input_shape: Optional[Tuple] = None, + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + if input_shape is None: + input_shape = (1, 224, 224, 3) + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensor + clip_input = jax.random.normal(rng, input_shape) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, clip_input)["params"] + + return random_params + + def __call__( + self, + clip_input, + params: dict = None, + ): + clip_input = jnp.transpose(clip_input, (0, 2, 3, 1)) + + return self.module.apply( + {"params": params or self.params}, + jnp.array(clip_input, dtype=jnp.float32), + rngs={}, + ) 
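The `noise_image_embeddings` method in the unCLIP pipelines above works hand in hand with the `StableUnCLIPImageNormalizer` defined in the next file: the CLIP image embedding is normalized, noised at timestep `noise_level`, un-normalized, and then concatenated with a sinusoidal embedding of `noise_level` so the UNet knows how much noise was applied. A minimal sketch of that sequence, assuming a default-configured `DDPMScheduler` stands in for the checkpoint's `image_noising_scheduler` and zero-mean/unit-std tensors stand in for the normalizer's learned statistics:

```py
# Standalone sketch of noise_image_embeddings: normalize -> add noise at `noise_level`
# -> un-normalize -> append a timestep embedding of `noise_level`.
import torch
from diffusers import DDPMScheduler
from diffusers.models.embeddings import get_timestep_embedding

embedding_dim = 768
image_embeds = torch.randn(2, embedding_dim)   # projected CLIP image embeddings
mean = torch.zeros(1, embedding_dim)           # stands in for StableUnCLIPImageNormalizer.mean
std = torch.ones(1, embedding_dim)             # stands in for StableUnCLIPImageNormalizer.std
image_noising_scheduler = DDPMScheduler()      # the real checkpoint ships its own config
noise_level = torch.tensor([100] * image_embeds.shape[0])

normalized = (image_embeds - mean) / std                                     # image_normalizer.scale
noise = torch.randn_like(normalized)
noised = image_noising_scheduler.add_noise(normalized, noise, noise_level)   # more noise at higher levels
noised = noised * std + mean                                                 # image_normalizer.unscale

# Tell the UNet how much noise was added by appending a sinusoidal embedding of `noise_level`.
level_emb = get_timestep_embedding(
    timesteps=noise_level, embedding_dim=embedding_dim, flip_sin_to_cos=True, downscale_freq_shift=0
)
conditioning = torch.cat((noised, level_emb.to(noised.dtype)), dim=1)        # shape (2, 2 * embedding_dim)
print(conditioning.shape)
```

Higher `noise_level` values therefore increase the variance of the image conditioning, which is why the pipeline docstrings describe `noise_level` as trading fidelity to the input image for diversity.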
diff --git a/diffusers3/pipelines/stable_diffusion/stable_unclip_image_normalizer.py b/diffusers3/pipelines/stable_diffusion/stable_unclip_image_normalizer.py new file mode 100644 index 0000000000000000000000000000000000000000..3fc6b3a3f8b0d1012a13482052adfc61a2682aba --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion/stable_unclip_image_normalizer.py @@ -0,0 +1,57 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Union + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin + + +class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin): + """ + This class is used to hold the mean and standard deviation of the CLIP embedder used in stable unCLIP. + + It is used to normalize the image embeddings before the noise is applied and un-normalize the noised image + embeddings. + """ + + @register_to_config + def __init__( + self, + embedding_dim: int = 768, + ): + super().__init__() + + self.mean = nn.Parameter(torch.zeros(1, embedding_dim)) + self.std = nn.Parameter(torch.ones(1, embedding_dim)) + + def to( + self, + torch_device: Optional[Union[str, torch.device]] = None, + torch_dtype: Optional[torch.dtype] = None, + ): + self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype)) + self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype)) + return self + + def scale(self, embeds): + embeds = (embeds - self.mean) * 1.0 / self.std + return embeds + + def unscale(self, embeds): + embeds = (embeds * self.std) + self.mean + return embeds diff --git a/diffusers3/pipelines/stable_diffusion_3/__init__.py b/diffusers3/pipelines/stable_diffusion_3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0604589a208d0c5c1270629dcad144395b750cb --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_3/__init__.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["StableDiffusion3PipelineOutput"]} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_3"] = ["StableDiffusion3Pipeline"] + _import_structure["pipeline_stable_diffusion_3_img2img"] = ["StableDiffusion3Img2ImgPipeline"] + _import_structure["pipeline_stable_diffusion_3_inpaint"] = ["StableDiffusion3InpaintPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and 
is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_diffusion_3 import StableDiffusion3Pipeline + from .pipeline_stable_diffusion_3_img2img import StableDiffusion3Img2ImgPipeline + from .pipeline_stable_diffusion_3_inpaint import StableDiffusion3InpaintPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_3/pipeline_output.py b/diffusers3/pipelines/stable_diffusion_3/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..4655f446102a7f2db93743abdba9acc038270862 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_3/pipeline_output.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput + + +@dataclass +class StableDiffusion3PipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + """ + + images: Union[List[PIL.Image.Image], np.ndarray] diff --git a/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py b/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py new file mode 100644 index 0000000000000000000000000000000000000000..5a10f329a0af804858ff9410cc1e59d2be38d859 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py @@ -0,0 +1,935 @@ +# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import ( + CLIPTextModelWithProjection, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import SD3Transformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import StableDiffusion3PipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusion3Pipeline + + >>> pipe = StableDiffusion3Pipeline.from_pretrained( + ... "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + >>> prompt = "A cat holding a sign that says hello world" + >>> image = pipe(prompt).images[0] + >>> image.save("sd3.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): + r""" + Args: + transformer ([`SD3Transformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, + with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` + as its dimension. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + text_encoder_3 ([`T5EncoderModel`]): + Frozen text-encoder. Stable Diffusion 3 uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_3 (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] + + def __init__( + self, + transformer: SD3Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer_2: CLIPTokenizer, + text_encoder_3: T5EncoderModel, + tokenizer_3: T5TokenizerFast, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + text_encoder_3=text_encoder_3, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + tokenizer_3=tokenizer_3, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = ( + 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.tokenizer_max_length = ( + self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 + ) + self.default_sample_size = ( + self.transformer.config.sample_size + if hasattr(self, "transformer") and self.transformer is not None + else 128 + ) + + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 256, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + if self.text_encoder_3 is None: + return torch.zeros( + ( + batch_size * num_images_per_prompt, + self.tokenizer_max_length, + self.transformer.config.joint_attention_dim, + ), + device=device, + dtype=dtype, + ) + + text_inputs = self.tokenizer_3( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] + + dtype = self.text_encoder_3.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clip_skip: Optional[int] = None, + clip_model_index: int = 0, + ): + device = device or self._execution_device + + clip_tokenizers = [self.tokenizer, self.tokenizer_2] + clip_text_encoders 
= [self.text_encoder, self.text_encoder_2] + + tokenizer = clip_tokenizers[clip_model_index] + text_encoder = clip_text_encoders[clip_model_index] + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + pooled_prompt_embeds = prompt_embeds[0] + + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) + pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, pooled_prompt_embeds + + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + prompt_3: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + max_sequence_length: int = 256, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+            negative_prompt_3 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+                `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            lora_scale (`float`, *optional*):
+                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
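+
+        Examples:
+            A minimal sketch (assumes `pipe` is an already-instantiated `StableDiffusion3Pipeline` on the execution
+            device; the prompt is a placeholder) showing the returned embeddings pre-computed once and reused, so the
+            three text encoders only run a single time:
+
+            ```py
+            >>> (
+            ...     prompt_embeds,
+            ...     negative_prompt_embeds,
+            ...     pooled_prompt_embeds,
+            ...     negative_pooled_prompt_embeds,
+            ... ) = pipe.encode_prompt(prompt="a photo of an astronaut", prompt_2=None, prompt_3=None)
+            >>> image = pipe(
+            ...     prompt_embeds=prompt_embeds,
+            ...     negative_prompt_embeds=negative_prompt_embeds,
+            ...     pooled_prompt_embeds=pooled_prompt_embeds,
+            ...     negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            ... ).images[0]
+            ```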
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + prompt_3 = prompt_3 or prompt + prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 + + prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=0, + ) + prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( + prompt=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=1, + ) + clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) + + t5_prompt_embed = self._get_t5_prompt_embeds( + prompt=prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + clip_prompt_embeds = torch.nn.functional.pad( + clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) + ) + + prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) + pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + negative_prompt_3 = negative_prompt_3 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + negative_prompt_3 = ( + batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + + negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds( + negative_prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=0, + ) + negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds( + negative_prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=1, + ) + negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) + + t5_negative_prompt_embed = self._get_t5_prompt_embeds( + prompt=negative_prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + negative_clip_prompt_embeds = torch.nn.functional.pad( + negative_clip_prompt_embeds, + (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]), + ) + + negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) + negative_pooled_prompt_embeds = torch.cat( + [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1 + ) + + if self.text_encoder is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + def check_inputs( + self, + prompt, + prompt_2, + prompt_3, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + negative_prompt_3=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_3 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): + raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_3 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + if latents is not None: + return latents.to(device=device, dtype=dtype) + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def joint_attention_kwargs(self): + return self._joint_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + prompt_3: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is + will be used instead + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. 
Must be in descending order.
+            guidance_scale (`float`, *optional*, defaults to 7.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images closely linked to the text
+                `prompt`, usually at the expense of lower image quality.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used instead.
+            negative_prompt_3 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+                `text_encoder_3`. If not defined, `negative_prompt` is used instead.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
+                of a plain tuple.
+            joint_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`:
+            [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a
+            `tuple`. When returning a tuple, the first element is a list with the generated images.
+        """
+
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            prompt_3,
+            height,
+            width,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            negative_prompt_3=negative_prompt_3,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._clip_skip = clip_skip
+        self._joint_attention_kwargs = joint_attention_kwargs
+        self._interrupt = False
+
+        # 2.
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_3=prompt_3, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + clip_skip=self.clip_skip, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=pooled_prompt_embeds, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusion3PipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py b/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..96d53663b8672ac5a02e9dbd0e0b6bd6151b7c05 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py @@ -0,0 +1,967 @@ +# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from typing import Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import ( + CLIPTextModelWithProjection, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import SD3LoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import SD3Transformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import StableDiffusion3PipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + + >>> from diffusers import AutoPipelineForImage2Image + >>> from diffusers.utils import load_image + + >>> device = "cuda" + >>> model_id_or_path = "stabilityai/stable-diffusion-3-medium-diffusers" + >>> pipe = AutoPipelineForImage2Image.from_pretrained(model_id_or_path, torch_dtype=torch.float16) + >>> pipe = pipe.to(device) + + >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" + >>> init_image = load_image(url).resize((1024, 1024)) + + >>> prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k" + + >>> images = pipe(prompt=prompt, image=init_image, strength=0.95, guidance_scale=7.5).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. 
If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusion3Img2ImgPipeline(DiffusionPipeline): + r""" + Args: + transformer ([`SD3Transformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, + with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` + as its dimension. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + text_encoder_3 ([`T5EncoderModel`]): + Frozen text-encoder. Stable Diffusion 3 uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_3 (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] + + def __init__( + self, + transformer: SD3Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer_2: CLIPTokenizer, + text_encoder_3: T5EncoderModel, + tokenizer_3: T5TokenizerFast, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + text_encoder_3=text_encoder_3, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + tokenizer_3=tokenizer_3, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels + ) + self.tokenizer_max_length = self.tokenizer.model_max_length + self.default_sample_size = self.transformer.config.sample_size + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 256, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + if self.text_encoder_3 is None: + return torch.zeros( + ( + batch_size * num_images_per_prompt, + self.tokenizer_max_length, + self.transformer.config.joint_attention_dim, + ), + device=device, + dtype=dtype, + ) + + text_inputs = self.tokenizer_3( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] + + dtype = self.text_encoder_3.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from 
diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clip_skip: Optional[int] = None, + clip_model_index: int = 0, + ): + device = device or self._execution_device + + clip_tokenizers = [self.tokenizer, self.tokenizer_2] + clip_text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = clip_tokenizers[clip_model_index] + text_encoder = clip_text_encoders[clip_model_index] + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + pooled_prompt_embeds = prompt_embeds[0] + + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) + pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + prompt_3: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + max_sequence_length: int = 256, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. 
If not defined, `prompt` is
+                used in all text-encoders
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            do_classifier_free_guidance (`bool`):
+                whether to use classifier free guidance or not
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+            negative_prompt_3 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+                `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            lora_scale (`float`, *optional*):
+                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
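+
+        Examples:
+            A minimal sketch (assumes `pipe` is a loaded `StableDiffusion3Img2ImgPipeline` and `init_image` is a
+            `PIL.Image`; the prompt and strength values are placeholders) showing the embeddings encoded once and
+            reused while sweeping `strength`, so the text encoders only run a single time:
+
+            ```py
+            >>> prompt_embeds, negative_prompt_embeds, pooled, negative_pooled = pipe.encode_prompt(
+            ...     prompt="a watercolor landscape", prompt_2=None, prompt_3=None
+            ... )
+            >>> for strength in (0.3, 0.6, 0.9):
+            ...     image = pipe(
+            ...         image=init_image,
+            ...         strength=strength,
+            ...         prompt_embeds=prompt_embeds,
+            ...         negative_prompt_embeds=negative_prompt_embeds,
+            ...         pooled_prompt_embeds=pooled,
+            ...         negative_pooled_prompt_embeds=negative_pooled,
+            ...     ).images[0]
+            ```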
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + prompt_3 = prompt_3 or prompt + prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 + + prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=0, + ) + prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( + prompt=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=1, + ) + clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) + + t5_prompt_embed = self._get_t5_prompt_embeds( + prompt=prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + clip_prompt_embeds = torch.nn.functional.pad( + clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) + ) + + prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) + pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + negative_prompt_3 = negative_prompt_3 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + negative_prompt_3 = ( + batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + + negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds( + negative_prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=0, + ) + negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds( + negative_prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=None, + clip_model_index=1, + ) + negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1) + + t5_negative_prompt_embed = self._get_t5_prompt_embeds( + prompt=negative_prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + negative_clip_prompt_embeds = torch.nn.functional.pad( + negative_clip_prompt_embeds, + (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]), + ) + + negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2) + negative_pooled_prompt_embeds = torch.cat( + [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1 + ) + + if self.text_encoder is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + def check_inputs( + self, + prompt, + prompt_2, + prompt_3, + strength, + negative_prompt=None, + negative_prompt_2=None, + negative_prompt_3=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_3 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): + raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_3 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(num_inference_steps * strength, num_inference_steps) + + t_start = int(max(num_inference_steps - init_timestep, 0)) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + if image.shape[1] == self.vae.config.latent_channels: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + init_latents = (init_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.scale_noise(init_latents, timestep, noise) + latents = init_latents.to(device=device, dtype=dtype) + + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
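+ # A minimal illustrative sketch (not part of the pipeline API) of how this flag is consumed in
+ # the denoising loop below: the latent batch is duplicated into [uncond, cond], the transformer
+ # is run once on both halves, and the two predictions are recombined as
+ #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ # so `guidance_scale = 1` reduces to the plain text-conditioned prediction (no extra guidance),
+ # while e.g. `guidance_scale = 7.0` pushes the sample 7x along the text-conditioned direction.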
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + prompt_3: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + strength: float = 0.6, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is + will be used instead + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 7.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used instead
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+ `text_encoder_3`. If not defined, `negative_prompt` is used instead
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
+ of a plain tuple.
+ callback_on_step_end (`Callable`, *optional*):
+ A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
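+
+ A hedged, illustrative note on how `strength` and `num_inference_steps` interact (the variable
+ names below are assumptions, not part of this file): with `num_inference_steps=50` and
+ `strength=0.6`, only the final `int(50 * 0.6) = 30` scheduler timesteps are actually run,
+ starting from a noised version of `image`.
+
+ ```py
+ >>> # hypothetical usage sketch; see `Examples` below for the canonical snippet
+ >>> result = pipe(prompt="a photo of a cat", image=init_image, strength=0.6, num_inference_steps=50)
+ >>> result.images[0].save("img2img.png")
+ ```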
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + prompt_3, + strength, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_3=prompt_3, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + clip_skip=self.clip_skip, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) + + # 3. Preprocess image + image = self.image_processor.preprocess(image) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 5. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + # 6. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=pooled_prompt_embeds, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if output_type == "latent": + image = latents + + else: + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusion3PipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py b/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..440b6529c9cafb5b832f7a76e27312366151054c --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py @@ -0,0 +1,1201 @@ +# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Callable, Dict, List, Optional, Union + +import torch +from transformers import ( + CLIPTextModelWithProjection, + CLIPTokenizer, + T5EncoderModel, + T5TokenizerFast, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin +from ...models.autoencoders import AutoencoderKL +from ...models.transformers import SD3Transformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import StableDiffusion3PipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusion3InpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = StableDiffusion3InpaintPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16 + ... ) + >>> pipe.to("cuda") + >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + >>> source = load_image(img_url) + >>> mask = load_image(mask_url) + >>> image = pipe(prompt=prompt, image=source, mask_image=mask).images[0] + >>> image.save("sd3_inpainting.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. 
+ device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusion3InpaintPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin): + r""" + Args: + transformer ([`SD3Transformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant, + with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size` + as its dimension. + text_encoder_2 ([`CLIPTextModelWithProjection`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + text_encoder_3 ([`T5EncoderModel`]): + Frozen text-encoder. Stable Diffusion 3 uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. 
+ tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_3 (`T5TokenizerFast`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae" + _optional_components = [] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"] + + def __init__( + self, + transformer: SD3Transformer2DModel, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer_2: CLIPTokenizer, + text_encoder_3: T5EncoderModel, + tokenizer_3: T5TokenizerFast, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + text_encoder_3=text_encoder_3, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + tokenizer_3=tokenizer_3, + transformer=transformer, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels + ) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, + vae_latent_channels=self.vae.config.latent_channels, + do_normalize=False, + do_binarize=True, + do_convert_grayscale=True, + ) + self.tokenizer_max_length = self.tokenizer.model_max_length + self.default_sample_size = self.transformer.config.sample_size + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 256, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): + device = device or self._execution_device + dtype = dtype or self.text_encoder.dtype + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + if self.text_encoder_3 is None: + return torch.zeros( + ( + batch_size * num_images_per_prompt, + self.tokenizer_max_length, + self.transformer.config.joint_attention_dim, + ), + device=device, + dtype=dtype, + ) + + text_inputs = self.tokenizer_3( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because `max_sequence_length` is set to " + f" {max_sequence_length} tokens: {removed_text}" + ) + + prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0] + + dtype = self.text_encoder_3.dtype + prompt_embeds = prompt_embeds.to(dtype=dtype, 
device=device) + + _, seq_len, _ = prompt_embeds.shape + + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + clip_skip: Optional[int] = None, + clip_model_index: int = 0, + ): + device = device or self._execution_device + + clip_tokenizers = [self.tokenizer, self.tokenizer_2] + clip_text_encoders = [self.text_encoder, self.text_encoder_2] + + tokenizer = clip_tokenizers[clip_model_index] + text_encoder = clip_text_encoders[clip_model_index] + + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + pooled_prompt_embeds = prompt_embeds[0] + + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + _, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1) + pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + prompt_3: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + clip_skip: Optional[int] = None, + max_sequence_length: int = 256, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + 
prompt to be encoded
+ prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+ used in all text-encoders
+ prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
+ used in all text-encoders
+ device (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+ `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ lora_scale (`float`, *optional*):
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
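+
+ A minimal illustrative sketch of the returned shapes (the numbers are assumptions derived from
+ the SD3 defaults: 77 CLIP tokens, 256 T5 tokens, a 4096-dim joint attention width, and
+ 768 + 1280 pooled CLIP dims; `pipe` is a hypothetical, already-loaded pipeline):
+
+ ```py
+ >>> pe, npe, ppe, nppe = pipe.encode_prompt(prompt="a cat", prompt_2=None, prompt_3=None)
+ >>> pe.shape   # torch.Size([1, 77 + 256, 4096]) -- padded CLIP embeds concatenated with T5 embeds
+ >>> ppe.shape  # torch.Size([1, 2048]) -- pooled CLIP-L and CLIP-bigG embeddings concatenated
+ ```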
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + prompt_3 = prompt_3 or prompt + prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 + + prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=0, + ) + prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds( + prompt=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + clip_skip=clip_skip, + clip_model_index=1, + ) + clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1) + + t5_prompt_embed = self._get_t5_prompt_embeds( + prompt=prompt_3, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + clip_prompt_embeds = torch.nn.functional.pad( + clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1]) + ) + + prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2) + pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1) + + if do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + negative_prompt_3 = negative_prompt_3 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + negative_prompt_3 = ( + batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 + ) + + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ )
+
+ negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds(
+ negative_prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=None,
+ clip_model_index=0,
+ )
+ negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds(
+ negative_prompt_2,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=None,
+ clip_model_index=1,
+ )
+ negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
+
+ t5_negative_prompt_embed = self._get_t5_prompt_embeds(
+ prompt=negative_prompt_3,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ negative_clip_prompt_embeds = torch.nn.functional.pad(
+ negative_clip_prompt_embeds,
+ (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]),
+ )
+
+ negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2)
+ negative_pooled_prompt_embeds = torch.cat(
+ [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1
+ )
+
+ if self.text_encoder is not None:
+ if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder, lora_scale)
+
+ if self.text_encoder_2 is not None:
+ if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.check_inputs
+ def check_inputs(
+ self,
+ prompt,
+ prompt_2,
+ prompt_3,
+ strength,
+ negative_prompt=None,
+ negative_prompt_2=None,
+ negative_prompt_3=None,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ pooled_prompt_embeds=None,
+ negative_pooled_prompt_embeds=None,
+ callback_on_step_end_tensor_inputs=None,
+ max_sequence_length=None,
+ ):
+ if strength < 0 or strength > 1:
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
+
+ if callback_on_step_end_tensor_inputs is not None and not all(
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+ ):
+ raise ValueError(
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_2 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_3 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): + raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_3 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(num_inference_steps * strength, num_inference_steps) + + t_start = int(max(num_inference_steps - init_timestep, 0)) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." + "However, either the image or the noise timestep has not been provided." + ) + + if return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 16: + image_latents = image + else: + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. 
then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.scale_noise(image_latents, timestep, noise) + else: + noise = latents.to(device) + latents = noise + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + return image_latents + + def prepare_mask_latents( + self, + mask, + masked_image, + batch_size, + num_images_per_prompt, + height, + width, + dtype, + device, + generator, + do_classifier_free_guidance, + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + masked_image = masked_image.to(device=device, dtype=dtype) + + if masked_image.shape[1] == 16: + masked_image_latents = masked_image + else: + masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator) + + masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." + ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." 
+ ) + masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + return mask, masked_image_latents + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + prompt_3: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: PipelineImageInput = None, + height: int = None, + width: int = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.6, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt_3: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 256, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead + prompt_3 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is + will be used instead + image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): + `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both + numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list + or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. 
If it is a numpy array or a
+ list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. It can also accept image
+ latents as `image`, but if passing latents directly it is not encoded again.
+ mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
+ `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
+ single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
+ color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B,
+ H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
+ 1)`, or `(H, W)`.
+ masked_image_latents (`torch.Tensor`, `List[torch.Tensor]`):
+ `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask
+ latents tensor will be generated from `mask_image`.
+ height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
+ width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
+ The size of the margin in the crop to be applied to the image and masking. If `None`, no crop is applied to
+ image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
+ with the same aspect ratio as the image that contains all masked areas, and then expand that area based
+ on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
+ resizing to the original image size for inpainting. This is useful when the masked area is small while
+ the image is large and contains information irrelevant for inpainting, such as background.
+ strength (`float`, *optional*, defaults to 0.6):
+ Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
+ essentially ignores `image`.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+ passed will be used. Must be in descending order.
+ guidance_scale (`float`, *optional*, defaults to 7.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`.
Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used instead
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+ `text_encoder_3`. If not defined, `negative_prompt` is used instead
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
+ of a plain tuple.
+ callback_on_step_end (`Callable`, *optional*):
+ A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument.
You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + height = height or self.transformer.config.sample_size * self.vae_scale_factor + width = width or self.transformer.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + prompt_3, + strength, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_3=prompt_3, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + negative_prompt_3=negative_prompt_3, + do_classifier_free_guidance=self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + clip_skip=self.clip_skip, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) + + # 3. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 4. 
Preprocess mask and image + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + # 5. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_transformer = self.transformer.config.in_channels + return_image_latents = num_channels_transformer == 16 + + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 6. Prepare mask latent variables + mask_condition = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is None: + masked_image = init_image * (mask_condition < 0.5) + else: + masked_image = masked_image_latents + + mask, masked_image_latents = self.prepare_mask_latents( + mask_condition, + masked_image, + batch_size, + num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # match the inpainting pipeline and will be updated with input + mask inpainting model later + if num_channels_transformer == 33: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if ( + num_channels_latents + num_channels_mask + num_channels_masked_image + != self.transformer.config.in_channels + ): + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.transformer`: {self.transformer.config} expects" + f" {self.transformer.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.transformer` or your `mask_image` or `image` input." + ) + elif num_channels_transformer != 16: + raise ValueError( + f"The transformer {self.transformer.__class__} should have 16 input channels or 33 input channels, not {self.transformer.config.in_channels}." + ) + + # 7. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latent_model_input.shape[0]) + + if num_channels_transformer == 33: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + noise_pred = self.transformer( + hidden_states=latent_model_input, + timestep=timestep, + encoder_hidden_states=prompt_embeds, + pooled_projections=pooled_prompt_embeds, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + if num_channels_transformer == 16: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.scale_noise( + init_latents_proper, torch.tensor([noise_timestep]), noise + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + mask = callback_outputs.pop("mask", mask) + masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ + 0 + ] + else: + image = latents + + do_denormalize = [True] * image.shape[0] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusion3PipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_attend_and_excite/__init__.py b/diffusers3/pipelines/stable_diffusion_attend_and_excite/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cce556fceb2379be482d383e380a09836d25ce3b --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_attend_and_excite/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py 
b/diffusers3/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py new file mode 100644 index 0000000000000000000000000000000000000000..8f40fa72a25c85b6f71398dfe1e5055b0b34d400 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py @@ -0,0 +1,1097 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from torch.nn import functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import Attention +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionAttendAndExcitePipeline + + >>> pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16 + ... ).to("cuda") + + + >>> prompt = "a cat and a frog" + + >>> # use get_indices function to find out indices of the tokens you want to alter + >>> pipe.get_indices(prompt) + {0: '<|startoftext|>', 1: 'a', 2: 'cat', 3: 'and', 4: 'a', 5: 'frog', 6: '<|endoftext|>'} + + >>> token_indices = [2, 5] + >>> seed = 6141 + >>> generator = torch.Generator("cuda").manual_seed(seed) + + >>> images = pipe( + ... prompt=prompt, + ... token_indices=token_indices, + ... guidance_scale=7.5, + ... generator=generator, + ... num_inference_steps=50, + ... max_iter_to_alter=25, + ... 
).images + + >>> image = images[0] + >>> image.save(f"../images/{prompt}_{seed}.png") + ``` +""" + + +class AttentionStore: + @staticmethod + def get_empty_store(): + return {"down": [], "mid": [], "up": []} + + def __call__(self, attn, is_cross: bool, place_in_unet: str): + if self.cur_att_layer >= 0 and is_cross: + if attn.shape[1] == np.prod(self.attn_res): + self.step_store[place_in_unet].append(attn) + + self.cur_att_layer += 1 + if self.cur_att_layer == self.num_att_layers: + self.cur_att_layer = 0 + self.between_steps() + + def between_steps(self): + self.attention_store = self.step_store + self.step_store = self.get_empty_store() + + def get_average_attention(self): + average_attention = self.attention_store + return average_attention + + def aggregate_attention(self, from_where: List[str]) -> torch.Tensor: + """Aggregates the attention across the different layers and heads at the specified resolution.""" + out = [] + attention_maps = self.get_average_attention() + for location in from_where: + for item in attention_maps[location]: + cross_maps = item.reshape(-1, self.attn_res[0], self.attn_res[1], item.shape[-1]) + out.append(cross_maps) + out = torch.cat(out, dim=0) + out = out.sum(0) / out.shape[0] + return out + + def reset(self): + self.cur_att_layer = 0 + self.step_store = self.get_empty_store() + self.attention_store = {} + + def __init__(self, attn_res): + """ + Initialize an empty AttentionStore :param step_index: used to visualize only a specific step in the diffusion + process + """ + self.num_att_layers = -1 + self.cur_att_layer = 0 + self.step_store = self.get_empty_store() + self.attention_store = {} + self.curr_step_index = 0 + self.attn_res = attn_res + + +class AttendExciteAttnProcessor: + def __init__(self, attnstore, place_in_unet): + super().__init__() + self.attnstore = attnstore + self.place_in_unet = place_in_unet + + def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + is_cross = encoder_hidden_states is not None + encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + + # only need to store attention maps during the Attend and Excite process + if attention_probs.requires_grad: + self.attnstore(attention_probs, is_cross, self.place_in_unet) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class StableDiffusionAttendAndExcitePipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion and Attend-and-Excite. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + indices, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + indices_is_list_ints = isinstance(indices, list) and isinstance(indices[0], int) + indices_is_list_list_ints = ( + isinstance(indices, list) and isinstance(indices[0], list) and isinstance(indices[0][0], int) + ) + + if not indices_is_list_ints and not indices_is_list_list_ints: + raise TypeError("`indices` must be a list of ints or a list of a list of ints") + + if indices_is_list_ints: + indices_batch_size = 1 + elif indices_is_list_list_ints: + indices_batch_size = len(indices) + + if prompt is not None and isinstance(prompt, str): + prompt_batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + prompt_batch_size = len(prompt) + elif prompt_embeds is not None: + prompt_batch_size = prompt_embeds.shape[0] + + if indices_batch_size != prompt_batch_size: + raise ValueError( + f"indices batch size must be same as prompt batch size. indices batch size: {indices_batch_size}, prompt batch size: {prompt_batch_size}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @staticmethod + def _compute_max_attention_per_index( + attention_maps: torch.Tensor, + indices: List[int], + ) -> List[torch.Tensor]: + """Computes the maximum attention value for each of the tokens we wish to alter.""" + attention_for_text = attention_maps[:, :, 1:-1] + attention_for_text *= 100 + attention_for_text = torch.nn.functional.softmax(attention_for_text, dim=-1) + + # Shift indices since we removed the first token + indices = [index - 1 for index in indices] + + # Extract the maximum values + max_indices_list = [] + for i in indices: + image = attention_for_text[:, :, i] + smoothing = GaussianSmoothing().to(attention_maps.device) + input = F.pad(image.unsqueeze(0).unsqueeze(0), (1, 1, 1, 1), mode="reflect") + image = smoothing(input).squeeze(0).squeeze(0) + max_indices_list.append(image.max()) + return max_indices_list + + def _aggregate_and_get_max_attention_per_token( + self, + indices: List[int], + ): + """Aggregates the attention for each token and computes the max activation value for each token to alter.""" + attention_maps = self.attention_store.aggregate_attention( + from_where=("up", "down", "mid"), + ) + max_attention_per_index = self._compute_max_attention_per_index( + attention_maps=attention_maps, + indices=indices, + ) + return max_attention_per_index + + @staticmethod + def _compute_loss(max_attention_per_index: List[torch.Tensor]) -> torch.Tensor: + """Computes the attend-and-excite loss using the maximum attention value for each token.""" + losses = [max(0, 1.0 - curr_max) for curr_max in max_attention_per_index] + loss = max(losses) + return loss + + @staticmethod + def _update_latent(latents: torch.Tensor, loss: torch.Tensor, step_size: float) -> torch.Tensor: + """Update the latent according to the computed loss.""" + grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents], retain_graph=True)[0] + latents = latents - step_size * grad_cond + return latents + + def _perform_iterative_refinement_step( + self, + latents: torch.Tensor, + indices: List[int], + loss: torch.Tensor, + threshold: float, + text_embeddings: torch.Tensor, + step_size: float, + t: int, + max_refinement_steps: int = 20, + ): + """ + Performs the iterative latent refinement introduced in the paper. Here, we continuously update the latent code + according to our loss objective until the given threshold is reached for all tokens. + """ + iteration = 0 + target_loss = max(0, 1.0 - threshold) + while loss > target_loss: + iteration += 1 + + latents = latents.clone().detach().requires_grad_(True) + self.unet(latents, t, encoder_hidden_states=text_embeddings).sample + self.unet.zero_grad() + + # Get max activation value for each subject token + max_attention_per_index = self._aggregate_and_get_max_attention_per_token( + indices=indices, + ) + + loss = self._compute_loss(max_attention_per_index) + + if loss != 0: + latents = self._update_latent(latents, loss, step_size) + + logger.info(f"\t Try {iteration}. loss: {loss}") + + if iteration >= max_refinement_steps: + logger.info(f"\t Exceeded max number of iterations ({max_refinement_steps})! ") + break + + # Run one more time but don't compute gradients and update the latents. 
+ # We just need to compute the new loss - the grad update will occur below + latents = latents.clone().detach().requires_grad_(True) + _ = self.unet(latents, t, encoder_hidden_states=text_embeddings).sample + self.unet.zero_grad() + + # Get max activation value for each subject token + max_attention_per_index = self._aggregate_and_get_max_attention_per_token( + indices=indices, + ) + loss = self._compute_loss(max_attention_per_index) + logger.info(f"\t Finished with loss of: {loss}") + return loss, latents, max_attention_per_index + + def register_attention_control(self): + attn_procs = {} + cross_att_count = 0 + for name in self.unet.attn_processors.keys(): + if name.startswith("mid_block"): + place_in_unet = "mid" + elif name.startswith("up_blocks"): + place_in_unet = "up" + elif name.startswith("down_blocks"): + place_in_unet = "down" + else: + continue + + cross_att_count += 1 + attn_procs[name] = AttendExciteAttnProcessor(attnstore=self.attention_store, place_in_unet=place_in_unet) + + self.unet.set_attn_processor(attn_procs) + self.attention_store.num_att_layers = cross_att_count + + def get_indices(self, prompt: str) -> Dict[str, int]: + """Utility function to list the indices of the tokens you wish to alte""" + ids = self.tokenizer(prompt).input_ids + indices = {i: tok for tok, i in zip(self.tokenizer.convert_ids_to_tokens(ids), range(len(ids)))} + return indices + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + token_indices: Union[List[int], List[List[int]]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + max_iter_to_alter: int = 25, + thresholds: dict = {0: 0.05, 10: 0.5, 20: 0.8}, + scale_factor: int = 20, + attn_res: Optional[Tuple[int]] = (16, 16), + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + token_indices (`List[int]`): + The token indices to alter with attend-and-excite. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
+ negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + max_iter_to_alter (`int`, *optional*, defaults to `25`): + Number of denoising steps to apply attend-and-excite. The `max_iter_to_alter` denoising steps are when + attend-and-excite is applied. For example, if `max_iter_to_alter` is `25` and there are a total of `30` + denoising steps, the first `25` denoising steps applies attend-and-excite and the last `5` will not. + thresholds (`dict`, *optional*, defaults to `{0: 0.05, 10: 0.5, 20: 0.8}`): + Dictionary defining the iterations and desired thresholds to apply iterative latent refinement in. + scale_factor (`int`, *optional*, default to 20): + Scale factor to control the step size of each attend-and-excite update. + attn_res (`tuple`, *optional*, default computed from width and height): + The 2D resolution of the semantic attention map. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + token_indices, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + if attn_res is None: + attn_res = int(np.ceil(width / 32)), int(np.ceil(height / 32)) + self.attention_store = AttentionStore(attn_res) + original_attn_proc = self.unet.attn_processors + self.register_attention_control() + + # default config for step size from original repo + scale_range = np.linspace(1.0, 0.5, len(self.scheduler.timesteps)) + step_size = scale_factor * np.sqrt(scale_range) + + text_embeddings = ( + prompt_embeds[batch_size * num_images_per_prompt :] if do_classifier_free_guidance else prompt_embeds + ) + + if isinstance(token_indices[0], int): + token_indices = [token_indices] + + indices = [] + + for ind in token_indices: + indices = indices + [ind] * num_images_per_prompt + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Attend and excite process + with torch.enable_grad(): + latents = latents.clone().detach().requires_grad_(True) + updated_latents = [] + for latent, index, text_embedding in zip(latents, indices, text_embeddings): + # Forward pass of denoising with text conditioning + latent = latent.unsqueeze(0) + text_embedding = text_embedding.unsqueeze(0) + + self.unet( + latent, + t, + encoder_hidden_states=text_embedding, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + self.unet.zero_grad() + + # Get max activation value for each subject token + max_attention_per_index = self._aggregate_and_get_max_attention_per_token( + indices=index, + ) + + loss = self._compute_loss(max_attention_per_index=max_attention_per_index) + + # If this is an iterative refinement step, verify we have reached the desired threshold for all + if i in thresholds.keys() and loss > 1.0 - thresholds[i]: + loss, latent, max_attention_per_index = self._perform_iterative_refinement_step( + latents=latent, + indices=index, + loss=loss, + threshold=thresholds[i], + text_embeddings=text_embedding, + step_size=step_size[i], + t=t, + ) + + # Perform gradient update + if i < max_iter_to_alter: + if loss != 0: + latent = self._update_latent( + latents=latent, + loss=loss, + step_size=step_size[i], + ) + logger.info(f"Iteration {i} | Loss: {loss:0.4f}") + + updated_latents.append(latent) + + latents = torch.cat(updated_latents, dim=0) + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. 
Post-processing + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + self.maybe_free_model_hooks() + # make sure to set the original attention processors back + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + +class GaussianSmoothing(torch.nn.Module): + """ + Arguments: + Apply gaussian smoothing on a 1d, 2d or 3d tensor. Filtering is performed seperately for each channel in the input + using a depthwise convolution. + channels (int, sequence): Number of channels of the input tensors. Output will + have this number of channels as well. + kernel_size (int, sequence): Size of the gaussian kernel. sigma (float, sequence): Standard deviation of the + gaussian kernel. dim (int, optional): The number of dimensions of the data. + Default value is 2 (spatial). + """ + + # channels=1, kernel_size=kernel_size, sigma=sigma, dim=2 + def __init__( + self, + channels: int = 1, + kernel_size: int = 3, + sigma: float = 0.5, + dim: int = 2, + ): + super().__init__() + + if isinstance(kernel_size, int): + kernel_size = [kernel_size] * dim + if isinstance(sigma, float): + sigma = [sigma] * dim + + # The gaussian kernel is the product of the + # gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) + + # Make sure sum of values in gaussian kernel equals 1. + kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) + + self.register_buffer("weight", kernel) + self.groups = channels + + if dim == 1: + self.conv = F.conv1d + elif dim == 2: + self.conv = F.conv2d + elif dim == 3: + self.conv = F.conv3d + else: + raise RuntimeError("Only 1, 2 and 3 dimensions are supported. Received {}.".format(dim)) + + def forward(self, input): + """ + Arguments: + Apply gaussian filter to input. + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. 
+ """ + return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups) diff --git a/diffusers3/pipelines/stable_diffusion_diffedit/__init__.py b/diffusers3/pipelines/stable_diffusion_diffedit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e2145edb96c6be124abf9e9a21b9a5e8a3f3d641 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_diffedit/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py b/diffusers3/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py new file mode 100644 index 0000000000000000000000000000000000000000..2b86470dbff1ac943d314e713e3d23eba2fdd011 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py @@ -0,0 +1,1531 @@ +# Copyright 2024 DiffEdit Authors and Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...configuration_utils import FrozenDict +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class DiffEditInversionPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + latents (`torch.Tensor`) + inverted latents tensor + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `num_timesteps * batch_size` or numpy array of shape `(num_timesteps, + batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the + diffusion pipeline. + """ + + latents: torch.Tensor + images: Union[List[PIL.Image.Image], np.ndarray] + + +EXAMPLE_DOC_STRING = """ + + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionDiffEditPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" + + >>> init_image = download_image(img_url).resize((768, 768)) + + >>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 + ... ) + + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.enable_model_cpu_offload() + + >>> mask_prompt = "A bowl of fruits" + >>> prompt = "A bowl of pears" + + >>> mask_image = pipeline.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt) + >>> image_latents = pipeline.invert(image=init_image, prompt=mask_prompt).latents + >>> image = pipeline(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0] + ``` +""" + +EXAMPLE_INVERT_DOC_STRING = """ + ```py + >>> import PIL + >>> import requests + >>> import torch + >>> from io import BytesIO + + >>> from diffusers import StableDiffusionDiffEditPipeline + + + >>> def download_image(url): + ... response = requests.get(url) + ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") + + + >>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" + + >>> init_image = download_image(img_url).resize((768, 768)) + + >>> pipeline = StableDiffusionDiffEditPipeline.from_pretrained( + ... 
"stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 + ... ) + + >>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) + >>> pipeline.enable_model_cpu_offload() + + >>> prompt = "A bowl of fruits" + + >>> inverted_latents = pipeline.invert(image=init_image, prompt=prompt).latents + ``` +""" + + +def auto_corr_loss(hidden_states, generator=None): + reg_loss = 0.0 + for i in range(hidden_states.shape[0]): + for j in range(hidden_states.shape[1]): + noise = hidden_states[i : i + 1, j : j + 1, :, :] + while True: + roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item() + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2 + reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2 + + if noise.shape[2] <= 8: + break + noise = torch.nn.functional.avg_pool2d(noise, kernel_size=2) + return reg_loss + + +def kl_divergence(hidden_states): + return hidden_states.var() + hidden_states.mean() ** 2 - 1 - torch.log(hidden_states.var() + 1e-7) + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess +def preprocess(image): + deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead" + deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False) + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + w, h = image[0].size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + + image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = 2.0 * image - 1.0 + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + image = torch.cat(image, dim=0) + return image + + +def preprocess_mask(mask, batch_size: int = 1): + if not isinstance(mask, torch.Tensor): + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list): + if isinstance(mask[0], PIL.Image.Image): + mask = [np.array(m.convert("L")).astype(np.float32) / 255.0 for m in mask] + if isinstance(mask[0], np.ndarray): + mask = np.stack(mask, axis=0) if mask[0].ndim < 3 else np.concatenate(mask, axis=0) + mask = torch.from_numpy(mask) + elif isinstance(mask[0], torch.Tensor): + mask = torch.stack(mask, dim=0) if mask[0].ndim < 3 else torch.cat(mask, dim=0) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask shape + if batch_size > 1: + if mask.shape[0] == 1: + mask = torch.cat([mask] * batch_size) + elif mask.shape[0] > 1 and mask.shape[0] != batch_size: + raise ValueError( + f"`mask_image` with batch size {mask.shape[0]} cannot be broadcasted to batch size {batch_size} " + f"inferred by prompt inputs" + ) + + if mask.shape[1] != 1: + raise ValueError(f"`mask_image` must have 1 channel, but has 
{mask.shape[1]} channels") + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("`mask_image` should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + return mask + + +class StableDiffusionDiffEditPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + r""" + + + This is an experimental feature! + + + + Pipeline for text-guided image inpainting using Stable Diffusion and DiffEdit. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading and saving methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + inverse_scheduler ([`DDIMInverseScheduler`]): + A scheduler to be used in combination with `unet` to fill in the unmasked part of the input latents. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "inverse_scheduler"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + inverse_scheduler: DDIMInverseScheduler, + requires_safety_checker: bool = True, + ): + super().__init__() + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration" + " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" + " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" + " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" + " Hub, it would be very nice if you could open a Pull request for the" + " `scheduler/scheduler_config.json` file" + ) + deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["skip_prk_steps"] = True + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. 
If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + inverse_scheduler=inverse_scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if (strength is None) or (strength is not None and (strength < 0 or strength > 1)): + raise ValueError( + f"The value of `strength` should in [0.0, 1.0] but is, but is {strength} of type {type(strength)}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + def check_source_inputs( + self, + source_prompt=None, + source_negative_prompt=None, + source_prompt_embeds=None, + source_negative_prompt_embeds=None, + ): + if source_prompt is not None and source_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `source_prompt`: {source_prompt} and `source_prompt_embeds`: {source_prompt_embeds}." + " Please make sure to only forward one of the two." + ) + elif source_prompt is None and source_prompt_embeds is None: + raise ValueError( + "Provide either `source_image` or `source_prompt_embeds`. Cannot leave all both of the arguments undefined." + ) + elif source_prompt is not None and ( + not isinstance(source_prompt, str) and not isinstance(source_prompt, list) + ): + raise ValueError(f"`source_prompt` has to be of type `str` or `list` but is {type(source_prompt)}") + + if source_negative_prompt is not None and source_negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `source_negative_prompt`: {source_negative_prompt} and `source_negative_prompt_embeds`:" + f" {source_negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if source_prompt_embeds is not None and source_negative_prompt_embeds is not None: + if source_prompt_embeds.shape != source_negative_prompt_embeds.shape: + raise ValueError( + "`source_prompt_embeds` and `source_negative_prompt_embeds` must have the same shape when passed" + f" directly, but got: `source_prompt_embeds` {source_prompt_embeds.shape} !=" + f" `source_negative_prompt_embeds` {source_negative_prompt_embeds.shape}." + ) + + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + + return timesteps, num_inference_steps - t_start + + def get_inverse_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + + # safety for t_start overflow to prevent empty timsteps slice + if t_start == 0: + return self.inverse_scheduler.timesteps, num_inference_steps + timesteps = self.inverse_scheduler.timesteps[:-t_start] + + return timesteps, num_inference_steps - t_start + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents(self, image, batch_size, dtype, device, generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + if image.shape[1] == 4: + latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0) + else: + latents = self.vae.encode(image).latent_dist.sample(generator) + + latents = self.vae.config.scaling_factor * latents + + if batch_size != latents.shape[0]: + if batch_size % latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_latents_per_image = batch_size // latents.shape[0] + latents = torch.cat([latents] * additional_latents_per_image, dim=0) + else: + raise ValueError( + f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + latents = torch.cat([latents], dim=0) + + return latents + + def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int): + pred_type = self.inverse_scheduler.config.prediction_type + alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep] + + beta_prod_t = 1 - alpha_prod_t + + if pred_type == "epsilon": + return model_output + elif pred_type == "sample": + return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5) + elif pred_type == "v_prediction": + return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`" + ) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def generate_mask( + self, + image: Union[torch.Tensor, PIL.Image.Image] = None, + target_prompt: Optional[Union[str, List[str]]] = None, + target_negative_prompt: Optional[Union[str, List[str]]] = None, + target_prompt_embeds: Optional[torch.Tensor] = None, + target_negative_prompt_embeds: Optional[torch.Tensor] = None, + source_prompt: Optional[Union[str, List[str]]] = None, + source_negative_prompt: Optional[Union[str, List[str]]] = None, + source_prompt_embeds: Optional[torch.Tensor] = None, + source_negative_prompt_embeds: Optional[torch.Tensor] = None, + num_maps_per_mask: Optional[int] = 10, + mask_encode_strength: Optional[float] = 0.5, + mask_thresholding_ratio: Optional[float] = 3.0, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "np", + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + ): + r""" + Generate a latent mask given a mask prompt, a target prompt, and an image. + + Args: + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to be used for computing the mask. + target_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide semantic mask generation. If not defined, you need to pass + `prompt_embeds`. + target_negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + target_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + target_negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + source_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide semantic mask generation using DiffEdit. If not defined, you need to + pass `source_prompt_embeds` or `source_image` instead. + source_negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide semantic mask generation away from using DiffEdit. If not defined, you + need to pass `source_negative_prompt_embeds` or `source_image` instead. + source_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings to guide the semantic mask generation. Can be used to easily tweak text + inputs (prompt weighting). 
If not provided, text embeddings are generated from `source_prompt` input + argument. + source_negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings to negatively guide the semantic mask generation. Can be used to easily + tweak text inputs (prompt weighting). If not provided, text embeddings are generated from + `source_negative_prompt` input argument. + num_maps_per_mask (`int`, *optional*, defaults to 10): + The number of noise maps sampled to generate the semantic mask using DiffEdit. + mask_encode_strength (`float`, *optional*, defaults to 0.5): + The strength of the noise maps sampled to generate the semantic mask using DiffEdit. Must be between 0 + and 1. + mask_thresholding_ratio (`float`, *optional*, defaults to 3.0): + The maximum multiple of the mean absolute difference used to clamp the semantic guidance map before + mask binarization. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the + [`~models.attention_processor.AttnProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + + Examples: + + Returns: + `List[PIL.Image.Image]` or `np.array`: + When returning a `List[PIL.Image.Image]`, the list consists of a batch of single-channel binary images + with dimensions `(height // self.vae_scale_factor, width // self.vae_scale_factor)`. If it's + `np.array`, the shape is `(batch_size, height // self.vae_scale_factor, width // + self.vae_scale_factor)`. + """ + + # 1. Check inputs (Provide dummy argument for callback_steps) + self.check_inputs( + target_prompt, + mask_encode_strength, + 1, + target_negative_prompt, + target_prompt_embeds, + target_negative_prompt_embeds, + ) + + self.check_source_inputs( + source_prompt, + source_negative_prompt, + source_prompt_embeds, + source_negative_prompt_embeds, + ) + + if (num_maps_per_mask is None) or ( + num_maps_per_mask is not None and (not isinstance(num_maps_per_mask, int) or num_maps_per_mask <= 0) + ): + raise ValueError( + f"`num_maps_per_mask` has to be a positive integer but is {num_maps_per_mask} of type" + f" {type(num_maps_per_mask)}." + ) + + if mask_thresholding_ratio is None or mask_thresholding_ratio <= 0: + raise ValueError( + f"`mask_thresholding_ratio` has to be positive but is {mask_thresholding_ratio} of type" + f" {type(mask_thresholding_ratio)}." + ) + + # 2. 
Define call parameters + if target_prompt is not None and isinstance(target_prompt, str): + batch_size = 1 + elif target_prompt is not None and isinstance(target_prompt, list): + batch_size = len(target_prompt) + else: + batch_size = target_prompt_embeds.shape[0] + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompts + (cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None) + target_negative_prompt_embeds, target_prompt_embeds = self.encode_prompt( + target_prompt, + device, + num_maps_per_mask, + do_classifier_free_guidance, + target_negative_prompt, + prompt_embeds=target_prompt_embeds, + negative_prompt_embeds=target_negative_prompt_embeds, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + target_prompt_embeds = torch.cat([target_negative_prompt_embeds, target_prompt_embeds]) + + source_negative_prompt_embeds, source_prompt_embeds = self.encode_prompt( + source_prompt, + device, + num_maps_per_mask, + do_classifier_free_guidance, + source_negative_prompt, + prompt_embeds=source_prompt_embeds, + negative_prompt_embeds=source_negative_prompt_embeds, + ) + if do_classifier_free_guidance: + source_prompt_embeds = torch.cat([source_negative_prompt_embeds, source_prompt_embeds]) + + # 4. Preprocess image + image = self.image_processor.preprocess(image).repeat_interleave(num_maps_per_mask, dim=0) + + # 5. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, _ = self.get_timesteps(num_inference_steps, mask_encode_strength, device) + encode_timestep = timesteps[0] + + # 6. Prepare image latents and add noise with specified strength + image_latents = self.prepare_image_latents( + image, batch_size * num_maps_per_mask, self.vae.dtype, device, generator + ) + noise = randn_tensor(image_latents.shape, generator=generator, device=device, dtype=self.vae.dtype) + image_latents = self.scheduler.add_noise(image_latents, noise, encode_timestep) + + latent_model_input = torch.cat([image_latents] * (4 if do_classifier_free_guidance else 2)) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, encode_timestep) + + # 7. Predict the noise residual + prompt_embeds = torch.cat([source_prompt_embeds, target_prompt_embeds]) + noise_pred = self.unet( + latent_model_input, + encode_timestep, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + if do_classifier_free_guidance: + noise_pred_neg_src, noise_pred_source, noise_pred_uncond, noise_pred_target = noise_pred.chunk(4) + noise_pred_source = noise_pred_neg_src + guidance_scale * (noise_pred_source - noise_pred_neg_src) + noise_pred_target = noise_pred_uncond + guidance_scale * (noise_pred_target - noise_pred_uncond) + else: + noise_pred_source, noise_pred_target = noise_pred.chunk(2) + + # 8. 
Compute the mask from the absolute difference of predicted noise residuals + # TODO: Consider smoothing mask guidance map + mask_guidance_map = ( + torch.abs(noise_pred_target - noise_pred_source) + .reshape(batch_size, num_maps_per_mask, *noise_pred_target.shape[-3:]) + .mean([1, 2]) + ) + clamp_magnitude = mask_guidance_map.mean() * mask_thresholding_ratio + semantic_mask_image = mask_guidance_map.clamp(0, clamp_magnitude) / clamp_magnitude + semantic_mask_image = torch.where(semantic_mask_image <= 0.5, 0, 1) + mask_image = semantic_mask_image.cpu().numpy() + + # 9. Convert to Numpy array or PIL. + if output_type == "pil": + mask_image = self.image_processor.numpy_to_pil(mask_image) + + # Offload all models + self.maybe_free_model_hooks() + + return mask_image + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_INVERT_DOC_STRING) + def invert( + self, + prompt: Optional[Union[str, List[str]]] = None, + image: Union[torch.Tensor, PIL.Image.Image] = None, + num_inference_steps: int = 50, + inpaint_strength: float = 0.8, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + decode_latents: bool = False, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + lambda_auto_corr: float = 20.0, + lambda_kl: float = 20.0, + num_reg_steps: int = 0, + num_auto_corr_rolls: int = 5, + ): + r""" + Generate inverted latents given a prompt and image. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to produce the inverted latents guided by `prompt`. + inpaint_strength (`float`, *optional*, defaults to 0.8): + Indicates extent of the noising process to run latent inversion. Must be between 0 and 1. When + `inpaint_strength` is 1, the inversion process is run for the full number of iterations specified in + `num_inference_steps`. `image` is used as a reference for the inversion process, and adding more noise + increases `inpaint_strength`. If `inpaint_strength` is 0, no inpainting occurs. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + decode_latents (`bool`, *optional*, defaults to `False`): + Whether or not to decode the inverted latents into a generated image. Setting this argument to `True` + decodes all inverted latents for each timestep into a list of generated images. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.DiffEditInversionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the + [`~models.attention_processor.AttnProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + lambda_auto_corr (`float`, *optional*, defaults to 20.0): + Lambda parameter to control auto correction. + lambda_kl (`float`, *optional*, defaults to 20.0): + Lambda parameter to control Kullback-Leibler divergence output. + num_reg_steps (`int`, *optional*, defaults to 0): + Number of regularization loss steps. + num_auto_corr_rolls (`int`, *optional*, defaults to 5): + Number of auto correction roll steps. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] or + `tuple`: + If `return_dict` is `True`, + [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is the inverted latents tensors + ordered by increasing noise, and the second is the corresponding decoded images if `decode_latents` is + `True`, otherwise `None`. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + inpaint_strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Preprocess image + image = self.image_processor.preprocess(image) + + # 4. 
Prepare latent variables + num_images_per_prompt = 1 + latents = self.prepare_image_latents( + image, batch_size * num_images_per_prompt, self.vae.dtype, device, generator + ) + + # 5. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 6. Prepare timesteps + self.inverse_scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_inverse_timesteps(num_inference_steps, inpaint_strength, device) + + # 7. Noising loop where we obtain the intermediate noised latent image for each timestep. + num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order + inverted_latents = [] + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # regularization of the noise prediction (not in original code or paper but borrowed from Pix2PixZero) + if num_reg_steps > 0: + with torch.enable_grad(): + for _ in range(num_reg_steps): + if lambda_auto_corr > 0: + for _ in range(num_auto_corr_rolls): + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_ac = auto_corr_loss(var_epsilon, generator=generator) + l_ac.backward() + + grad = var.grad.detach() / num_auto_corr_rolls + noise_pred = noise_pred - lambda_auto_corr * grad + + if lambda_kl > 0: + var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True) + + # Derive epsilon from model output before regularizing to IID standard normal + var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t) + + l_kld = kl_divergence(var_epsilon) + l_kld.backward() + + grad = var.grad.detach() + noise_pred = noise_pred - lambda_kl * grad + + noise_pred = noise_pred.detach() + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample + inverted_latents.append(latents.detach().clone()) + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0 + ): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + assert len(inverted_latents) == len(timesteps) + latents = 
torch.stack(list(reversed(inverted_latents)), 1) + + # 8. Post-processing + image = None + if decode_latents: + image = self.decode_latents(latents.flatten(0, 1)) + + # 9. Convert to PIL. + if decode_latents and output_type == "pil": + image = self.image_processor.numpy_to_pil(image) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (latents, image) + + return DiffEditInversionPipelineOutput(latents=latents, images=image) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + mask_image: Union[torch.Tensor, PIL.Image.Image] = None, + image_latents: Union[torch.Tensor, PIL.Image.Image] = None, + inpaint_strength: Optional[float] = 0.8, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + mask_image (`PIL.Image.Image`): + `Image` or tensor representing an image batch to mask the generated image. White pixels in the mask are + repainted, while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a + single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, 1, H, W)`. + image_latents (`PIL.Image.Image` or `torch.Tensor`): + Partially noised image latents from the inversion process to be used as inputs for image generation. + inpaint_strength (`float`, *optional*, defaults to 0.8): + Indicates extent to inpaint the masked area. Must be between 0 and 1. When `inpaint_strength` is 1, the + denoising process is run on the masked area for the full number of iterations specified in + `num_inference_steps`. `image_latents` is used as a reference for the masked area, and adding more + noise to a region increases `inpaint_strength`. If `inpaint_strength` is 0, no inpainting occurs. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + + # 1. Check inputs + self.check_inputs( + prompt, + inpaint_strength, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + if mask_image is None: + raise ValueError( + "`mask_image` input cannot be undefined. Use `generate_mask()` to compute `mask_image` from text prompts." + ) + if image_latents is None: + raise ValueError( + "`image_latents` input cannot be undefined. Use `invert()` to compute `image_latents` from input images." + ) + + # 2. 
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess mask + mask_image = preprocess_mask(mask_image, batch_size) + latent_height, latent_width = mask_image.shape[-2:] + mask_image = torch.cat([mask_image] * num_images_per_prompt) + mask_image = mask_image.to(device=device, dtype=prompt_embeds.dtype) + + # 5. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, inpaint_strength, device) + + # 6. Preprocess image latents + if isinstance(image_latents, list) and any(isinstance(l, torch.Tensor) and l.ndim == 5 for l in image_latents): + image_latents = torch.cat(image_latents).detach() + elif isinstance(image_latents, torch.Tensor) and image_latents.ndim == 5: + image_latents = image_latents.detach() + else: + image_latents = self.image_processor.preprocess(image_latents).detach() + + latent_shape = (self.vae.config.latent_channels, latent_height, latent_width) + if image_latents.shape[-3:] != latent_shape: + raise ValueError( + f"Each latent image in `image_latents` must have shape {latent_shape}, " + f"but has shape {image_latents.shape[-3:]}" + ) + if image_latents.ndim == 4: + image_latents = image_latents.reshape(batch_size, len(timesteps), *latent_shape) + if image_latents.shape[:2] != (batch_size, len(timesteps)): + raise ValueError( + f"`image_latents` must have batch size {batch_size} with latent images from {len(timesteps)}" + f" timesteps, but has batch size {image_latents.shape[0]} with latent images from" + f" {image_latents.shape[1]} timesteps." + ) + image_latents = image_latents.transpose(0, 1).repeat_interleave(num_images_per_prompt, dim=1) + image_latents = image_latents.to(device=device, dtype=prompt_embeds.dtype) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. 
Denoising loop + latents = image_latents[0].clone() + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # mask with inverted latents from appropriate timestep - use original image latent for last step + latents = latents * mask_image + image_latents[i] * (1 - mask_image) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion_gligen/__init__.py b/diffusers3/pipelines/stable_diffusion_gligen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..147980cbf9e5c3418fc1854787ae37b25e4fed56 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_gligen/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_gligen"] = ["StableDiffusionGLIGENPipeline"] + _import_structure["pipeline_stable_diffusion_gligen_text_image"] = ["StableDiffusionGLIGENTextImagePipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from 
...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_gligen import StableDiffusionGLIGENPipeline + from .pipeline_stable_diffusion_gligen_text_image import StableDiffusionGLIGENTextImagePipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py b/diffusers3/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py new file mode 100644 index 0000000000000000000000000000000000000000..52ccd5612776caca32fd7bfec09297477f49b427 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py @@ -0,0 +1,851 @@ +# Copyright 2024 The GLIGEN Authors and HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention import GatedSelfAttentionDense +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionGLIGENPipeline + >>> from diffusers.utils import load_image + + >>> # Insert objects described by text at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained( + ... "masterful/gligen-1-4-inpainting-text-box", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> input_image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" + ... ) + >>> prompt = "a birthday cake" + >>> boxes = [[0.2676, 0.6088, 0.4773, 0.7183]] + >>> phrases = ["a birthday cake"] + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_inpaint_image=input_image, + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... 
).images + + >>> images[0].save("./gligen-1-4-inpainting-text-box.jpg") + + >>> # Generate an image described by the prompt and + >>> # insert objects described by text at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENPipeline.from_pretrained( + ... "masterful/gligen-1-4-generation-text-box", variant="fp16", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a waterfall and a modern high speed train running through the tunnel in a beautiful forest with fall foliage" + >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]] + >>> phrases = ["a waterfall", "a modern high speed train running through the tunnel"] + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-1-4-generation-text-box.jpg") + ``` +""" + + +class StableDiffusionGLIGENPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion with Grounded-Language-to-Image Generation (GLIGEN). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + _optional_components = ["safety_checker", "feature_extractor"] + model_cpu_offload_seq = "text_encoder->unet->vae" + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
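+ # `prompt_embeds[-1]` above is the `hidden_states` tuple returned when
+ # `output_hidden_states=True`, so `-(clip_skip + 1)` selects the hidden state
+ # `clip_skip` layers before the final one (clip_skip=1 -> penultimate layer).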
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + gligen_phrases, + gligen_boxes, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + if len(gligen_phrases) != len(gligen_boxes): + raise ValueError( + "length of `gligen_phrases` and `gligen_boxes` has to be same, but" + f" got: `gligen_phrases` {len(gligen_phrases)} != `gligen_boxes` {len(gligen_boxes)}" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def enable_fuser(self, enabled=True): + for module in self.unet.modules(): + if type(module) is GatedSelfAttentionDense: + module.enabled = enabled + + def draw_inpaint_mask_from_boxes(self, boxes, size): + inpaint_mask = torch.ones(size[0], size[1]) + for box in boxes: + x0, x1 = box[0] * size[0], box[2] * size[0] + y0, y1 = box[1] * size[1], box[3] * size[1] + inpaint_mask[int(y0) : int(y1), int(x0) : int(x1)] = 0 + return inpaint_mask + + def crop(self, im, new_width, new_height): + width, height = im.size + left = (width - new_width) / 2 + top = (height - new_height) / 2 + right = (width + new_width) / 2 + bottom = (height + new_height) / 2 + return im.crop((left, top, right, bottom)) + + def target_size_center_crop(self, im, new_hw): + width, height = im.size + if width != height: + im = self.crop(im, min(height, width), min(height, width)) + return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + gligen_scheduled_sampling_beta: float = 0.3, + gligen_phrases: List[str] = None, + gligen_boxes: List[List[float]] = None, + gligen_inpaint_image: Optional[PIL.Image.Image] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. 
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + gligen_phrases (`List[str]`): + The phrases to guide what to include in each of the regions defined by the corresponding + `gligen_boxes`. There should only be one phrase per bounding box. + gligen_boxes (`List[List[float]]`): + The bounding boxes that identify rectangular regions of the image that are going to be filled with the + content described by the corresponding `gligen_phrases`. Each rectangular box is defined as a + `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. + gligen_inpaint_image (`PIL.Image.Image`, *optional*): + The input image, if provided, is inpainted with objects described by the `gligen_boxes` and + `gligen_phrases`. Otherwise, it is treated as a generation task on a blank input image. + gligen_scheduled_sampling_beta (`float`, defaults to 0.3): + Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image + Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for + scheduled sampling during inference for improved quality and controllability. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + gligen_phrases, + gligen_boxes, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. 
Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 5.1 Prepare GLIGEN variables + max_objs = 30 + if len(gligen_boxes) > max_objs: + warnings.warn( + f"More that {max_objs} objects found. Only first {max_objs} objects will be processed.", + FutureWarning, + ) + gligen_phrases = gligen_phrases[:max_objs] + gligen_boxes = gligen_boxes[:max_objs] + # prepare batched input to the GLIGENTextBoundingboxProjection (boxes, phrases, mask) + # Get tokens for phrases from pre-trained CLIPTokenizer + tokenizer_inputs = self.tokenizer(gligen_phrases, padding=True, return_tensors="pt").to(device) + # For the token, we use the same pre-trained text encoder + # to obtain its text feature + _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output + n_objs = len(gligen_boxes) + # For each entity, described in phrases, is denoted with a bounding box, + # we represent the location information as (xmin,ymin,xmax,ymax) + boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) + boxes[:n_objs] = torch.tensor(gligen_boxes) + text_embeddings = torch.zeros( + max_objs, self.unet.config.cross_attention_dim, device=device, dtype=self.text_encoder.dtype + ) + text_embeddings[:n_objs] = _text_embeddings + # Generate a mask for each object that is entity described by phrases + masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + masks[:n_objs] = 1 + + repeat_batch = batch_size * num_images_per_prompt + boxes = boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone() + text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone() + masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone() + if do_classifier_free_guidance: + repeat_batch = repeat_batch * 2 + boxes = torch.cat([boxes] * 2) + text_embeddings = torch.cat([text_embeddings] * 2) + masks = torch.cat([masks] * 2) + masks[: repeat_batch // 2] = 0 + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + cross_attention_kwargs["gligen"] = {"boxes": boxes, "positive_embeddings": text_embeddings, "masks": masks} + + # Prepare latent variables for GLIGEN inpainting + if gligen_inpaint_image is not None: + # if the given input image is not of the same size as expected by VAE + # center crop and resize the input image to expected shape + if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): + gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) + # Convert a single image into a batch of images with a batch size of 1 + # The resulting shape becomes (1, C, H, W), where C is the number of channels, + # and H and W are the height and width of the image. 
+ # scales the pixel values to a range [-1, 1] + gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) + gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) + # Run AutoEncoder to get corresponding latents + gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() + gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent + # Generate an inpainting mask + # pixel value = 0, where the object is present (defined by bounding boxes above) + # 1, everywhere else + gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) + gligen_inpaint_mask = gligen_inpaint_mask.to( + dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device + ) + gligen_inpaint_mask = gligen_inpaint_mask[None, None] + gligen_inpaint_mask_addition = torch.cat( + (gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1 + ) + # Convert a single mask into a batch of masks with a batch size of 1 + gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() + + num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps)) + self.enable_fuser(True) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Scheduled sampling + if i == num_grounding_steps: + self.enable_fuser(False) + + if latents.shape[1] != 4: + latents = torch.randn_like(latents[:, :4]) + + if gligen_inpaint_image is not None: + gligen_inpaint_latent_with_noise = ( + self.scheduler.add_noise( + gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t]) + ) + .expand(latents.shape[0], -1, -1, -1) + .clone() + ) + latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * ( + 1 - gligen_inpaint_mask + ) + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if gligen_inpaint_image is not None: + latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, 
device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py b/diffusers3/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py new file mode 100644 index 0000000000000000000000000000000000000000..c6748ad418fe5f7da297135b3c49d9f6589eeb4b --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py @@ -0,0 +1,1023 @@ +# Copyright 2024 The GLIGEN Authors and HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings +from typing import Any, Callable, Dict, List, Optional, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPProcessor, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention import GatedSelfAttentionDense +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.clip_image_project_model import CLIPImageProjection +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionGLIGENTextImagePipeline + >>> from diffusers.utils import load_image + + >>> # Insert objects described by image at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( + ... "anhnct/Gligen_Inpainting_Text_Image", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> input_image = load_image( + ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" + ... ) + >>> prompt = "a backpack" + >>> boxes = [[0.2676, 0.4088, 0.4773, 0.7183]] + >>> phrases = None + >>> gligen_image = load_image( + ... 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/backpack.jpeg" + ... ) + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_inpaint_image=input_image, + ... gligen_boxes=boxes, + ... gligen_images=[gligen_image], + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-inpainting-text-image-box.jpg") + + >>> # Generate an image described by the prompt and + >>> # insert objects described by text and image at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( + ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a flower sitting on the beach" + >>> boxes = [[0.0, 0.09, 0.53, 0.76]] + >>> phrases = ["flower"] + >>> gligen_image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/pexels-pixabay-60597.jpg" + ... ) + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=phrases, + ... gligen_images=[gligen_image], + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-generation-text-image-box.jpg") + + >>> # Generate an image described by the prompt and + >>> # transfer style described by image at the region defined by bounding boxes + >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( + ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a dragon flying on the sky" + >>> boxes = [[0.4, 0.2, 1.0, 0.8], [0.0, 1.0, 0.0, 1.0]] # Set `[0.0, 1.0, 0.0, 1.0]` for the style + + >>> gligen_image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" + ... ) + + >>> gligen_placeholder = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" + ... ) + + >>> images = pipe( + ... prompt=prompt, + ... gligen_phrases=[ + ... "dragon", + ... "placeholder", + ... ], # Can use any text instead of `placeholder` token, because we will use mask here + ... gligen_images=[ + ... gligen_placeholder, + ... gligen_image, + ... ], # Can use any image in gligen_placeholder, because we will use mask here + ... input_phrases_mask=[1, 0], # Set 0 for the placeholder token + ... input_images_mask=[0, 1], # Set 0 for the placeholder image + ... gligen_boxes=boxes, + ... gligen_scheduled_sampling_beta=1, + ... output_type="pil", + ... num_inference_steps=50, + ... ).images + + >>> images[0].save("./gligen-generation-text-image-box-style-transfer.jpg") + ``` +""" + + +class StableDiffusionGLIGENTextImagePipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion with Grounded-Language-to-Image Generation (GLIGEN). + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + processor ([`~transformers.CLIPProcessor`]): + A `CLIPProcessor` to procces reference image. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + image_project ([`CLIPImageProjection`]): + A `CLIPImageProjection` to project image embedding into phrases embedding space. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + processor: CLIPProcessor, + image_encoder: CLIPVisionModelWithProjection, + image_project: CLIPImageProjection, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + image_encoder=image_encoder, + processor=processor, + image_project=image_project, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
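+ # The safety checker returns the images (with any flagged image blacked out)
+ # alongside a per-image `has_nsfw_concept` flag; `__call__` later uses these
+ # flags to decide which outputs to denormalize during post-processing.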
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def enable_fuser(self, enabled=True): + for module in self.unet.modules(): + if type(module) is GatedSelfAttentionDense: + module.enabled = enabled + + def draw_inpaint_mask_from_boxes(self, boxes, size): + """ + Create an inpainting mask based on given boxes. This function generates an inpainting mask using the provided + boxes to mark regions that need to be inpainted. + """ + inpaint_mask = torch.ones(size[0], size[1]) + for box in boxes: + x0, x1 = box[0] * size[0], box[2] * size[0] + y0, y1 = box[1] * size[1], box[3] * size[1] + inpaint_mask[int(y0) : int(y1), int(x0) : int(x1)] = 0 + return inpaint_mask + + def crop(self, im, new_width, new_height): + """ + Crop the input image to the specified dimensions. + """ + width, height = im.size + left = (width - new_width) / 2 + top = (height - new_height) / 2 + right = (width + new_width) / 2 + bottom = (height + new_height) / 2 + return im.crop((left, top, right, bottom)) + + def target_size_center_crop(self, im, new_hw): + """ + Crop and resize the image to the target size while keeping the center. + """ + width, height = im.size + if width != height: + im = self.crop(im, min(height, width), min(height, width)) + return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) + + def complete_mask(self, has_mask, max_objs, device): + """ + Based on the input mask corresponding value `0 or 1` for each phrases and image, mask the features + corresponding to phrases and images. + """ + mask = torch.ones(1, max_objs).type(self.text_encoder.dtype).to(device) + if has_mask is None: + return mask + + if isinstance(has_mask, int): + return mask * has_mask + else: + for idx, value in enumerate(has_mask): + mask[0, idx] = value + return mask + + def get_clip_feature(self, input, normalize_constant, device, is_image=False): + """ + Get image and phrases embedding by using CLIP pretrain model. The image embedding is transformed into the + phrases embedding space through a projection. 
+ """ + if is_image: + if input is None: + return None + inputs = self.processor(images=[input], return_tensors="pt").to(device) + inputs["pixel_values"] = inputs["pixel_values"].to(self.image_encoder.dtype) + + outputs = self.image_encoder(**inputs) + feature = outputs.image_embeds + feature = self.image_project(feature).squeeze(0) + feature = (feature / feature.norm()) * normalize_constant + feature = feature.unsqueeze(0) + else: + if input is None: + return None + inputs = self.tokenizer(input, return_tensors="pt", padding=True).to(device) + outputs = self.text_encoder(**inputs) + feature = outputs.pooler_output + return feature + + def get_cross_attention_kwargs_with_grounded( + self, + hidden_size, + gligen_phrases, + gligen_images, + gligen_boxes, + input_phrases_mask, + input_images_mask, + repeat_batch, + normalize_constant, + max_objs, + device, + ): + """ + Prepare the cross-attention kwargs containing information about the grounded input (boxes, mask, image + embedding, phrases embedding). + """ + phrases, images = gligen_phrases, gligen_images + images = [None] * len(phrases) if images is None else images + phrases = [None] * len(images) if phrases is None else phrases + + boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) + masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + + text_features = [] + image_features = [] + for phrase, image in zip(phrases, images): + text_features.append(self.get_clip_feature(phrase, normalize_constant, device, is_image=False)) + image_features.append(self.get_clip_feature(image, normalize_constant, device, is_image=True)) + + for idx, (box, text_feature, image_feature) in enumerate(zip(gligen_boxes, text_features, image_features)): + boxes[idx] = torch.tensor(box) + masks[idx] = 1 + if text_feature is not None: + phrases_embeddings[idx] = text_feature + phrases_masks[idx] = 1 + if image_feature is not None: + image_embeddings[idx] = image_feature + image_masks[idx] = 1 + + input_phrases_mask = self.complete_mask(input_phrases_mask, max_objs, device) + phrases_masks = phrases_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_phrases_mask + input_images_mask = self.complete_mask(input_images_mask, max_objs, device) + image_masks = image_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_images_mask + boxes = boxes.unsqueeze(0).repeat(repeat_batch, 1, 1) + masks = masks.unsqueeze(0).repeat(repeat_batch, 1) + phrases_embeddings = phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) + image_embeddings = image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) + + out = { + "boxes": boxes, + "masks": masks, + "phrases_masks": phrases_masks, + "image_masks": image_masks, + "phrases_embeddings": phrases_embeddings, + "image_embeddings": image_embeddings, + } + + return out + + def get_cross_attention_kwargs_without_grounded(self, hidden_size, repeat_batch, max_objs, device): + """ + Prepare the cross-attention kwargs without information about the grounded input (boxes, mask, image embedding, + phrases embedding) (All are zero tensor). 
+ """ + boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) + masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) + phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) + + out = { + "boxes": boxes.unsqueeze(0).repeat(repeat_batch, 1, 1), + "masks": masks.unsqueeze(0).repeat(repeat_batch, 1), + "phrases_masks": phrases_masks.unsqueeze(0).repeat(repeat_batch, 1), + "image_masks": image_masks.unsqueeze(0).repeat(repeat_batch, 1), + "phrases_embeddings": phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), + "image_embeddings": image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), + } + + return out + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + gligen_scheduled_sampling_beta: float = 0.3, + gligen_phrases: List[str] = None, + gligen_images: List[PIL.Image.Image] = None, + input_phrases_mask: Union[int, List[int]] = None, + input_images_mask: Union[int, List[int]] = None, + gligen_boxes: List[List[float]] = None, + gligen_inpaint_image: Optional[PIL.Image.Image] = None, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + gligen_normalize_constant: float = 28.7, + clip_skip: int = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + gligen_phrases (`List[str]`): + The phrases to guide what to include in each of the regions defined by the corresponding + `gligen_boxes`. There should only be one phrase per bounding box. + gligen_images (`List[PIL.Image.Image]`): + The images to guide what to include in each of the regions defined by the corresponding `gligen_boxes`. 
+ There should only be one image per bounding box + input_phrases_mask (`int` or `List[int]`): + pre phrases mask input defined by the correspongding `input_phrases_mask` + input_images_mask (`int` or `List[int]`): + pre images mask input defined by the correspongding `input_images_mask` + gligen_boxes (`List[List[float]]`): + The bounding boxes that identify rectangular regions of the image that are going to be filled with the + content described by the corresponding `gligen_phrases`. Each rectangular box is defined as a + `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. + gligen_inpaint_image (`PIL.Image.Image`, *optional*): + The input image, if provided, is inpainted with objects described by the `gligen_boxes` and + `gligen_phrases`. Otherwise, it is treated as a generation task on a blank input image. + gligen_scheduled_sampling_beta (`float`, defaults to 0.3): + Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image + Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for + scheduled sampling during inference for improved quality and controllability. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. 
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + gligen_normalize_constant (`float`, *optional*, defaults to 28.7): + The normalize value of the image embedding. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 5.1 Prepare GLIGEN variables + max_objs = 30 + if len(gligen_boxes) > max_objs: + warnings.warn( + f"More that {max_objs} objects found. 
Only first {max_objs} objects will be processed.", + FutureWarning, + ) + gligen_phrases = gligen_phrases[:max_objs] + gligen_boxes = gligen_boxes[:max_objs] + gligen_images = gligen_images[:max_objs] + + repeat_batch = batch_size * num_images_per_prompt + + if do_classifier_free_guidance: + repeat_batch = repeat_batch * 2 + + if cross_attention_kwargs is None: + cross_attention_kwargs = {} + + hidden_size = prompt_embeds.shape[2] + + cross_attention_kwargs["gligen"] = self.get_cross_attention_kwargs_with_grounded( + hidden_size=hidden_size, + gligen_phrases=gligen_phrases, + gligen_images=gligen_images, + gligen_boxes=gligen_boxes, + input_phrases_mask=input_phrases_mask, + input_images_mask=input_images_mask, + repeat_batch=repeat_batch, + normalize_constant=gligen_normalize_constant, + max_objs=max_objs, + device=device, + ) + + cross_attention_kwargs_without_grounded = {} + cross_attention_kwargs_without_grounded["gligen"] = self.get_cross_attention_kwargs_without_grounded( + hidden_size=hidden_size, repeat_batch=repeat_batch, max_objs=max_objs, device=device + ) + + # Prepare latent variables for GLIGEN inpainting + if gligen_inpaint_image is not None: + # if the given input image is not of the same size as expected by VAE + # center crop and resize the input image to expected shape + if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): + gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) + # Convert a single image into a batch of images with a batch size of 1 + # The resulting shape becomes (1, C, H, W), where C is the number of channels, + # and H and W are the height and width of the image. + # scales the pixel values to a range [-1, 1] + gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) + gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) + # Run AutoEncoder to get corresponding latents + gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() + gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent + # Generate an inpainting mask + # pixel value = 0, where the object is present (defined by bounding boxes above) + # 1, everywhere else + gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) + gligen_inpaint_mask = gligen_inpaint_mask.to( + dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device + ) + gligen_inpaint_mask = gligen_inpaint_mask[None, None] + gligen_inpaint_mask_addition = torch.cat( + (gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1 + ) + # Convert a single mask into a batch of masks with a batch size of 1 + gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() + + int(gligen_scheduled_sampling_beta * len(timesteps)) + self.enable_fuser(True) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if latents.shape[1] != 4: + latents = torch.randn_like(latents[:, :4]) + + if gligen_inpaint_image is not None: + gligen_inpaint_latent_with_noise = ( + self.scheduler.add_noise( + gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t]) + ) + .expand(latents.shape[0], -1, -1, -1) + .clone() + ) + latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * ( + 1 - gligen_inpaint_mask + ) + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if gligen_inpaint_image is not None: + latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) + + # predict the noise residual with grounded information + noise_pred_with_grounding = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # predict the noise residual without grounded information + noise_pred_without_grounding = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs_without_grounded, + ).sample + + # perform guidance + if do_classifier_free_guidance: + # Using noise_pred_text from noise residual with grounded information and noise_pred_uncond from noise residual without grounded information + _, noise_pred_text = noise_pred_with_grounding.chunk(2) + noise_pred_uncond, _ = noise_pred_without_grounding.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + else: + noise_pred = noise_pred_with_grounding + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion_k_diffusion/__init__.py b/diffusers3/pipelines/stable_diffusion_k_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb5bf8c229b584c934e33806efa73a3727dbb5b --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_k_diffusion/__init__.py @@ -0,0 +1,62 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + 
DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_k_diffusion_available, + is_k_diffusion_version, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not ( + is_transformers_available() + and is_torch_available() + and is_k_diffusion_available() + and is_k_diffusion_version(">=", "0.0.12") + ): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) +else: + _import_structure["pipeline_stable_diffusion_k_diffusion"] = ["StableDiffusionKDiffusionPipeline"] + _import_structure["pipeline_stable_diffusion_xl_k_diffusion"] = ["StableDiffusionXLKDiffusionPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not ( + is_transformers_available() + and is_torch_available() + and is_k_diffusion_available() + and is_k_diffusion_version(">=", "0.0.12") + ): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * + else: + from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline + from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py b/diffusers3/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py new file mode 100755 index 0000000000000000000000000000000000000000..122701ff923f60458abad380eb614c8df7582d31 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -0,0 +1,670 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
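+
+# Usage note (illustrative sketch only, not part of the original module): the
+# subpackage __init__ above exposes this pipeline only when `torch`,
+# `transformers`, and `k_diffusion` (>= 0.0.12) are importable; otherwise dummy
+# placeholder objects are substituted. Assuming those dependencies are
+# installed, and using the SD 1.5 checkpoint referenced in the class docstring
+# below purely as an example, the pipeline can be driven roughly like this:
+#
+#     import torch
+#     from diffusers3.pipelines.stable_diffusion_k_diffusion import (
+#         StableDiffusionKDiffusionPipeline,
+#     )
+#
+#     pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
+#         "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
+#     ).to("cuda")
+#     pipe.set_scheduler("sample_dpmpp_2m")  # any k_diffusion.sampling.sample_* name
+#     image = pipe(
+#         "a photograph of an astronaut riding a horse",
+#         num_inference_steps=30,
+#         use_karras_sigmas=True,  # "DPM++ 2M Karras"-style sigma schedule
+#     ).images[0]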
+ +import importlib +import inspect +from typing import Callable, List, Optional, Union + +import torch +from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser +from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import LMSDiscreteScheduler +from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class ModelWrapper: + def __init__(self, model, alphas_cumprod): + self.model = model + self.alphas_cumprod = alphas_cumprod + + def apply_model(self, *args, **kwargs): + if len(args) == 3: + encoder_hidden_states = args[-1] + args = args[:2] + if kwargs.get("cond", None) is not None: + encoder_hidden_states = kwargs.pop("cond") + return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample + + +class StableDiffusionKDiffusionPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + + + This is an experimental pipeline and is likely to change in the future. + + + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae, + text_encoder, + tokenizer, + unet, + scheduler, + safety_checker, + feature_extractor, + requires_safety_checker: bool = True, + ): + super().__init__() + + logger.info( + f"{self.__class__} is an experimntal pipeline and is likely to change in the future. We recommend to use" + " this pipeline for fast experimentation / iteration if needed, but advice to rely on existing pipelines" + " as defined in https://huggingface.co/docs/diffusers/api/schedulers#implemented-schedulers for" + " production settings." + ) + + # get correct sigmas from LMS + scheduler = LMSDiscreteScheduler.from_config(scheduler.config) + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.register_to_config(requires_safety_checker=requires_safety_checker) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + model = ModelWrapper(unet, scheduler.alphas_cumprod) + if scheduler.config.prediction_type == "v_prediction": + self.k_diffusion_model = CompVisVDenoiser(model) + else: + self.k_diffusion_model = CompVisDenoiser(model) + + def set_scheduler(self, scheduler_type: str): + library = importlib.import_module("k_diffusion") + sampling = getattr(library, "sampling") + try: + self.sampler = getattr(sampling, scheduler_type) + except Exception: + valid_samplers = [] + for s in dir(sampling): + if "sample_" in s: + valid_samplers.append(s) + + raise ValueError(f"Invalid scheduler type {scheduler_type}. Please choose one of {valid_samplers}.") + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + use_karras_sigmas: Optional[bool] = False, + noise_sampler_seed: Optional[int] = None, + clip_skip: int = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` + is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. 
+ latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Use karras sigmas. For example, specifying `sample_dpmpp_2m` to `set_scheduler` will be equivalent to + `DPM++2M` in stable-diffusion-webui. On top of that, setting this option to True will make it `DPM++2M + Karras`. + noise_sampler_seed (`int`, *optional*, defaults to `None`): + The random seed to use for the noise sampler. If `None`, a random seed will be generated. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = True + if guidance_scale <= 1.0: + raise ValueError("has to use guidance_scale") + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) + + # 5. Prepare sigmas + if use_karras_sigmas: + sigma_min: float = self.k_diffusion_model.sigmas[0].item() + sigma_max: float = self.k_diffusion_model.sigmas[-1].item() + sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) + else: + sigmas = self.scheduler.sigmas + sigmas = sigmas.to(device) + sigmas = sigmas.to(prompt_embeds.dtype) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + latents = latents * sigmas[0] + self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) + self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) + + # 7. Define model function + def model_fn(x, t): + latent_model_input = torch.cat([x] * 2) + t = torch.cat([t] * 2) + + noise_pred = self.k_diffusion_model(latent_model_input, t, cond=prompt_embeds) + + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + return noise_pred + + # 8. 
Run k-diffusion solver + sampler_kwargs = {} + + if "noise_sampler" in inspect.signature(self.sampler).parameters: + min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() + noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) + sampler_kwargs["noise_sampler"] = noise_sampler + + if "generator" in inspect.signature(self.sampler).parameters: + sampler_kwargs["generator"] = generator + + latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py b/diffusers3/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..45f814fd538f4fbe1d9b6fb8d2490e4cb169afde --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_xl_k_diffusion.py @@ -0,0 +1,892 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import importlib +import inspect +from typing import List, Optional, Tuple, Union + +import torch +from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser +from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras +from transformers import ( + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLKDiffusionPipeline + + >>> pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + >>> pipe.set_scheduler("sample_dpmpp_2m_sde") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.ModelWrapper +class ModelWrapper: + def __init__(self, model, alphas_cumprod): + self.model = model + self.alphas_cumprod = alphas_cumprod + + def apply_model(self, *args, **kwargs): + if len(args) == 3: + encoder_hidden_states = args[-1] + args = args[:2] + if kwargs.get("cond", None) is not None: + encoder_hidden_states = kwargs.pop("cond") + return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample + + +class StableDiffusionXLKDiffusionPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL and k-diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. 
Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + ): + super().__init__() + + # get correct sigmas from LMS + scheduler = LMSDiscreteScheduler.from_config(scheduler.config) + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + model = ModelWrapper(unet, scheduler.alphas_cumprod) + if scheduler.config.prediction_type == "v_prediction": + self.k_diffusion_model = CompVisVDenoiser(model) + else: + self.k_diffusion_model = CompVisDenoiser(model) + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.set_scheduler + def set_scheduler(self, scheduler_type: str): + library = importlib.import_module("k_diffusion") + sampling = getattr(library, "sampling") + try: + self.sampler = getattr(sampling, scheduler_type) + except Exception: + valid_samplers = [] + for s in dir(sampling): + if "sample_" in s: + valid_samplers.append(s) + + raise ValueError(f"Invalid scheduler type {scheduler_type}. 
Please choose one of {valid_samplers}.") + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
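+ # guidance is applied only when `guidance_scale > 1` and the UNet has no
+ # `time_cond_proj_dim` (guidance-embedding UNets, e.g. LCM-style, bypass CFG here)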
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + use_karras_sigmas: Optional[bool] = False, + noise_sampler_seed: Optional[int] = None, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. 
If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. 
Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) + + if guidance_scale <= 1.0: + raise ValueError("has to use guidance_scale") + + self._guidance_scale = guidance_scale + self._clip_skip = clip_skip + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = None + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) + + # 5. 
Prepare sigmas + if use_karras_sigmas: + sigma_min: float = self.k_diffusion_model.sigmas[0].item() + sigma_max: float = self.k_diffusion_model.sigmas[-1].item() + sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) + else: + sigmas = self.scheduler.sigmas + sigmas = sigmas.to(dtype=prompt_embeds.dtype, device=device) + + # 6. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + latents = latents * sigmas[0] + + self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) + self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # 8. Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 9. Define model function + def model_fn(x, t): + latent_model_input = torch.cat([x] * 2) + t = torch.cat([t] * 2) + + noise_pred = self.k_diffusion_model( + latent_model_input, + t, + cond=prompt_embeds, + timestep_cond=timestep_cond, + added_cond_kwargs=added_cond_kwargs, + ) + + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + return noise_pred + + # 10. 
Run k-diffusion solver + sampler_kwargs = {} + + if "noise_sampler" in inspect.signature(self.sampler).parameters: + min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() + noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) + sampler_kwargs["noise_sampler"] = noise_sampler + + if "generator" in inspect.signature(self.sampler).parameters: + sampler_kwargs["generator"] = generator + + latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_ldm3d/__init__.py b/diffusers3/pipelines/stable_diffusion_ldm3d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dae2affddd1fd5952f454ed9cee906277dcceb16 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_ldm3d/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py b/diffusers3/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py new file mode 100644 index 0000000000000000000000000000000000000000..251ec12d66ab5942105ef25baac98c6b6966be52 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py @@ -0,0 +1,1013 @@ +# Copyright 2024 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessorLDM3D +from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```python + >>> from diffusers import StableDiffusionLDM3DPipeline + + >>> pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c") + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> output = pipe(prompt) + >>> rgb_image, depth_image = output.rgb, output.depth + >>> rgb_image[0].save("astronaut_ldm3d_rgb.jpg") + >>> depth_image[0].save("astronaut_ldm3d_depth.png") + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. 
+ num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +@dataclass +class LDM3DPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + rgb (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + depth (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`List[bool]`) + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. + """ + + rgb: Union[List[PIL.Image.Image], np.ndarray] + depth: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +class StableDiffusionLDM3DPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image and 3D generation using LDM3D. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
+ + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection], + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + rgb_feature_extractor_input = feature_extractor_input[0] + safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 49, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 5.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. 
Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return ((rgb, depth), has_nsfw_concept) + + return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept) 
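A minimal usage sketch for the pipeline file that ends here, added for orientation only. The import path and class name below are placeholders (the actual class is declared earlier in this file and is not repeated in this hunk), and the checkpoint id is a stand-in; only the call defaults visible above (num_inference_steps=49, guidance_scale=5.0, so classifier-free guidance is active) and the LDM3DPipelineOutput fields rgb, depth, and nsfw_content_detected are taken from the code itself.

import torch
# Hypothetical alias for the RGB+depth pipeline class defined earlier in this file.
from diffusers3.pipelines import SomeRGBDepthPipeline  # placeholder import path

pipe = SomeRGBDepthPipeline.from_pretrained(
    "path/or/repo-id-of-a-compatible-checkpoint",  # placeholder checkpoint id
    torch_dtype=torch.float16,
).to("cuda")

out = pipe("a photo of a living room", num_inference_steps=49, guidance_scale=5.0)
rgb_image = out.rgb[0]     # PIL image when output_type="pil"
depth_map = out.depth[0]   # depth channel decoded by VaeImageProcessorLDM3D
print(out.nsfw_content_detected)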
diff --git a/diffusers3/pipelines/stable_diffusion_panorama/__init__.py b/diffusers3/pipelines/stable_diffusion_panorama/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7572db7236cd6bcfd7dd032abcb29fd5f67cf1c --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_panorama/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py b/diffusers3/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py new file mode 100644 index 0000000000000000000000000000000000000000..96fba06f92a2b35827542bba0ae9d7dad98b7be7 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py @@ -0,0 +1,1168 @@ +# Copyright 2024 MultiDiffusion Authors and The HuggingFace Team. All rights reserved." +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
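A short, hedged sketch of how the lazy __init__.py in the preceding hunk behaves from the caller's side; the package path simply mirrors the file paths in this diff and is illustrative, not a tested entry point.

import importlib

# Importing the package runs the __init__.py above, which installs a _LazyModule
# in sys.modules; the heavy pipeline module is not imported at this point.
pkg = importlib.import_module("diffusers3.pipelines.stable_diffusion_panorama")

# First attribute access triggers the lazy import of pipeline_stable_diffusion_panorama
# (or resolves to a dummy object when torch/transformers are unavailable).
StableDiffusionPanoramaPipeline = getattr(pkg, "StableDiffusionPanoramaPipeline")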
+ +import copy +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import DDIMScheduler +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler + + >>> model_ckpt = "stabilityai/stable-diffusion-2-base" + >>> scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") + >>> pipe = StableDiffusionPanoramaPipeline.from_pretrained( + ... model_ckpt, scheduler=scheduler, torch_dtype=torch.float16 + ... ) + + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of the dolomites" + >>> image = pipe(prompt).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. 
If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionPanoramaPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionLoraLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using MultiDiffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. 
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: DDIMScheduler, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # 
Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + def decode_latents_with_padding(self, latents: torch.Tensor, padding: int = 8) -> torch.Tensor: + """ + Decode the given latents with padding for circular inference. + + Args: + latents (torch.Tensor): The input latents to decode. + padding (int, optional): The number of latents to add on each side for padding. Defaults to 8. + + Returns: + torch.Tensor: The decoded image with padding removed. + + Notes: + - The padding is added to remove boundary artifacts and improve the output quality. + - This would slightly increase the memory usage. + - The padding pixels are then removed from the decoded image. + + """ + latents = 1 / self.vae.config.scaling_factor * latents + latents_left = latents[..., :padding] + latents_right = latents[..., -padding:] + latents = torch.cat((latents_right, latents, latents_left), axis=-1) + image = self.vae.decode(latents, return_dict=False)[0] + padding_pix = self.vae_scale_factor * padding + image = image[..., padding_pix:-padding_pix] + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + def get_views( + self, + panorama_height: int, + panorama_width: int, + window_size: int = 64, + stride: int = 8, + circular_padding: bool = False, + ) -> List[Tuple[int, int, int, int]]: + """ + Generates a list of views based on the given parameters. Here, we define the mappings F_i (see Eq. 7 in the + MultiDiffusion paper https://arxiv.org/abs/2302.08113). If panorama's height/width < window_size, num_blocks of + height/width should return 1. + + Args: + panorama_height (int): The height of the panorama. + panorama_width (int): The width of the panorama. + window_size (int, optional): The size of the window. Defaults to 64. + stride (int, optional): The stride value. Defaults to 8. + circular_padding (bool, optional): Whether to apply circular padding. Defaults to False. + + Returns: + List[Tuple[int, int, int, int]]: A list of tuples representing the views. Each tuple contains four integers + representing the start and end coordinates of the window in the panorama. 
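+
+        Illustrative example (numbers follow from the defaults of `__call__`): a 512x2048 output corresponds
+        to a 64x256 latent grid, so with `window_size=64`, `stride=8` and `circular_padding=False` this
+        method returns 1 * 25 = 25 views of the form `(0, 64, w, w + 64)` for `w = 0, 8, ..., 192`.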
+ + """ + panorama_height /= 8 + panorama_width /= 8 + num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1 + if circular_padding: + num_blocks_width = panorama_width // stride if panorama_width > window_size else 1 + else: + num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1 + total_num_blocks = int(num_blocks_height * num_blocks_width) + views = [] + for i in range(total_num_blocks): + h_start = int((i // num_blocks_width) * stride) + h_end = h_start + window_size + w_start = int((i % num_blocks_width) * stride) + w_end = w_start + window_size + views.append((h_start, h_end, w_start, w_end)) + return views + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def clip_skip(self): + return self._clip_skip + + @property + def do_classifier_free_guidance(self): + return False + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = 512, + width: Optional[int] = 2048, + num_inference_steps: int = 50, + timesteps: List[int] = None, + guidance_scale: float = 7.5, + view_batch_size: int = 1, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + circular_padding: bool = False, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs: Any, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 2048): + The width in pixels of the generated image. The width is kept high because the pipeline is supposed + generate panorama-like images. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + The timesteps at which to generate the images. If not specified, then the default timestep spacing + strategy of the scheduler is used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
+ view_batch_size (`int`, *optional*, defaults to 1): + The batch size to denoise split views. For some GPUs with high performance, higher view batch size can + speedup the generation and increase the VRAM usage. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + A rescaling factor for the guidance embeddings. A value of 0.0 means no rescaling is applied. + circular_padding (`bool`, *optional*, defaults to `False`): + If set to `True`, circular padding is applied to ensure there are no stitching artifacts. Circular + padding allows the model to seamlessly generate a transition from the rightmost part of the image to + the leftmost part, maintaining consistency in a 360-degree sense. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List[str]`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Define panorama grid and initialize views for synthesis. + # prepare batch grid + views = self.get_views(height, width, circular_padding=circular_padding) + views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] + views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)] * len(views_batch) + count = torch.zeros_like(latents) + value = torch.zeros_like(latents) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 8. Denoising loop + # Each denoising step also includes refinement of the latents with respect to the + # views. + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + count.zero_() + value.zero_() + + # generate views + # Here, we iterate through different spatial crops of the latents and denoise them. These + # denoised (latent) crops are then averaged to produce the final latent + # for the current timestep via MultiDiffusion. Please see Sec. 
4.1 in the + # MultiDiffusion paper for more details: https://arxiv.org/abs/2302.08113 + # Batch views denoise + for j, batch_view in enumerate(views_batch): + vb_size = len(batch_view) + # get the latents corresponding to the current view coordinates + if circular_padding: + latents_for_view = [] + for h_start, h_end, w_start, w_end in batch_view: + if w_end > latents.shape[3]: + # Add circular horizontal padding + latent_view = torch.cat( + ( + latents[:, :, h_start:h_end, w_start:], + latents[:, :, h_start:h_end, : w_end - latents.shape[3]], + ), + axis=-1, + ) + else: + latent_view = latents[:, :, h_start:h_end, w_start:w_end] + latents_for_view.append(latent_view) + latents_for_view = torch.cat(latents_for_view) + else: + latents_for_view = torch.cat( + [ + latents[:, :, h_start:h_end, w_start:w_end] + for h_start, h_end, w_start, w_end in batch_view + ] + ) + + # rematch block's scheduler status + self.scheduler.__dict__.update(views_scheduler_status[j]) + + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + latents_for_view.repeat_interleave(2, dim=0) + if do_classifier_free_guidance + else latents_for_view + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # repeat prompt_embeds for batch + prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds_input, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents_denoised_batch = self.scheduler.step( + noise_pred, t, latents_for_view, **extra_step_kwargs + ).prev_sample + + # save views scheduler status after sample + views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__) + + # extract value from batch + for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip( + latents_denoised_batch.chunk(vb_size), batch_view + ): + if circular_padding and w_end > latents.shape[3]: + # Case for circular padding + value[:, :, h_start:h_end, w_start:] += latents_view_denoised[ + :, :, h_start:h_end, : latents.shape[3] - w_start + ] + value[:, :, h_start:h_end, : w_end - latents.shape[3]] += latents_view_denoised[ + :, :, h_start:h_end, latents.shape[3] - w_start : + ] + count[:, :, h_start:h_end, w_start:] += 1 + count[:, :, h_start:h_end, : w_end - latents.shape[3]] += 1 + else: + value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised + count[:, :, h_start:h_end, w_start:w_end] += 1 + + # take the MultiDiffusion step. Eq. 
5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113 + latents = torch.where(count > 0, value / count, value) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type != "latent": + if circular_padding: + image = self.decode_latents_with_padding(latents) + else: + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/stable_diffusion_safe/__init__.py b/diffusers3/pipelines/stable_diffusion_safe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b432b9418c46257913d81c5bf56edc0f1fa74ed1 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_safe/__init__.py @@ -0,0 +1,99 @@ +from dataclasses import dataclass +from enum import Enum +from typing import TYPE_CHECKING, List, Optional, Union + +import numpy as np +import PIL +from PIL import Image + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + BaseOutput, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +@dataclass +class SafetyConfig(object): + WEAK = { + "sld_warmup_steps": 15, + "sld_guidance_scale": 20, + "sld_threshold": 0.0, + "sld_momentum_scale": 0.0, + "sld_mom_beta": 0.0, + } + MEDIUM = { + "sld_warmup_steps": 10, + "sld_guidance_scale": 1000, + "sld_threshold": 0.01, + "sld_momentum_scale": 0.3, + "sld_mom_beta": 0.4, + } + STRONG = { + "sld_warmup_steps": 7, + "sld_guidance_scale": 2000, + "sld_threshold": 0.025, + "sld_momentum_scale": 0.5, + "sld_mom_beta": 0.7, + } + MAX = { + "sld_warmup_steps": 0, + "sld_guidance_scale": 5000, + "sld_threshold": 1.0, + "sld_momentum_scale": 0.5, + "sld_mom_beta": 0.7, + } + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {} + +_additional_imports.update({"SafetyConfig": SafetyConfig}) + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure.update( + { + "pipeline_output": ["StableDiffusionSafePipelineOutput"], + 
"pipeline_stable_diffusion_safe": ["StableDiffusionPipelineSafe"], + "safety_checker": ["StableDiffusionSafetyChecker"], + } + ) + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_output import StableDiffusionSafePipelineOutput + from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe + from .safety_checker import SafeStableDiffusionSafetyChecker + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_safe/pipeline_output.py b/diffusers3/pipelines/stable_diffusion_safe/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..69a064d6638df556d3007f59daf7e767ec7c298b --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_safe/pipeline_output.py @@ -0,0 +1,34 @@ +from dataclasses import dataclass +from typing import List, Optional, Union + +import numpy as np +import PIL.Image + +from ...utils import ( + BaseOutput, +) + + +@dataclass +class StableDiffusionSafePipelineOutput(BaseOutput): + """ + Output class for Safe Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + nsfw_content_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, or `None` if safety checking could not be performed. + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images that were flagged by the safety checker any may contain "not-safe-for-work" + (nsfw) content, or `None` if no safety check was performed or no images were flagged. 
+ applied_safety_concept (`str`) + The safety concept that was applied for safety guidance, or `None` if safety guidance was disabled + """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + unsafe_images: Optional[Union[List[PIL.Image.Image], np.ndarray]] + applied_safety_concept: Optional[str] diff --git a/diffusers3/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py b/diffusers3/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py new file mode 100644 index 0000000000000000000000000000000000000000..cd59cf51869d3f7f20c685fd6c0cb3fcc64bc4c5 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py @@ -0,0 +1,769 @@ +import inspect +import warnings +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +from packaging import version +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...configuration_utils import FrozenDict +from ...image_processor import PipelineImageInput +from ...loaders import IPAdapterMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import deprecate, logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import StableDiffusionSafePipelineOutput +from .safety_checker import SafeStableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class StableDiffusionPipelineSafe(DiffusionPipeline, StableDiffusionMixin, IPAdapterMixin): + r""" + Pipeline based on the [`StableDiffusionPipeline`] for text-to-image generation using Safe Latent Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
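+        image_encoder ([`~transformers.CLIPVisionModelWithProjection`], *optional*):
+            Frozen CLIP image encoder used to compute image embeddings when an `ip_adapter_image` is passed to
+            `__call__`; it can be `None` when IP-Adapter inputs are not used.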
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: SafeStableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + safety_concept: Optional[str] = ( + "an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity," + " bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child" + " abuse, brutality, cruelty" + ) + + if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
+ ) + + is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( + version.parse(unet.config._diffusers_version).base_version + ) < version.parse("0.9.0.dev0") + is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 + if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: + deprecation_message = ( + "The configuration file of the unet has set the default `sample_size` to smaller than" + " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the" + " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" + " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" + " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" + " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" + " in the config might lead to incorrect results in future versions. If you have downloaded this" + " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" + " the `unet/config.json` file" + ) + deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) + new_config = dict(unet.config) + new_config["sample_size"] = 64 + unet._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self._safety_text_concept = safety_concept + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + @property + def safety_concept(self): + r""" + Getter method for the safety concept used with SLD + + Returns: + `str`: The text describing the safety concept + """ + return self._safety_text_concept + + @safety_concept.setter + def safety_concept(self, concept): + r""" + Setter method for the safety concept used with SLD + + Args: + concept (`str`): + The text of the new safety concept + """ + self._safety_text_concept = concept + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + enable_safety_guidance, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). 
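+            enable_safety_guidance (`bool`):
+                whether to additionally encode the safety concept text used for safe latent diffusion. With
+                classifier free guidance enabled, the returned embeddings are concatenated as
+                `[uncond, text, safety_concept]` when this is `True` and as `[uncond, text]` otherwise.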
+ """ + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids + + if not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + prompt_embeds = self.text_encoder( + text_input_ids.to(device), + attention_mask=attention_mask, + ) + prompt_embeds = prompt_embeds[0] + + # duplicate text embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + # Encode the safety concept text + if enable_safety_guidance: + safety_concept_input = self.tokenizer( + [self._safety_text_concept], + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0] + + # duplicate safety embeddings for each generation per prompt, using mps friendly method + seq_len = safety_embeddings.shape[1] + safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1) + safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) + + # For classifier free guidance + sld, we need to do three forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing three forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, safety_embeddings]) + + else: + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + return prompt_embeds + + def run_safety_checker(self, image, device, dtype, enable_safety_guidance): + if self.safety_checker is not None: + images = image.copy() + safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + flagged_images = np.zeros((2, *image.shape[1:])) + if any(has_nsfw_concept): + logger.warning( + "Potential NSFW content was detected in one or more images. A black image will be returned" + " instead." + f"{'You may look at this images in the `unsafe_images` variable of the output at your own discretion.' if enable_safety_guidance else 'Try again with a different prompt and/or seed.'}" + ) + for idx, has_nsfw_concept in enumerate(has_nsfw_concept): + if has_nsfw_concept: + flagged_images[idx] = images[idx] + image[idx] = np.zeros(image[idx].shape) # black image + else: + has_nsfw_concept = None + flagged_images = None + return image, has_nsfw_concept, flagged_images + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def perform_safety_guidance( + self, + enable_safety_guidance, + safety_momentum, + noise_guidance, + noise_pred_out, + i, + sld_guidance_scale, + sld_warmup_steps, + sld_threshold, + sld_momentum_scale, + sld_mom_beta, + ): + # Perform SLD guidance + if enable_safety_guidance: + if safety_momentum is None: + safety_momentum = torch.zeros_like(noise_guidance) + noise_pred_text, noise_pred_uncond = noise_pred_out[0], noise_pred_out[1] + noise_pred_safety_concept = noise_pred_out[2] + + # Equation 6 + scale = torch.clamp(torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0) + + # Equation 6 + safety_concept_scale = torch.where( + (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, torch.zeros_like(scale), scale + ) + + # Equation 4 + noise_guidance_safety = torch.mul((noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale) + + # Equation 7 + noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum + + # Equation 8 + safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety + + if i >= sld_warmup_steps: # Warmup + # Equation 3 + noise_guidance = noise_guidance - noise_guidance_safety + return noise_guidance, safety_momentum + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, 
uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + sld_guidance_scale: Optional[float] = 1000, + sld_warmup_steps: Optional[int] = 10, + sld_threshold: Optional[float] = 0.01, + sld_momentum_scale: Optional[float] = 0.3, + sld_mom_beta: Optional[float] = 0.4, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + sld_guidance_scale (`float`, *optional*, defaults to 1000): + If `sld_guidance_scale < 1`, safety guidance is disabled. + sld_warmup_steps (`int`, *optional*, defaults to 10): + Number of warmup steps for safety guidance. SLD is only be applied for diffusion steps greater than + `sld_warmup_steps`. + sld_threshold (`float`, *optional*, defaults to 0.01): + Threshold that separates the hyperplane between appropriate and inappropriate images. + sld_momentum_scale (`float`, *optional*, defaults to 0.3): + Scale of the SLD momentum to be added to the safety guidance at each diffusion step. If set to 0.0, + momentum is disabled. Momentum is built up during warmup for diffusion steps smaller than + `sld_warmup_steps`. + sld_mom_beta (`float`, *optional*, defaults to 0.4): + Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous + momentum is kept. Momentum is built up during warmup for diffusion steps smaller than + `sld_warmup_steps`. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + + Examples: + + ```py + import torch + from diffusers import StableDiffusionPipelineSafe + from diffusers.pipelines.stable_diffusion_safe import SafetyConfig + + pipeline = StableDiffusionPipelineSafe.from_pretrained( + "AIML-TUDA/stable-diffusion-safe", torch_dtype=torch.float16 + ).to("cuda") + prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker" + image = pipeline(prompt=prompt, **SafetyConfig.MEDIUM).images[0] + ``` + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
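+        # When `sld_guidance_scale > 1` and classifier free guidance is active, `_encode_prompt` returns
+        # embeddings concatenated as [uncond, text, safety_concept], so every UNet forward pass below predicts
+        # three noise residuals that are split again with `noise_pred.chunk(3)`.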
+ do_classifier_free_guidance = guidance_scale > 1.0 + + enable_safety_guidance = sld_guidance_scale > 1.0 and do_classifier_free_guidance + if not enable_safety_guidance: + warnings.warn("Safety checker disabled!") + + if ip_adapter_image is not None: + output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True + image_embeds, negative_image_embeds = self.encode_image( + ip_adapter_image, device, num_images_per_prompt, output_hidden_state + ) + if do_classifier_free_guidance: + if enable_safety_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds, image_embeds]) + else: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + + # 3. Encode input prompt + prompt_embeds = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, enable_safety_guidance + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None + + safety_momentum = None + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * (3 if enable_safety_guidance else 2)) + if do_classifier_free_guidance + else latents + ) + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2)) + noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] + + # default classifier free guidance + noise_guidance = noise_pred_text - noise_pred_uncond + + # Perform SLD guidance + if enable_safety_guidance: + if safety_momentum is None: + safety_momentum = torch.zeros_like(noise_guidance) + noise_pred_safety_concept = noise_pred_out[2] + + # Equation 6 + scale = torch.clamp( + torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.0 + ) + + # Equation 6 + safety_concept_scale = torch.where( + (noise_pred_text - noise_pred_safety_concept) >= sld_threshold, + torch.zeros_like(scale), + scale, + ) + + # Equation 4 + noise_guidance_safety = torch.mul( + (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale + ) + + # Equation 7 + noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum + + # Equation 8 + safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety + + if i >= sld_warmup_steps: # Warmup + # Equation 3 + noise_guidance = noise_guidance - noise_guidance_safety + + noise_pred = noise_pred_uncond + guidance_scale * noise_guidance + + # compute the previous noisy sample x_t -> x_t-1 + 
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept, flagged_images = self.run_safety_checker( + image, device, prompt_embeds.dtype, enable_safety_guidance + ) + + # 10. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + if flagged_images is not None: + flagged_images = self.numpy_to_pil(flagged_images) + + if not return_dict: + return ( + image, + has_nsfw_concept, + self._safety_text_concept if enable_safety_guidance else None, + flagged_images, + ) + + return StableDiffusionSafePipelineOutput( + images=image, + nsfw_content_detected=has_nsfw_concept, + applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None, + unsafe_images=flagged_images, + ) diff --git a/diffusers3/pipelines/stable_diffusion_safe/safety_checker.py b/diffusers3/pipelines/stable_diffusion_safe/safety_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..338e4c65c500a902ca483c79e8311e8796cc7c03 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_safe/safety_checker.py @@ -0,0 +1,109 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
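For readers tracing the SLD math in the safe pipeline's denoising loop above (Equations 6, 4, 7, 8 and 3): the standalone sketch below reruns that per-step update on random tensors so the clamping, thresholding and momentum terms can be inspected in isolation. The helper name, toy shapes and parameter values are illustrative only and are not part of this repo or of the diffusers API.

```py
import torch

def sld_guidance_step(noise_pred_uncond, noise_pred_text, noise_pred_safety,
                      safety_momentum, step, guidance_scale=7.5,
                      sld_guidance_scale=2000.0, sld_threshold=0.025,
                      sld_momentum_scale=0.5, sld_mom_beta=0.7, sld_warmup_steps=10):
    """One SLD-guided CFG update on toy tensors (hypothetical helper mirroring the loop above)."""
    noise_guidance = noise_pred_text - noise_pred_uncond  # plain CFG direction
    # Equation 6: clamp the scaled gap between the text and safety-concept predictions ...
    scale = torch.clamp(torch.abs(noise_pred_text - noise_pred_safety) * sld_guidance_scale, max=1.0)
    # ... and zero it wherever the text prediction is already past the safety threshold
    safety_concept_scale = torch.where(
        (noise_pred_text - noise_pred_safety) >= sld_threshold, torch.zeros_like(scale), scale
    )
    # Equation 4: safety guidance direction
    noise_guidance_safety = (noise_pred_safety - noise_pred_uncond) * safety_concept_scale
    # Equation 7: add the accumulated momentum
    noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum
    # Equation 8: update the momentum buffer
    safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety
    # Equation 3: only steer away from the safety concept after the warmup phase
    if step >= sld_warmup_steps:
        noise_guidance = noise_guidance - noise_guidance_safety
    return noise_pred_uncond + guidance_scale * noise_guidance, safety_momentum

# toy usage: three fake epsilon predictions for a 4x64x64 latent
eps_u, eps_t, eps_s = (torch.randn(1, 4, 64, 64) for _ in range(3))
noise_pred, momentum = sld_guidance_step(eps_u, eps_t, eps_s, torch.zeros_like(eps_u), step=12)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```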
+ +import torch +import torch.nn as nn +from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel + +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +def cosine_distance(image_embeds, text_embeds): + normalized_image_embeds = nn.functional.normalize(image_embeds) + normalized_text_embeds = nn.functional.normalize(text_embeds) + return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) + + +class SafeStableDiffusionSafetyChecker(PreTrainedModel): + config_class = CLIPConfig + + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + self.vision_model = CLIPVisionModel(config.vision_config) + self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) + + self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) + self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) + + self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) + self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) + + @torch.no_grad() + def forward(self, clip_input, images): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() + cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() + + result = [] + batch_size = image_embeds.shape[0] + for i in range(batch_size): + result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} + + # increase this value to create a stronger `nfsw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + for concept_idx in range(len(special_cos_dist[0])): + concept_cos = special_cos_dist[i][concept_idx] + concept_threshold = self.special_care_embeds_weights[concept_idx].item() + result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["special_scores"][concept_idx] > 0: + result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) + adjustment = 0.01 + + for concept_idx in range(len(cos_dist[0])): + concept_cos = cos_dist[i][concept_idx] + concept_threshold = self.concept_embeds_weights[concept_idx].item() + result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) + if result_img["concept_scores"][concept_idx] > 0: + result_img["bad_concepts"].append(concept_idx) + + result.append(result_img) + + has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] + + return images, has_nsfw_concepts + + @torch.no_grad() + def forward_onnx(self, clip_input: torch.Tensor, images: torch.Tensor): + pooled_output = self.vision_model(clip_input)[1] # pooled_output + image_embeds = self.visual_projection(pooled_output) + + special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) + cos_dist = cosine_distance(image_embeds, self.concept_embeds) + + # increase this value to create a stronger `nsfw` filter + # at the cost of increasing the possibility of filtering benign images + adjustment = 0.0 + + special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment + # 
special_scores = special_scores.round(decimals=3) + special_care = torch.any(special_scores > 0, dim=1) + special_adjustment = special_care * 0.01 + special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) + + concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment + # concept_scores = concept_scores.round(decimals=3) + has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) + + return images, has_nsfw_concepts diff --git a/diffusers3/pipelines/stable_diffusion_sag/__init__.py b/diffusers3/pipelines/stable_diffusion_sag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..378e0e57817f58a0a28afed5d6110f6ee3effb3a --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_sag/__init__.py @@ -0,0 +1,48 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py b/diffusers3/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py new file mode 100644 index 0000000000000000000000000000000000000000..c32052d2e4d019ef7a5d305381c8063fe78a0991 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py @@ -0,0 +1,954 @@ +# Copyright 2024 Susung Hong and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
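Before the SAG pipeline begins, a note on `SafeStableDiffusionSafetyChecker.forward_onnx` above: the vectorized scoring reduces to two threshold comparisons, plus a 0.01 adjustment added to every regular concept score when any special-care concept fires. The sketch below replays that decision rule on random stand-in tensors; none of these embeddings or thresholds are the real learned buffers.

```py
import torch
import torch.nn.functional as F

torch.manual_seed(0)

# stand-ins for the learned buffers: 3 "special care" and 17 regular concept embeddings
proj_dim = 8
image_embeds = torch.randn(2, proj_dim)                        # batch of 2 projected image embeddings
special_embeds, concept_embeds = torch.randn(3, proj_dim), torch.randn(17, proj_dim)
special_thresholds, concept_thresholds = torch.rand(3), torch.rand(17)

def cosine_distance(a, b):
    # same as the module-level helper above: cosine similarity matrix
    return F.normalize(a) @ F.normalize(b).t()

special_cos = cosine_distance(image_embeds, special_embeds)    # (2, 3)
concept_cos = cosine_distance(image_embeds, concept_embeds)    # (2, 17)

special_scores = special_cos - special_thresholds              # > 0 means a "special care" hit
special_adjustment = torch.any(special_scores > 0, dim=1, keepdim=True).float() * 0.01

concept_scores = concept_cos - concept_thresholds + special_adjustment
has_nsfw = torch.any(concept_scores > 0, dim=1)                # per-image flag
print(has_nsfw)
```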
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +import torch.nn.functional as F +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionPipelineOutput +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionSAGPipeline + + >>> pipe = StableDiffusionSAGPipeline.from_pretrained( + ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, sag_scale=0.75).images[0] + ``` +""" + + +# processes and stores attention probabilities +class CrossAttnStoreProcessor: + def __init__(self): + self.attention_probs = None + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + ): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + self.attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(self.attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +# Modified to get self-attention guidance scale in this paper (https://arxiv.org/pdf/2210.00939.pdf) as an input +class StableDiffusionSAGPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. 
+ text_encoder ([`~transformers.CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + unet ([`UNet2DConditionModel`]): + A `UNet2DConditionModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] + _exclude_from_cpu_offload = ["safety_checker"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + image_encoder: Optional[CLIPVisionModelWithProjection] = None, + requires_safety_checker: bool = True, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, 
output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + image_embeds = [] + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) + single_negative_image_embeds = torch.stack( + [single_negative_image_embeds] * num_images_per_prompt, dim=0 + ) + + if do_classifier_free_guidance: + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) + single_image_embeds = single_image_embeds.to(device) + + image_embeds.append(single_image_embeds) + else: + image_embeds = ip_adapter_image_embeds + return image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + sag_scale: float = 0.75, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + sag_scale (`float`, *optional*, defaults to 0.75): + Chosen between [0, 1.0] for better quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. 
If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): + Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. If not provided, embeddings are computed from the + `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + # 0. 
Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + # and `sag_scale` is` `s` of equation (16) + # of the self-attention guidance paper: https://arxiv.org/pdf/2210.00939.pdf + # `sag_scale = 0` means no self-attention guidance + do_self_attention_guidance = sag_scale > 0.0 + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + ip_adapter_image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + do_classifier_free_guidance, + ) + + if do_classifier_free_guidance: + image_embeds = [] + negative_image_embeds = [] + for tmp_image_embeds in ip_adapter_image_embeds: + single_negative_image_embeds, single_image_embeds = tmp_image_embeds.chunk(2) + image_embeds.append(single_image_embeds) + negative_image_embeds.append(single_negative_image_embeds) + else: + image_embeds = ip_adapter_image_embeds + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + if timesteps.dtype not in [torch.int16, torch.int32, torch.int64]: + raise ValueError( + f"{self.__class__.__name__} does not support using a scheduler of type {self.scheduler.__class__.__name__}. Please make sure to use one of 'DDIMScheduler, PNDMScheduler, DDPMScheduler, DEISMultistepScheduler, UniPCMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler'." + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.1 Add image embeds for IP-Adapter + added_cond_kwargs = ( + {"image_embeds": image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + if do_classifier_free_guidance: + added_uncond_kwargs = ( + {"image_embeds": negative_image_embeds} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None + else None + ) + + # 7. Denoising loop + original_attn_proc = self.unet.attn_processors + store_processor = CrossAttnStoreProcessor() + self.unet.mid_block.attentions[0].transformer_blocks[0].attn1.processor = store_processor + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + map_size = None + + def get_map_size(module, input, output): + nonlocal map_size + map_size = output[0].shape[-2:] + + with self.unet.mid_block.attentions[0].register_forward_hook(get_map_size): + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # perform self-attention guidance with the stored self-attention map + if do_self_attention_guidance: + # classifier-free guidance produces two chunks of attention map + # and we only use unconditional one according to equation (25) + # in https://arxiv.org/pdf/2210.00939.pdf + if do_classifier_free_guidance: + # DDIM-like prediction of x0 + pred_x0 = self.pred_x0(latents, noise_pred_uncond, t) + # get the stored attention maps + uncond_attn, cond_attn = store_processor.attention_probs.chunk(2) + # self-attention-based degrading of latents + degraded_latents = self.sag_masking( + pred_x0, uncond_attn, map_size, t, self.pred_epsilon(latents, noise_pred_uncond, t) + ) + uncond_emb, _ = prompt_embeds.chunk(2) + # forward and give guidance + degraded_pred = self.unet( + degraded_latents, + t, + encoder_hidden_states=uncond_emb, + added_cond_kwargs=added_uncond_kwargs, + ).sample + noise_pred += sag_scale * (noise_pred_uncond - degraded_pred) + else: + # DDIM-like prediction of x0 + pred_x0 = self.pred_x0(latents, noise_pred, t) + # get the stored attention maps + cond_attn = store_processor.attention_probs + # self-attention-based degrading of latents + degraded_latents = self.sag_masking( + pred_x0, cond_attn, map_size, t, self.pred_epsilon(latents, noise_pred, t) + ) + # forward and give guidance + degraded_pred = self.unet( + degraded_latents, + t, + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + ).sample + noise_pred += sag_scale * (noise_pred - degraded_pred) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % 
self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + self.maybe_free_model_hooks() + # make sure to set the original attention processors back + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def sag_masking(self, original_latents, attn_map, map_size, t, eps): + # Same masking process as in SAG paper: https://arxiv.org/pdf/2210.00939.pdf + bh, hw1, hw2 = attn_map.shape + b, latent_channel, latent_h, latent_w = original_latents.shape + h = self.unet.config.attention_head_dim + if isinstance(h, list): + h = h[-1] + + # Produce attention mask + attn_map = attn_map.reshape(b, h, hw1, hw2) + attn_mask = attn_map.mean(1, keepdim=False).sum(1, keepdim=False) > 1.0 + attn_mask = ( + attn_mask.reshape(b, map_size[0], map_size[1]) + .unsqueeze(1) + .repeat(1, latent_channel, 1, 1) + .type(attn_map.dtype) + ) + attn_mask = F.interpolate(attn_mask, (latent_h, latent_w)) + + # Blur according to the self-attention mask + degraded_latents = gaussian_blur_2d(original_latents, kernel_size=9, sigma=1.0) + degraded_latents = degraded_latents * attn_mask + original_latents * (1 - attn_mask) + + # Noise it again to match the noise level + degraded_latents = self.scheduler.add_noise(degraded_latents, noise=eps, timesteps=t[None]) + + return degraded_latents + + # Modified from diffusers.schedulers.scheduling_ddim.DDIMScheduler.step + # Note: there are some schedulers that clip or do not return x_0 (PNDMScheduler, DDIMScheduler, etc.) 
+ def pred_x0(self, sample, model_output, timestep): + alpha_prod_t = self.scheduler.alphas_cumprod[timestep].to(sample.device) + + beta_prod_t = 1 - alpha_prod_t + if self.scheduler.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.scheduler.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.scheduler.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + # predict V + model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`," + " or `v_prediction`" + ) + + return pred_original_sample + + def pred_epsilon(self, sample, model_output, timestep): + alpha_prod_t = self.scheduler.alphas_cumprod[timestep] + + beta_prod_t = 1 - alpha_prod_t + if self.scheduler.config.prediction_type == "epsilon": + pred_eps = model_output + elif self.scheduler.config.prediction_type == "sample": + pred_eps = (sample - (alpha_prod_t**0.5) * model_output) / (beta_prod_t**0.5) + elif self.scheduler.config.prediction_type == "v_prediction": + pred_eps = (beta_prod_t**0.5) * sample + (alpha_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.scheduler.config.prediction_type} must be one of `epsilon`, `sample`," + " or `v_prediction`" + ) + + return pred_eps + + +# Gaussian blur +def gaussian_blur_2d(img, kernel_size, sigma): + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + + x_kernel = pdf / pdf.sum() + x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) + + kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) + kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) + + padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] + + img = F.pad(img, padding, mode="reflect") + img = F.conv2d(img, kernel2d, groups=img.shape[-3]) + + return img diff --git a/diffusers3/pipelines/stable_diffusion_xl/.ipynb_checkpoints/pipeline_stable_diffusion_xl-checkpoint.py b/diffusers3/pipelines/stable_diffusion_xl/.ipynb_checkpoints/pipeline_stable_diffusion_xl-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..787b6fa2612dbbbc47488de0b1c7976616cc48ce --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/.ipynb_checkpoints/pipeline_stable_diffusion_xl-checkpoint.py @@ -0,0 +1,1527 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
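A note on the `pred_x0` / `pred_epsilon` helpers defined at the end of the SAG pipeline above: they invert the forward noising relation x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps for the `epsilon` and `v_prediction` parameterizations. The standalone check below (toy tensors only, no scheduler object involved) verifies that algebra numerically.

```py
import torch

torch.manual_seed(0)
alpha_prod_t = torch.tensor(0.6)                 # cumulative alpha at some timestep
sqrt_a, sqrt_1ma = alpha_prod_t.sqrt(), (1 - alpha_prod_t).sqrt()

x0 = torch.randn(1, 4, 8, 8)
eps = torch.randn_like(x0)
sample = sqrt_a * x0 + sqrt_1ma * eps            # forward noising: x_t

# epsilon parameterization: the model predicts eps, same formula as the pred_x0 branch above
x0_from_eps = (sample - sqrt_1ma * eps) / sqrt_a

# v parameterization: v = sqrt(a)*eps - sqrt(1-a)*x0, same formulas as the v_prediction branches
v = sqrt_a * eps - sqrt_1ma * x0
x0_from_v = sqrt_a * sample - sqrt_1ma * v
eps_from_v = sqrt_1ma * sample + sqrt_a * v

print(torch.allclose(x0_from_eps, x0, atol=1e-5),
      torch.allclose(x0_from_v, x0, atol=1e-5),
      torch.allclose(eps_from_v, eps, atol=1e-5))  # True True True
```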
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import PIL.Image +import numpy as np +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLPipeline + + >>> pipe = StableDiffusionXLPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. + + Args: + mask (_type_): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + + Raises: + ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask + should be in the ``[0, 1]`` range. + TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not + (ot the other way around). + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
+ """ + if isinstance(mask, torch.Tensor): + print("^^^^") + print("mask_1") + if not isinstance(mask, torch.Tensor): + raise TypeError( + f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not" + ) + + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + print("^^^^") + print("mask_2") + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. 
+ + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + + + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + print("***") + print("vae_1") + init_latents = torch.cat(init_latents, dim=0) + else: + print("***") + print("vae_2") + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + + latents = self.scheduler.add_noise(init_latents, noise, timestep) + + return init_latents, noise + + + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + print("latents is None") + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. 
+ + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. 
If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
+ Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. 
Prepare latent variables + + mask = prepare_mask(mask=mask_image) + + sketch_image = self.image_processor.preprocess(sketch_image) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + + init_latents, noise= self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents_with_noise.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_with_noise = self.scheduler.step( + noise_pred, + t, + latents_with_noise, + **extra_step_kwargs, + return_dict=False, + )[0] + + + # masking process + tmp = t.unsqueeze(0) + init_latents_proper = self.scheduler.add_noise( + init_latents, noise, tmp + ).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + latents_with_noise = ( + mask * latents_with_noise + (1 - mask) * init_latents_proper + ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents_with_noise = callback_outputs.pop("latents_with_noise", latents_with_noise) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + 
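+            # `force_upcast` in the SDXL VAE config signals that fp16 decoding can overflow to NaNs,
+            # so decoding is temporarily run in fp32 and the VAE is cast back to fp16 further below.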
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents_with_noise.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents_with_noise.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents_with_noise = latents_with_noise / self.vae.config.scaling_factor + + image = self.vae.decode(latents_with_noise, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents_with_noise + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) + + diff --git a/diffusers3/pipelines/stable_diffusion_xl/__init__.py b/diffusers3/pipelines/stable_diffusion_xl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8088fbcfceba205b9b908613f4ca3fdc579120e8 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/__init__.py @@ -0,0 +1,76 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["StableDiffusionXLPipelineOutput"]} + +if is_transformers_available() and is_flax_available(): + _import_structure["pipeline_output"].extend(["FlaxStableDiffusionXLPipelineOutput"]) +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_xl"] = ["StableDiffusionXLPipeline"] + _import_structure["pipeline_stable_diffusion_xl_img2img"] = ["StableDiffusionXLImg2ImgPipeline"] + _import_structure["pipeline_stable_diffusion_xl_inpaint"] = ["StableDiffusionXLInpaintPipeline"] + _import_structure["pipeline_stable_diffusion_xl_instruct_pix2pix"] = ["StableDiffusionXLInstructPix2PixPipeline"] + +if is_transformers_available() and is_flax_available(): + from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState + + 
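+    # PNDMSchedulerState is registered as an extra export so the lazy module set up below
+    # exposes it alongside the Flax pipeline.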
_additional_imports.update({"PNDMSchedulerState": PNDMSchedulerState}) + _import_structure["pipeline_flax_stable_diffusion_xl"] = ["FlaxStableDiffusionXLPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_diffusion_xl import StableDiffusionXLPipeline + from .pipeline_stable_diffusion_xl_img2img import StableDiffusionXLImg2ImgPipeline + from .pipeline_stable_diffusion_xl_inpaint import StableDiffusionXLInpaintPipeline + from .pipeline_stable_diffusion_xl_instruct_pix2pix import StableDiffusionXLInstructPix2PixPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_flax_objects import * + else: + from .pipeline_flax_stable_diffusion_xl import ( + FlaxStableDiffusionXLPipeline, + ) + from .pipeline_output import FlaxStableDiffusionXLPipelineOutput + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) + for name, value in _additional_imports.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/__init__.cpython-310.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36316b9158fd703cfa3c2254f65543bc790e95a7 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/__init__.cpython-310.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/__init__.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b22ec4b96c2d22ade6f4c3f668038deba9d6d845 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_output.cpython-310.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551d0827ae389f38a04274fd3002643d1ae820b0 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_output.cpython-310.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_output.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_output.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..174f3168aa7125b3a32f24e8b21d22f362facea8 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_output.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl.cpython-310.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dde90ad1c1101f977c0473627a877f595478d1e6 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl.cpython-310.pyc 
differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f0b5a65e6d87d267b17c622d7147685285a6831 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl_inpaint.cpython-38.pyc b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl_inpaint.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1c8d2df4ae60507f8d6f1080342ca8c41ec7898 Binary files /dev/null and b/diffusers3/pipelines/stable_diffusion_xl/__pycache__/pipeline_stable_diffusion_xl_inpaint.cpython-38.pyc differ diff --git a/diffusers3/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py b/diffusers3/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..77363b2546d72836618d4fad3d24ca572d1c8f12 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py @@ -0,0 +1,308 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
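+# Usage sketch for this pipeline (a minimal example, assuming a JAX/TPU environment and
+# that Flax weights are published for the checkpoint id used below; adjust model id,
+# dtype and sharding to your setup):
+#
+#   import jax
+#   from flax.jax_utils import replicate
+#   from flax.training.common_utils import shard
+#
+#   pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
+#       "stabilityai/stable-diffusion-xl-base-1.0"
+#   )
+#   prompts = ["a photo of an astronaut riding a horse on mars"] * jax.device_count()
+#   prompt_ids = shard(pipeline.prepare_inputs(prompts))
+#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
+#   images = pipeline(prompt_ids, replicate(params), rng, num_inference_steps=25, jit=True).images
+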
+ +from functools import partial +from typing import Dict, List, Optional, Union + +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict +from transformers import CLIPTokenizer, FlaxCLIPTextModel + +from diffusers.utils import logging + +from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel +from ...schedulers import ( + FlaxDDIMScheduler, + FlaxDPMSolverMultistepScheduler, + FlaxLMSDiscreteScheduler, + FlaxPNDMScheduler, +) +from ..pipeline_flax_utils import FlaxDiffusionPipeline +from .pipeline_output import FlaxStableDiffusionXLPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# Set to True to use python for loop instead of jax.fori_loop for easier debugging +DEBUG = False + + +class FlaxStableDiffusionXLPipeline(FlaxDiffusionPipeline): + def __init__( + self, + text_encoder: FlaxCLIPTextModel, + text_encoder_2: FlaxCLIPTextModel, + vae: FlaxAutoencoderKL, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: FlaxUNet2DConditionModel, + scheduler: Union[ + FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler + ], + dtype: jnp.dtype = jnp.float32, + ): + super().__init__() + self.dtype = dtype + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + + def prepare_inputs(self, prompt: Union[str, List[str]]): + if not isinstance(prompt, (str, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + # Assume we have the two encoders + inputs = [] + for tokenizer in [self.tokenizer, self.tokenizer_2]: + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + inputs.append(text_inputs.input_ids) + inputs = jnp.stack(inputs, axis=1) + return inputs + + def __call__( + self, + prompt_ids: jax.Array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int = 50, + guidance_scale: Union[float, jax.Array] = 7.5, + height: Optional[int] = None, + width: Optional[int] = None, + latents: jnp.array = None, + neg_prompt_ids: jnp.array = None, + return_dict: bool = True, + output_type: str = None, + jit: bool = False, + ): + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + if isinstance(guidance_scale, float) and jit: + # Convert to a tensor so each device gets a copy. 
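+        # When `jit` is True the call is dispatched to `_p_generate`, the pmapped wrapper
+        # defined at the bottom of this file: `prompt_ids`, `params`, `prng_seed`,
+        # `guidance_scale`, `latents` and `neg_prompt_ids` are mapped over a leading device
+        # axis, while `num_inference_steps`, `height`, `width` and `return_latents` are
+        # static arguments and changing them triggers recompilation.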
+ guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) + guidance_scale = guidance_scale[:, None] + + return_latents = output_type == "latent" + + if jit: + images = _p_generate( + self, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, + ) + else: + images = self._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, + ) + + if not return_dict: + return (images,) + + return FlaxStableDiffusionXLPipelineOutput(images=images) + + def get_embeddings(self, prompt_ids: jnp.array, params): + # We assume we have the two encoders + + # bs, encoder_input, seq_length + te_1_inputs = prompt_ids[:, 0, :] + te_2_inputs = prompt_ids[:, 1, :] + + prompt_embeds = self.text_encoder(te_1_inputs, params=params["text_encoder"], output_hidden_states=True) + prompt_embeds = prompt_embeds["hidden_states"][-2] + prompt_embeds_2_out = self.text_encoder_2( + te_2_inputs, params=params["text_encoder_2"], output_hidden_states=True + ) + prompt_embeds_2 = prompt_embeds_2_out["hidden_states"][-2] + text_embeds = prompt_embeds_2_out["text_embeds"] + prompt_embeds = jnp.concatenate([prompt_embeds, prompt_embeds_2], axis=-1) + return prompt_embeds, text_embeds + + def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, bs, dtype): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_time_ids = jnp.array([add_time_ids] * bs, dtype=dtype) + return add_time_ids + + def _generate( + self, + prompt_ids: jnp.array, + params: Union[Dict, FrozenDict], + prng_seed: jax.Array, + num_inference_steps: int, + height: int, + width: int, + guidance_scale: float, + latents: Optional[jnp.array] = None, + neg_prompt_ids: Optional[jnp.array] = None, + return_latents=False, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + # Encode input prompt + prompt_embeds, pooled_embeds = self.get_embeddings(prompt_ids, params) + + # Get unconditional embeddings + batch_size = prompt_embeds.shape[0] + if neg_prompt_ids is None: + neg_prompt_embeds = jnp.zeros_like(prompt_embeds) + negative_pooled_embeds = jnp.zeros_like(pooled_embeds) + else: + neg_prompt_embeds, negative_pooled_embeds = self.get_embeddings(neg_prompt_ids, params) + + add_time_ids = self._get_add_time_ids( + (height, width), (0, 0), (height, width), prompt_embeds.shape[0], dtype=prompt_embeds.dtype + ) + + prompt_embeds = jnp.concatenate([neg_prompt_embeds, prompt_embeds], axis=0) # (2, 77, 2048) + add_text_embeds = jnp.concatenate([negative_pooled_embeds, pooled_embeds], axis=0) + add_time_ids = jnp.concatenate([add_time_ids, add_time_ids], axis=0) + + # Ensure model output will be `float32` before going into the scheduler + guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) + + # Create random latents + latents_shape = ( + batch_size, + self.unet.config.in_channels, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if latents is None: + latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) + else: + if latents.shape != latents_shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") + + # Prepare scheduler state + scheduler_state = self.scheduler.set_timesteps( + params["scheduler"], num_inference_steps=num_inference_steps, 
shape=latents.shape + ) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * scheduler_state.init_noise_sigma + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + # Denoising loop + def loop_body(step, args): + latents, scheduler_state = args + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + latents_input = jnp.concatenate([latents] * 2) + + t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] + timestep = jnp.broadcast_to(t, latents_input.shape[0]) + + latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) + + # predict the noise residual + noise_pred = self.unet.apply( + {"params": params["unet"]}, + jnp.array(latents_input), + jnp.array(timestep, dtype=jnp.int32), + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + ).sample + # perform guidance + noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) + noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() + return latents, scheduler_state + + if DEBUG: + # run with python for loop + for i in range(num_inference_steps): + latents, scheduler_state = loop_body(i, (latents, scheduler_state)) + else: + latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) + + if return_latents: + return latents + + # Decode latents + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample + + image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) + return image + + +# Static argnums are pipe, num_inference_steps, height, width, return_latents. A change would trigger recompilation. +# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). +@partial( + jax.pmap, + in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0, None), + static_broadcasted_argnums=(0, 4, 5, 6, 10), +) +def _p_generate( + pipe, + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, +): + return pipe._generate( + prompt_ids, + params, + prng_seed, + num_inference_steps, + height, + width, + guidance_scale, + latents, + neg_prompt_ids, + return_latents, + ) diff --git a/diffusers3/pipelines/stable_diffusion_xl/pipeline_output.py b/diffusers3/pipelines/stable_diffusion_xl/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..0783f44486ee1448bd15529f745af381ee7fa69f --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/pipeline_output.py @@ -0,0 +1,37 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from ...utils import BaseOutput, is_flax_available + + +@dataclass +class StableDiffusionXLPipelineOutput(BaseOutput): + """ + Output class for Stable Diffusion pipelines. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +if is_flax_available(): + import flax + + @flax.struct.dataclass + class FlaxStableDiffusionXLPipelineOutput(BaseOutput): + """ + Output class for Flax Stable Diffusion XL pipelines. + + Args: + images (`np.ndarray`) + Array of shape `(batch_size, height, width, num_channels)` with images from the diffusion pipeline. + """ + + images: np.ndarray diff --git a/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..787b6fa2612dbbbc47488de0b1c7976616cc48ce --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py @@ -0,0 +1,1527 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import PIL.Image +import numpy as np +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLPipeline + + >>> pipe = StableDiffusionXLPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ... 
) + >>> pipe = pipe.to("cuda") + + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt).images[0] + ``` +""" + + +def prepare_mask( + mask: Union[PIL.Image.Image, np.ndarray, torch.Tensor] +) -> torch.Tensor: + """ + Prepares a mask to be consumed by the Stable Diffusion pipeline. This means that this input will be + converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``1`` for + the ``mask``. + + The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32``. + + Args: + mask (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`): The mask to apply to the image, i.e. regions to inpaint. + It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` + ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. + + Raises: + ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range. + + Returns: + torch.Tensor: mask as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. + """ + if isinstance(mask, torch.Tensor): + # Batch and add channel dim for single mask + if mask.ndim == 2: + mask = mask.unsqueeze(0).unsqueeze(0) + + # Batch single mask or add channel dim + if mask.ndim == 3: + # Single batched mask, no channel dim or single mask not batched but channel dim + if mask.shape[0] == 1: + mask = mask.unsqueeze(0) + + # Batched masks no channel dim + else: + mask = mask.unsqueeze(1) + + # Check mask is in [0, 1] + if mask.min() < 0 or mask.max() > 1: + raise ValueError("Mask should be in [0, 1] range") + + # Binarize mask + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + else: + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate( + [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0 + ) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + mask = torch.from_numpy(mask) + + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "negative_add_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the 
final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = 
negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
+ ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
+ ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Image-to-image variant of `prepare_latents`: encodes `image` into VAE latents and returns them together with freshly sampled noise + def prepare_latents( + self, + image, + timestep, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] == 0 + ): + # expand init_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicated to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate( + "len(prompt) != len(image)", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat( + [init_latents] * additional_image_per_prompt, dim=0 + ) + elif ( + batch_size > init_latents.shape[0] + and batch_size % init_latents.shape[0] != 0 + ): + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
+ ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # return the clean image latents together with the sampled noise + return init_latents, noise + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents_origin(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings.
+ + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + sketch_image: Union[torch.FloatTensor, PIL.Image.Image] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. 
If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
+ Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. 
Prepare latent variables + + mask = prepare_mask(mask=mask_image) + + sketch_image = self.image_processor.preprocess(sketch_image) + + latent_timestep = timesteps[:1] # type: ignore + latent_timestep = latent_timestep.to(torch.long) + + + + init_latents, noise= self.prepare_latents( + sketch_image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + ) + + num_channels_latents = self.unet.config.in_channels + latents_with_noise = self.prepare_latents_origin( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + + # 6.1. Prepare mask + + height, width = mask.shape[-2:] + mask = torch.nn.functional.interpolate( + mask, size=( + 128, + 128 + ) + ) + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + + mask = mask.to(device) + mask = mask.to(prompt_embeds.dtype) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents_with_noise.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents_with_noise] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_with_noise = self.scheduler.step( + noise_pred, + t, + latents_with_noise, + **extra_step_kwargs, + return_dict=False, + )[0] + + + # masking process + tmp = t.unsqueeze(0) + init_latents_proper = self.scheduler.add_noise( + init_latents, noise, tmp + ).to(device) + + mask = (mask > 0.5).to(prompt_embeds.dtype) + latents_with_noise = ( + mask * latents_with_noise + (1 - mask) * init_latents_proper + ) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents_with_noise = callback_outputs.pop("latents_with_noise", latents_with_noise) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents_with_noise) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + 
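# `upcast_vae` moves the VAE to float32 (keeping the attention blocks in the lower
+ # precision when torch 2.0 / xformers attention processors are used); the latents are
+ # unscaled by `vae.config.scaling_factor` (or denormalized with `latents_mean`/`latents_std`
+ # when the VAE config provides them) before `vae.decode`, and the VAE is cast back to fp16 afterwards.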
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
+
+ if needs_upcasting:
+ self.upcast_vae()
+ latents_with_noise = latents_with_noise.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
+ elif latents_with_noise.dtype != self.vae.dtype:
+ if torch.backends.mps.is_available():
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+ self.vae = self.vae.to(latents_with_noise.dtype)
+
+ # unscale/denormalize the latents
+ # denormalize with the mean and std if available and not None
+ has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
+ has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
+ if has_latents_mean and has_latents_std:
+ latents_mean = (
+ torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype)
+ )
+ latents_std = (
+ torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents_with_noise.device, latents_with_noise.dtype)
+ )
+ latents_with_noise = latents_with_noise * latents_std / self.vae.config.scaling_factor + latents_mean
+ else:
+ latents_with_noise = latents_with_noise / self.vae.config.scaling_factor
+
+ image = self.vae.decode(latents_with_noise, return_dict=False)[0]
+
+ # cast back to fp16 if needed
+ if needs_upcasting:
+ self.vae.to(dtype=torch.float16)
+ else:
+ image = latents_with_noise
+
+ if not output_type == "latent":
+ # apply watermark if available
+ if self.watermark is not None:
+ image = self.watermark.apply_watermark(image)
+
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return StableDiffusionXLPipelineOutput(images=image)
+
+
diff --git a/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf696a354f8f63083c36d68411c81393b184e09c
--- /dev/null
+++ b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
@@ -0,0 +1,1495 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLImg2ImgPipeline + >>> from diffusers.utils import load_image + + >>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16 + ... ) + >>> pipe = pipe.to("cuda") + >>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" + + >>> init_image = load_image(url).convert("RGB") + >>> prompt = "a photo of an astronaut riding a horse on mars" + >>> image = pipe(prompt, image=init_image).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
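The CFG output is rescaled so that its per-sample standard deviation matches that of
+ `noise_pred_text`, and the rescaled prediction is blended back into the original result by the
+ `guidance_rescale` factor to counteract overexposure.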
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." 
+ ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLImg2ImgPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the + config of `stabilityai/stable-diffusion-xl-refiner-1-0`. 
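+ When enabled, `_get_add_time_ids` packs `aesthetic_score` and `negative_aesthetic_score`
+ into the added time ids in place of the target size.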
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + strength, + num_inference_steps, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + if num_inference_steps is None: + raise ValueError("`num_inference_steps` cannot be None.") + elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0: + raise ValueError( + f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type" + f" {type(num_inference_steps)}." + ) + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + else: + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. 
If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + t_start = len(self.scheduler.timesteps) - num_inference_steps + timesteps = self.scheduler.timesteps[t_start:] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start) + return timesteps, num_inference_steps + + def prepare_latents( + self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + latents_mean = latents_std = None + if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: + latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) + if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: + latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + elif isinstance(generator, list): + if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: + image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) + elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " + ) + + init_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + if latents_mean is not None and latents_std is not None: + latents_mean = latents_mean.to(device=device, dtype=dtype) + latents_std = latents_std.to(device=device, dtype=dtype) + init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std + else: + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + if add_noise: + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + + latents = init_latents + + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, 
list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
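+ # With CFG enabled, the denoising loop combines the two UNet passes as
+ # noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond); UNets with
+ # `time_cond_proj_dim` set skip CFG and receive the guidance scale via
+ # `get_guidance_scale_embedding` as `timestep_cond` instead.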
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def denoising_start(self): + return self._denoising_start + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + strength: float = 0.3, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): + The image(s) to modify with the pipeline. + strength (`float`, *optional*, defaults to 0.3): + Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` + will be used as a starting point, adding more noise to it the larger the `strength`. The number of + denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will + be maximum and the denoising process will run for the full number of iterations specified in + `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. 
Note that in the case of + `denoising_start` being declared as an integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image + Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2 of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will be generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of the same length as the number of + IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified.
Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. 
with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. When returning a tuple, the first element is a list with the generated images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + strength, + num_inference_steps, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image) + + # 5. 
Prepare timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + add_noise = True if self.denoising_start is None else False + + # 6. Prepare latent variables + if latents is None: + latents = self.prepare_latents( + image, + latent_timestep, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + generator, + add_noise, + ) + # 7. Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 8. Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 9. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 9.1 Apply denoising_end + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." 
+ ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..d28a9afbfb7aad890d45b23dc3b3d4a1b55d3f2b --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py @@ -0,0 +1,1732 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...callbacks import MultiPipelineCallbacks, PipelineCallback +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLInpaintPipeline + >>> from diffusers.utils import load_image + + >>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... torch_dtype=torch.float16, + ... variant="fp16", + ... use_safetensors=True, + ... ) + >>> pipe.to("cuda") + + >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + >>> init_image = load_image(img_url).convert("RGB") + >>> mask_image = load_image(mask_url).convert("RGB") + + >>> prompt = "A majestic tiger sitting on a bench" + >>> image = pipe( + ... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80 + ... ).images[0] + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +def mask_pil_to_torch(mask, height, width): + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask = torch.from_numpy(mask) + return mask + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLInpaintPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + FromSingleFileMixin, + IPAdapterMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. 
+ scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config + of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. + add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + _callback_tensor_inputs = [ + "latents", + "prompt_embeds", + "negative_prompt_embeds", + "add_text_embeds", + "add_time_ids", + "negative_pooled_prompt_embeds", + "add_neg_time_ids", + "mask", + "masked_image_latents", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + requires_aesthetics_score: bool = False, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.mask_processor = VaeImageProcessor( + vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + 
uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + prompt_2, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + padding_mask_crop=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." 
+ ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if padding_mask_crop is not None: + if not isinstance(image, PIL.Image.Image): + raise ValueError( + f"The image should be a PIL image when inpainting mask crop, but is of type" f" {type(image)}." + ) + if not isinstance(mask_image, PIL.Image.Image): + raise ValueError( + f"The mask image should be a PIL image when inpainting mask crop, but is of type" + f" {type(mask_image)}." + ) + if output_type != "pil": + raise ValueError(f"The output type should be PIL when inpainting mask crop, but is" f" {output_type}.") + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + image=None, + timestep=None, + is_strength_max=True, + add_noise=True, + return_noise=False, + return_image_latents=False, + ): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if (image is None or timestep is None) and not is_strength_max: + raise ValueError( + "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." 
+ "However, either the image or the noise timestep has not been provided." + ) + + if image.shape[1] == 4: + image_latents = image.to(device=device, dtype=dtype) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + elif return_image_latents or (latents is None and not is_strength_max): + image = image.to(device=device, dtype=dtype) + image_latents = self._encode_vae_image(image=image, generator=generator) + image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) + + if latents is None and add_noise: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + # if strength is 1. then initialise the latents to noise, else initial to image + noise + latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) + # if pure noise then scale the initial latents by the Scheduler's init sigma + latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents + elif add_noise: + noise = latents.to(device) + latents = noise * self.scheduler.init_noise_sigma + else: + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + latents = image_latents.to(device) + + outputs = (latents,) + + if return_noise: + outputs += (noise,) + + if return_image_latents: + outputs += (image_latents,) + + return outputs + + def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): + dtype = image.dtype + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list): + image_latents = [ + retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) + for i in range(image.shape[0]) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = retrieve_latents(self.vae.encode(image), generator=generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + image_latents = image_latents.to(dtype) + image_latents = self.vae.config.scaling_factor * image_latents + + return image_latents + + def prepare_mask_latents( + self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance + ): + # resize the mask to latents shape as we concatenate the mask to the latents + # we do that before converting to dtype to avoid breaking in case we're using cpu_offload + # and half precision + mask = torch.nn.functional.interpolate( + mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) + ) + mask = mask.to(device=device, dtype=dtype) + + # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method + if mask.shape[0] < batch_size: + if not batch_size % mask.shape[0] == 0: + raise ValueError( + "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" + f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" + " of masks that you pass is divisible by the total requested batch size." 
+ ) + mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) + + mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask + + if masked_image is not None and masked_image.shape[1] == 4: + masked_image_latents = masked_image + else: + masked_image_latents = None + + if masked_image is not None: + if masked_image_latents is None: + masked_image = masked_image.to(device=device, dtype=dtype) + masked_image_latents = self._encode_vae_image(masked_image, generator=generator) + + if masked_image_latents.shape[0] < batch_size: + if not batch_size % masked_image_latents.shape[0] == 0: + raise ValueError( + "The passed images and the required batch size don't match. Images are supposed to be duplicated" + f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." + " Make sure the number of images that you pass is divisible by the total requested batch size." + ) + masked_image_latents = masked_image_latents.repeat( + batch_size // masked_image_latents.shape[0], 1, 1, 1 + ) + + masked_image_latents = ( + torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents + ) + + # aligning device to prevent device errors when concating it with the latent model input + masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) + + return mask, masked_image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): + # get the original timestep using init_timestep + if denoising_start is None: + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + t_start = max(num_inference_steps - init_timestep, 0) + + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + else: + # Strength is irrelevant if we directly request a timestep to start at; + # that is, strength is determined by the denoising_start instead. + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_start * self.scheduler.config.num_train_timesteps) + ) + ) + + num_inference_steps = (self.scheduler.timesteps < discrete_timestep_cutoff).sum().item() + if self.scheduler.order == 2 and num_inference_steps % 2 == 0: + # if the scheduler is a 2nd order scheduler we might have to do +1 + # because `num_inference_steps` might be even given that every timestep + # (except the highest one) is duplicated. If `num_inference_steps` is even it would + # mean that we cut the timesteps in the middle of the denoising step + # (between 1st and 2nd derivative) which leads to incorrect results. 
By adding 1 + # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler + num_inference_steps = num_inference_steps + 1 + + # because t_n+1 >= t_n, we slice the timesteps starting from the end + t_start = len(self.scheduler.timesteps) - num_inference_steps + timesteps = self.scheduler.timesteps[t_start:] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start) + return timesteps, num_inference_steps + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids + def _get_add_time_ids( + self, + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype, + text_encoder_projection_dim=None, + ): + if self.config.requires_aesthetics_score: + add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) + add_neg_time_ids = list( + negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) + ) + else: + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if ( + expected_add_embed_dim > passed_add_embed_dim + and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." + ) + elif ( + expected_add_embed_dim < passed_add_embed_dim + and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim + ): + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." + ) + elif expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) + + return add_time_ids, add_neg_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
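+ # Classifier-free guidance is additionally skipped when the UNet carries a guidance-scale
+ # embedding (`unet.config.time_cond_proj_dim` is set); in that case the scale is injected
+ # through `timestep_cond` (see step 11.1 below) instead of by doubling the batch.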
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def denoising_end(self): + return self._denoising_end + + @property + def denoising_start(self): + return self._denoising_start + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + mask_image: PipelineImageInput = None, + masked_image_latents: torch.Tensor = None, + height: Optional[int] = None, + width: Optional[int] = None, + padding_mask_crop: Optional[int] = None, + strength: float = 0.9999, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_start: Optional[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + aesthetic_score: float = 6.0, + negative_aesthetic_score: float = 2.5, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will + be masked out with `mask_image` and repainted according to `prompt`. + mask_image (`PIL.Image.Image`): + `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be + repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted + to a single channel (luminance) before use. 
If it's a tensor, it should contain one color channel (L) + instead of 3, so the expected shape would be `(B, H, W, 1)`. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + padding_mask_crop (`int`, *optional*, defaults to `None`): + The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to + image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region + with the same aspect ration of the image and contains all masked area, and then expand that area based + on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before + resizing to the original image size for inpainting. This is useful when the masked area is small while + the image is large and contain information irrelevant for inpainting, such as background. + strength (`float`, *optional*, defaults to 0.9999): + Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be + between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the + `strength`. The number of denoising steps depends on the amount of noise initially added. When + `strength` is 1, added noise will be maximum and the denoising process will run for the full number of + iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked + portion of the reference `image`. Note that in the case of `denoising_start` being declared as an + integer, the value of `strength` will be ignored. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_start (`float`, *optional*): + When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be + bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and + it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, + strength will be ignored. 
The `denoising_start` parameter is particularly beneficial when this pipeline + is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be + denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the + final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline + forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. 
If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on a specific crop coordinates. 
Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be as same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + aesthetic_score (`float`, *optional*, defaults to 6.0): + Used to simulate an aesthetic score of the generated image by influencing the positive text condition. + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_aesthetic_score (`float`, *optional*, defaults to 2.5): + Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to + simulate an aesthetic score of the generated image by influencing the negative text condition. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. `tuple. When returning a tuple, the first element is a list with the generated images. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. 
Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs + self.check_inputs( + prompt, + prompt_2, + image, + mask_image, + height, + width, + strength, + callback_steps, + output_type, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + callback_on_step_end_tensor_inputs, + padding_mask_crop, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + self._denoising_start = denoising_start + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3. Encode input prompt + text_encoder_lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. set timesteps + def denoising_value_valid(dnv): + return isinstance(dnv, float) and 0 < dnv < 1 + + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + timesteps, num_inference_steps = self.get_timesteps( + num_inference_steps, + strength, + device, + denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, + ) + # check that number of inference steps is not < 1 - as this doesn't make sense + if num_inference_steps < 1: + raise ValueError( + f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" + f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." + ) + # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise + is_strength_max = strength == 1.0 + + # 5. 
Preprocess mask and image + if padding_mask_crop is not None: + crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) + resize_mode = "fill" + else: + crops_coords = None + resize_mode = "default" + + original_image = image + init_image = self.image_processor.preprocess( + image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode + ) + init_image = init_image.to(dtype=torch.float32) + + mask = self.mask_processor.preprocess( + mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords + ) + + if masked_image_latents is not None: + masked_image = masked_image_latents + elif init_image.shape[1] == 4: + # if images are in latent space, we can't mask it + masked_image = None + else: + masked_image = init_image * (mask < 0.5) + + # 6. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + num_channels_unet = self.unet.config.in_channels + return_image_latents = num_channels_unet == 4 + + add_noise = True if self.denoising_start is None else False + latents_outputs = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + image=init_image, + timestep=latent_timestep, + is_strength_max=is_strength_max, + add_noise=add_noise, + return_noise=True, + return_image_latents=return_image_latents, + ) + + if return_image_latents: + latents, noise, image_latents = latents_outputs + else: + latents, noise = latents_outputs + + # 7. Prepare mask latent variables + mask, masked_image_latents = self.prepare_mask_latents( + mask, + masked_image, + batch_size * num_images_per_prompt, + height, + width, + prompt_embeds.dtype, + device, + generator, + self.do_classifier_free_guidance, + ) + + # 8. Check that sizes of mask, masked image and latents match + if num_channels_unet == 9: + # default case for runwayml/stable-diffusion-inpainting + num_channels_mask = mask.shape[1] + num_channels_masked_image = masked_image_latents.shape[1] + if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" + f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" + " `pipeline.unet` or your `mask_image` or `image` input." + ) + elif num_channels_unet != 4: + raise ValueError( + f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." + ) + # 8.1 Prepare extra step kwargs. + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + height, width = latents.shape[-2:] + height = height * self.vae_scale_factor + width = width * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 10. 
Prepare added time ids & embeddings + if negative_original_size is None: + negative_original_size = original_size + if negative_target_size is None: + negative_target_size = target_size + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids, add_neg_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + aesthetic_score, + negative_aesthetic_score, + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) + add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device) + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + if ( + self.denoising_end is not None + and self.denoising_start is not None + and denoising_value_valid(self.denoising_end) + and denoising_value_valid(self.denoising_start) + and self.denoising_start >= self.denoising_end + ): + raise ValueError( + f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + + f" {self.denoising_end} when using type float." 
+ ) + elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 11.1 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + # concat latents, mask, masked_image_latents in the channel dimension + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + if num_channels_unet == 9: + latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=self.cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if num_channels_unet == 4: + init_latents_proper = image_latents + if self.do_classifier_free_guidance: + init_mask, _ = mask.chunk(2) + else: + init_mask = mask + + if i < len(timesteps) - 1: + noise_timestep = timesteps[i + 1] + init_latents_proper = self.scheduler.add_noise( + init_latents_proper, noise, torch.tensor([noise_timestep]) + ) + + latents = (1 - init_mask) * init_latents_proper + init_mask * latents + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) + mask = callback_outputs.pop("mask", mask) + masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if padding_mask_crop is not None: + image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py new file mode 100644 index 0000000000000000000000000000000000000000..b59f2312726d0071c70317f34f14b00986c02f69 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_instruct_pix2pix.py @@ -0,0 +1,992 @@ +# Copyright 2024 Harutatsu Akiyama and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
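+# This module defines `StableDiffusionXLInstructPix2PixPipeline`, which edits an input image to
+# follow a text instruction, combining the usual text `guidance_scale` with a separate
+# `image_guidance_scale` (see `EXAMPLE_DOC_STRING` below for a usage sketch).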
+ +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import PIL.Image +import torch +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + is_invisible_watermark_available, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from .pipeline_output import StableDiffusionXLPipelineOutput + + +if is_invisible_watermark_available(): + from .watermark import StableDiffusionXLWatermarker + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import StableDiffusionXLInstructPix2PixPipeline + >>> from diffusers.utils import load_image + + >>> resolution = 768 + >>> image = load_image( + ... "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" + ... ).resize((resolution, resolution)) + >>> edit_instruction = "Turn sky into a cloudy one" + + >>> pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained( + ... "diffusers/sdxl-instructpix2pix-768", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> edited_image = pipe( + ... prompt=edit_instruction, + ... image=image, + ... height=resolution, + ... width=resolution, + ... guidance_scale=3.0, + ... image_guidance_scale=1.5, + ... num_inference_steps=30, + ... ).images[0] + >>> edited_image + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class StableDiffusionXLInstructPix2PixPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + FromSingleFileMixin, + StableDiffusionXLLoraLoaderMixin, +): + r""" + Pipeline for pixel-level image editing by following text instructions. Based on Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): + Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config + of `stabilityai/stable-diffusion-xl-refiner-1-0`. + force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): + Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of + `stabilityai/stable-diffusion-xl-base-1-0`. 
+ add_watermarker (`bool`, *optional*): + Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to + watermark output images. If not defined, it will default to True if the package is installed, otherwise no + watermarker will be used. + is_cosxl_edit (`bool`, *optional*): + When set the image latents are scaled. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + is_cosxl_edit: Optional[bool] = False, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.default_sample_size = self.unet.config.sample_size + self.is_cosxl_edit = is_cosxl_edit + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder( + text_input_ids.to(device), + output_hidden_states=True, + ) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + prompt_embeds = prompt_embeds.hidden_states[-2] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional 
embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt, negative_prompt_2] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + prompt_embeds_dtype = self.text_encoder_2.dtype if self.text_encoder_2 is not None else self.unet.dtype + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_latents( + self, image, batch_size, num_images_per_prompt, dtype, device, do_classifier_free_guidance, generator=None + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + image_latents = image + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + if needs_upcasting: + image = image.float() + self.upcast_vae() + + image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax") + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + if image_latents.dtype != self.vae.dtype: + image_latents = image_latents.to(dtype=self.vae.dtype) + + if self.is_cosxl_edit: + image_latents = image_latents * self.vae.config.scaling_factor + + return image_latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 100, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + image_guidance_scale: float = 1.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Tuple[int, int] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Tuple[int, int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): + The image(s) to modify with the pipeline. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. 
The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
+                "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
+                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
+            guidance_scale (`float`, *optional*, defaults to 5.0):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
+                usually at the expense of lower image quality.
+            image_guidance_scale (`float`, *optional*, defaults to 1.5):
+                Image guidance scale pushes the generated image towards the initial image `image`. Image guidance
+                scale is enabled by setting `image_guidance_scale > 1`. A higher image guidance scale encourages the model to
+                generate images that are closely linked to the source image `image`, usually at the expense of lower
+                image quality. This pipeline requires a value of at least `1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
+                [`schedulers.DDIMScheduler`], will be ignored for others.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting.
If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
+                plain tuple.
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function will be
+                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function will be called. If not specified, the callback will be
+                called at every step.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            guidance_rescale (`float`, *optional*, defaults to 0.0):
+                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
+                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_scale` is defined as `φ` in equation 16 of
+                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+                Guidance rescale factor should fix overexposure when using zero terminal SNR.
+            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
+                explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                For most cases, `target_size` should be set to the desired height and width of the generated image. If
+                not specified, it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
+                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            aesthetic_score (`float`, *optional*, defaults to 6.0):
+                Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
+                Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
+                Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
+                simulate an aesthetic score of the generated image by influencing the negative text condition.
+ + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + if image is None: + raise ValueError("`image` input cannot be undefined.") + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. Preprocess image + image = self.image_processor.preprocess(image, height=height, width=width).to(device) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 6. Prepare Image latents + image_latents = self.prepare_image_latents( + image, + batch_size, + num_images_per_prompt, + prompt_embeds.dtype, + device, + do_classifier_free_guidance, + ) + + # 7. Prepare latent variables + num_channels_latents = self.vae.config.latent_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 8. Check that shapes of latents and image match the UNet channels + num_channels_image = image_latents.shape[1] + if num_channels_latents + num_channels_image != self.unet.config.in_channels: + raise ValueError( + f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" + f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" + f" `num_channels_image`: {num_channels_image} " + f" = {num_channels_latents + num_channels_image}. Please verify the config of" + " `pipeline.unet` or your `image` input." + ) + + # 9. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 10. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if do_classifier_free_guidance: + # The extra concat similar to how it's done in SD InstructPix2Pix. + prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds, negative_prompt_embeds], dim=0) + add_text_embeds = torch.cat( + [add_text_embeds, negative_pooled_prompt_embeds, negative_pooled_prompt_embeds], dim=0 + ) + add_time_ids = torch.cat([add_time_ids, add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 11. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Expand the latents if we are doing classifier free guidance. + # The latents are expanded 3 times because for pix2pix the guidance + # is applied for both the text and the input image. + latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents + + # concat latents, image_latents in the channel dimension + scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + noise_pred = self.unet( + scaled_latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3) + noise_pred = ( + noise_pred_uncond + + guidance_scale * (noise_pred_text - noise_pred_image) + + image_guidance_scale * (noise_pred_image - noise_pred_uncond) + ) + + if do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if XLA_AVAILABLE: + xm.mark_step() + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + elif latents.dtype != self.vae.dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + self.vae = self.vae.to(latents.dtype) + + # unscale/denormalize the latents + # denormalize with the mean and std if available and not None + has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None + has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None + if has_latents_mean and has_latents_std: + latents_mean = ( + torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents_std = ( + torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) + ) + latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean + else: + latents = latents / self.vae.config.scaling_factor + + image = self.vae.decode(latents, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + return StableDiffusionXLPipelineOutput(images=latents) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/stable_diffusion_xl/watermark.py b/diffusers3/pipelines/stable_diffusion_xl/watermark.py new file mode 100644 index 0000000000000000000000000000000000000000..70d06bb6320d890ea90542cc7551bc42d7c59451 --- /dev/null +++ b/diffusers3/pipelines/stable_diffusion_xl/watermark.py @@ -0,0 +1,42 @@ +import numpy as np +import torch + +from ...utils import is_invisible_watermark_available + + +if is_invisible_watermark_available(): + from imwatermark import WatermarkEncoder + + +# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 +WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 +# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 +WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] + + +class StableDiffusionXLWatermarker: + def __init__(self): + self.watermark = WATERMARK_BITS + self.encoder = WatermarkEncoder() + + self.encoder.set_watermark("bits", self.watermark) + + def apply_watermark(self, images: torch.Tensor): + # can't encode images that are smaller than 256 + if images.shape[-1] < 256: + return images + + images = (255 * (images / 2 + 
0.5)).cpu().permute(0, 2, 3, 1).float().numpy() + + # Convert RGB to BGR, which is the channel order expected by the watermark encoder. + images = images[:, :, :, ::-1] + + # Add watermark and convert BGR back to RGB + images = [self.encoder.encode(image, "dwtDct")[:, :, ::-1] for image in images] + + images = np.array(images) + + images = torch.from_numpy(images).permute(0, 3, 1, 2) + + images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) + return images diff --git a/diffusers3/pipelines/stable_video_diffusion/__init__.py b/diffusers3/pipelines/stable_video_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd4dc78966e217d85769691b98ed8fb0b6ac05c --- /dev/null +++ b/diffusers3/pipelines/stable_video_diffusion/__init__.py @@ -0,0 +1,58 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + BaseOutput, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure.update( + { + "pipeline_stable_video_diffusion": [ + "StableVideoDiffusionPipeline", + "StableVideoDiffusionPipelineOutput", + ], + } + ) + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_stable_video_diffusion import ( + StableVideoDiffusionPipeline, + StableVideoDiffusionPipelineOutput, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py b/diffusers3/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..b14fdd4f8de33310491464f8cf0c68e6dd1a32e4 --- /dev/null +++ b/diffusers3/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py @@ -0,0 +1,726 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +from dataclasses import dataclass +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection + +from ...image_processor import PipelineImageInput +from ...models import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel +from ...schedulers import EulerDiscreteScheduler +from ...utils import BaseOutput, logging, replace_example_docstring +from ...utils.torch_utils import is_compiled_module, randn_tensor +from ...video_processor import VideoProcessor +from ..pipeline_utils import DiffusionPipeline + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusers import StableVideoDiffusionPipeline + >>> from diffusers.utils import load_image, export_to_video + + >>> pipe = StableVideoDiffusionPipeline.from_pretrained( + ... "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16" + ... ) + >>> pipe.to("cuda") + + >>> image = load_image( + ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/svd-docstring-example.jpeg" + ... ) + >>> image = image.resize((1024, 576)) + + >>> frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0] + >>> export_to_video(frames, "generated.mp4", fps=7) + ``` +""" + + +def _append_dims(x, target_dims): + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + if dims_to_append < 0: + raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") + return x[(...,) + (None,) * dims_to_append] + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +@dataclass +class StableVideoDiffusionPipelineOutput(BaseOutput): + r""" + Output class for Stable Video Diffusion pipeline. + + Args: + frames (`[List[List[PIL.Image.Image]]`, `np.ndarray`, `torch.Tensor`]): + List of denoised PIL images of length `batch_size` or numpy array or torch tensor of shape `(batch_size, + num_frames, height, width, num_channels)`. + """ + + frames: Union[List[List[PIL.Image.Image]], np.ndarray, torch.Tensor] + + +class StableVideoDiffusionPipeline(DiffusionPipeline): + r""" + Pipeline to generate video from an input image using Stable Video Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKLTemporalDecoder`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder + ([laion/CLIP-ViT-H-14-laion2B-s32B-b79K](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K)). + unet ([`UNetSpatioTemporalConditionModel`]): + A `UNetSpatioTemporalConditionModel` to denoise the encoded image latents. + scheduler ([`EulerDiscreteScheduler`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + A `CLIPImageProcessor` to extract features from generated images. 
+ """ + + model_cpu_offload_seq = "image_encoder->unet->vae" + _callback_tensor_inputs = ["latents"] + + def __init__( + self, + vae: AutoencoderKLTemporalDecoder, + image_encoder: CLIPVisionModelWithProjection, + unet: UNetSpatioTemporalConditionModel, + scheduler: EulerDiscreteScheduler, + feature_extractor: CLIPImageProcessor, + ): + super().__init__() + + self.register_modules( + vae=vae, + image_encoder=image_encoder, + unet=unet, + scheduler=scheduler, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=True, vae_scale_factor=self.vae_scale_factor) + + def _encode_image( + self, + image: PipelineImageInput, + device: Union[str, torch.device], + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + ) -> torch.Tensor: + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.video_processor.pil_to_numpy(image) + image = self.video_processor.numpy_to_pt(image) + + # We normalize the image before resizing to match with the original implementation. + # Then we unnormalize it after resizing. + image = image * 2.0 - 1.0 + image = _resize_with_antialiasing(image, (224, 224)) + image = (image + 1.0) / 2.0 + + # Normalize the image with for CLIP input + image = self.feature_extractor( + images=image, + do_normalize=True, + do_center_crop=False, + do_resize=False, + do_rescale=False, + return_tensors="pt", + ).pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + image_embeddings = image_embeddings.unsqueeze(1) + + # duplicate image embeddings for each generation per prompt, using mps friendly method + bs_embed, seq_len, _ = image_embeddings.shape + image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) + image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + negative_image_embeddings = torch.zeros_like(image_embeddings) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) + + return image_embeddings + + def _encode_vae_image( + self, + image: torch.Tensor, + device: Union[str, torch.device], + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + ): + image = image.to(device=device) + image_latents = self.vae.encode(image).latent_dist.mode() + + # duplicate image_latents for each generation per prompt, using mps friendly method + image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1) + + if do_classifier_free_guidance: + negative_image_latents = torch.zeros_like(image_latents) + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + image_latents = torch.cat([negative_image_latents, image_latents]) + + return image_latents + + def _get_add_time_ids( + self, + fps: int, + motion_bucket_id: int, + noise_aug_strength: float, + dtype: torch.dtype, + batch_size: int, + num_videos_per_prompt: int, + do_classifier_free_guidance: bool, + ): + add_time_ids = [fps, motion_bucket_id, noise_aug_strength] + + passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + add_time_ids = add_time_ids.repeat(batch_size * num_videos_per_prompt, 1) + + if do_classifier_free_guidance: + add_time_ids = torch.cat([add_time_ids, add_time_ids]) + + return add_time_ids + + def decode_latents(self, latents: torch.Tensor, num_frames: int, decode_chunk_size: int = 14): + # [batch, frames, channels, height, width] -> [batch*frames, channels, height, width] + latents = latents.flatten(0, 1) + + latents = 1 / self.vae.config.scaling_factor * latents + + forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward + accepts_num_frames = "num_frames" in set(inspect.signature(forward_vae_fn).parameters.keys()) + + # decode decode_chunk_size frames at a time to avoid OOM + frames = [] + for i in range(0, latents.shape[0], decode_chunk_size): + num_frames_in = latents[i : i + decode_chunk_size].shape[0] + decode_kwargs = {} + if accepts_num_frames: + # we only pass num_frames_in if it's expected + decode_kwargs["num_frames"] = num_frames_in + + frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample + frames.append(frame) + frames = torch.cat(frames, dim=0) + + # [batch*frames, channels, height, width] -> [batch, channels, frames, height, width] + frames = frames.reshape(-1, num_frames, *frames.shape[1:]).permute(0, 2, 1, 3, 4) + + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + frames = frames.float() + return frames + + def check_inputs(self, image, height, width): + if ( + not isinstance(image, torch.Tensor) + and not isinstance(image, PIL.Image.Image) + and not isinstance(image, list) + ): + raise ValueError( + "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" + f" {type(image)}" + ) + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + def prepare_latents( + self, + batch_size: int, + num_frames: int, + num_channels_latents: int, + height: int, + width: int, + dtype: torch.dtype, + device: Union[str, torch.device], + generator: torch.Generator, + latents: Optional[torch.Tensor] = None, + ): + shape = ( + batch_size, + num_frames, + num_channels_latents // 2, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length 
{len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + if isinstance(self.guidance_scale, (int, float)): + return self.guidance_scale > 1 + return self.guidance_scale.max() > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], + height: int = 576, + width: int = 1024, + num_frames: Optional[int] = None, + num_inference_steps: int = 25, + sigmas: Optional[List[float]] = None, + min_guidance_scale: float = 1.0, + max_guidance_scale: float = 3.0, + fps: int = 7, + motion_bucket_id: int = 127, + noise_aug_strength: float = 0.02, + decode_chunk_size: Optional[int] = None, + num_videos_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + return_dict: bool = True, + ): + r""" + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.Tensor`): + Image(s) to guide image generation. If you provide a tensor, the expected value range is between `[0, + 1]`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + num_frames (`int`, *optional*): + The number of video frames to generate. Defaults to `self.unet.config.num_frames` (14 for + `stable-video-diffusion-img2vid` and to 25 for `stable-video-diffusion-img2vid-xt`). + num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps. More denoising steps usually lead to a higher quality video at the + expense of slower inference. This parameter is modulated by `strength`. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + min_guidance_scale (`float`, *optional*, defaults to 1.0): + The minimum guidance scale. Used for the classifier free guidance with first frame. + max_guidance_scale (`float`, *optional*, defaults to 3.0): + The maximum guidance scale. Used for the classifier free guidance with last frame. + fps (`int`, *optional*, defaults to 7): + Frames per second. 
The rate at which the generated images shall be exported to a video after + generation. Note that Stable Diffusion Video's UNet was micro-conditioned on fps-1 during training. + motion_bucket_id (`int`, *optional*, defaults to 127): + Used for conditioning the amount of motion for the generation. The higher the number the more motion + will be in the video. + noise_aug_strength (`float`, *optional*, defaults to 0.02): + The amount of noise added to the init image, the higher it is the less the video will look like the + init image. Increase it for more motion. + decode_chunk_size (`int`, *optional*): + The number of frames to decode at a time. Higher chunk size leads to better temporal consistency at the + expense of more memory usage. By default, the decoder decodes all frames at once for maximal quality. + For lower memory usage, reduce `decode_chunk_size`. + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `pil`, `np` or `pt`. + callback_on_step_end (`Callable`, *optional*): + A function that is called at the end of each denoising step during inference. The function is called + with the following arguments: + `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. + `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] is + returned, otherwise a `tuple` of (`List[List[PIL.Image.Image]]` or `np.ndarray` or `torch.Tensor`) is + returned. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_frames = num_frames if num_frames is not None else self.unet.config.num_frames + decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames + + # 1. Check inputs. Raise error if not correct + self.check_inputs(image, height, width) + + # 2. 
Define call parameters + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + self._guidance_scale = max_guidance_scale + + # 3. Encode input image + image_embeddings = self._encode_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance) + + # NOTE: Stable Video Diffusion was conditioned on fps - 1, which is why it is reduced here. + # See: https://github.com/Stability-AI/generative-models/blob/ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3/scripts/sampling/simple_video_sample.py#L188 + fps = fps - 1 + + # 4. Encode input image using VAE + image = self.video_processor.preprocess(image, height=height, width=width).to(device) + noise = randn_tensor(image.shape, generator=generator, device=device, dtype=image.dtype) + image = image + noise_aug_strength * noise + + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + if needs_upcasting: + self.vae.to(dtype=torch.float32) + + image_latents = self._encode_vae_image( + image, + device=device, + num_videos_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + ) + image_latents = image_latents.to(image_embeddings.dtype) + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + + # Repeat the image latents for each frame so we can concatenate them with the noise + # image_latents [batch, channels, height, width] ->[batch, num_frames, channels, height, width] + image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1) + + # 5. Get Added Time IDs + added_time_ids = self._get_add_time_ids( + fps, + motion_bucket_id, + noise_aug_strength, + image_embeddings.dtype, + batch_size, + num_videos_per_prompt, + self.do_classifier_free_guidance, + ) + added_time_ids = added_time_ids.to(device) + + # 6. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, None, sigmas) + + # 7. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_frames, + num_channels_latents, + height, + width, + image_embeddings.dtype, + device, + generator, + latents, + ) + + # 8. Prepare guidance scale + guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0) + guidance_scale = guidance_scale.to(device, latents.dtype) + guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1) + guidance_scale = _append_dims(guidance_scale, latents.ndim) + + self._guidance_scale = guidance_scale + + # 9. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # Concatenate image_latents over channels dimension + latent_model_input = torch.cat([latent_model_input, image_latents], dim=2) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=image_embeddings, + added_time_ids=added_time_ids, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + frames = self.decode_latents(latents, num_frames, decode_chunk_size) + frames = self.video_processor.postprocess_video(video=frames, output_type=output_type) + else: + frames = latents + + self.maybe_free_model_hooks() + + if not return_dict: + return frames + + return StableVideoDiffusionPipelineOutput(frames=frames) + + +# resizing utils +# TODO: clean up later +def _resize_with_antialiasing(input, size, interpolation="bicubic", align_corners=True): + h, w = input.shape[-2:] + factors = (h / size[0], w / size[1]) + + # First, we have to determine sigma + # Taken from skimage: https://github.com/scikit-image/scikit-image/blob/v0.19.2/skimage/transform/_warps.py#L171 + sigmas = ( + max((factors[0] - 1.0) / 2.0, 0.001), + max((factors[1] - 1.0) / 2.0, 0.001), + ) + + # Now kernel size. Good results are for 3 sigma, but that is kind of slow. Pillow uses 1 sigma + # https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Resample.c#L206 + # But they do it in the 2 passes, which gives better results. 
Let's try 2 sigmas for now + ks = int(max(2.0 * 2 * sigmas[0], 3)), int(max(2.0 * 2 * sigmas[1], 3)) + + # Make sure it is odd + if (ks[0] % 2) == 0: + ks = ks[0] + 1, ks[1] + + if (ks[1] % 2) == 0: + ks = ks[0], ks[1] + 1 + + input = _gaussian_blur2d(input, ks, sigmas) + + output = torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners) + return output + + +def _compute_padding(kernel_size): + """Compute padding tuple.""" + # 4 or 6 ints: (padding_left, padding_right,padding_top,padding_bottom) + # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad + if len(kernel_size) < 2: + raise AssertionError(kernel_size) + computed = [k - 1 for k in kernel_size] + + # for even kernels we need to do asymmetric padding :( + out_padding = 2 * len(kernel_size) * [0] + + for i in range(len(kernel_size)): + computed_tmp = computed[-(i + 1)] + + pad_front = computed_tmp // 2 + pad_rear = computed_tmp - pad_front + + out_padding[2 * i + 0] = pad_front + out_padding[2 * i + 1] = pad_rear + + return out_padding + + +def _filter2d(input, kernel): + # prepare kernel + b, c, h, w = input.shape + tmp_kernel = kernel[:, None, ...].to(device=input.device, dtype=input.dtype) + + tmp_kernel = tmp_kernel.expand(-1, c, -1, -1) + + height, width = tmp_kernel.shape[-2:] + + padding_shape: List[int] = _compute_padding([height, width]) + input = torch.nn.functional.pad(input, padding_shape, mode="reflect") + + # kernel and input tensor reshape to align element-wise or batch-wise params + tmp_kernel = tmp_kernel.reshape(-1, 1, height, width) + input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1)) + + # convolve the tensor with the kernel. + output = torch.nn.functional.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1) + + out = output.view(b, c, h, w) + return out + + +def _gaussian(window_size: int, sigma): + if isinstance(sigma, float): + sigma = torch.tensor([[sigma]]) + + batch_size = sigma.shape[0] + + x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1) + + if window_size % 2 == 0: + x = x + 0.5 + + gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0))) + + return gauss / gauss.sum(-1, keepdim=True) + + +def _gaussian_blur2d(input, kernel_size, sigma): + if isinstance(sigma, tuple): + sigma = torch.tensor([sigma], dtype=input.dtype) + else: + sigma = sigma.to(dtype=input.dtype) + + ky, kx = int(kernel_size[0]), int(kernel_size[1]) + bs = sigma.shape[0] + kernel_x = _gaussian(kx, sigma[:, 1].view(bs, 1)) + kernel_y = _gaussian(ky, sigma[:, 0].view(bs, 1)) + out_x = _filter2d(input, kernel_x[..., None, :]) + out = _filter2d(out_x, kernel_y[..., None]) + + return out diff --git a/diffusers3/pipelines/t2i_adapter/__init__.py b/diffusers3/pipelines/t2i_adapter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..08c22a2707fe55770a519db481954881c1cad26e --- /dev/null +++ b/diffusers3/pipelines/t2i_adapter/__init__.py @@ -0,0 +1,47 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + 
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_stable_diffusion_adapter"] = ["StableDiffusionAdapterPipeline"] + _import_structure["pipeline_stable_diffusion_xl_adapter"] = ["StableDiffusionXLAdapterPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline + from .pipeline_stable_diffusion_xl_adapter import StableDiffusionXLAdapterPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py b/diffusers3/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb7c26bb6a2a411f803a647b4eccbabf331c4e6 --- /dev/null +++ b/diffusers3/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py @@ -0,0 +1,942 @@ +# Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + BaseOutput, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker + + +@dataclass +class StableDiffusionAdapterPipelineOutput(BaseOutput): + """ + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. + nsfw_content_detected (`List[bool]`) + List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, or `None` if safety checking could not be performed. 
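For reference, a minimal sketch of consuming this output (assuming `pipe` and `color_palette` have been built as in the example docstring below; the prompt is illustrative):

```py
out = pipe("At night, glowing cubes in front of the beach", image=color_palette)
images = out.images                   # List[PIL.Image.Image] of length batch_size
flags = out.nsfw_content_detected     # List[bool], or None if the safety checker is disabled
```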
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from PIL import Image + >>> from diffusers.utils import load_image + >>> import torch + >>> from diffusers import StableDiffusionAdapterPipeline, T2IAdapter + + >>> image = load_image( + ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png" + ... ) + + >>> color_palette = image.resize((8, 8)) + >>> color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST) + + >>> adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16) + >>> pipe = StableDiffusionAdapterPipeline.from_pretrained( + ... "CompVis/stable-diffusion-v1-4", + ... adapter=adapter, + ... torch_dtype=torch.float16, + ... ) + + >>> pipe.to("cuda") + + >>> out_image = pipe( + ... "At night, glowing cubes in front of the beach", + ... image=color_palette, + ... ).images[0] + ``` +""" + + +def _preprocess_adapter_image(image, height, width): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] + image = [ + i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image + ] # expand [h, w] or [h, w, c] to [b, h, w, c] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + if image[0].ndim == 3: + image = torch.stack(image, dim=0) + elif image[0].ndim == 4: + image = torch.cat(image, dim=0) + else: + raise ValueError( + f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" + ) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. 
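A rough usage sketch of this helper (the `scheduler` variable is assumed to exist; whether a given scheduler accepts custom `timesteps` or `sigmas` depends on its own `set_timesteps` signature):

```py
# Default path: let the scheduler derive its schedule from a plain step count.
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cuda")

# Custom path: pass either `timesteps` or `sigmas` (never both) and leave the others None;
# the helper forwards them to scheduler.set_timesteps(...) and reads the schedule back.
timesteps, num_inference_steps = retrieve_timesteps(
    scheduler, timesteps=[999, 749, 499, 249, 0], device="cuda"
)
```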
+ """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionAdapterPipeline(DiffusionPipeline, StableDiffusionMixin): + r""" + Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter + https://arxiv.org/abs/2302.08453 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a + list, the outputs from each Adapter are added together to create one combined additional conditioning. + adapter_weights (`List[float]`, *optional*, defaults to None): + List of floats representing the weight which will be multiply to each adapter's output before adding them + together. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. 
+ feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->adapter->unet->vae" + _optional_components = ["safety_checker", "feature_extractor"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + + if safety_checker is not None and feature_extractor is None: + raise ValueError( + "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" + " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." + ) + + if isinstance(adapter, (list, tuple)): + adapter = MultiAdapter(adapter) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + adapter=adapter, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.register_to_config(requires_safety_checker=requires_safety_checker) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
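        # Emit the deprecation warning, then delegate to `encode_prompt()` and re-concatenate
        # its (positive, negative) tuple into the single [negative, positive] tensor that
        # legacy callers of `_encode_prompt()` expect.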
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
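A sketch of precomputing embeddings with this method and feeding them back into the pipeline (assumes `pipe` is a loaded `StableDiffusionAdapterPipeline` on CUDA and `control_image` is the adapter conditioning image; prompts are illustrative):

```py
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    "a cozy cabin in the woods, golden hour",
    device="cuda",
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="low quality, blurry",
)
image = pipe(
    image=control_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
).images[0]
```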
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + 
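            # The checker returns the images (with any flagged samples blacked out) together
            # with a per-image boolean indicating whether NSFW content was detected.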
image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents + def decode_latents(self, latents): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + image, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if isinstance(self.adapter, MultiAdapter): + if not isinstance(image, list): + raise ValueError( + "MultiAdapter is enabled, but `image` is not a list. Please pass a list of images to `image`." + ) + + if len(image) != len(self.adapter.adapters): + raise ValueError( + f"MultiAdapter requires passing the same number of images as adapters. Given {len(image)} images and {len(self.adapter.adapters)} adapters." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def _default_height_width(self, height, width, image): + # NOTE: It is possible that a list of images have different + # dimensions for each image, so just checking the first image + # is not _exactly_ correct, but it is simple. + while isinstance(image, list): + image = image[0] + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[-2] + + # round down to nearest multiple of `self.adapter.downscale_factor` + height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[-1] + + # round down to nearest multiple of `self.adapter.downscale_factor` + width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor + + return height, width + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
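A standalone sketch of the same computation (mirroring the body below) to make the shapes concrete:

```py
import torch

w = torch.tensor([1.0, 7.5])        # two guidance scales
embedding_dim = 8
half_dim = embedding_dim // 2
freq = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
freq = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -freq)
emb = (w * 1000.0)[:, None] * freq[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
print(emb.shape)                    # torch.Size([2, 8])
```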
+ """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + adapter_conditioning_scale: Union[float, List[float]] = 1.0, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): + The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the + type is specified as `torch.Tensor`, it is passed to Adapter as is. PIL.Image.Image` can also be + accepted as an image. The control image is automatically resized to fit the output image. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. 
+ sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. + Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
+ adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the + residual in the original unet. If multiple adapters are specified in init, you can set the + corresponding scale as a list. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a + `tuple. When returning a tuple, the first element is a list with the generated images, and the second + element is a list of `bool`s denoting whether the corresponding generated image likely represents + "not-safe-for-work" (nsfw) content, according to the `safety_checker`. + """ + # 0. Default height and width to unet + height, width = self._default_height_width(height, width, image) + device = self._execution_device + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, image, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + self._guidance_scale = guidance_scale + + if isinstance(self.adapter, MultiAdapter): + adapter_input = [] + + for one_image in image: + one_image = _preprocess_adapter_image(one_image, height, width) + one_image = one_image.to(device=device, dtype=self.adapter.dtype) + adapter_input.append(one_image) + else: + adapter_input = _preprocess_adapter_image(image, height, width) + adapter_input = adapter_input.to(device=device, dtype=self.adapter.dtype) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 3. Encode input prompt + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.5 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. Denoising loop + if isinstance(self.adapter, MultiAdapter): + adapter_state = self.adapter(adapter_input, adapter_conditioning_scale) + for k, v in enumerate(adapter_state): + adapter_state[k] = v + else: + adapter_state = self.adapter(adapter_input) + for k, v in enumerate(adapter_state): + adapter_state[k] = v * adapter_conditioning_scale + if num_images_per_prompt > 1: + for k, v in enumerate(adapter_state): + adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) + if self.do_classifier_free_guidance: + for k, v in enumerate(adapter_state): + adapter_state[k] = torch.cat([v] * 2, dim=0) + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + down_intrablock_additional_residuals=[state.clone() for state in adapter_state], + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type == "latent": + image = latents + has_nsfw_concept = None + elif output_type == "pil": + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # 10. Convert to PIL + image = self.numpy_to_pil(image) + else: + # 8. Post-processing + image = self.decode_latents(latents) + + # 9. 
Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionAdapterPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/diffusers3/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py b/diffusers3/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..0ea197e42e6212785355512692e2b098776c6cbc --- /dev/null +++ b/diffusers3/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py @@ -0,0 +1,1277 @@ +# Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +) +from ...models import AutoencoderKL, ImageProjection, MultiAdapter, T2IAdapter, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + PIL_INTERPOLATION, + USE_PEFT_BACKEND, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler + >>> from diffusers.utils import load_image + + >>> sketch_image = load_image("https://huggingface.co/Adapter/t2iadapter/resolve/main/sketch.png").convert("L") + + >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" + + >>> adapter = T2IAdapter.from_pretrained( + ... "Adapter/t2iadapter", + ... subfolder="sketch_sdxl_1.0", + ... torch_dtype=torch.float16, + ... adapter_type="full_adapter_xl", + ... ) + >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") + + >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + ... model_id, adapter=adapter, torch_dtype=torch.float16, variant="fp16", scheduler=scheduler + ... ).to("cuda") + + >>> generator = torch.manual_seed(42) + >>> sketch_image_out = pipe( + ... prompt="a photo of a dog in real world, high quality", + ... 
negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", + ... image=sketch_image, + ... generator=generator, + ... guidance_scale=7.5, + ... ).images[0] + ``` +""" + + +def _preprocess_adapter_image(image, height, width): + if isinstance(image, torch.Tensor): + return image + elif isinstance(image, PIL.Image.Image): + image = [image] + + if isinstance(image[0], PIL.Image.Image): + image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] + image = [ + i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image + ] # expand [h, w] or [h, w, c] to [b, h, w, c] + image = np.concatenate(image, axis=0) + image = np.array(image).astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + elif isinstance(image[0], torch.Tensor): + if image[0].ndim == 3: + image = torch.stack(image, dim=0) + elif image[0].ndim == 4: + image = torch.cat(image, dim=0) + else: + raise ValueError( + f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" + ) + return image + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. 
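Referring back to `rescale_noise_cfg` above: a toy standalone check of that rescaling, which matches the standard deviation of the guided prediction to that of the text-conditioned prediction and then blends by `guidance_rescale` (all tensors here are synthetic, for illustration only):

```py
import torch

noise_pred_text = torch.randn(1, 4, 8, 8)
noise_cfg = noise_pred_text * 3.0                      # over-amplified guided prediction
std_text = noise_pred_text.std(dim=list(range(1, 4)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, 4)), keepdim=True)
rescaled = noise_cfg * (std_text / std_cfg)            # match std of the text prediction
out = 0.7 * rescaled + (1 - 0.7) * noise_cfg           # guidance_rescale = 0.7
```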
+ """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class StableDiffusionXLAdapterPipeline( + DiffusionPipeline, + StableDiffusionMixin, + TextualInversionLoaderMixin, + StableDiffusionXLLoraLoaderMixin, + IPAdapterMixin, + FromSingleFileMixin, +): + r""" + Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter + https://arxiv.org/abs/2302.08453 + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files + - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters + + Args: + adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): + Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a + list, the outputs from each Adapter are added together to create one combined additional conditioning. + adapter_weights (`List[float]`, *optional*, defaults to None): + List of floats representing the weight which will be multiply to each adapter's output before adding them + together. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "feature_extractor", + "image_encoder", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], + scheduler: KarrasDiffusionSchedulers, + force_zeros_for_empty_prompt: bool = True, + feature_extractor: CLIPImageProcessor = None, + image_encoder: CLIPVisionModelWithProjection = None, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + adapter=adapter, + scheduler=scheduler, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + self.default_sample_size = self.unet.config.sample_size + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+ negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + 
text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. + prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image + def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + if output_hidden_states: + image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] + image_enc_hidden_states = 
image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_enc_hidden_states = self.image_encoder( + torch.zeros_like(image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( + num_images_per_prompt, dim=0 + ) + return image_enc_hidden_states, uncond_image_enc_hidden_states + else: + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + uncond_image_embeds = torch.zeros_like(image_embeds) + + return image_embeds, uncond_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance + ): + image_embeds = [] + if do_classifier_free_guidance: + negative_image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." + ) + + for single_ip_adapter_image, image_proj_layer in zip( + ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers + ): + output_hidden_state = not isinstance(image_proj_layer, ImageProjection) + single_image_embeds, single_negative_image_embeds = self.encode_image( + single_ip_adapter_image, device, 1, output_hidden_state + ) + + image_embeds.append(single_image_embeds[None, :]) + if do_classifier_free_guidance: + negative_image_embeds.append(single_negative_image_embeds[None, :]) + else: + for single_image_embeds in ip_adapter_image_embeds: + if do_classifier_free_guidance: + single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) + negative_image_embeds.append(single_negative_image_embeds) + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for i, single_image_embeds in enumerate(image_embeds): + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + if do_classifier_free_guidance: + single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0) + single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0) + + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
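+ # Note: both `eta` and `generator` are forwarded only when the scheduler's `step()` signature accepts them
+ # (checked via `inspect` below); other schedulers simply receive an empty kwargs dict.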
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + ip_adapter_image=None, + ip_adapter_image_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
+ ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + if ip_adapter_image is not None and ip_adapter_image_embeds is not None: + raise ValueError( + "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." + ) + + if ip_adapter_image_embeds is not None: + if not isinstance(ip_adapter_image_embeds, list): + raise ValueError( + f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" + ) + elif ip_adapter_image_embeds[0].ndim not in [3, 4]: + raise ValueError( + f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width + def _default_height_width(self, height, width, image): + # NOTE: It is possible that a list of images have different + # dimensions for each image, so just checking the first image + # is not _exactly_ correct, but it is simple. + while isinstance(image, list): + image = image[0] + + if height is None: + if isinstance(image, PIL.Image.Image): + height = image.height + elif isinstance(image, torch.Tensor): + height = image.shape[-2] + + # round down to nearest multiple of `self.adapter.downscale_factor` + height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor + + if width is None: + if isinstance(image, PIL.Image.Image): + width = image.width + elif isinstance(image, torch.Tensor): + width = image.shape[-1] + + # round down to nearest multiple of `self.adapter.downscale_factor` + width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor + + return height, width + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
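+ # Classifier-free guidance is treated as active only when `guidance_scale > 1` and the UNet carries no
+ # guidance-scale embedding (`time_cond_proj_dim is None`); guidance-distilled UNets (e.g. LCM) bake the
+ # guidance in and therefore skip the extra unconditional pass.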
+ @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + image: PipelineImageInput = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + adapter_conditioning_scale: Union[float, List[float]] = 1.0, + adapter_conditioning_factor: float = 1.0, + clip_skip: Optional[int] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + image (`torch.Tensor`, `PIL.Image.Image`, `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`): + The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the + type is specified as `torch.Tensor`, it is passed to Adapter as is. PIL.Image.Image` can also be + accepted as an image. The control image is automatically resized to fit the output image. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. Anything below 512 pixels won't work well for + [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) + and checkpoints that are not specifically fine-tuned on low resolutions. 
+ num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. + ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): + Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of + IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should + contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not + provided, embeddings are computed from the `ip_adapter_image` input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`] + instead of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `ฯ†` in equation 16. of + [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). + Guidance rescale factor should fix overexposure when using zero terminal SNR. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. 
Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a specific image resolution. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + To negatively condition the generation process based on specific crop coordinates. Part of SDXL's + micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + To negatively condition the generation process based on a target image resolution. It should be the same + as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more + information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. + adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): + The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the + residual in the original unet. If multiple adapters are specified in init, you can set the + corresponding scale as a list. + adapter_conditioning_factor (`float`, *optional*, defaults to 1.0): + The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is + `0.0`, adapter is not applied at all. If `adapter_conditioning_factor` is `1.0`, adapter is applied for + all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + + Examples: + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + # 0.
Default height and width to unet + + height, width = self._default_height_width(height, width, image) + device = self._execution_device + + if isinstance(self.adapter, MultiAdapter): + adapter_input = [] + + for one_image in image: + one_image = _preprocess_adapter_image(one_image, height, width) + one_image = one_image.to(device=device, dtype=self.adapter.dtype) + adapter_input.append(one_image) + else: + adapter_input = _preprocess_adapter_image(image, height, width) + adapter_input = adapter_input.to(device=device, dtype=self.adapter.dtype) + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ip_adapter_image, + ip_adapter_image_embeds, + ) + + self._guidance_scale = guidance_scale + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + # 3.1 Encode input prompt + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + clip_skip=clip_skip, + ) + + # 3.2 Encode ip_adapter_image + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + self.do_classifier_free_guidance, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, num_inference_steps, device, timesteps, sigmas + ) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6.1 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 6.2 Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + # 7. 
Prepare added time ids & embeddings & adapter features + if isinstance(self.adapter, MultiAdapter): + adapter_state = self.adapter(adapter_input, adapter_conditioning_scale) + for k, v in enumerate(adapter_state): + adapter_state[k] = v + else: + adapter_state = self.adapter(adapter_input) + for k, v in enumerate(adapter_state): + adapter_state[k] = v * adapter_conditioning_scale + if num_images_per_prompt > 1: + for k, v in enumerate(adapter_state): + adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) + if self.do_classifier_free_guidance: + for k, v in enumerate(adapter_state): + adapter_state[k] = torch.cat([v] * 2, dim=0) + + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + # Apply denoising_end + if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + added_cond_kwargs["image_embeds"] = image_embeds + + # predict the noise residual + if i < int(num_inference_steps * adapter_conditioning_factor): + down_intrablock_additional_residuals = [state.clone() for state in adapter_state] + else: + down_intrablock_additional_residuals = None + + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + timestep_cond=timestep_cond, + cross_attention_kwargs=cross_attention_kwargs, + down_intrablock_additional_residuals=down_intrablock_additional_residuals, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/test/.ipynb_checkpoints/Untitled-checkpoint.ipynb b/diffusers3/pipelines/test/.ipynb_checkpoints/Untitled-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..363fcab7ed6e9634e198cf5555ceb88932c9a245 --- /dev/null +++ b/diffusers3/pipelines/test/.ipynb_checkpoints/Untitled-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/test/.ipynb_checkpoints/__init__-checkpoint.py b/diffusers3/pipelines/test/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..363fcab7ed6e9634e198cf5555ceb88932c9a245 --- /dev/null +++ b/diffusers3/pipelines/test/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/test/.ipynb_checkpoints/add-checkpoint.py b/diffusers3/pipelines/test/.ipynb_checkpoints/add-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..dba8e5810e038b7b4487a7b0e1374f7a0994aa8d --- /dev/null +++ b/diffusers3/pipelines/test/.ipynb_checkpoints/add-checkpoint.py @@ -0,0 +1,5 @@ +from add2 import add2_test + +class return_add(): + def add(a,b): + return add2_test(a,b) \ No newline at end of file diff --git a/diffusers3/pipelines/test/.ipynb_checkpoints/add2-checkpoint.py b/diffusers3/pipelines/test/.ipynb_checkpoints/add2-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..f9743ef5f0c29971debfe1fd9ae021aff6bf4308 --- /dev/null +++ b/diffusers3/pipelines/test/.ipynb_checkpoints/add2-checkpoint.py @@ -0,0 +1,3 @@ +class add2_test(): + def add2(a,b): + return a+b \ No newline at end of file diff --git a/diffusers3/pipelines/test/Untitled.ipynb b/diffusers3/pipelines/test/Untitled.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9935e2086e9b921972f8dbd7a9d392db6c80349d --- /dev/null +++ b/diffusers3/pipelines/test/Untitled.ipynb @@ -0,0 +1,181 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + 
"id": "17fc7601-fd75-4f07-a511-888e9a3d6dea", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from add import add_test" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "aa4e54af-cedb-4305-8b11-448587eb79a3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "3" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "add_test.add(1,2)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "e23f3ef3-39b6-4195-89ff-8fc9a520b5fc", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "ename": "ImportError", + "evalue": "attempted relative import with no known parent package", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[11], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mminus\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m minus_test\n", + "\u001b[0;31mImportError\u001b[0m: attempted relative import with no known parent package" + ] + } + ], + "source": [ + "from ..minus import minus_test" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "eea8194d-d83d-41f2-88bb-6bf28d963cab", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "ename": "NameError", + "evalue": "name '__file__' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[12], line 6\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# ์ƒ์œ„ ํด๋” ๊ฒฝ๋กœ ์ถ”๊ฐ€\u001b[39;00m\n\u001b[0;32m----> 6\u001b[0m sys\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mappend(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mabspath(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mdirname(\u001b[38;5;18;43m__file__\u001b[39;49m), \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m..\u001b[39m\u001b[38;5;124m'\u001b[39m)))\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# minus ๋ชจ๋“ˆ ์ž„ํฌํŠธ\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mminus\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m minus_test\n", + "\u001b[0;31mNameError\u001b[0m: name '__file__' is not defined" + ] + } + ], + "source": [ + "# subfolder/my_script.py\n", + "import sys\n", + "import os\n", + "\n", + "# ์ƒ์œ„ ํด๋” ๊ฒฝ๋กœ ์ถ”๊ฐ€\n", + "sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n", + "\n", + "# minus ๋ชจ๋“ˆ ์ž„ํฌํŠธ\n", + "\n", + "# ์ด์ œ minus_test ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "5234aebf-4a5a-4c20-8cf7-ff2516fd7d90", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "ename": "ImportError", + "evalue": "attempted relative import with no known parent package", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most 
recent call last)", + "Cell \u001b[0;32mIn[14], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mminus\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m minus_test\n", + "\u001b[0;31mImportError\u001b[0m: attempted relative import with no known parent package" + ] + } + ], + "source": [ + "from ..minus import minus_test\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "08273981-2971-4d34-8bb6-8813e3ca7467", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/root/data/diffusers/src/diffusers/pipelines/test\n" + ] + } + ], + "source": [ + "!pwd" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "8729382f-c388-4159-91f4-7a0fb458a958", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "sys.path.append('C:/root/data/diffusers/src/diffusers/pipelines/test') # Windows ๊ฒฝ๋กœ ์˜ˆ์‹œ\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "508cd858-f724-4587-aece-8038128ca3be", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/test/__init__.py b/diffusers3/pipelines/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e5c6c6e2a0a71ebafeb0d553e6adcc1210911c6f --- /dev/null +++ b/diffusers3/pipelines/test/__init__.py @@ -0,0 +1,33 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "47bd84fa-736c-4fae-84de-f7a5b32d908c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/test/add.py b/diffusers3/pipelines/test/add.py new file mode 100644 index 0000000000000000000000000000000000000000..dba8e5810e038b7b4487a7b0e1374f7a0994aa8d --- /dev/null +++ b/diffusers3/pipelines/test/add.py @@ -0,0 +1,5 @@ +from add2 import add2_test + +class return_add(): + def add(a,b): + return add2_test(a,b) \ No newline at end of file diff --git a/diffusers3/pipelines/test/add2.py b/diffusers3/pipelines/test/add2.py new file mode 100644 index 0000000000000000000000000000000000000000..f9743ef5f0c29971debfe1fd9ae021aff6bf4308 --- /dev/null +++ b/diffusers3/pipelines/test/add2.py @@ -0,0 +1,3 @@ +class add2_test(): + def add2(a,b): + return a+b \ No newline at end of file diff --git a/diffusers3/pipelines/test/subfolder/.ipynb_checkpoints/Untitled-checkpoint.ipynb b/diffusers3/pipelines/test/subfolder/.ipynb_checkpoints/Untitled-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..363fcab7ed6e9634e198cf5555ceb88932c9a245 --- /dev/null +++ 
b/diffusers3/pipelines/test/subfolder/.ipynb_checkpoints/Untitled-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/test/subfolder/.ipynb_checkpoints/__init__-checkpoint.py b/diffusers3/pipelines/test/subfolder/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffusers3/pipelines/test/subfolder/Untitled.ipynb b/diffusers3/pipelines/test/subfolder/Untitled.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..9227549e5b86851897c8e72719862f3efcc4678b --- /dev/null +++ b/diffusers3/pipelines/test/subfolder/Untitled.ipynb @@ -0,0 +1,102 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "433c2abb-afe2-4b82-a4c7-20699a6223aa", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "ename": "NameError", + "evalue": "name '__file__' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 6\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mos\u001b[39;00m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;66;03m# ์ƒ์œ„ ํด๋” ๊ฒฝ๋กœ ์ถ”๊ฐ€\u001b[39;00m\n\u001b[0;32m----> 6\u001b[0m sys\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mappend(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mabspath(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mdirname(\u001b[38;5;18;43m__file__\u001b[39;49m), \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m..\u001b[39m\u001b[38;5;124m'\u001b[39m)))\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# minus ๋ชจ๋“ˆ ์ž„ํฌํŠธ\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mminus\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m minus_test\n", + "\u001b[0;31mNameError\u001b[0m: name '__file__' is not defined" + ] + } + ], + "source": [ + "# subfolder/my_script.py\n", + "import sys\n", + "import os\n", + "\n", + "# ์ƒ์œ„ ํด๋” ๊ฒฝ๋กœ ์ถ”๊ฐ€\n", + "sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n", + "\n", + "# minus ๋ชจ๋“ˆ ์ž„ํฌํŠธ\n", + "from minus import minus_test\n", + "\n", + "# ์ด์ œ minus_test ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7cf01f7b-70c1-4b34-aa5d-5e3d236f1f2f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'add'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 10\u001b[0m\n\u001b[1;32m 6\u001b[0m sys\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mappend(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mC:/root/data/diffusers/src/diffusers/pipelines/test\u001b[39m\u001b[38;5;124m'\u001b[39m) \u001b[38;5;66;03m# Windows ๊ฒฝ๋กœ ์˜ˆ์‹œ\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m# sys.path.append('/Users/user/Documents/my_project') # macOS/Linux ๊ฒฝ๋กœ ์˜ˆ์‹œ\u001b[39;00m\n\u001b[1;32m 8\u001b[0m \n\u001b[1;32m 9\u001b[0m \u001b[38;5;66;03m# minus 
๋ชจ๋“ˆ ์ž„ํฌํŠธ\u001b[39;00m\n\u001b[0;32m---> 10\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01madd\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m add_test\n\u001b[1;32m 12\u001b[0m \u001b[38;5;66;03m# ์ด์ œ minus_test ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค\u001b[39;00m\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'add'" + ] + } + ], + "source": [ + "# subfolder/my_script.py\n", + "import sys\n", + "import os\n", + "\n", + "# ์ง์ ‘ ์ƒ์œ„ ํด๋” ๊ฒฝ๋กœ ์„ค์ •\n", + "sys.path.append('C:/root/data/diffusers/src/diffusers/pipelines/test') # Windows ๊ฒฝ๋กœ ์˜ˆ์‹œ\n", + "# sys.path.append('/Users/user/Documents/my_project') # macOS/Linux ๊ฒฝ๋กœ ์˜ˆ์‹œ\n", + "\n", + "# minus ๋ชจ๋“ˆ ์ž„ํฌํŠธ\n", + "from add import add_test\n", + "\n", + "# ์ด์ œ minus_test ํด๋ž˜์Šค๋ฅผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d6a9a511-0207-44f4-a65c-f92e453a743c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/diffusers3/pipelines/test/subfolder/__init__.py b/diffusers3/pipelines/test/subfolder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffusers3/pipelines/test/subfolder/untitled.txt b/diffusers3/pipelines/test/subfolder/untitled.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffusers3/pipelines/text_to_video_synthesis/__init__.py b/diffusers3/pipelines/text_to_video_synthesis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d8fdb92769bb3dcb2dd7696115b16f197062262 --- /dev/null +++ b/diffusers3/pipelines/text_to_video_synthesis/__init__.py @@ -0,0 +1,54 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_output"] = ["TextToVideoSDPipelineOutput"] + _import_structure["pipeline_text_to_video_synth"] = ["TextToVideoSDPipeline"] + _import_structure["pipeline_text_to_video_synth_img2img"] = ["VideoToVideoSDPipeline"] + _import_structure["pipeline_text_to_video_zero"] = ["TextToVideoZeroPipeline"] + _import_structure["pipeline_text_to_video_zero_sdxl"] = ["TextToVideoZeroSDXLPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_output import 
TextToVideoSDPipelineOutput + from .pipeline_text_to_video_synth import TextToVideoSDPipeline + from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline + from .pipeline_text_to_video_zero import TextToVideoZeroPipeline + from .pipeline_text_to_video_zero_sdxl import TextToVideoZeroSDXLPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/text_to_video_synthesis/pipeline_output.py b/diffusers3/pipelines/text_to_video_synthesis/pipeline_output.py new file mode 100644 index 0000000000000000000000000000000000000000..040bf0efba8465d830af5e6686bc3f544585f63f --- /dev/null +++ b/diffusers3/pipelines/text_to_video_synthesis/pipeline_output.py @@ -0,0 +1,26 @@ +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL +import torch + +from ...utils import ( + BaseOutput, +) + + +@dataclass +class TextToVideoSDPipelineOutput(BaseOutput): + """ + Output class for text-to-video pipelines. + + Args: + frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): + List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing + denoised + PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape + `(batch_size, num_frames, channels, height, width)` + """ + + frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]]] diff --git a/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py new file mode 100644 index 0000000000000000000000000000000000000000..cdd72b97f86b62c404347478e9698b7d889a6bf1 --- /dev/null +++ b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py @@ -0,0 +1,643 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet3DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . 
import TextToVideoSDPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import TextToVideoSDPipeline + >>> from diffusers.utils import export_to_video + + >>> pipe = TextToVideoSDPipeline.from_pretrained( + ... "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16" + ... ) + >>> pipe.enable_model_cpu_offload() + + >>> prompt = "Spiderman is surfing" + >>> video_frames = pipe(prompt).frames[0] + >>> video_path = export_to_video(video_frames) + >>> video_path + ``` +""" + + +class TextToVideoSDPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + r""" + Pipeline for text-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet3DConditionModel`]): + A [`UNet3DConditionModel`] to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet3DConditionModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + 
return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + def prepare_latents( + self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None + ): + shape = ( + batch_size, + num_channels_latents, + num_frames, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_frames: int = 16, + num_inference_steps: int = 50, + guidance_scale: float = 9.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "np", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated video. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated video. + num_frames (`int`, *optional*, defaults to 16): + The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds + amounts to 2 seconds of video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality videos at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. Latents should be of shape + `(batch_size, num_channel, num_frames, height, width)`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + output_type (`str`, *optional*, defaults to `"np"`): + The output format of the generated video. Choose between `torch.Tensor` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + num_images_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds + ) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. 
Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + num_frames, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # reshape latents + bsz, channel, frames, width, height = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # reshape latents back + latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 8. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 9. 
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return TextToVideoSDPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..92bf1d388c13cae67fae4de20a0994d68d874049 --- /dev/null +++ b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py @@ -0,0 +1,699 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet3DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ...video_processor import VideoProcessor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from . import TextToVideoSDPipelineOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler + >>> from diffusers.utils import export_to_video + + >>> pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) + >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe.to("cuda") + + >>> prompt = "spiderman running in the desert" + >>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0] + >>> # safe low-res video + >>> video_path = export_to_video(video_frames, output_video_path="./video_576_spiderman.mp4") + + >>> # let's offload the text-to-image model + >>> pipe.to("cpu") + + >>> # and load the image-to-image model + >>> pipe = DiffusionPipeline.from_pretrained( + ... "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/15" + ... 
) + >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + >>> pipe.enable_model_cpu_offload() + + >>> # The VAE consumes A LOT of memory, let's make sure we run it in sliced mode + >>> pipe.vae.enable_slicing() + + >>> # now let's upscale it + >>> video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames] + + >>> # and denoise it + >>> video_frames = pipe(prompt, video=video, strength=0.6).frames[0] + >>> video_path = export_to_video(video_frames, output_video_path="./video_1024_spiderman.mp4") + >>> video_path + ``` +""" + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents +def retrieve_latents( + encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" +): + if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": + return encoder_output.latent_dist.sample(generator) + elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": + return encoder_output.latent_dist.mode() + elif hasattr(encoder_output, "latents"): + return encoder_output.latents + else: + raise AttributeError("Could not access latents of provided encoder_output") + + +class VideoToVideoSDPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + r""" + Pipeline for text-guided video-to-video generation. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + The pipeline also inherits the following loading methods: + - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet3DConditionModel`]): + A [`UNet3DConditionModel`] to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
+ """ + + model_cpu_offload_seq = "text_encoder->unet->vae" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet3DConditionModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
+ clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + + batch_size, channels, num_frames, height, width = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width) + + image = self.vae.decode(latents).sample + video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4) + # we 
always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + video = video.float() + return video + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + strength, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if strength < 0 or strength > 1: + raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." 
+ ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps + def get_timesteps(self, num_inference_steps, strength, device): + # get the original timestep using init_timestep + init_timestep = min(int(num_inference_steps * strength), num_inference_steps) + + t_start = max(num_inference_steps - init_timestep, 0) + timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] + if hasattr(self.scheduler, "set_begin_index"): + self.scheduler.set_begin_index(t_start * self.scheduler.order) + + return timesteps, num_inference_steps - t_start + + def prepare_latents(self, video, timestep, batch_size, dtype, device, generator=None): + video = video.to(device=device, dtype=dtype) + + # change from (b, c, f, h, w) -> (b * f, c, w, h) + bsz, channel, frames, width, height = video.shape + video = video.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + + if video.shape[1] == 4: + init_latents = video + else: + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + elif isinstance(generator, list): + init_latents = [ + retrieve_latents(self.vae.encode(video[i : i + 1]), generator=generator[i]) + for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = retrieve_latents(self.vae.encode(video), generator=generator) + + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `video` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + shape = init_latents.shape + noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + + # get latents + init_latents = self.scheduler.add_noise(init_latents, noise, timestep) + latents = init_latents + + latents = latents[None, :].reshape((bsz, frames, latents.shape[1]) + latents.shape[2:]).permute(0, 2, 1, 3, 4) + + return latents + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]] = None, + video: Union[List[np.ndarray], torch.Tensor] = None, + strength: float = 0.6, + num_inference_steps: int = 50, + guidance_scale: float = 15.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "np", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + clip_skip: Optional[int] = None, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + video (`List[np.ndarray]` or `torch.Tensor`): + `video` frames or tensor representing a video batch to be used as the starting point for the process. 
Can also accept video latents as `video`; if latents are passed directly, they will not be encoded again.
+                strength (`float`, *optional*, defaults to 0.6):
+                    Indicates extent to transform the reference `video`. Must be between 0 and 1. `video` is used as a
+                    starting point, adding more noise to it the larger the `strength`. The number of denoising steps
+                    depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the
+                    denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of
+                    1 essentially ignores `video`.
+                num_inference_steps (`int`, *optional*, defaults to 50):
+                    The number of denoising steps. More denoising steps usually lead to higher quality videos at the
+                    expense of slower inference.
+                guidance_scale (`float`, *optional*, defaults to 15.0):
+                    A higher guidance scale value encourages the model to generate images closely linked to the text
+                    `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+                negative_prompt (`str` or `List[str]`, *optional*):
+                    The prompt or prompts to guide what not to include in video generation. If not defined, you need to
+                    pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+                eta (`float`, *optional*, defaults to 0.0):
+                    Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                    to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+                generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                    A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                    generation deterministic.
+                latents (`torch.Tensor`, *optional*):
+                    Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
+                    generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                    tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
+                    `(batch_size, num_channel, num_frames, height, width)`.
+                prompt_embeds (`torch.Tensor`, *optional*):
+                    Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                    provided, text embeddings are generated from the `prompt` input argument.
+                negative_prompt_embeds (`torch.Tensor`, *optional*):
+                    Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                    not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+                output_type (`str`, *optional*, defaults to `"np"`):
+                    The output format of the generated video. Choose between `torch.Tensor` and `np.array`.
+                return_dict (`bool`, *optional*, defaults to `True`):
+                    Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
+                    of a plain tuple.
+                callback (`Callable`, *optional*):
+                    A function that calls every `callback_steps` steps during inference. The function is called with the
+                    following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+                callback_steps (`int`, *optional*, defaults to 1):
+                    The frequency at which the `callback` function is called. If not specified, the callback is called at
+                    every step.
+ cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + Examples: + + Returns: + [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is + returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. + """ + # 0. Default height and width to unet + num_images_per_prompt = 1 + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=text_encoder_lora_scale, + clip_skip=clip_skip, + ) + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + # 4. Preprocess video + video = self.video_processor.preprocess_video(video) + + # 5. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) + latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) + + # 6. Prepare latent variables + latents = self.prepare_latents(video, latent_timestep, batch_size, prompt_embeds.dtype, device, generator) + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 8. 
Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # reshape latents + bsz, channel, frames, width, height = latents.shape + latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # reshape latents back + latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + + # 9. Post processing + if output_type == "latent": + video = latents + else: + video_tensor = self.decode_latents(latents) + video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type) + + # 10. 
Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (video,) + + return TextToVideoSDPipelineOutput(frames=video) diff --git a/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py new file mode 100644 index 0000000000000000000000000000000000000000..c95c7f1b9625564757b3747546dc977b7622e577 --- /dev/null +++ b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py @@ -0,0 +1,981 @@ +import copy +import inspect +from dataclasses import dataclass +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn.functional as F +from torch.nn.functional import grid_sample +from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin +from ..stable_diffusion import StableDiffusionSafetyChecker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def rearrange_0(tensor, f): + F, C, H, W = tensor.size() + tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) + return tensor + + +def rearrange_1(tensor): + B, C, F, H, W = tensor.size() + return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) + + +def rearrange_3(tensor, f): + F, D, C = tensor.size() + return torch.reshape(tensor, (F // f, f, D, C)) + + +def rearrange_4(tensor): + B, F, D, C = tensor.size() + return torch.reshape(tensor, (B * F, D, C)) + + +class CrossFrameAttnProcessor: + """ + Cross frame attention processor. Each frame attends the first frame. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
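+
+        A minimal usage sketch (illustrative only; `pipe` is assumed to be an already-loaded Stable Diffusion
+        pipeline and is not defined in this file):
+            pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))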
+ """ + + def __init__(self, batch_size=2): + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = key.size()[0] // self.batch_size + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class CrossFrameAttnProcessor2_0: + """ + Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
+ """ + + def __init__(self, batch_size=2): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + inner_dim = hidden_states.shape[-1] + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = max(1, key.size()[0] // self.batch_size) + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + return hidden_states + + +@dataclass +class TextToVideoPipelineOutput(BaseOutput): + r""" + Output class for zero-shot text-to-video pipeline. + + Args: + images (`[List[PIL.Image.Image]`, `np.ndarray`]): + List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, + num_channels)`. + nsfw_content_detected (`[List[bool]]`): + List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or + `None` if safety checking could not be performed. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + nsfw_content_detected: Optional[List[bool]] + + +def coords_grid(batch, ht, wd, device): + # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +def warp_single_latent(latent, reference_flow): + """ + Warp latent of a single frame with given flow + + Args: + latent: latent code of a single frame + reference_flow: flow which to warp the latent with + + Returns: + warped: warped latent + """ + _, _, H, W = reference_flow.size() + _, _, h, w = latent.size() + coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) + + coords_t0 = coords0 + reference_flow + coords_t0[:, 0] /= W + coords_t0[:, 1] /= H + + coords_t0 = coords_t0 * 2.0 - 1.0 + coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") + coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) + + warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") + return warped + + +def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): + """ + Create translation motion field + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + device: device + dtype: dtype + + Returns: + + """ + seq_length = len(frame_ids) + reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) + for fr_idx in range(seq_length): + reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) + reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) + return reference_flow + + +def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): + """ + Creates translation motion and warps the latents accordingly + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + latents: latent codes of frames + + Returns: + warped_latents: warped latents + """ + motion_field = create_motion_field( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + frame_ids=frame_ids, + device=latents.device, + dtype=latents.dtype, + ) + warped_latents = latents.clone().detach() + for i in range(len(warped_latents)): + warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) + return warped_latents + + +class TextToVideoZeroPipeline( + DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin +): + r""" + Pipeline for zero-shot text-to-video generation using Stable Diffusion. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. 
+ text_encoder ([`CLIPTextModel`]): + Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + tokenizer (`CLIPTokenizer`): + A [`~transformers.CLIPTokenizer`] to tokenize text. + unet ([`UNet2DConditionModel`]): + A [`UNet3DConditionModel`] to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details + about a model's potential harms. + feature_extractor ([`CLIPImageProcessor`]): + A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. + """ + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + safety_checker: StableDiffusionSafetyChecker, + feature_extractor: CLIPImageProcessor, + requires_safety_checker: bool = True, + ): + super().__init__() + self.register_modules( + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + unet=unet, + scheduler=scheduler, + safety_checker=safety_checker, + feature_extractor=feature_extractor, + ) + if safety_checker is None and requires_safety_checker: + logger.warning( + f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" + " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" + " results in services or applications open to the public. Both the diffusers team and Hugging Face" + " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" + " it only for use-cases that involve analyzing network behavior or auditing its results. For more" + " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def forward_loop(self, x_t0, t0, t1, generator): + """ + Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. + + Args: + x_t0: + Latent code at time t0. + t0: + Timestep at t0. + t1: + Timestamp at t1. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + + Returns: + x_t1: + Forward process applied to x_t0 from time t0 to t1. + """ + eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) + alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) + x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps + return x_t1 + + def backward_loop( + self, + latents, + timesteps, + prompt_embeds, + guidance_scale, + callback, + callback_steps, + num_warmup_steps, + extra_step_kwargs, + cross_attention_kwargs=None, + ): + """ + Perform backward process given list of time steps. + + Args: + latents: + Latents at time timesteps[0]. + timesteps: + Time steps along which to perform backward process. 
+ prompt_embeds: + Pre-generated text embeddings. + guidance_scale: + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + extra_step_kwargs: + Extra_step_kwargs. + cross_attention_kwargs: + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + num_warmup_steps: + number of warmup steps. + + Returns: + latents: + Latents of backward process output at time timesteps[-1]. + """ + do_classifier_free_guidance = guidance_scale > 1.0 + num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order + with self.progress_bar(total=num_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + return latents.clone().detach() + + # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs + def check_inputs( + self, + prompt, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." 
+ ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + video_length: Optional[int] = 8, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + motion_field_strength_x: float = 12, + motion_field_strength_y: float = 12, + output_type: Optional[str] = "tensor", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: Optional[int] = 1, + t0: int = 44, + t1: int = 47, + frame_ids: Optional[List[int]] = None, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. 
If not defined, you need to pass `prompt_embeds`.
+            video_length (`int`, *optional*, defaults to 8):
+                The number of generated video frames.
+            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The width in pixels of the generated image.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            guidance_scale (`float`, *optional*, defaults to 7.5):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what not to include in video generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_videos_per_prompt (`int`, *optional*, defaults to 1):
+                The number of videos to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`.
+            output_type (`str`, *optional*, defaults to `"tensor"`):
+                The output format of the generated video. Choose between `"latent"` and `"np"`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a
+                [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of
+                a plain tuple.
+            callback (`Callable`, *optional*):
+                A function that calls every `callback_steps` steps during inference. The function is called with the
+                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function is called. If not specified, the callback is called at
+                every step.
+            motion_field_strength_x (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            motion_field_strength_y (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            t0 (`int`, *optional*, defaults to 44):
+                Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the
+                [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1.
+            t1 (`int`, *optional*, defaults to 47):
+                Timestep t1. Should be in the range [t0 + 1, num_inference_steps - 1]. See the
+                [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1.
+            frame_ids (`List[int]`, *optional*):
+                Indexes of the frames that are being generated.
This is used when generating longer videos + chunk-by-chunk. + + Returns: + [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]: + The output contains a `ndarray` of the generated video, when `output_type` != `"latent"`, otherwise a + latent code of generated videos and a list of `bool`s indicating whether the corresponding generated + video contains "not-safe-for-work" (nsfw) content.. + """ + assert video_length > 0 + if frame_ids is None: + frame_ids = list(range(video_length)) + assert len(frame_ids) == video_length + + assert num_videos_per_prompt == 1 + + # set the processor + original_attn_proc = self.unet.attn_processors + processor = ( + CrossFrameAttnProcessor2_0(batch_size=2) + if hasattr(F, "scaled_dot_product_attention") + else CrossFrameAttnProcessor(batch_size=2) + ) + self.unet.set_attn_processor(processor) + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + # Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # Encode input prompt + prompt_embeds_tuple = self.encode_prompt( + prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt + ) + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + # Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + # Prepare extra step kwargs. 
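+        # Illustrative note (not part of the original sources): for a DDIM-style scheduler whose `step()` accepts
+        # both kwargs, `prepare_extra_step_kwargs` typically returns {"generator": generator, "eta": eta};
+        # schedulers that accept neither kwarg simply receive an empty dict.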
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # Perform the first backward process up to time T_1 + x_1_t1 = self.backward_loop( + timesteps=timesteps[: -t1 - 1], + prompt_embeds=prompt_embeds, + latents=latents, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=num_warmup_steps, + ) + scheduler_copy = copy.deepcopy(self.scheduler) + + # Perform the second backward process up to time T_0 + x_1_t0 = self.backward_loop( + timesteps=timesteps[-t1 - 1 : -t0 - 1], + prompt_embeds=prompt_embeds, + latents=x_1_t1, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + ) + + # Propagate first frame latents at time T_0 to remaining frames + x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) + + # Add motion in latents at time T_0 + x_2k_t0 = create_motion_field_and_warp_latents( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + latents=x_2k_t0, + frame_ids=frame_ids[1:], + ) + + # Perform forward process up to time T_1 + x_2k_t1 = self.forward_loop( + x_t0=x_2k_t0, + t0=timesteps[-t0 - 1].item(), + t1=timesteps[-t1 - 1].item(), + generator=generator, + ) + + # Perform backward process from time T_1 to 0 + x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) + b, l, d = prompt_embeds.size() + prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) + + self.scheduler = scheduler_copy + x_1k_0 = self.backward_loop( + timesteps=timesteps[-t1 - 1 :], + prompt_embeds=prompt_embeds, + latents=x_1k_t1, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + ) + latents = x_1k_0 + + # manually for max memory savings + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.unet.to("cpu") + torch.cuda.empty_cache() + + if output_type == "latent": + image = latents + has_nsfw_concept = None + else: + image = self.decode_latents(latents) + # Run safety checker + image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) + + # Offload all models + self.maybe_free_model_hooks() + # make sure to set the original attention processors back + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image, has_nsfw_concept) + + return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker + def run_safety_checker(self, image, device, dtype): + if self.safety_checker is None: + has_nsfw_concept = None + else: + if torch.is_tensor(image): + feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") + else: + feature_extractor_input = self.image_processor.numpy_to_pil(image) + safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) + image, has_nsfw_concept = self.safety_checker( + images=image, clip_input=safety_checker_input.pixel_values.to(dtype) + ) + return image, has_nsfw_concept + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, 
generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
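+
+        Returns:
+            A `(prompt_embeds, negative_prompt_embeds)` tuple; `negative_prompt_embeds` stays `None` when
+            classifier-free guidance is disabled and no negative embeddings were passed in.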
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) + + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + def decode_latents(self, latents): + latents = 1 / self.vae.config.scaling_factor * latents + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + return image diff --git a/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py 
new file mode 100644 index 0000000000000000000000000000000000000000..c46a5ce7c084cf7342ac6a139940f4cfbb5e9b28 --- /dev/null +++ b/diffusers3/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -0,0 +1,1317 @@ +import copy +import inspect +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import numpy as np +import PIL +import torch +import torch.nn.functional as F +from torch.nn.functional import grid_sample +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, UNet2DConditionModel +from ...models.attention_processor import ( + AttnProcessor2_0, + FusedAttnProcessor2_0, + XFormersAttnProcessor, +) +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import KarrasDiffusionSchedulers +from ...utils import ( + USE_PEFT_BACKEND, + BaseOutput, + is_invisible_watermark_available, + logging, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin + + +if is_invisible_watermark_available(): + from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 +def rearrange_0(tensor, f): + F, C, H, W = tensor.size() + tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) + return tensor + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 +def rearrange_1(tensor): + B, C, F, H, W = tensor.size() + return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 +def rearrange_3(tensor, f): + F, D, C = tensor.size() + return torch.reshape(tensor, (F // f, f, D, C)) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 +def rearrange_4(tensor): + B, F, D, C = tensor.size() + return torch.reshape(tensor, (B * F, D, C)) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor +class CrossFrameAttnProcessor: + """ + Cross frame attention processor. Each frame attends the first frame. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
+ """ + + def __init__(self, batch_size=2): + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = key.size()[0] // self.batch_size + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 +class CrossFrameAttnProcessor2_0: + """ + Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. + + Args: + batch_size: The number that represents actual batch size, other than the frames. + For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to + 2, due to classifier-free guidance. 
+ """ + + def __init__(self, batch_size=2): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + self.batch_size = batch_size + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + inner_dim = hidden_states.shape[-1] + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + query = attn.to_q(hidden_states) + + is_cross_attention = encoder_hidden_states is not None + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # Cross Frame Attention + if not is_cross_attention: + video_length = max(1, key.size()[0] // self.batch_size) + first_frame_index = [0] * video_length + + # rearrange keys to have batch and frames in the 1st and 2nd dims respectively + key = rearrange_3(key, video_length) + key = key[:, first_frame_index] + # rearrange values to have batch and frames in the 1st and 2nd dims respectively + value = rearrange_3(value, video_length) + value = value[:, first_frame_index] + + # rearrange back to original shape + key = rearrange_4(key) + value = rearrange_4(value) + + head_dim = inner_dim // attn.heads + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + return hidden_states + + +@dataclass +class TextToVideoSDXLPipelineOutput(BaseOutput): + """ + Output class for zero-shot text-to-video pipeline. + + Args: + images (`List[PIL.Image.Image]` or `np.ndarray`) + List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, + num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
+ """ + + images: Union[List[PIL.Image.Image], np.ndarray] + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid +def coords_grid(batch, ht, wd, device): + # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py + coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) + coords = torch.stack(coords[::-1], dim=0).float() + return coords[None].repeat(batch, 1, 1, 1) + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent +def warp_single_latent(latent, reference_flow): + """ + Warp latent of a single frame with given flow + + Args: + latent: latent code of a single frame + reference_flow: flow which to warp the latent with + + Returns: + warped: warped latent + """ + _, _, H, W = reference_flow.size() + _, _, h, w = latent.size() + coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) + + coords_t0 = coords0 + reference_flow + coords_t0[:, 0] /= W + coords_t0[:, 1] /= H + + coords_t0 = coords_t0 * 2.0 - 1.0 + coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") + coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) + + warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") + return warped + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field +def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): + """ + Create translation motion field + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + device: device + dtype: dtype + + Returns: + + """ + seq_length = len(frame_ids) + reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) + for fr_idx in range(seq_length): + reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) + reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) + return reference_flow + + +# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents +def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): + """ + Creates translation motion and warps the latents accordingly + + Args: + motion_field_strength_x: motion strength along x-axis + motion_field_strength_y: motion strength along y-axis + frame_ids: indexes of the frames the latents of which are being processed. + This is needed when we perform chunk-by-chunk inference + latents: latent codes of frames + + Returns: + warped_latents: warped latents + """ + motion_field = create_motion_field( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + frame_ids=frame_ids, + device=latents.device, + dtype=latents.dtype, + ) + warped_latents = latents.clone().detach() + for i in range(len(warped_latents)): + warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) + return warped_latents + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. 
Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + return noise_cfg + + +class TextToVideoZeroSDXLPipeline( + DiffusionPipeline, + StableDiffusionMixin, + StableDiffusionXLLoraLoaderMixin, + TextualInversionLoaderMixin, +): + r""" + Pipeline for zero-shot text-to-video generation using Stable Diffusion XL. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion XL uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([` CLIPTextModelWithProjection`]): + Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), + specifically the + [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) + variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`CLIPTokenizer`): + Second Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
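+
+    Example (a minimal sketch; assumes the standard SDXL base checkpoint
+    `stabilityai/stable-diffusion-xl-base-1.0` is available):
+
+        >>> import torch
+        >>> pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
+        ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+        ... ).to("cuda")
+        >>> result = pipe(prompt="a panda surfing a wave", video_length=8)
+        >>> frames = result.images  # frames of the generated clip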
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" + _optional_components = [ + "tokenizer", + "tokenizer_2", + "text_encoder", + "text_encoder_2", + "image_encoder", + "feature_extractor", + ] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + text_encoder_2: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + tokenizer_2: CLIPTokenizer, + unet: UNet2DConditionModel, + scheduler: KarrasDiffusionSchedulers, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + force_zeros_for_empty_prompt: bool = True, + add_watermarker: Optional[bool] = None, + ): + super().__init__() + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + unet=unet, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.default_sample_size = self.unet.config.sample_size + + add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() + + if add_watermarker: + self.watermark = StableDiffusionXLWatermarker() + else: + self.watermark = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae + def upcast_vae(self): + dtype = self.vae.dtype + self.vae.to(dtype=torch.float32) + use_torch_2_0_or_xformers = isinstance( + self.vae.decoder.mid_block.attentions[0].processor, + ( + AttnProcessor2_0, + XFormersAttnProcessor, + FusedAttnProcessor2_0, + ), + ) + # if xformers or torch_2_0 is used attention block does not need + # to be in float32 which can save lots of memory + if use_torch_2_0_or_xformers: + self.vae.post_quant_conv.to(dtype) + self.vae.decoder.conv_in.to(dtype) + self.vae.decoder.mid_block.to(dtype) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + + passed_add_embed_dim = ( + self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim + ) + expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." + ) + + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): + shape = ( + batch_size, + num_channels_latents, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + latents = latents.to(device) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
+ ) + + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." + ) + + # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt + def encode_prompt( + self, + prompt: str, + prompt_2: Optional[str] = None, + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + do_classifier_free_guidance: bool = True, + negative_prompt: Optional[str] = None, + negative_prompt_2: Optional[str] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
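+
+        Returns:
+            `tuple`:
+                `(prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)`.
+                The negative embeddings may be `None` when classifier-free guidance is disabled and no negative
+                inputs are provided.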
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) + else: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt is not None: + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Define tokenizers and text encoders + tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] + text_encoders = ( + [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] + ) + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # textual inversion: process multi-vector tokens if necessary + prompt_embeds_list = [] + prompts = [prompt, prompt_2] + for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, tokenizer) + + text_inputs = tokenizer( + prompt, + padding="max_length", + max_length=tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {tokenizer.model_max_length} tokens: {removed_text}" + ) + + prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) + + # We are only ALWAYS interested in the pooled output of the final text encoder + pooled_prompt_embeds = prompt_embeds[0] + if clip_skip is None: + prompt_embeds = prompt_embeds.hidden_states[-2] + else: + # "2" because SDXL always indexes from the penultimate layer. 
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] + + prompt_embeds_list.append(prompt_embeds) + + prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) + + # get unconditional embeddings for classifier free guidance + zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt + if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + elif do_classifier_free_guidance and negative_prompt_embeds is None: + negative_prompt = negative_prompt or "" + negative_prompt_2 = negative_prompt_2 or negative_prompt + + # normalize str to list + negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt + negative_prompt_2 = ( + batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 + ) + + uncond_tokens: List[str] + if prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = [negative_prompt, negative_prompt_2] + + negative_prompt_embeds_list = [] + for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): + if isinstance(self, TextualInversionLoaderMixin): + negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = tokenizer( + negative_prompt, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + negative_prompt_embeds = text_encoder( + uncond_input.input_ids.to(device), + output_hidden_states=True, + ) + # We are only ALWAYS interested in the pooled output of the final text encoder + negative_pooled_prompt_embeds = negative_prompt_embeds[0] + negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] + + negative_prompt_embeds_list.append(negative_prompt_embeds) + + negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) + + if self.text_encoder_2 is not None: + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + if self.text_encoder_2 is not None: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) + else: + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + if do_classifier_free_guidance: + negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( + bs_embed * num_images_per_prompt, -1 + ) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds + + # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop + def forward_loop(self, x_t0, t0, t1, generator): + """ + Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. + + Args: + x_t0: + Latent code at time t0. + t0: + Timestep at t0. + t1: + Timestamp at t1. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + + Returns: + x_t1: + Forward process applied to x_t0 from time t0 to t1. + """ + eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) + alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) + x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps + return x_t1 + + def backward_loop( + self, + latents, + timesteps, + prompt_embeds, + guidance_scale, + callback, + callback_steps, + num_warmup_steps, + extra_step_kwargs, + add_text_embeds, + add_time_ids, + cross_attention_kwargs=None, + guidance_rescale: float = 0.0, + ): + """ + Perform backward process given list of time steps + + Args: + latents: + Latents at time timesteps[0]. + timesteps: + Time steps along which to perform backward process. + prompt_embeds: + Pre-generated text embeddings. + guidance_scale: + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + extra_step_kwargs: + Extra_step_kwargs. + cross_attention_kwargs: + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + num_warmup_steps: + number of warmup steps. 
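+            add_text_embeds:
+                Pooled text embeddings, passed to the UNet as SDXL added conditioning
+                (`added_cond_kwargs["text_embeds"]`).
+            add_time_ids:
+                SDXL micro-conditioning time ids built from `original_size`, `crops_coords_top_left` and
+                `target_size`.
+            guidance_rescale:
+                Rescale factor for classifier-free guidance, as in [Common Diffusion Noise Schedules and Sample
+                Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf); `0.0` disables rescaling.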
+ + Returns: + latents: latents of backward process output at time timesteps[-1] + """ + + do_classifier_free_guidance = guidance_scale > 1.0 + num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order + + with self.progress_bar(total=num_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + added_cond_kwargs=added_cond_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + return latents.clone().detach() + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + prompt_2: Optional[Union[str, List[str]]] = None, + video_length: Optional[int] = 8, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + denoising_end: Optional[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + frame_ids: Optional[List[int]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + pooled_prompt_embeds: Optional[torch.Tensor] = None, + negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, + latents: Optional[torch.Tensor] = None, + motion_field_strength_x: float = 12, + motion_field_strength_y: float = 12, + output_type: Optional[str] = "tensor", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + t0: int = 44, + t1: int = 47, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is + used in both text-encoders + video_length (`int`, *optional*, defaults to 8): + The number of generated video frames. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of videos to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + frame_ids (`List[int]`, *optional*): + Indexes of the frames that are being generated. This is used when generating longer videos + chunk-by-chunk. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + pooled_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated pooled text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+                If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+            motion_field_strength_x (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            motion_field_strength_y (`float`, *optional*, defaults to 12):
+                Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439),
+                Sect. 3.3.1.
+            output_type (`str`, *optional*, defaults to `"tensor"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a
+                [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`]
+                instead of a plain tuple.
+            callback (`Callable`, *optional*):
+                A function that will be called every `callback_steps` steps during inference. The function will be
+                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+            callback_steps (`int`, *optional*, defaults to 1):
+                The frequency at which the `callback` function will be called. If not specified, the callback will be
+                called at every step.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
+            guidance_rescale (`float`, *optional*, defaults to 0.0):
+                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
+                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
+                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
+                Guidance rescale factor should fix overexposure when using zero terminal SNR.
+            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
+                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
+                `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
+                explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
+                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
+                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
+                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
+                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + t0 (`int`, *optional*, defaults to 44): + Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the + [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. + t1 (`int`, *optional*, defaults to 47): + Timestep t0. Should be in the range [t0 + 1, num_inference_steps - 1]. See the + [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. + + Returns: + [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`] or + `tuple`: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`] + if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated images. + """ + assert video_length > 0 + if frame_ids is None: + frame_ids = list(range(video_length)) + assert len(frame_ids) == video_length + + assert num_videos_per_prompt == 1 + + # set the processor + original_attn_proc = self.unet.attn_processors + processor = ( + CrossFrameAttnProcessor2_0(batch_size=2) + if hasattr(F, "scaled_dot_product_attention") + else CrossFrameAttnProcessor(batch_size=2) + ) + self.unet.set_attn_processor(processor) + + if isinstance(prompt, str): + prompt = [prompt] + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) + + # 2. Define call parameters + batch_size = ( + 1 if isinstance(prompt, str) else len(prompt) if isinstance(prompt, list) else prompt_embeds.shape[0] + ) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_encoder_lora_scale = ( + cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None + ) + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_videos_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=text_encoder_lora_scale, + ) + + # 4. 
Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + + if do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1) + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + + # Perform the first backward process up to time T_1 + x_1_t1 = self.backward_loop( + timesteps=timesteps[: -t1 - 1], + prompt_embeds=prompt_embeds, + latents=latents, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=num_warmup_steps, + add_text_embeds=add_text_embeds, + add_time_ids=add_time_ids, + ) + + scheduler_copy = copy.deepcopy(self.scheduler) + + # Perform the second backward process up to time T_0 + x_1_t0 = self.backward_loop( + timesteps=timesteps[-t1 - 1 : -t0 - 1], + prompt_embeds=prompt_embeds, + latents=x_1_t1, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + add_text_embeds=add_text_embeds, + add_time_ids=add_time_ids, + ) + + # Propagate first frame latents at time T_0 to remaining frames + x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) + + # Add motion in latents at time T_0 + x_2k_t0 = create_motion_field_and_warp_latents( + motion_field_strength_x=motion_field_strength_x, + motion_field_strength_y=motion_field_strength_y, + latents=x_2k_t0, + frame_ids=frame_ids[1:], + ) + + # Perform forward process up to time T_1 + x_2k_t1 = self.forward_loop( + x_t0=x_2k_t0, + t0=timesteps[-t0 - 1].to(torch.long), + t1=timesteps[-t1 - 1].to(torch.long), + generator=generator, + ) + + # Perform backward process from time T_1 to 0 + latents = torch.cat([x_1_t1, x_2k_t1]) + + self.scheduler = scheduler_copy + timesteps = timesteps[-t1 - 1 :] + + b, l, d = prompt_embeds.size() + prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) + + b, k = add_text_embeds.size() + add_text_embeds = add_text_embeds[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) + + b, k = add_time_ids.size() + add_time_ids = add_time_ids[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) + + # 7.1 Apply denoising_end + if 
denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + x_1k_0 = self.backward_loop( + timesteps=timesteps, + prompt_embeds=prompt_embeds, + latents=latents, + guidance_scale=guidance_scale, + callback=callback, + callback_steps=callback_steps, + extra_step_kwargs=extra_step_kwargs, + num_warmup_steps=0, + add_text_embeds=add_text_embeds, + add_time_ids=add_time_ids, + ) + + latents = x_1k_0 + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + return TextToVideoSDXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + self.maybe_free_model_hooks() + # make sure to set the original attention processors back + self.unet.set_attn_processor(original_attn_proc) + + if not return_dict: + return (image,) + + return TextToVideoSDXLPipelineOutput(images=image) diff --git a/diffusers3/pipelines/unclip/__init__.py b/diffusers3/pipelines/unclip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c89e899463beede59b8ccf02688f6168b8ee3d77 --- /dev/null +++ b/diffusers3/pipelines/unclip/__init__.py @@ -0,0 +1,52 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, + is_transformers_version, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline + + _dummy_objects.update( + {"UnCLIPImageVariationPipeline": UnCLIPImageVariationPipeline, "UnCLIPPipeline": UnCLIPPipeline} + ) +else: + _import_structure["pipeline_unclip"] = ["UnCLIPPipeline"] + _import_structure["pipeline_unclip_image_variation"] = ["UnCLIPImageVariationPipeline"] + _import_structure["text_proj"] = ["UnCLIPTextProjModel"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .pipeline_unclip import UnCLIPPipeline + from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline + from .text_proj import UnCLIPTextProjModel + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + 
_import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/unclip/pipeline_unclip.py b/diffusers3/pipelines/unclip/pipeline_unclip.py new file mode 100644 index 0000000000000000000000000000000000000000..25c6739d8720a3696652438c4a7f8a09c5a1a3ba --- /dev/null +++ b/diffusers3/pipelines/unclip/pipeline_unclip.py @@ -0,0 +1,493 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Tuple, Union + +import torch +from torch.nn import functional as F +from transformers import CLIPTextModelWithProjection, CLIPTokenizer +from transformers.models.clip.modeling_clip import CLIPTextModelOutput + +from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel +from ...schedulers import UnCLIPScheduler +from ...utils import logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_proj import UnCLIPTextProjModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class UnCLIPPipeline(DiffusionPipeline): + """ + Pipeline for text-to-image generation using unCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + text_encoder ([`~transformers.CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + prior ([`PriorTransformer`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + text_proj ([`UnCLIPTextProjModel`]): + Utility class to prepare and combine the embeddings before they are passed to the decoder. + decoder ([`UNet2DConditionModel`]): + The decoder to invert the image embedding into an image. + super_res_first ([`UNet2DModel`]): + Super resolution UNet. Used in all but the last step of the super resolution diffusion process. + super_res_last ([`UNet2DModel`]): + Super resolution UNet. Used in the last step of the super resolution diffusion process. + prior_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]). + decoder_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). + super_res_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
+ + """ + + _exclude_from_cpu_offload = ["prior"] + + prior: PriorTransformer + decoder: UNet2DConditionModel + text_proj: UnCLIPTextProjModel + text_encoder: CLIPTextModelWithProjection + tokenizer: CLIPTokenizer + super_res_first: UNet2DModel + super_res_last: UNet2DModel + + prior_scheduler: UnCLIPScheduler + decoder_scheduler: UnCLIPScheduler + super_res_scheduler: UnCLIPScheduler + + model_cpu_offload_seq = "text_encoder->text_proj->decoder->super_res_first->super_res_last" + + def __init__( + self, + prior: PriorTransformer, + decoder: UNet2DConditionModel, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_proj: UnCLIPTextProjModel, + super_res_first: UNet2DModel, + super_res_last: UNet2DModel, + prior_scheduler: UnCLIPScheduler, + decoder_scheduler: UnCLIPScheduler, + super_res_scheduler: UnCLIPScheduler, + ): + super().__init__() + + self.register_modules( + prior=prior, + decoder=decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_proj=text_proj, + super_res_first=super_res_first, + super_res_last=super_res_last, + prior_scheduler=prior_scheduler, + decoder_scheduler=decoder_scheduler, + super_res_scheduler=super_res_scheduler, + ) + + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + ): + if text_model_output is None: + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_enc_hid_states = text_encoder_output.last_hidden_state + + else: + batch_size = text_model_output[0].shape[0] + prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] + text_mask = text_attention_mask + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + uncond_input = self.tokenizer( + uncond_tokens, + 
padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) + + seq_len = uncond_text_enc_hid_states.shape[1] + uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) + uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_enc_hid_states, text_mask + + @torch.no_grad() + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + prior_num_inference_steps: int = 25, + decoder_num_inference_steps: int = 25, + super_res_num_inference_steps: int = 7, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + prior_latents: Optional[torch.Tensor] = None, + decoder_latents: Optional[torch.Tensor] = None, + super_res_latents: Optional[torch.Tensor] = None, + text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, + text_attention_mask: Optional[torch.Tensor] = None, + prior_guidance_scale: float = 4.0, + decoder_guidance_scale: float = 8.0, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output` + and `text_attention_mask` is passed. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + prior_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps for the prior. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + decoder_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + super_res_num_inference_steps (`int`, *optional*, defaults to 7): + The number of denoising steps for super resolution. More denoising steps usually lead to a higher + quality image at the expense of slower inference. 
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            prior_latents (`torch.Tensor` of shape (batch size, embeddings dimension), *optional*):
+                Pre-generated noisy latents to be used as inputs for the prior.
+            decoder_latents (`torch.Tensor` of shape (batch size, channels, height, width), *optional*):
+                Pre-generated noisy latents to be used as inputs for the decoder.
+            super_res_latents (`torch.Tensor` of shape (batch size, channels, super res height, super res width), *optional*):
+                Pre-generated noisy latents to be used as inputs for the super resolution UNets.
+            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            decoder_guidance_scale (`float`, *optional*, defaults to 8.0):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            text_model_output (`CLIPTextModelOutput`, *optional*):
+                Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text
+                outputs can be passed for tasks like text embedding interpolations. Make sure to also pass
+                `text_attention_mask` in this case. `prompt` can then be left `None`.
+            text_attention_mask (`torch.Tensor`, *optional*):
+                Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention
+                masks are necessary when passing `text_model_output`.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
+
+        Returns:
+            [`~pipelines.ImagePipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
+                returned where the first element is a list with the generated images.
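+
+        Example (a minimal sketch; `kakaobrain/karlo-v1-alpha` is assumed here as the public unCLIP/Karlo
+        checkpoint):
+
+            >>> pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
+            >>> image = pipe("a photo of an astronaut riding a horse").images[0]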
+ """ + if prompt is not None: + if isinstance(prompt, str): + batch_size = 1 + elif isinstance(prompt, list): + batch_size = len(prompt) + else: + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + else: + batch_size = text_model_output[0].shape[0] + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 + + prompt_embeds, text_enc_hid_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask + ) + + # prior + + self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) + prior_timesteps_tensor = self.prior_scheduler.timesteps + + embedding_dim = self.prior.config.embedding_dim + + prior_latents = self.prepare_latents( + (batch_size, embedding_dim), + prompt_embeds.dtype, + device, + generator, + prior_latents, + self.prior_scheduler, + ) + + for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents + + predicted_image_embedding = self.prior( + latent_model_input, + timestep=t, + proj_embedding=prompt_embeds, + encoder_hidden_states=text_enc_hid_states, + attention_mask=text_mask, + ).predicted_image_embedding + + if do_classifier_free_guidance: + predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) + predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( + predicted_image_embedding_text - predicted_image_embedding_uncond + ) + + if i + 1 == prior_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = prior_timesteps_tensor[i + 1] + + prior_latents = self.prior_scheduler.step( + predicted_image_embedding, + timestep=t, + sample=prior_latents, + generator=generator, + prev_timestep=prev_timestep, + ).prev_sample + + prior_latents = self.prior.post_process_latents(prior_latents) + + image_embeddings = prior_latents + + # done prior + + # decoder + + text_enc_hid_states, additive_clip_time_embeddings = self.text_proj( + image_embeddings=image_embeddings, + prompt_embeds=prompt_embeds, + text_encoder_hidden_states=text_enc_hid_states, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + if device.type == "mps": + # HACK: MPS: There is a panic when padding bool tensors, + # so cast to int tensor for the pad and back to bool afterwards + text_mask = text_mask.type(torch.int) + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) + decoder_text_mask = decoder_text_mask.type(torch.bool) + else: + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) + + self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) + decoder_timesteps_tensor = self.decoder_scheduler.timesteps + + num_channels_latents = self.decoder.config.in_channels + height = self.decoder.config.sample_size + width = self.decoder.config.sample_size + + decoder_latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_enc_hid_states.dtype, + device, + generator, + decoder_latents, + self.decoder_scheduler, + ) + + for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance 
+ latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents + + noise_pred = self.decoder( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_enc_hid_states, + class_labels=additive_clip_time_embeddings, + attention_mask=decoder_text_mask, + ).sample + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if i + 1 == decoder_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = decoder_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + decoder_latents = self.decoder_scheduler.step( + noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + decoder_latents = decoder_latents.clamp(-1, 1) + + image_small = decoder_latents + + # done decoder + + # super res + + self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) + super_res_timesteps_tensor = self.super_res_scheduler.timesteps + + channels = self.super_res_first.config.in_channels // 2 + height = self.super_res_first.config.sample_size + width = self.super_res_first.config.sample_size + + super_res_latents = self.prepare_latents( + (batch_size, channels, height, width), + image_small.dtype, + device, + generator, + super_res_latents, + self.super_res_scheduler, + ) + + if device.type == "mps": + # MPS does not support many interpolations + image_upscaled = F.interpolate(image_small, size=[height, width]) + else: + interpolate_antialias = {} + if "antialias" in inspect.signature(F.interpolate).parameters: + interpolate_antialias["antialias"] = True + + image_upscaled = F.interpolate( + image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias + ) + + for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): + # no classifier free guidance + + if i == super_res_timesteps_tensor.shape[0] - 1: + unet = self.super_res_last + else: + unet = self.super_res_first + + latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) + + noise_pred = unet( + sample=latent_model_input, + timestep=t, + ).sample + + if i + 1 == super_res_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = super_res_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + super_res_latents = self.super_res_scheduler.step( + noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + image = super_res_latents + # done super res + + self.maybe_free_model_hooks() + + # post processing + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/unclip/pipeline_unclip_image_variation.py b/diffusers3/pipelines/unclip/pipeline_unclip_image_variation.py new file mode 100644 index 0000000000000000000000000000000000000000..2a0e7e90e4d29d12e360d76bd7846acfd280d6f3 --- /dev/null +++ 
b/diffusers3/pipelines/unclip/pipeline_unclip_image_variation.py @@ -0,0 +1,420 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Optional, Union + +import PIL.Image +import torch +from torch.nn import functional as F +from transformers import ( + CLIPImageProcessor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) + +from ...models import UNet2DConditionModel, UNet2DModel +from ...schedulers import UnCLIPScheduler +from ...utils import logging +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .text_proj import UnCLIPTextProjModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class UnCLIPImageVariationPipeline(DiffusionPipeline): + """ + Pipeline to generate image variations from an input image using UnCLIP. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + text_encoder ([`~transformers.CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer ([`~transformers.CLIPTokenizer`]): + A `CLIPTokenizer` to tokenize text. + feature_extractor ([`~transformers.CLIPImageProcessor`]): + Model that extracts features from generated images to be used as inputs for the `image_encoder`. + image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): + Frozen CLIP image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). + text_proj ([`UnCLIPTextProjModel`]): + Utility class to prepare and combine the embeddings before they are passed to the decoder. + decoder ([`UNet2DConditionModel`]): + The decoder to invert the image embedding into an image. + super_res_first ([`UNet2DModel`]): + Super resolution UNet. Used in all but the last step of the super resolution diffusion process. + super_res_last ([`UNet2DModel`]): + Super resolution UNet. Used in the last step of the super resolution diffusion process. + decoder_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]). + super_res_scheduler ([`UnCLIPScheduler`]): + Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]). 
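+
+    Example:
+        Illustrative usage sketch (not from the original file): it assumes the
+        `fusing/karlo-image-variations-diffusers` checkpoint already referenced above and a local `input.png`;
+        in this repo the pipeline is vendored under `diffusers3`, here it is imported from the installed
+        `diffusers` package for brevity.
+
+        ```py
+        >>> from PIL import Image
+        >>> from diffusers import UnCLIPImageVariationPipeline
+
+        >>> pipe = UnCLIPImageVariationPipeline.from_pretrained("fusing/karlo-image-variations-diffusers")
+        >>> pipe = pipe.to("cuda")
+
+        >>> # the input image is encoded with the CLIP vision tower; no text prompt is needed
+        >>> init_image = Image.open("input.png").convert("RGB")
+        >>> variation = pipe(image=init_image, num_images_per_prompt=1).images[0]
+        >>> variation.save("variation.png")
+        ```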
+ """ + + decoder: UNet2DConditionModel + text_proj: UnCLIPTextProjModel + text_encoder: CLIPTextModelWithProjection + tokenizer: CLIPTokenizer + feature_extractor: CLIPImageProcessor + image_encoder: CLIPVisionModelWithProjection + super_res_first: UNet2DModel + super_res_last: UNet2DModel + + decoder_scheduler: UnCLIPScheduler + super_res_scheduler: UnCLIPScheduler + model_cpu_offload_seq = "text_encoder->image_encoder->text_proj->decoder->super_res_first->super_res_last" + + def __init__( + self, + decoder: UNet2DConditionModel, + text_encoder: CLIPTextModelWithProjection, + tokenizer: CLIPTokenizer, + text_proj: UnCLIPTextProjModel, + feature_extractor: CLIPImageProcessor, + image_encoder: CLIPVisionModelWithProjection, + super_res_first: UNet2DModel, + super_res_last: UNet2DModel, + decoder_scheduler: UnCLIPScheduler, + super_res_scheduler: UnCLIPScheduler, + ): + super().__init__() + + self.register_modules( + decoder=decoder, + text_encoder=text_encoder, + tokenizer=tokenizer, + text_proj=text_proj, + feature_extractor=feature_extractor, + image_encoder=image_encoder, + super_res_first=super_res_first, + super_res_last=super_res_last, + decoder_scheduler=decoder_scheduler, + super_res_scheduler=super_res_scheduler, + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + text_mask = text_inputs.attention_mask.bool().to(device) + text_encoder_output = self.text_encoder(text_input_ids.to(device)) + + prompt_embeds = text_encoder_output.text_embeds + text_encoder_hidden_states = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + if do_classifier_free_guidance: + uncond_tokens = [""] * batch_size + + max_length = text_input_ids.shape[-1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + uncond_text_mask = uncond_input.attention_mask.bool().to(device) + negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * 
num_images_per_prompt, seq_len) + + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) + + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + + text_mask = torch.cat([uncond_text_mask, text_mask]) + + return prompt_embeds, text_encoder_hidden_states, text_mask + + def _encode_image(self, image, device, num_images_per_prompt, image_embeddings: Optional[torch.Tensor] = None): + dtype = next(self.image_encoder.parameters()).dtype + + if image_embeddings is None: + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(images=image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeddings = self.image_encoder(image).image_embeds + + image_embeddings = image_embeddings.repeat_interleave(num_images_per_prompt, dim=0) + + return image_embeddings + + @torch.no_grad() + def __call__( + self, + image: Optional[Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor]] = None, + num_images_per_prompt: int = 1, + decoder_num_inference_steps: int = 25, + super_res_num_inference_steps: int = 7, + generator: Optional[torch.Generator] = None, + decoder_latents: Optional[torch.Tensor] = None, + super_res_latents: Optional[torch.Tensor] = None, + image_embeddings: Optional[torch.Tensor] = None, + decoder_guidance_scale: float = 8.0, + output_type: Optional[str] = "pil", + return_dict: bool = True, + ): + """ + The call function to the pipeline for generation. + + Args: + image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.Tensor`): + `Image` or tensor representing an image batch to be used as the starting point. If you provide a + tensor, it needs to be compatible with the [`CLIPImageProcessor`] + [configuration](https://huggingface.co/fusing/karlo-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). + Can be left as `None` only when `image_embeddings` are passed. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + decoder_num_inference_steps (`int`, *optional*, defaults to 25): + The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality + image at the expense of slower inference. + super_res_num_inference_steps (`int`, *optional*, defaults to 7): + The number of denoising steps for super resolution. More denoising steps usually lead to a higher + quality image at the expense of slower inference. + generator (`torch.Generator`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + decoder_latents (`torch.Tensor` of shape (batch size, channels, height, width), *optional*): + Pre-generated noisy latents to be used as inputs for the decoder. 
+ super_res_latents (`torch.Tensor` of shape (batch size, channels, super res height, super res width), *optional*): + Pre-generated noisy latents to be used as inputs for the decoder. + decoder_guidance_scale (`float`, *optional*, defaults to 4.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + image_embeddings (`torch.Tensor`, *optional*): + Pre-defined image embeddings that can be derived from the image encoder. Pre-defined image embeddings + can be passed for tasks like image interpolations. `image` can be left as `None`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images. + """ + if image is not None: + if isinstance(image, PIL.Image.Image): + batch_size = 1 + elif isinstance(image, list): + batch_size = len(image) + else: + batch_size = image.shape[0] + else: + batch_size = image_embeddings.shape[0] + + prompt = [""] * batch_size + + device = self._execution_device + + batch_size = batch_size * num_images_per_prompt + + do_classifier_free_guidance = decoder_guidance_scale > 1.0 + + prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance + ) + + image_embeddings = self._encode_image(image, device, num_images_per_prompt, image_embeddings) + + # decoder + text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( + image_embeddings=image_embeddings, + prompt_embeds=prompt_embeds, + text_encoder_hidden_states=text_encoder_hidden_states, + do_classifier_free_guidance=do_classifier_free_guidance, + ) + + if device.type == "mps": + # HACK: MPS: There is a panic when padding bool tensors, + # so cast to int tensor for the pad and back to bool afterwards + text_mask = text_mask.type(torch.int) + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) + decoder_text_mask = decoder_text_mask.type(torch.bool) + else: + decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) + + self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) + decoder_timesteps_tensor = self.decoder_scheduler.timesteps + + num_channels_latents = self.decoder.config.in_channels + height = self.decoder.config.sample_size + width = self.decoder.config.sample_size + + if decoder_latents is None: + decoder_latents = self.prepare_latents( + (batch_size, num_channels_latents, height, width), + text_encoder_hidden_states.dtype, + device, + generator, + decoder_latents, + self.decoder_scheduler, + ) + + for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents + + noise_pred = self.decoder( + sample=latent_model_input, + timestep=t, + encoder_hidden_states=text_encoder_hidden_states, + class_labels=additive_clip_time_embeddings, + 
attention_mask=decoder_text_mask, + ).sample + + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) + noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) + noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) + + if i + 1 == decoder_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = decoder_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + decoder_latents = self.decoder_scheduler.step( + noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + decoder_latents = decoder_latents.clamp(-1, 1) + + image_small = decoder_latents + + # done decoder + + # super res + + self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) + super_res_timesteps_tensor = self.super_res_scheduler.timesteps + + channels = self.super_res_first.config.in_channels // 2 + height = self.super_res_first.config.sample_size + width = self.super_res_first.config.sample_size + + if super_res_latents is None: + super_res_latents = self.prepare_latents( + (batch_size, channels, height, width), + image_small.dtype, + device, + generator, + super_res_latents, + self.super_res_scheduler, + ) + + if device.type == "mps": + # MPS does not support many interpolations + image_upscaled = F.interpolate(image_small, size=[height, width]) + else: + interpolate_antialias = {} + if "antialias" in inspect.signature(F.interpolate).parameters: + interpolate_antialias["antialias"] = True + + image_upscaled = F.interpolate( + image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias + ) + + for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): + # no classifier free guidance + + if i == super_res_timesteps_tensor.shape[0] - 1: + unet = self.super_res_last + else: + unet = self.super_res_first + + latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) + + noise_pred = unet( + sample=latent_model_input, + timestep=t, + ).sample + + if i + 1 == super_res_timesteps_tensor.shape[0]: + prev_timestep = None + else: + prev_timestep = super_res_timesteps_tensor[i + 1] + + # compute the previous noisy sample x_t -> x_t-1 + super_res_latents = self.super_res_scheduler.step( + noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + image = super_res_latents + + # done super res + self.maybe_free_model_hooks() + + # post processing + + image = image * 0.5 + 0.5 + image = image.clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).float().numpy() + + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) diff --git a/diffusers3/pipelines/unclip/text_proj.py b/diffusers3/pipelines/unclip/text_proj.py new file mode 100644 index 0000000000000000000000000000000000000000..5a86d0c08a8da90ff27505c8868b030cfa10068f --- /dev/null +++ b/diffusers3/pipelines/unclip/text_proj.py @@ -0,0 +1,86 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin + + +class UnCLIPTextProjModel(ModelMixin, ConfigMixin): + """ + Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the + decoder. + + For more details, see the original paper: https://arxiv.org/abs/2204.06125 section 2.1 + """ + + @register_to_config + def __init__( + self, + *, + clip_extra_context_tokens: int = 4, + clip_embeddings_dim: int = 768, + time_embed_dim: int, + cross_attention_dim, + ): + super().__init__() + + self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim)) + + # parameters for additional clip time embeddings + self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim) + self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim) + + # parameters for encoder hidden states + self.clip_extra_context_tokens = clip_extra_context_tokens + self.clip_extra_context_tokens_proj = nn.Linear( + clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim + ) + self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim) + self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim) + + def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance): + if do_classifier_free_guidance: + # Add the classifier free guidance embeddings to the image embeddings + image_embeddings_batch_size = image_embeddings.shape[0] + classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0) + classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand( + image_embeddings_batch_size, -1 + ) + image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0) + + # The image embeddings batch size and the text embeddings batch size are equal + assert image_embeddings.shape[0] == prompt_embeds.shape[0] + + batch_size = prompt_embeds.shape[0] + + # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and + # adding CLIP embeddings to the existing timestep embedding, ... + time_projected_prompt_embeds = self.embedding_proj(prompt_embeds) + time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings) + additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds + + # ... 
and by projecting CLIP embeddings into four + # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" + clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings) + clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens) + clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1) + + text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states) + text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states) + text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1) + + return text_encoder_hidden_states, additive_clip_time_embeddings diff --git a/diffusers3/pipelines/unidiffuser/__init__.py b/diffusers3/pipelines/unidiffuser/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ac2b09a6e570087c80bc11bf1a8102dd4970b8f --- /dev/null +++ b/diffusers3/pipelines/unidiffuser/__init__.py @@ -0,0 +1,58 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + ImageTextPipelineOutput, + UniDiffuserPipeline, + ) + + _dummy_objects.update( + {"ImageTextPipelineOutput": ImageTextPipelineOutput, "UniDiffuserPipeline": UniDiffuserPipeline} + ) +else: + _import_structure["modeling_text_decoder"] = ["UniDiffuserTextDecoder"] + _import_structure["modeling_uvit"] = ["UniDiffuserModel", "UTransformer2DModel"] + _import_structure["pipeline_unidiffuser"] = ["ImageTextPipelineOutput", "UniDiffuserPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import ( + ImageTextPipelineOutput, + UniDiffuserPipeline, + ) + else: + from .modeling_text_decoder import UniDiffuserTextDecoder + from .modeling_uvit import UniDiffuserModel, UTransformer2DModel + from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/unidiffuser/modeling_text_decoder.py b/diffusers3/pipelines/unidiffuser/modeling_text_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..75e5d43678d5085b4b7bdc1c4e7c0aad1309ba42 --- /dev/null +++ b/diffusers3/pipelines/unidiffuser/modeling_text_decoder.py @@ -0,0 +1,296 @@ +from typing import Optional + +import numpy as np +import torch +from torch import nn +from transformers import GPT2Config, GPT2LMHeadModel +from transformers.modeling_utils import ModuleUtilsMixin + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin + + +# Modified from ClipCaptionModel in https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py +class UniDiffuserTextDecoder(ModelMixin, 
ConfigMixin, ModuleUtilsMixin):
+    """
+    Text decoder model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is used to
+    generate text from the UniDiffuser image-text embedding.
+
+    Parameters:
+        prefix_length (`int`):
+            Max number of prefix tokens that will be supplied to the model.
+        prefix_inner_dim (`int`):
+            The hidden size of the incoming prefix embeddings. For UniDiffuser, this would be the hidden dim of the
+            CLIP text encoder.
+        prefix_hidden_dim (`int`, *optional*):
+            Hidden dim of the MLP if we encode the prefix.
+        vocab_size (`int`, *optional*, defaults to 50257):
+            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
+        n_positions (`int`, *optional*, defaults to 1024):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        n_embd (`int`, *optional*, defaults to 768):
+            Dimensionality of the embeddings and hidden states.
+        n_layer (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        n_head (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        n_inner (`int`, *optional*, defaults to None):
+            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
+        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+        resid_pdrop (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        embd_pdrop (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the embeddings.
+        attn_pdrop (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+            The epsilon to use in the layer normalization layers.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        scale_attn_weights (`bool`, *optional*, defaults to `True`):
+            Scale attention weights by dividing by sqrt(hidden_size).
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
+            Whether to additionally scale attention weights by `1 / layer_idx + 1`.
+        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
+            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
+            dot-product/softmax to float() when training with mixed precision.
+    """
+
+    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
+
+    @register_to_config
+    def __init__(
+        self,
+        prefix_length: int,
+        prefix_inner_dim: int,
+        prefix_hidden_dim: Optional[int] = None,
+        vocab_size: int = 50257,  # Start of GPT2 config args
+        n_positions: int = 1024,
+        n_embd: int = 768,
+        n_layer: int = 12,
+        n_head: int = 12,
+        n_inner: Optional[int] = None,
+        activation_function: str = "gelu_new",
+        resid_pdrop: float = 0.1,
+        embd_pdrop: float = 0.1,
+        attn_pdrop: float = 0.1,
+        layer_norm_epsilon: float = 1e-5,
+        initializer_range: float = 0.02,
+        scale_attn_weights: bool = True,
+        use_cache: bool = True,
+        scale_attn_by_inverse_layer_idx: bool = False,
+        reorder_and_upcast_attn: bool = False,
+    ):
+        super().__init__()
+
+        self.prefix_length = prefix_length
+
+        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
+            raise ValueError(
+                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
+                f" `n_embd`: {n_embd} are not equal."
+            )
+
+        self.prefix_inner_dim = prefix_inner_dim
+        self.prefix_hidden_dim = prefix_hidden_dim
+
+        self.encode_prefix = (
+            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
+            if self.prefix_hidden_dim is not None
+            else nn.Identity()
+        )
+        self.decode_prefix = (
+            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
+        )
+
+        gpt_config = GPT2Config(
+            vocab_size=vocab_size,
+            n_positions=n_positions,
+            n_embd=n_embd,
+            n_layer=n_layer,
+            n_head=n_head,
+            n_inner=n_inner,
+            activation_function=activation_function,
+            resid_pdrop=resid_pdrop,
+            embd_pdrop=embd_pdrop,
+            attn_pdrop=attn_pdrop,
+            layer_norm_epsilon=layer_norm_epsilon,
+            initializer_range=initializer_range,
+            scale_attn_weights=scale_attn_weights,
+            use_cache=use_cache,
+            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
+            reorder_and_upcast_attn=reorder_and_upcast_attn,
+        )
+        self.transformer = GPT2LMHeadModel(gpt_config)
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        prefix_embeds: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        labels: Optional[torch.Tensor] = None,
+    ):
+        """
+        Args:
+            input_ids (`torch.Tensor` of shape `(N, max_seq_len)`):
+                Text tokens to use for inference.
+            prefix_embeds (`torch.Tensor` of shape `(N, prefix_length, 768)`):
+                Prefix embedding to prepend to the embedded tokens.
+            attention_mask (`torch.Tensor` of shape `(N, prefix_length + max_seq_len, 768)`, *optional*):
+                Attention mask for the prefix embedding.
+            labels (`torch.Tensor`, *optional*):
+                Labels to use for language modeling.
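+
+        Example:
+            Minimal shape-check sketch (not from the original file): weights are randomly initialized and the
+            sizes are illustrative; `prefix_inner_dim` matches the default `n_embd`, so no prefix MLP is created.
+
+            ```py
+            >>> import torch
+            >>> from diffusers3.pipelines.unidiffuser.modeling_text_decoder import UniDiffuserTextDecoder
+
+            >>> decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
+            >>> input_ids = torch.randint(0, 50257, (2, 20))
+            >>> prefix_embeds = torch.randn(2, 77, 768)
+            >>> out = decoder(input_ids, prefix_embeds)
+            >>> out.logits.shape  # prefix and text tokens are concatenated along the sequence axis
+            torch.Size([2, 97, 50257])
+            ```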
+ """ + embedding_text = self.transformer.transformer.wte(input_ids) + hidden = self.encode_prefix(prefix_embeds) + prefix_embeds = self.decode_prefix(hidden) + embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1) + + if labels is not None: + dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device) + labels = torch.cat((dummy_token, input_ids), dim=1) + out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask) + if self.prefix_hidden_dim is not None: + return out, hidden + else: + return out + + def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor: + return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device) + + def encode(self, prefix): + return self.encode_prefix(prefix) + + @torch.no_grad() + def generate_captions(self, features, eos_token_id, device): + """ + Generate captions given text embedding features. Returns list[L]. + + Args: + features (`torch.Tensor` of shape `(B, L, D)`): + Text embedding features to generate captions from. + eos_token_id (`int`): + The token ID of the EOS token for the text decoder model. + device: + Device to perform text generation on. + + Returns: + `List[str]`: A list of strings generated from the decoder model. + """ + + features = torch.split(features, 1, dim=0) + generated_tokens = [] + generated_seq_lengths = [] + for feature in features: + feature = self.decode_prefix(feature.to(device)) # back to the clip feature + # Only support beam search for now + output_tokens, seq_lengths = self.generate_beam( + input_embeds=feature, device=device, eos_token_id=eos_token_id + ) + generated_tokens.append(output_tokens[0]) + generated_seq_lengths.append(seq_lengths[0]) + generated_tokens = torch.stack(generated_tokens) + generated_seq_lengths = torch.stack(generated_seq_lengths) + return generated_tokens, generated_seq_lengths + + @torch.no_grad() + def generate_beam( + self, + input_ids=None, + input_embeds=None, + device=None, + beam_size: int = 5, + entry_length: int = 67, + temperature: float = 1.0, + eos_token_id: Optional[int] = None, + ): + """ + Generates text using the given tokenizer and text prompt or token embedding via beam search. This + implementation is based on the beam search implementation from the [original UniDiffuser + code](https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py#L89). + + Args: + eos_token_id (`int`, *optional*): + The token ID of the EOS token for the text decoder model. + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): + Tokenizer indices of input sequence tokens in the vocabulary. One of `input_ids` and `input_embeds` + must be supplied. + input_embeds (`torch.Tensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*): + An embedded representation to directly pass to the transformer as a prefix for beam search. One of + `input_ids` and `input_embeds` must be supplied. + device: + The device to perform beam search on. + beam_size (`int`, *optional*, defaults to `5`): + The number of best states to store during beam search. + entry_length (`int`, *optional*, defaults to `67`): + The number of iterations to run beam search. + temperature (`float`, *optional*, defaults to 1.0): + The temperature to use when performing the softmax over logits from the decoding model. 
+ + Returns: + `Tuple(torch.Tensor, torch.Tensor)`: A tuple of tensors where the first element is a tensor of generated + token sequences sorted by score in descending order, and the second element is the sequence lengths + corresponding to those sequences. + """ + # Generates text until stop_token is reached using beam search with the desired beam size. + stop_token_index = eos_token_id + tokens = None + scores = None + seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int) + is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool) + + if input_embeds is not None: + generated = input_embeds + else: + generated = self.transformer.transformer.wte(input_ids) + + for i in range(entry_length): + outputs = self.transformer(inputs_embeds=generated) + logits = outputs.logits + logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) + logits = logits.softmax(-1).log() + + if scores is None: + scores, next_tokens = logits.topk(beam_size, -1) + generated = generated.expand(beam_size, *generated.shape[1:]) + next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0) + if tokens is None: + tokens = next_tokens + else: + tokens = tokens.expand(beam_size, *tokens.shape[1:]) + tokens = torch.cat((tokens, next_tokens), dim=1) + else: + logits[is_stopped] = -float(np.inf) + logits[is_stopped, 0] = 0 + scores_sum = scores[:, None] + logits + seq_lengths[~is_stopped] += 1 + scores_sum_average = scores_sum / seq_lengths[:, None] + scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1) + next_tokens_source = next_tokens // scores_sum.shape[1] + seq_lengths = seq_lengths[next_tokens_source] + next_tokens = next_tokens % scores_sum.shape[1] + next_tokens = next_tokens.unsqueeze(1) + tokens = tokens[next_tokens_source] + tokens = torch.cat((tokens, next_tokens), dim=1) + generated = generated[next_tokens_source] + scores = scores_sum_average * seq_lengths + is_stopped = is_stopped[next_tokens_source] + + next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1) + generated = torch.cat((generated, next_token_embed), dim=1) + is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze() + if is_stopped.all(): + break + + scores = scores / seq_lengths + order = scores.argsort(descending=True) + # tokens tensors are already padded to max_seq_length + output_texts = [tokens[i] for i in order] + output_texts = torch.stack(output_texts, dim=0) + seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype) + return output_texts, seq_lengths diff --git a/diffusers3/pipelines/unidiffuser/modeling_uvit.py b/diffusers3/pipelines/unidiffuser/modeling_uvit.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1514b153ce960438bed7be321329eb0df30dc6 --- /dev/null +++ b/diffusers3/pipelines/unidiffuser/modeling_uvit.py @@ -0,0 +1,1197 @@ +import math +from typing import Optional, Union + +import torch +from torch import nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models import ModelMixin +from ...models.attention import FeedForward +from ...models.attention_processor import Attention +from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed +from ...models.modeling_outputs import Transformer2DModelOutput +from ...models.normalization import AdaLayerNorm +from ...utils import logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def 
_no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + logger.warning( + "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect." + ) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.0)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # type: (torch.Tensor, float, float, float, float) -> torch.Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, + \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for + generating the random values works best when :math:`a \leq \text{mean} \leq b`. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +class PatchEmbed(nn.Module): + """2D Image to Patch Embedding""" + + def __init__( + self, + height=224, + width=224, + patch_size=16, + in_channels=3, + embed_dim=768, + layer_norm=False, + flatten=True, + bias=True, + use_pos_embed=True, + ): + super().__init__() + + num_patches = (height // patch_size) * (width // patch_size) + self.flatten = flatten + self.layer_norm = layer_norm + + self.proj = nn.Conv2d( + in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias + ) + if layer_norm: + self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) + else: + self.norm = None + + self.use_pos_embed = use_pos_embed + if self.use_pos_embed: + pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5)) + self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False) + + def forward(self, latent): + latent = self.proj(latent) + if self.flatten: + latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC + if self.layer_norm: + latent = self.norm(latent) + if self.use_pos_embed: + return latent + self.pos_embed + else: + return latent + + +class SkipBlock(nn.Module): + def __init__(self, dim: int): + super().__init__() + + self.skip_linear = nn.Linear(2 * dim, dim) + + # Use torch.nn.LayerNorm for now, following the original code + self.norm = nn.LayerNorm(dim) + + def forward(self, x, skip): + x = self.skip_linear(torch.cat([x, skip], dim=-1)) 
+ x = self.norm(x) + + return x + + +# Modified to support both pre-LayerNorm and post-LayerNorm configurations +# Don't support AdaLayerNormZero for now +# Modified from diffusers.models.attention.BasicTransformerBlock +class UTransformerBlock(nn.Module): + r""" + A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations. + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + activation_fn (`str`, *optional*, defaults to `"geglu"`): + Activation function to be used in feed-forward. + num_embeds_ada_norm (:obj: `int`, *optional*): + The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (:obj: `bool`, *optional*, defaults to `False`): + Configure if the attentions should contain a bias parameter. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + upcast_attention (`bool`, *optional*): + Whether to upcast the query and key to float32 when performing the attention calculation. + norm_elementwise_affine (`bool`, *optional*): + Whether to use learnable per-element affine parameters during layer normalization. + norm_type (`str`, defaults to `"layer_norm"`): + The layer norm implementation to use. + pre_layer_norm (`bool`, *optional*): + Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), + as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g. + `pre_layer_norm = True`. + final_dropout (`bool`, *optional*): + Whether to use a final Dropout layer after the feedforward network. + """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", + pre_layer_norm: bool = True, + final_dropout: bool = False, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + + self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" + + self.pre_layer_norm = pre_layer_norm + + if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: + raise ValueError( + f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" + f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." + ) + + # 1. Self-Attn + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + ) + + # 2. 
Cross-Attn + if cross_attention_dim is not None or double_self_attention: + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim if not double_self_attention else None, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + ) # is self-attn if encoder_hidden_states is none + else: + self.attn2 = None + + if self.use_ada_layer_norm: + self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + + if cross_attention_dim is not None or double_self_attention: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + self.norm2 = ( + AdaLayerNorm(dim, num_embeds_ada_norm) + if self.use_ada_layer_norm + else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + ) + else: + self.norm2 = None + + # 3. Feed-forward + self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) + + def forward( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + timestep=None, + cross_attention_kwargs=None, + class_labels=None, + ): + # Pre-LayerNorm + if self.pre_layer_norm: + if self.use_ada_layer_norm: + norm_hidden_states = self.norm1(hidden_states, timestep) + else: + norm_hidden_states = self.norm1(hidden_states) + else: + norm_hidden_states = hidden_states + + # 1. Self-Attention + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + # Post-LayerNorm + if not self.pre_layer_norm: + if self.use_ada_layer_norm: + attn_output = self.norm1(attn_output, timestep) + else: + attn_output = self.norm1(attn_output) + + hidden_states = attn_output + hidden_states + + if self.attn2 is not None: + # Pre-LayerNorm + if self.pre_layer_norm: + norm_hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + else: + norm_hidden_states = hidden_states + # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly + # prepare attention mask here + + # 2. Cross-Attention + attn_output = self.attn2( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + + # Post-LayerNorm + if not self.pre_layer_norm: + attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output) + + hidden_states = attn_output + hidden_states + + # 3. 
Feed-forward + # Pre-LayerNorm + if self.pre_layer_norm: + norm_hidden_states = self.norm3(hidden_states) + else: + norm_hidden_states = hidden_states + + ff_output = self.ff(norm_hidden_states) + + # Post-LayerNorm + if not self.pre_layer_norm: + ff_output = self.norm3(ff_output) + + hidden_states = ff_output + hidden_states + + return hidden_states + + +# Like UTransformerBlock except with LayerNorms on the residual backbone of the block +# Modified from diffusers.models.attention.BasicTransformerBlock +class UniDiffuserBlock(nn.Module): + r""" + A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the + LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser + implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104). + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + activation_fn (`str`, *optional*, defaults to `"geglu"`): + Activation function to be used in feed-forward. + num_embeds_ada_norm (:obj: `int`, *optional*): + The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (:obj: `bool`, *optional*, defaults to `False`): + Configure if the attentions should contain a bias parameter. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + upcast_attention (`bool`, *optional*): + Whether to upcast the query and key to float() when performing the attention calculation. + norm_elementwise_affine (`bool`, *optional*): + Whether to use learnable per-element affine parameters during layer normalization. + norm_type (`str`, defaults to `"layer_norm"`): + The layer norm implementation to use. + pre_layer_norm (`bool`, *optional*): + Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), + as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm + (`pre_layer_norm = False`). + final_dropout (`bool`, *optional*): + Whether to use a final Dropout layer after the feedforward network. 
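+
+    Example:
+        Minimal shape-check sketch (not from the original file): the dimensions are illustrative; with the
+        defaults the block is post-LayerNorm (the layout described above) and runs self-attention only.
+
+        ```py
+        >>> import torch
+        >>> from diffusers3.pipelines.unidiffuser.modeling_uvit import UniDiffuserBlock
+
+        >>> block = UniDiffuserBlock(dim=512, num_attention_heads=8, attention_head_dim=64)
+        >>> hidden_states = torch.randn(2, 77, 512)
+        >>> block(hidden_states).shape  # the residual block preserves the input shape
+        torch.Size([2, 77, 512])
+        ```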
+ """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", + pre_layer_norm: bool = False, + final_dropout: bool = True, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + + self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" + + self.pre_layer_norm = pre_layer_norm + + if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: + raise ValueError( + f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" + f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." + ) + + # 1. Self-Attn + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None or double_self_attention: + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim if not double_self_attention else None, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + ) # is self-attn if encoder_hidden_states is none + else: + self.attn2 = None + + if self.use_ada_layer_norm: + self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + + if cross_attention_dim is not None or double_self_attention: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + self.norm2 = ( + AdaLayerNorm(dim, num_embeds_ada_norm) + if self.use_ada_layer_norm + else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + ) + else: + self.norm2 = None + + # 3. Feed-forward + self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) + + def forward( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + timestep=None, + cross_attention_kwargs=None, + class_labels=None, + ): + # Following the diffusers transformer block implementation, put the LayerNorm on the + # residual backbone + # Pre-LayerNorm + if self.pre_layer_norm: + if self.use_ada_layer_norm: + hidden_states = self.norm1(hidden_states, timestep) + else: + hidden_states = self.norm1(hidden_states) + + # 1. 
Self-Attention + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + attn_output = self.attn1( + hidden_states, + encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + hidden_states = attn_output + hidden_states + + # Following the diffusers transformer block implementation, put the LayerNorm on the + # residual backbone + # Post-LayerNorm + if not self.pre_layer_norm: + if self.use_ada_layer_norm: + hidden_states = self.norm1(hidden_states, timestep) + else: + hidden_states = self.norm1(hidden_states) + + if self.attn2 is not None: + # Pre-LayerNorm + if self.pre_layer_norm: + hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly + # prepare attention mask here + + # 2. Cross-Attention + attn_output = self.attn2( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + + hidden_states = attn_output + hidden_states + + # Post-LayerNorm + if not self.pre_layer_norm: + hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + + # 3. Feed-forward + # Pre-LayerNorm + if self.pre_layer_norm: + hidden_states = self.norm3(hidden_states) + + ff_output = self.ff(hidden_states) + + hidden_states = ff_output + hidden_states + + # Post-LayerNorm + if not self.pre_layer_norm: + hidden_states = self.norm3(hidden_states) + + return hidden_states + + +# Modified from diffusers.models.transformer_2d.Transformer2DModel +# Modify the transformer block structure to be U-Net like following U-ViT +# Only supports patch-style input and torch.nn.LayerNorm currently +# https://github.com/baofff/U-ViT +class UTransformer2DModel(ModelMixin, ConfigMixin): + """ + Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared + to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion, + similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`] + layer and then reshaped to (b, t, d). + + Parameters: + num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. + attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. + in_channels (`int`, *optional*): + Pass if the input is continuous. The number of channels in the input. + out_channels (`int`, *optional*): + The number of output channels; if `None`, defaults to `in_channels`. + num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + norm_num_groups (`int`, *optional*, defaults to `32`): + The number of groups to use when performing Group Normalization. + cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. + attention_bias (`bool`, *optional*): + Configure if the TransformerBlocks' attention should contain a bias parameter. + sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. + Note that this is fixed at training time as it is used for learning a number of position embeddings. 
See + `ImagePositionalEmbeddings`. + num_vector_embeds (`int`, *optional*): + Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. + Includes the class for the masked latent pixel. + patch_size (`int`, *optional*, defaults to 2): + The patch size to use in the patch embedding. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. + The number of diffusion steps used during training. Note that this is fixed at training time as it is used + to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for + up to but not more than steps than `num_embeds_ada_norm`. + use_linear_projection (int, *optional*): TODO: Not used + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used in each + transformer block. + upcast_attention (`bool`, *optional*): + Whether to upcast the query and key to float() when performing the attention calculation. + norm_type (`str`, *optional*, defaults to `"layer_norm"`): + The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. + block_type (`str`, *optional*, defaults to `"unidiffuser"`): + The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual + backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard + behavior in `diffusers`.) + pre_layer_norm (`bool`, *optional*): + Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), + as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm + (`pre_layer_norm = False`). + norm_elementwise_affine (`bool`, *optional*): + Whether to use learnable per-element affine parameters during layer normalization. + use_patch_pos_embed (`bool`, *optional*): + Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). + final_dropout (`bool`, *optional*): + Whether to use a final Dropout layer after the feedforward network. + """ + + @register_to_config + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + sample_size: Optional[int] = None, + num_vector_embeds: Optional[int] = None, + patch_size: Optional[int] = 2, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + norm_type: str = "layer_norm", + block_type: str = "unidiffuser", + pre_layer_norm: bool = False, + norm_elementwise_affine: bool = True, + use_patch_pos_embed=False, + ff_final_dropout: bool = False, + ): + super().__init__() + self.use_linear_projection = use_linear_projection + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + + # 1. 
Input + # Only support patch input of shape (batch_size, num_channels, height, width) for now + assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size." + + assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size" + + # 2. Define input layers + self.height = sample_size + self.width = sample_size + + self.patch_size = patch_size + self.pos_embed = PatchEmbed( + height=sample_size, + width=sample_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dim=inner_dim, + use_pos_embed=use_patch_pos_embed, + ) + + # 3. Define transformers blocks + # Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block, + # and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in + # a "U"-shaped fashion (e.g. first in_block to last out_block, etc.). + # Quick hack to make the transformer block type configurable + if block_type == "unidiffuser": + block_cls = UniDiffuserBlock + else: + block_cls = UTransformerBlock + self.transformer_in_blocks = nn.ModuleList( + [ + block_cls( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + final_dropout=ff_final_dropout, + ) + for d in range(num_layers // 2) + ] + ) + + self.transformer_mid_block = block_cls( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + final_dropout=ff_final_dropout, + ) + + # For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs + # before each transformer out_block. + self.transformer_out_blocks = nn.ModuleList( + [ + nn.ModuleDict( + { + "skip": SkipBlock( + inner_dim, + ), + "block": block_cls( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + final_dropout=ff_final_dropout, + ), + } + ) + for d in range(num_layers // 2) + ] + ) + + # 4. 
Define output layers + self.out_channels = in_channels if out_channels is None else out_channels + + # Following the UniDiffuser U-ViT implementation, we process the transformer output with + # a LayerNorm layer with per-element affine params + self.norm_out = nn.LayerNorm(inner_dim) + + def forward( + self, + hidden_states, + encoder_hidden_states=None, + timestep=None, + class_labels=None, + cross_attention_kwargs=None, + return_dict: bool = True, + hidden_states_is_embedding: bool = False, + unpatchify: bool = True, + ): + """ + Args: + hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. + When continuous, `torch.Tensor` of shape `(batch size, channel, height, width)`): Input hidden_states + encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. + timestep ( `torch.long`, *optional*): + Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. + class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): + Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels + conditioning. + cross_attention_kwargs (*optional*): + Keyword arguments to supply to the cross attention layers, if used. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + hidden_states_is_embedding (`bool`, *optional*, defaults to `False`): + Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will + ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the + transformer blocks. + unpatchify (`bool`, *optional*, defaults to `True`): + Whether to unpatchify the transformer output. + + Returns: + [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: + [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + """ + # 0. Check inputs + + if not unpatchify and return_dict: + raise ValueError( + f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when" + f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)" + " rather than (batch_size, num_channels, height, width)." + ) + + # 1. Input + if not hidden_states_is_embedding: + hidden_states = self.pos_embed(hidden_states) + + # 2. Blocks + + # In ("downsample") blocks + skips = [] + for in_block in self.transformer_in_blocks: + hidden_states = in_block( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + timestep=timestep, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=class_labels, + ) + skips.append(hidden_states) + + # Mid block + hidden_states = self.transformer_mid_block(hidden_states) + + # Out ("upsample") blocks + for out_block in self.transformer_out_blocks: + hidden_states = out_block["skip"](hidden_states, skips.pop()) + hidden_states = out_block["block"]( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + timestep=timestep, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=class_labels, + ) + + # 3. 
Output
+        # Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic
+        hidden_states = self.norm_out(hidden_states)
+        # hidden_states = self.proj_out(hidden_states)
+
+        if unpatchify:
+            # unpatchify
+            height = width = int(hidden_states.shape[1] ** 0.5)
+            hidden_states = hidden_states.reshape(
+                shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
+            )
+            hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
+            output = hidden_states.reshape(
+                shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
+            )
+        else:
+            output = hidden_states
+
+        if not return_dict:
+            return (output,)
+
+        return Transformer2DModelOutput(sample=output)
+
+
+class UniDiffuserModel(ModelMixin, ConfigMixin):
+    """
+    Transformer model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a
+    modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the
+    CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details).
+
+    Parameters:
+        text_dim (`int`): The hidden dimension of the CLIP text model used to embed prompts.
+        clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed images.
+        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
+        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
+        in_channels (`int`, *optional*):
+            Pass if the input is continuous. The number of channels in the input.
+        out_channels (`int`, *optional*):
+            The number of output channels; if `None`, defaults to `in_channels`.
+        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
+        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
+        norm_num_groups (`int`, *optional*, defaults to `32`):
+            The number of groups to use when performing Group Normalization.
+        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
+        attention_bias (`bool`, *optional*):
+            Configure if the TransformerBlocks' attention should contain a bias parameter.
+        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
+            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
+            `ImagePositionalEmbeddings`.
+        num_vector_embeds (`int`, *optional*):
+            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
+            Includes the class for the masked latent pixel.
+        patch_size (`int`, *optional*, defaults to 2):
+            The patch size to use in the patch embedding.
+        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
+        num_embeds_ada_norm (`int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
+            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
+            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
+            up to, but not more than, `num_embeds_ada_norm` steps.
+        use_linear_projection (int, *optional*): TODO: Not used
+        only_cross_attention (`bool`, *optional*):
+            Whether to use only cross-attention layers. In this case two cross attention layers are used in each
+            transformer block.
+ upcast_attention (`bool`, *optional*): + Whether to upcast the query and key to float32 when performing the attention calculation. + norm_type (`str`, *optional*, defaults to `"layer_norm"`): + The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. + block_type (`str`, *optional*, defaults to `"unidiffuser"`): + The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual + backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard + behavior in `diffusers`.) + pre_layer_norm (`bool`, *optional*): + Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), + as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm + (`pre_layer_norm = False`). + norm_elementwise_affine (`bool`, *optional*): + Whether to use learnable per-element affine parameters during layer normalization. + use_patch_pos_embed (`bool`, *optional*): + Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). + ff_final_dropout (`bool`, *optional*): + Whether to use a final Dropout layer after the feedforward network. + use_data_type_embedding (`bool`, *optional*): + Whether to use a data type embedding. This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1 + is continue-trained from UniDiffuser-v0 on non-publically-available data and accepts a `data_type` + argument, which can either be `1` to use the weights trained on non-publically-available data or `0` + otherwise. This argument is subsequently embedded by the data type embedding, if used. + """ + + @register_to_config + def __init__( + self, + text_dim: int = 768, + clip_img_dim: int = 512, + num_text_tokens: int = 77, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + sample_size: Optional[int] = None, + num_vector_embeds: Optional[int] = None, + patch_size: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + norm_type: str = "layer_norm", + block_type: str = "unidiffuser", + pre_layer_norm: bool = False, + use_timestep_embedding=False, + norm_elementwise_affine: bool = True, + use_patch_pos_embed=False, + ff_final_dropout: bool = True, + use_data_type_embedding: bool = False, + ): + super().__init__() + + # 0. Handle dimensions + self.inner_dim = num_attention_heads * attention_head_dim + + assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size" + self.sample_size = sample_size + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + + self.patch_size = patch_size + # Assume image is square... + self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size) + + # 1. 
Define input layers + # 1.1 Input layers for text and image input + # For now, only support patch input for VAE latent image input + self.vae_img_in = PatchEmbed( + height=sample_size, + width=sample_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dim=self.inner_dim, + use_pos_embed=use_patch_pos_embed, + ) + self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim) + self.text_in = nn.Linear(text_dim, self.inner_dim) + + # 1.2. Timestep embeddings for t_img, t_text + self.timestep_img_proj = Timesteps( + self.inner_dim, + flip_sin_to_cos=True, + downscale_freq_shift=0, + ) + self.timestep_img_embed = ( + TimestepEmbedding( + self.inner_dim, + 4 * self.inner_dim, + out_dim=self.inner_dim, + ) + if use_timestep_embedding + else nn.Identity() + ) + + self.timestep_text_proj = Timesteps( + self.inner_dim, + flip_sin_to_cos=True, + downscale_freq_shift=0, + ) + self.timestep_text_embed = ( + TimestepEmbedding( + self.inner_dim, + 4 * self.inner_dim, + out_dim=self.inner_dim, + ) + if use_timestep_embedding + else nn.Identity() + ) + + # 1.3. Positional embedding + self.num_text_tokens = num_text_tokens + self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim)) + self.pos_embed_drop = nn.Dropout(p=dropout) + trunc_normal_(self.pos_embed, std=0.02) + + # 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary + self.use_data_type_embedding = use_data_type_embedding + if self.use_data_type_embedding: + self.data_type_token_embedding = nn.Embedding(2, self.inner_dim) + self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim)) + + # 2. Define transformer blocks + self.transformer = UTransformer2DModel( + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + in_channels=in_channels, + out_channels=out_channels, + num_layers=num_layers, + dropout=dropout, + norm_num_groups=norm_num_groups, + cross_attention_dim=cross_attention_dim, + attention_bias=attention_bias, + sample_size=sample_size, + num_vector_embeds=num_vector_embeds, + patch_size=patch_size, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + use_linear_projection=use_linear_projection, + only_cross_attention=only_cross_attention, + upcast_attention=upcast_attention, + norm_type=norm_type, + block_type=block_type, + pre_layer_norm=pre_layer_norm, + norm_elementwise_affine=norm_elementwise_affine, + use_patch_pos_embed=use_patch_pos_embed, + ff_final_dropout=ff_final_dropout, + ) + + # 3. Define output layers + patch_dim = (patch_size**2) * out_channels + self.vae_img_out = nn.Linear(self.inner_dim, patch_dim) + self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim) + self.text_out = nn.Linear(self.inner_dim, text_dim) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed"} + + def forward( + self, + latent_image_embeds: torch.Tensor, + image_embeds: torch.Tensor, + prompt_embeds: torch.Tensor, + timestep_img: Union[torch.Tensor, float, int], + timestep_text: Union[torch.Tensor, float, int], + data_type: Optional[Union[torch.Tensor, float, int]] = 1, + encoder_hidden_states=None, + cross_attention_kwargs=None, + ): + """ + Args: + latent_image_embeds (`torch.Tensor` of shape `(batch size, latent channels, height, width)`): + Latent image representation from the VAE encoder. + image_embeds (`torch.Tensor` of shape `(batch size, 1, clip_img_dim)`): + CLIP-embedded image representation (unsqueezed in the first dimension). 
+            prompt_embeds (`torch.Tensor` of shape `(batch size, seq_len, text_dim)`):
+                CLIP-embedded text representation.
+            timestep_img (`torch.long` or `float` or `int`):
+                Current denoising step for the image.
+            timestep_text (`torch.long` or `float` or `int`):
+                Current denoising step for the text.
+            data_type (`torch.int` or `float` or `int`, *optional*, defaults to `1`):
+                Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data,
+                or `0` otherwise.
+            encoder_hidden_states (`torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
+                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
+                self-attention.
+            cross_attention_kwargs (*optional*):
+                Keyword arguments to supply to the cross attention layers, if used.
+
+        Returns:
+            `tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is the VAE
+            image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text
+            embedding.
+        """
+        batch_size = latent_image_embeds.shape[0]
+
+        # 1. Input
+        # 1.1. Map inputs to shape (B, N, inner_dim)
+        vae_hidden_states = self.vae_img_in(latent_image_embeds)
+        clip_hidden_states = self.clip_img_in(image_embeds)
+        text_hidden_states = self.text_in(prompt_embeds)
+
+        num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1)
+
+        # 1.2. Encode image timesteps to single token (B, 1, inner_dim)
+        if not torch.is_tensor(timestep_img):
+            timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device)
+
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device)
+
+        timestep_img_token = self.timestep_img_proj(timestep_img)
+        # t_img_token does not contain any weights and will always return f32 tensors
+        # but time_embedding might be fp16, so we need to cast here.
+        timestep_img_token = timestep_img_token.to(dtype=self.dtype)
+        timestep_img_token = self.timestep_img_embed(timestep_img_token)
+        timestep_img_token = timestep_img_token.unsqueeze(dim=1)
+
+        # 1.3. Encode text timesteps to single token (B, 1, inner_dim)
+        if not torch.is_tensor(timestep_text):
+            timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device)
+
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device)
+
+        timestep_text_token = self.timestep_text_proj(timestep_text)
+        # t_text_token does not contain any weights and will always return f32 tensors
+        # but time_embedding might be fp16, so we need to cast here.
+        timestep_text_token = timestep_text_token.to(dtype=self.dtype)
+        timestep_text_token = self.timestep_text_embed(timestep_text_token)
+        timestep_text_token = timestep_text_token.unsqueeze(dim=1)
+
+        # 1.4. Concatenate all of the embeddings together.
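+        # The joint sequence fed to the transformer is laid out as
+        #   [t_img_token, t_text_token, (optional data_type_token), text_hidden_states, clip_hidden_states, vae_hidden_states],
+        # i.e. each timestep is encoded as one extra token. The same ordering is used in step 3 below when
+        # splitting the predicted noise back into its text / CLIP-image / VAE-image parts.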
+ if self.use_data_type_embedding: + assert data_type is not None, "data_type must be supplied if the model uses a data type embedding" + if not torch.is_tensor(data_type): + data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device) + + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device) + + data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1) + hidden_states = torch.cat( + [ + timestep_img_token, + timestep_text_token, + data_type_token, + text_hidden_states, + clip_hidden_states, + vae_hidden_states, + ], + dim=1, + ) + else: + hidden_states = torch.cat( + [timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states], + dim=1, + ) + + # 1.5. Prepare the positional embeddings and add to hidden states + # Note: I think img_vae should always have the proper shape, so there's no need to interpolate + # the position embeddings. + if self.use_data_type_embedding: + pos_embed = torch.cat( + [self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1 + ) + else: + pos_embed = self.pos_embed + hidden_states = hidden_states + pos_embed + hidden_states = self.pos_embed_drop(hidden_states) + + # 2. Blocks + hidden_states = self.transformer( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + timestep=None, + class_labels=None, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + hidden_states_is_embedding=True, + unpatchify=False, + )[0] + + # 3. Output + # Split out the predicted noise representation. + if self.use_data_type_embedding: + ( + t_img_token_out, + t_text_token_out, + data_type_token_out, + text_out, + img_clip_out, + img_vae_out, + ) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1) + else: + t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split( + (1, 1, num_text_tokens, 1, num_img_tokens), dim=1 + ) + + img_vae_out = self.vae_img_out(img_vae_out) + + # unpatchify + height = width = int(img_vae_out.shape[1] ** 0.5) + img_vae_out = img_vae_out.reshape( + shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) + ) + img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out) + img_vae_out = img_vae_out.reshape( + shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) + ) + + img_clip_out = self.clip_img_out(img_clip_out) + + text_out = self.text_out(text_out) + + return img_vae_out, img_clip_out, text_out diff --git a/diffusers3/pipelines/unidiffuser/pipeline_unidiffuser.py b/diffusers3/pipelines/unidiffuser/pipeline_unidiffuser.py new file mode 100644 index 0000000000000000000000000000000000000000..4f65caf4e61061ff2ce1e71f5db947f6acc4d65a --- /dev/null +++ b/diffusers3/pipelines/unidiffuser/pipeline_unidiffuser.py @@ -0,0 +1,1420 @@ +import inspect +from dataclasses import dataclass +from typing import Callable, List, Optional, Union + +import numpy as np +import PIL.Image +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionModelWithProjection, + GPT2Tokenizer, +) + +from ...image_processor import VaeImageProcessor +from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL +from ...models.lora import adjust_lora_scale_text_encoder +from ...schedulers import 
KarrasDiffusionSchedulers
+from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
+from ...utils.outputs import BaseOutput
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline
+from .modeling_text_decoder import UniDiffuserTextDecoder
+from .modeling_uvit import UniDiffuserModel
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+# New BaseOutput child class for joint image-text output
+@dataclass
+class ImageTextPipelineOutput(BaseOutput):
+    """
+    Output class for joint image-text pipelines.
+
+    Args:
+        images (`List[PIL.Image.Image]` or `np.ndarray`)
+            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
+            num_channels)`.
+        text (`List[str]` or `List[List[str]]`)
+            List of generated text strings of length `batch_size` or a list of list of strings whose outer list has
+            length `batch_size`.
+    """
+
+    images: Optional[Union[List[PIL.Image.Image], np.ndarray]]
+    text: Optional[Union[List[str], List[List[str]]]]
+
+
+class UniDiffuserPipeline(DiffusionPipeline):
+    r"""
+    Pipeline for a bimodal image-text model which supports unconditional text and image generation, text-conditioned
+    image generation, image-conditioned text generation, and joint image-text generation.
+
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
+    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
+
+    Args:
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. This
+            is part of the UniDiffuser image representation along with the CLIP vision encoding.
+        text_encoder ([`CLIPTextModel`]):
+            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
+        image_encoder ([`CLIPVisionModel`]):
+            A [`~transformers.CLIPVisionModel`] to encode images as part of its image representation along with the VAE
+            latent representation.
+        clip_image_processor ([`CLIPImageProcessor`]):
+            A [`~transformers.CLIPImageProcessor`] to preprocess an image before CLIP encoding it with `image_encoder`.
+        clip_tokenizer ([`CLIPTokenizer`]):
+            A [`~transformers.CLIPTokenizer`] to tokenize the prompt before encoding it with `text_encoder`.
+        text_decoder ([`UniDiffuserTextDecoder`]):
+            Frozen text decoder. This is a GPT-style model which is used to generate text from the UniDiffuser
+            embedding.
+        text_tokenizer ([`GPT2Tokenizer`]):
+            A [`~transformers.GPT2Tokenizer`] to decode text for text generation; used along with the `text_decoder`.
+        unet ([`UniDiffuserModel`]):
+            A [U-ViT](https://github.com/baofff/U-ViT) model with U-Net-style skip connections between transformer
+            layers to denoise the encoded image latents.
+        scheduler ([`SchedulerMixin`]):
+            A scheduler to be used in combination with `unet` to denoise the encoded image and/or text latents. The
+            original UniDiffuser paper uses the [`DPMSolverMultistepScheduler`] scheduler.
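+
+    Examples:
+        A minimal usage sketch. The checkpoint id and the `__call__` keyword arguments shown here mirror the
+        upstream `diffusers` UniDiffuser pipeline and should be treated as assumptions for this vendored copy
+        rather than a guaranteed interface:
+
+        ```py
+        >>> import torch
+
+        >>> from diffusers3.pipelines.unidiffuser.pipeline_unidiffuser import UniDiffuserPipeline
+
+        >>> pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
+        >>> pipe = pipe.to("cuda")
+
+        >>> # Text-conditioned image generation; "text2img" mode is inferred from the prompt.
+        >>> sample = pipe(prompt="an astronaut riding a horse", num_inference_steps=20, guidance_scale=8.0)
+        >>> image = sample.images[0]
+
+        >>> # Unconditional joint image-text generation.
+        >>> pipe.set_joint_mode()
+        >>> sample = pipe(num_inference_steps=20, guidance_scale=8.0)
+        >>> image, text = sample.images[0], sample.text[0]
+        ```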
+ """ + + # TODO: support for moving submodules for components with enable_model_cpu_offload + model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae->text_decoder" + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + image_encoder: CLIPVisionModelWithProjection, + clip_image_processor: CLIPImageProcessor, + clip_tokenizer: CLIPTokenizer, + text_decoder: UniDiffuserTextDecoder, + text_tokenizer: GPT2Tokenizer, + unet: UniDiffuserModel, + scheduler: KarrasDiffusionSchedulers, + ): + super().__init__() + + if text_encoder.config.hidden_size != text_decoder.prefix_inner_dim: + raise ValueError( + f"The text encoder hidden size and text decoder prefix inner dim must be the same, but" + f" `text_encoder.config.hidden_size`: {text_encoder.config.hidden_size} and `text_decoder.prefix_inner_dim`: {text_decoder.prefix_inner_dim}" + ) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + image_encoder=image_encoder, + clip_image_processor=clip_image_processor, + clip_tokenizer=clip_tokenizer, + text_decoder=text_decoder, + text_tokenizer=text_tokenizer, + unet=unet, + scheduler=scheduler, + ) + + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.num_channels_latents = vae.config.latent_channels + self.text_encoder_seq_len = text_encoder.config.max_position_embeddings + self.text_encoder_hidden_size = text_encoder.config.hidden_size + self.image_encoder_projection_dim = image_encoder.config.projection_dim + self.unet_resolution = unet.config.sample_size + + self.text_intermediate_dim = self.text_encoder_hidden_size + if self.text_decoder.prefix_hidden_dim is not None: + self.text_intermediate_dim = self.text_decoder.prefix_hidden_dim + + self.mode = None + + # TODO: handle safety checking? + self.safety_checker = None + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def _infer_mode(self, prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents): + r""" + Infer the generation task ('mode') from the inputs to `__call__`. If the mode has been manually set, the set + mode will be used. 
+ """ + prompt_available = (prompt is not None) or (prompt_embeds is not None) + image_available = image is not None + input_available = prompt_available or image_available + + prompt_latents_available = prompt_latents is not None + vae_latents_available = vae_latents is not None + clip_latents_available = clip_latents is not None + full_latents_available = latents is not None + image_latents_available = vae_latents_available and clip_latents_available + all_indv_latents_available = prompt_latents_available and image_latents_available + + if self.mode is not None: + # Preferentially use the mode set by the user + mode = self.mode + elif prompt_available: + mode = "text2img" + elif image_available: + mode = "img2text" + else: + # Neither prompt nor image supplied, infer based on availability of latents + if full_latents_available or all_indv_latents_available: + mode = "joint" + elif prompt_latents_available: + mode = "text" + elif image_latents_available: + mode = "img" + else: + # No inputs or latents available + mode = "joint" + + # Give warnings for ambiguous cases + if self.mode is None and prompt_available and image_available: + logger.warning( + f"You have supplied both a text prompt and image to the pipeline and mode has not been set manually," + f" defaulting to mode '{mode}'." + ) + + if self.mode is None and not input_available: + if vae_latents_available != clip_latents_available: + # Exactly one of vae_latents and clip_latents is supplied + logger.warning( + f"You have supplied exactly one of `vae_latents` and `clip_latents`, whereas either both or none" + f" are expected to be supplied. Defaulting to mode '{mode}'." + ) + elif not prompt_latents_available and not vae_latents_available and not clip_latents_available: + # No inputs or latents supplied + logger.warning( + f"No inputs or latents have been supplied, and mode has not been manually set," + f" defaulting to mode '{mode}'." + ) + + return mode + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_slicing + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.enable_vae_tiling + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + # Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_tiling + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. 
+ """ + self.vae.disable_tiling() + + # Functions to manually set the mode + def set_text_mode(self): + r"""Manually set the generation mode to unconditional ("marginal") text generation.""" + self.mode = "text" + + def set_image_mode(self): + r"""Manually set the generation mode to unconditional ("marginal") image generation.""" + self.mode = "img" + + def set_text_to_image_mode(self): + r"""Manually set the generation mode to text-conditioned image generation.""" + self.mode = "text2img" + + def set_image_to_text_mode(self): + r"""Manually set the generation mode to image-conditioned text generation.""" + self.mode = "img2text" + + def set_joint_mode(self): + r"""Manually set the generation mode to unconditional joint image-text generation.""" + self.mode = "joint" + + def reset_mode(self): + r"""Removes a manually set mode; after calling this, the pipeline will infer the mode from inputs.""" + self.mode = None + + def _infer_batch_size( + self, + mode, + prompt, + prompt_embeds, + image, + num_images_per_prompt, + num_prompts_per_image, + latents, + prompt_latents, + vae_latents, + clip_latents, + ): + r"""Infers the batch size and multiplier depending on mode and supplied arguments to `__call__`.""" + if num_images_per_prompt is None: + num_images_per_prompt = 1 + if num_prompts_per_image is None: + num_prompts_per_image = 1 + + assert num_images_per_prompt > 0, "num_images_per_prompt must be a positive integer" + assert num_prompts_per_image > 0, "num_prompts_per_image must be a positive integer" + + if mode in ["text2img"]: + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + # Either prompt or prompt_embeds must be present for text2img. + batch_size = prompt_embeds.shape[0] + multiplier = num_images_per_prompt + elif mode in ["img2text"]: + if isinstance(image, PIL.Image.Image): + batch_size = 1 + else: + # Image must be available and type either PIL.Image.Image or torch.Tensor. + # Not currently supporting something like image_embeds. + batch_size = image.shape[0] + multiplier = num_prompts_per_image + elif mode in ["img"]: + if vae_latents is not None: + batch_size = vae_latents.shape[0] + elif clip_latents is not None: + batch_size = clip_latents.shape[0] + else: + batch_size = 1 + multiplier = num_images_per_prompt + elif mode in ["text"]: + if prompt_latents is not None: + batch_size = prompt_latents.shape[0] + else: + batch_size = 1 + multiplier = num_prompts_per_image + elif mode in ["joint"]: + if latents is not None: + batch_size = latents.shape[0] + elif prompt_latents is not None: + batch_size = prompt_latents.shape[0] + elif vae_latents is not None: + batch_size = vae_latents.shape[0] + elif clip_latents is not None: + batch_size = clip_latents.shape[0] + else: + batch_size = 1 + + if num_images_per_prompt == num_prompts_per_image: + multiplier = num_images_per_prompt + else: + multiplier = min(num_images_per_prompt, num_prompts_per_image) + logger.warning( + f"You are using mode `{mode}` and `num_images_per_prompt`: {num_images_per_prompt} and" + f" num_prompts_per_image: {num_prompts_per_image} are not equal. Using batch size equal to" + f" `min(num_images_per_prompt, num_prompts_per_image) = {batch_size}." 
+ ) + return batch_size, multiplier + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt + def _encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + **kwargs, + ): + deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." + deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) + + prompt_embeds_tuple = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + lora_scale=lora_scale, + **kwargs, + ) + + # concatenate for backwards comp + prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) + + return prompt_embeds + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with self.tokenizer->self.clip_tokenizer + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. 
+ """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) + else: + scale_lora_layers(self.text_encoder, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, self.clip_tokenizer) + + text_inputs = self.clip_tokenizer( + prompt, + padding="max_length", + max_length=self.clip_tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.clip_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.clip_tokenizer.batch_decode( + untruncated_ids[:, self.clip_tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.clip_tokenizer.model_max_length} tokens: {removed_text}" + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = text_inputs.attention_mask.to(device) + else: + attention_mask = None + + if clip_skip is None: + prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) + prompt_embeds = prompt_embeds[0] + else: + prompt_embeds = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) + + if self.text_encoder is not None: + prompt_embeds_dtype = self.text_encoder.dtype + elif self.unet is not None: + prompt_embeds_dtype = self.unet.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.clip_tokenizer) + + max_length = prompt_embeds.shape[1] + uncond_input = self.clip_tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="pt", + ) + + if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: + attention_mask = uncond_input.attention_mask.to(device) + else: + attention_mask = None + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(device), + attention_mask=attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + if self.text_encoder is not None: + if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + return prompt_embeds, negative_prompt_embeds + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_instruct_pix2pix.StableDiffusionInstructPix2PixPipeline.prepare_image_latents + # Add num_prompts_per_image argument, sample from autoencoder moment distribution + def encode_image_vae_latents( + self, + image, + batch_size, + num_prompts_per_image, + dtype, + device, + do_classifier_free_guidance, + generator=None, + ): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, 
`PIL.Image.Image` or list but is {type(image)}" + ) + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_prompts_per_image + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if isinstance(generator, list): + image_latents = [ + self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i]) + * self.vae.config.scaling_factor + for i in range(batch_size) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.vae.encode(image).latent_dist.sample(generator=generator) + # Scale image_latents by the VAE's scaling factor + image_latents = image_latents * self.vae.config.scaling_factor + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if do_classifier_free_guidance: + uncond_image_latents = torch.zeros_like(image_latents) + image_latents = torch.cat([image_latents, image_latents, uncond_image_latents], dim=0) + + return image_latents + + def encode_image_clip_latents( + self, + image, + batch_size, + num_prompts_per_image, + dtype, + device, + generator=None, + ): + # Map image to CLIP embedding. + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + preprocessed_image = self.clip_image_processor.preprocess( + image, + return_tensors="pt", + ) + preprocessed_image = preprocessed_image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_prompts_per_image + if isinstance(generator, list): + image_latents = [ + self.image_encoder(**preprocessed_image[i : i + 1]).image_embeds for i in range(batch_size) + ] + image_latents = torch.cat(image_latents, dim=0) + else: + image_latents = self.image_encoder(**preprocessed_image).image_embeds + + if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: + # expand image_latents for batch_size + deprecation_message = ( + f"You have passed {batch_size} text prompts (`prompt`), but only {image_latents.shape[0]} initial" + " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" + " that this behavior is deprecated and will be removed in a version 1.0.0. 
Please make sure to update" + " your script to pass as many initial images as text prompts to suppress this warning." + ) + deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) + additional_image_per_prompt = batch_size // image_latents.shape[0] + image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) + elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." + ) + else: + image_latents = torch.cat([image_latents], dim=0) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + return image_latents + + def prepare_text_latents( + self, batch_size, num_images_per_prompt, seq_len, hidden_size, dtype, device, generator, latents=None + ): + # Prepare latents for the CLIP embedded prompt. + shape = (batch_size * num_images_per_prompt, seq_len, hidden_size) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + # latents is assumed to have shace (B, L, D) + latents = latents.repeat(num_images_per_prompt, 1, 1) + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + # Rename prepare_latents -> prepare_image_vae_latents and add num_prompts_per_image argument. + def prepare_image_vae_latents( + self, + batch_size, + num_prompts_per_image, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + shape = ( + batch_size * num_prompts_per_image, + num_channels_latents, + height // self.vae_scale_factor, + width // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + # latents is assumed to have shape (B, C, H, W) + latents = latents.repeat(num_prompts_per_image, 1, 1, 1) + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def prepare_image_clip_latents( + self, batch_size, num_prompts_per_image, clip_img_dim, dtype, device, generator, latents=None + ): + # Prepare latents for the CLIP embedded image. 
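+        # The CLIP image branch is a single token per sample, so this latent has shape
+        # (batch_size * num_prompts_per_image, 1, clip_img_dim); as with the other latent branches, freshly
+        # sampled noise is scaled by the scheduler's init_noise_sigma.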
+ shape = (batch_size * num_prompts_per_image, 1, clip_img_dim) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + # latents is assumed to have shape (B, L, D) + latents = latents.repeat(num_prompts_per_image, 1, 1) + latents = latents.to(device=device, dtype=dtype) + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def decode_text_latents(self, text_latents, device): + output_token_list, seq_lengths = self.text_decoder.generate_captions( + text_latents, self.text_tokenizer.eos_token_id, device=device + ) + output_list = output_token_list.cpu().numpy() + generated_text = [ + self.text_tokenizer.decode(output[: int(length)], skip_special_tokens=True) + for output, length in zip(output_list, seq_lengths) + ] + return generated_text + + def _split(self, x, height, width): + r""" + Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim) into two tensors of shape (B, C, H, W) + and (B, 1, clip_img_dim) + """ + batch_size = x.shape[0] + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + img_vae_dim = self.num_channels_latents * latent_height * latent_width + + img_vae, img_clip = x.split([img_vae_dim, self.image_encoder_projection_dim], dim=1) + + img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) + img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) + return img_vae, img_clip + + def _combine(self, img_vae, img_clip): + r""" + Combines a latent iamge img_vae of shape (B, C, H, W) and a CLIP-embedded image img_clip of shape (B, 1, + clip_img_dim) into a single tensor of shape (B, C * H * W + clip_img_dim). + """ + img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) + img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) + return torch.concat([img_vae, img_clip], dim=-1) + + def _split_joint(self, x, height, width): + r""" + Splits a flattened embedding x of shape (B, C * H * W + clip_img_dim + text_seq_len * text_dim] into (img_vae, + img_clip, text) where img_vae is of shape (B, C, H, W), img_clip is of shape (B, 1, clip_img_dim), and text is + of shape (B, text_seq_len, text_dim). 
+ """ + batch_size = x.shape[0] + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + img_vae_dim = self.num_channels_latents * latent_height * latent_width + text_dim = self.text_encoder_seq_len * self.text_intermediate_dim + + img_vae, img_clip, text = x.split([img_vae_dim, self.image_encoder_projection_dim, text_dim], dim=1) + + img_vae = torch.reshape(img_vae, (batch_size, self.num_channels_latents, latent_height, latent_width)) + img_clip = torch.reshape(img_clip, (batch_size, 1, self.image_encoder_projection_dim)) + text = torch.reshape(text, (batch_size, self.text_encoder_seq_len, self.text_intermediate_dim)) + return img_vae, img_clip, text + + def _combine_joint(self, img_vae, img_clip, text): + r""" + Combines a latent image img_vae of shape (B, C, H, W), a CLIP-embedded image img_clip of shape (B, L_img, + clip_img_dim), and a text embedding text of shape (B, L_text, text_dim) into a single embedding x of shape (B, + C * H * W + L_img * clip_img_dim + L_text * text_dim). + """ + img_vae = torch.reshape(img_vae, (img_vae.shape[0], -1)) + img_clip = torch.reshape(img_clip, (img_clip.shape[0], -1)) + text = torch.reshape(text, (text.shape[0], -1)) + return torch.concat([img_vae, img_clip, text], dim=-1) + + def _get_noise_pred( + self, + mode, + latents, + t, + prompt_embeds, + img_vae, + img_clip, + max_timestep, + data_type, + guidance_scale, + generator, + device, + height, + width, + ): + r""" + Gets the noise prediction using the `unet` and performs classifier-free guidance, if necessary. + """ + if mode == "joint": + # Joint text-image generation + img_vae_latents, img_clip_latents, text_latents = self._split_joint(latents, height, width) + + img_vae_out, img_clip_out, text_out = self.unet( + img_vae_latents, img_clip_latents, text_latents, timestep_img=t, timestep_text=t, data_type=data_type + ) + + x_out = self._combine_joint(img_vae_out, img_clip_out, text_out) + + if guidance_scale <= 1.0: + return x_out + + # Classifier-free guidance + img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) + img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) + text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) + + _, _, text_out_uncond = self.unet( + img_vae_T, img_clip_T, text_latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type + ) + + img_vae_out_uncond, img_clip_out_uncond, _ = self.unet( + img_vae_latents, + img_clip_latents, + text_T, + timestep_img=t, + timestep_text=max_timestep, + data_type=data_type, + ) + + x_out_uncond = self._combine_joint(img_vae_out_uncond, img_clip_out_uncond, text_out_uncond) + + return guidance_scale * x_out + (1.0 - guidance_scale) * x_out_uncond + elif mode == "text2img": + # Text-conditioned image generation + img_vae_latents, img_clip_latents = self._split(latents, height, width) + + img_vae_out, img_clip_out, text_out = self.unet( + img_vae_latents, img_clip_latents, prompt_embeds, timestep_img=t, timestep_text=0, data_type=data_type + ) + + img_out = self._combine(img_vae_out, img_clip_out) + + if guidance_scale <= 1.0: + return img_out + + # Classifier-free guidance + text_T = randn_tensor(prompt_embeds.shape, generator=generator, device=device, dtype=prompt_embeds.dtype) + + img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( + img_vae_latents, + img_clip_latents, + text_T, + timestep_img=t, + 
timestep_text=max_timestep, + data_type=data_type, + ) + + img_out_uncond = self._combine(img_vae_out_uncond, img_clip_out_uncond) + + return guidance_scale * img_out + (1.0 - guidance_scale) * img_out_uncond + elif mode == "img2text": + # Image-conditioned text generation + img_vae_out, img_clip_out, text_out = self.unet( + img_vae, img_clip, latents, timestep_img=0, timestep_text=t, data_type=data_type + ) + + if guidance_scale <= 1.0: + return text_out + + # Classifier-free guidance + img_vae_T = randn_tensor(img_vae.shape, generator=generator, device=device, dtype=img_vae.dtype) + img_clip_T = randn_tensor(img_clip.shape, generator=generator, device=device, dtype=img_clip.dtype) + + img_vae_out_uncond, img_clip_out_uncond, text_out_uncond = self.unet( + img_vae_T, img_clip_T, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type + ) + + return guidance_scale * text_out + (1.0 - guidance_scale) * text_out_uncond + elif mode == "text": + # Unconditional ("marginal") text generation (no CFG) + img_vae_out, img_clip_out, text_out = self.unet( + img_vae, img_clip, latents, timestep_img=max_timestep, timestep_text=t, data_type=data_type + ) + + return text_out + elif mode == "img": + # Unconditional ("marginal") image generation (no CFG) + img_vae_latents, img_clip_latents = self._split(latents, height, width) + + img_vae_out, img_clip_out, text_out = self.unet( + img_vae_latents, + img_clip_latents, + prompt_embeds, + timestep_img=t, + timestep_text=max_timestep, + data_type=data_type, + ) + + img_out = self._combine(img_vae_out, img_clip_out) + return img_out + + def check_latents_shape(self, latents_name, latents, expected_shape): + latents_shape = latents.shape + expected_num_dims = len(expected_shape) + 1 # expected dimensions plus the batch dimension + expected_shape_str = ", ".join(str(dim) for dim in expected_shape) + if len(latents_shape) != expected_num_dims: + raise ValueError( + f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" + f" {latents_shape} has {len(latents_shape)} dimensions." + ) + for i in range(1, expected_num_dims): + if latents_shape[i] != expected_shape[i - 1]: + raise ValueError( + f"`{latents_name}` should have shape (batch_size, {expected_shape_str}), but the current shape" + f" {latents_shape} has {latents_shape[i]} != {expected_shape[i - 1]} at dimension {i}." + ) + + def check_inputs( + self, + mode, + prompt, + image, + height, + width, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + latents=None, + prompt_latents=None, + vae_latents=None, + clip_latents=None, + ): + # Check inputs before running the generative process. + if height % self.vae_scale_factor != 0 or width % self.vae_scale_factor != 0: + raise ValueError( + f"`height` and `width` have to be divisible by {self.vae_scale_factor} but are {height} and {width}." + ) + + if (callback_steps is None) or ( + callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + + if mode == "text2img": + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if mode == "img2text": + if image is None: + raise ValueError("`img2text` mode requires an image to be provided.") + + # Check provided latents + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + full_latents_available = latents is not None + prompt_latents_available = prompt_latents is not None + vae_latents_available = vae_latents is not None + clip_latents_available = clip_latents is not None + + if full_latents_available: + individual_latents_available = ( + prompt_latents is not None or vae_latents is not None or clip_latents is not None + ) + if individual_latents_available: + logger.warning( + "You have supplied both `latents` and at least one of `prompt_latents`, `vae_latents`, and" + " `clip_latents`. The value of `latents` will override the value of any individually supplied latents." + ) + # Check shape of full latents + img_vae_dim = self.num_channels_latents * latent_height * latent_width + text_dim = self.text_encoder_seq_len * self.text_encoder_hidden_size + latents_dim = img_vae_dim + self.image_encoder_projection_dim + text_dim + latents_expected_shape = (latents_dim,) + self.check_latents_shape("latents", latents, latents_expected_shape) + + # Check individual latent shapes, if present + if prompt_latents_available: + prompt_latents_expected_shape = (self.text_encoder_seq_len, self.text_encoder_hidden_size) + self.check_latents_shape("prompt_latents", prompt_latents, prompt_latents_expected_shape) + + if vae_latents_available: + vae_latents_expected_shape = (self.num_channels_latents, latent_height, latent_width) + self.check_latents_shape("vae_latents", vae_latents, vae_latents_expected_shape) + + if clip_latents_available: + clip_latents_expected_shape = (1, self.image_encoder_projection_dim) + self.check_latents_shape("clip_latents", clip_latents, clip_latents_expected_shape) + + if mode in ["text2img", "img"] and vae_latents_available and clip_latents_available: + if vae_latents.shape[0] != clip_latents.shape[0]: + raise ValueError( + f"Both `vae_latents` and `clip_latents` are supplied, but their batch dimensions are not equal:" + f" {vae_latents.shape[0]} != {clip_latents.shape[0]}." + ) + + if mode == "joint" and prompt_latents_available and vae_latents_available and clip_latents_available: + if prompt_latents.shape[0] != vae_latents.shape[0] or prompt_latents.shape[0] != clip_latents.shape[0]: + raise ValueError( + f"All of `prompt_latents`, `vae_latents`, and `clip_latents` are supplied, but their batch" + f" dimensions are not equal: {prompt_latents.shape[0]} != {vae_latents.shape[0]}" + f" != {clip_latents.shape[0]}." 
+ ) + + @torch.no_grad() + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + data_type: Optional[int] = 1, + num_inference_steps: int = 50, + guidance_scale: float = 8.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + num_prompts_per_image: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_latents: Optional[torch.Tensor] = None, + vae_latents: Optional[torch.Tensor] = None, + clip_latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, + callback_steps: int = 1, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + Required for text-conditioned image generation (`text2img`) mode. + image (`torch.Tensor` or `PIL.Image.Image`, *optional*): + `Image` or tensor representing an image batch. Required for image-conditioned text generation + (`img2text`) mode. + height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): + The width in pixels of the generated image. + data_type (`int`, *optional*, defaults to 1): + The data type (either 0 or 1). Only used if you are loading a checkpoint which supports a data type + embedding; this is added for compatibility with the + [UniDiffuser-v1](https://huggingface.co/thu-ml/unidiffuser-v1) checkpoint. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 8.0): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). Used in + text-conditioned image generation (`text2img`) mode. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. Used in `text2img` (text-conditioned image generation) and + `img` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are + supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. + num_prompts_per_image (`int`, *optional*, defaults to 1): + The number of prompts to generate per image. Used in `img2text` (image-conditioned text generation) and + `text` mode. If the mode is joint and both `num_images_per_prompt` and `num_prompts_per_image` are + supplied, `min(num_images_per_prompt, num_prompts_per_image)` samples are generated. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (ฮท) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for joint + image-text generation. Can be used to tweak the same generation with different prompts. If not + provided, a latents tensor is generated by sampling using the supplied random `generator`. This assumes + a full set of VAE, CLIP, and text latents, if supplied, overrides the value of `prompt_latents`, + `vae_latents`, and `clip_latents`. + prompt_latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for text + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + vae_latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + clip_latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. Used in text-conditioned + image generation (`text2img`) mode. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are be generated from the `negative_prompt` input argument. Used + in text-conditioned image generation (`text2img`) mode. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImageTextPipelineOutput`] instead of a plain tuple. + callback (`Callable`, *optional*): + A function that calls every `callback_steps` steps during inference. The function is called with the + following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function is called. If not specified, the callback is called at + every step. + + Returns: + [`~pipelines.unidiffuser.ImageTextPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.unidiffuser.ImageTextPipelineOutput`] is returned, otherwise a + `tuple` is returned where the first element is a list with the generated images and the second element + is a list of generated texts. + """ + + # 0. 
Default height and width to unet + height = height or self.unet_resolution * self.vae_scale_factor + width = width or self.unet_resolution * self.vae_scale_factor + + # 1. Check inputs + # Recalculate mode for each call to the pipeline. + mode = self._infer_mode(prompt, prompt_embeds, image, latents, prompt_latents, vae_latents, clip_latents) + self.check_inputs( + mode, + prompt, + image, + height, + width, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + latents, + prompt_latents, + vae_latents, + clip_latents, + ) + + # 2. Define call parameters + batch_size, multiplier = self._infer_batch_size( + mode, + prompt, + prompt_embeds, + image, + num_images_per_prompt, + num_prompts_per_image, + latents, + prompt_latents, + vae_latents, + clip_latents, + ) + device = self._execution_device + reduce_text_emb_dim = self.text_intermediate_dim < self.text_encoder_hidden_size or self.mode != "text2img" + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + # Note that this differs from the formulation in the unidiffusers paper! + do_classifier_free_guidance = guidance_scale > 1.0 + + # check if scheduler is in sigmas space + # scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas") + + # 3. Encode input prompt, if available; otherwise prepare text latents + if latents is not None: + # Overwrite individual latents + vae_latents, clip_latents, prompt_latents = self._split_joint(latents, height, width) + + if mode in ["text2img"]: + # 3.1. Encode input prompt, if available + assert prompt is not None or prompt_embeds is not None + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=multiplier, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # if do_classifier_free_guidance: + # prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + else: + # 3.2. Prepare text latent variables, if input not available + prompt_embeds = self.prepare_text_latents( + batch_size=batch_size, + num_images_per_prompt=multiplier, + seq_len=self.text_encoder_seq_len, + hidden_size=self.text_encoder_hidden_size, + dtype=self.text_encoder.dtype, # Should work with both full precision and mixed precision + device=device, + generator=generator, + latents=prompt_latents, + ) + + if reduce_text_emb_dim: + prompt_embeds = self.text_decoder.encode(prompt_embeds) + + # 4. Encode image, if available; otherwise prepare image latents + if mode in ["img2text"]: + # 4.1. 
Encode images, if available + assert image is not None, "`img2text` requires a conditioning image" + # Encode image using VAE + image_vae = self.image_processor.preprocess(image) + height, width = image_vae.shape[-2:] + image_vae_latents = self.encode_image_vae_latents( + image=image_vae, + batch_size=batch_size, + num_prompts_per_image=multiplier, + dtype=prompt_embeds.dtype, + device=device, + do_classifier_free_guidance=False, # Copied from InstructPix2Pix, don't use their version of CFG + generator=generator, + ) + + # Encode image using CLIP + image_clip_latents = self.encode_image_clip_latents( + image=image, + batch_size=batch_size, + num_prompts_per_image=multiplier, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + ) + # (batch_size, clip_hidden_size) => (batch_size, 1, clip_hidden_size) + image_clip_latents = image_clip_latents.unsqueeze(1) + else: + # 4.2. Prepare image latent variables, if input not available + # Prepare image VAE latents in latent space + image_vae_latents = self.prepare_image_vae_latents( + batch_size=batch_size, + num_prompts_per_image=multiplier, + num_channels_latents=self.num_channels_latents, + height=height, + width=width, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=vae_latents, + ) + + # Prepare image CLIP latents + image_clip_latents = self.prepare_image_clip_latents( + batch_size=batch_size, + num_prompts_per_image=multiplier, + clip_img_dim=self.image_encoder_projection_dim, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + latents=clip_latents, + ) + + # 5. Set timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + # max_timestep = timesteps[0] + max_timestep = self.scheduler.config.num_train_timesteps + + # 6. Prepare latent variables + if mode == "joint": + latents = self._combine_joint(image_vae_latents, image_clip_latents, prompt_embeds) + elif mode in ["text2img", "img"]: + latents = self._combine(image_vae_latents, image_clip_latents) + elif mode in ["img2text", "text"]: + latents = prompt_embeds + + # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + logger.debug(f"Scheduler extra step kwargs: {extra_step_kwargs}") + + # 8. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # predict the noise residual + # Also applies classifier-free guidance as described in the UniDiffuser paper + noise_pred = self._get_noise_pred( + mode, + latents, + t, + prompt_embeds, + image_vae_latents, + image_clip_latents, + max_timestep, + data_type, + guidance_scale, + generator, + device, + height, + width, + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 9. 
Post-processing + image = None + text = None + if mode == "joint": + image_vae_latents, image_clip_latents, text_latents = self._split_joint(latents, height, width) + + if not output_type == "latent": + # Map latent VAE image back to pixel space + image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = image_vae_latents + + text = self.decode_text_latents(text_latents, device) + elif mode in ["text2img", "img"]: + image_vae_latents, image_clip_latents = self._split(latents, height, width) + + if not output_type == "latent": + # Map latent VAE image back to pixel space + image = self.vae.decode(image_vae_latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = image_vae_latents + elif mode in ["img2text", "text"]: + text_latents = latents + text = self.decode_text_latents(text_latents, device) + + self.maybe_free_model_hooks() + + # 10. Postprocess the image, if necessary + if image is not None: + do_denormalize = [True] * image.shape[0] + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, text) + + return ImageTextPipelineOutput(images=image, text=text) diff --git a/diffusers3/pipelines/wuerstchen/__init__.py b/diffusers3/pipelines/wuerstchen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb852d1931558fe0948e81e16cf9a92fc2a114b --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/__init__.py @@ -0,0 +1,56 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["modeling_paella_vq_model"] = ["PaellaVQModel"] + _import_structure["modeling_wuerstchen_diffnext"] = ["WuerstchenDiffNeXt"] + _import_structure["modeling_wuerstchen_prior"] = ["WuerstchenPrior"] + _import_structure["pipeline_wuerstchen"] = ["WuerstchenDecoderPipeline"] + _import_structure["pipeline_wuerstchen_combined"] = ["WuerstchenCombinedPipeline"] + _import_structure["pipeline_wuerstchen_prior"] = ["DEFAULT_STAGE_C_TIMESTEPS", "WuerstchenPriorPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + else: + from .modeling_paella_vq_model import PaellaVQModel + from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt + from .modeling_wuerstchen_prior import WuerstchenPrior + from .pipeline_wuerstchen import WuerstchenDecoderPipeline + from .pipeline_wuerstchen_combined import WuerstchenCombinedPipeline + from .pipeline_wuerstchen_prior import DEFAULT_STAGE_C_TIMESTEPS, WuerstchenPriorPipeline +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + 
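+        # _LazyModule defers the actual submodule imports until an attribute is first accessed,
+        # so importing the package does not eagerly load the torch/transformers-backed pipelines.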
module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/pipelines/wuerstchen/modeling_paella_vq_model.py b/diffusers3/pipelines/wuerstchen/modeling_paella_vq_model.py new file mode 100644 index 0000000000000000000000000000000000000000..b2cf8cbc978c402e375d9fa038ede03a5cda4cf7 --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/modeling_paella_vq_model.py @@ -0,0 +1,172 @@ +# Copyright (c) 2022 Dominic Rampas MIT License +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.autoencoders.vae import DecoderOutput, VectorQuantizer +from ...models.modeling_utils import ModelMixin +from ...models.vq_model import VQEncoderOutput +from ...utils.accelerate_utils import apply_forward_hook + + +class MixingResidualBlock(nn.Module): + """ + Residual block with mixing used by Paella's VQ-VAE. + """ + + def __init__(self, inp_channels, embed_dim): + super().__init__() + # depthwise + self.norm1 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) + self.depthwise = nn.Sequential( + nn.ReplicationPad2d(1), nn.Conv2d(inp_channels, inp_channels, kernel_size=3, groups=inp_channels) + ) + + # channelwise + self.norm2 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + nn.Linear(inp_channels, embed_dim), nn.GELU(), nn.Linear(embed_dim, inp_channels) + ) + + self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True) + + def forward(self, x): + mods = self.gammas + x_temp = self.norm1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[0]) + mods[1] + x = x + self.depthwise(x_temp) * mods[2] + x_temp = self.norm2(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[3]) + mods[4] + x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] + return x + + +class PaellaVQModel(ModelMixin, ConfigMixin): + r"""VQ-VAE model from Paella model. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library + implements for all the model (such as downloading or saving, etc.) + + Parameters: + in_channels (int, *optional*, defaults to 3): Number of channels in the input image. + out_channels (int, *optional*, defaults to 3): Number of channels in the output. + up_down_scale_factor (int, *optional*, defaults to 2): Up and Downscale factor of the input image. + levels (int, *optional*, defaults to 2): Number of levels in the model. + bottleneck_blocks (int, *optional*, defaults to 12): Number of bottleneck blocks in the model. + embed_dim (int, *optional*, defaults to 384): Number of hidden channels in the model. + latent_channels (int, *optional*, defaults to 4): Number of latent channels in the VQ-VAE model. 
+ num_vq_embeddings (int, *optional*, defaults to 8192): Number of codebook vectors in the VQ-VAE. + scale_factor (float, *optional*, defaults to 0.3764): Scaling factor of the latent space. + """ + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + up_down_scale_factor: int = 2, + levels: int = 2, + bottleneck_blocks: int = 12, + embed_dim: int = 384, + latent_channels: int = 4, + num_vq_embeddings: int = 8192, + scale_factor: float = 0.3764, + ): + super().__init__() + + c_levels = [embed_dim // (2**i) for i in reversed(range(levels))] + # Encoder blocks + self.in_block = nn.Sequential( + nn.PixelUnshuffle(up_down_scale_factor), + nn.Conv2d(in_channels * up_down_scale_factor**2, c_levels[0], kernel_size=1), + ) + down_blocks = [] + for i in range(levels): + if i > 0: + down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) + block = MixingResidualBlock(c_levels[i], c_levels[i] * 4) + down_blocks.append(block) + down_blocks.append( + nn.Sequential( + nn.Conv2d(c_levels[-1], latent_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(latent_channels), # then normalize them to have mean 0 and std 1 + ) + ) + self.down_blocks = nn.Sequential(*down_blocks) + + # Vector Quantizer + self.vquantizer = VectorQuantizer(num_vq_embeddings, vq_embed_dim=latent_channels, legacy=False, beta=0.25) + + # Decoder blocks + up_blocks = [nn.Sequential(nn.Conv2d(latent_channels, c_levels[-1], kernel_size=1))] + for i in range(levels): + for j in range(bottleneck_blocks if i == 0 else 1): + block = MixingResidualBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) + up_blocks.append(block) + if i < levels - 1: + up_blocks.append( + nn.ConvTranspose2d( + c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1 + ) + ) + self.up_blocks = nn.Sequential(*up_blocks) + self.out_block = nn.Sequential( + nn.Conv2d(c_levels[0], out_channels * up_down_scale_factor**2, kernel_size=1), + nn.PixelShuffle(up_down_scale_factor), + ) + + @apply_forward_hook + def encode(self, x: torch.Tensor, return_dict: bool = True) -> VQEncoderOutput: + h = self.in_block(x) + h = self.down_blocks(h) + + if not return_dict: + return (h,) + + return VQEncoderOutput(latents=h) + + @apply_forward_hook + def decode( + self, h: torch.Tensor, force_not_quantize: bool = True, return_dict: bool = True + ) -> Union[DecoderOutput, torch.Tensor]: + if not force_not_quantize: + quant, _, _ = self.vquantizer(h) + else: + quant = h + + x = self.up_blocks(quant) + dec = self.out_block(x) + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + def forward(self, sample: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: + r""" + Args: + sample (`torch.Tensor`): Input sample. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. 
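+
+        Note: this is a full reconstruction pass. The input is encoded to latents and immediately
+        decoded back to pixel space; with the default `force_not_quantize=True` in `decode`, no
+        vector quantization is applied on the way back.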
+ """ + x = sample + h = self.encode(x).latents + dec = self.decode(h).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_common.py b/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_common.py new file mode 100644 index 0000000000000000000000000000000000000000..73e71b3076fbca259ae76138bc4ab3d3797e2755 --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_common.py @@ -0,0 +1,76 @@ +import torch +import torch.nn as nn + +from ...models.attention_processor import Attention + + +class WuerstchenLayerNorm(nn.LayerNorm): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x): + x = x.permute(0, 2, 3, 1) + x = super().forward(x) + return x.permute(0, 3, 1, 2) + + +class TimestepBlock(nn.Module): + def __init__(self, c, c_timestep): + super().__init__() + + self.mapper = nn.Linear(c_timestep, c * 2) + + def forward(self, x, t): + a, b = self.mapper(t)[:, :, None, None].chunk(2, dim=1) + return x * (1 + a) + b + + +class ResBlock(nn.Module): + def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): + super().__init__() + + self.depthwise = nn.Conv2d(c + c_skip, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) + self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + nn.Linear(c, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c) + ) + + def forward(self, x, x_skip=None): + x_res = x + if x_skip is not None: + x = torch.cat([x, x_skip], dim=1) + x = self.norm(self.depthwise(x)).permute(0, 2, 3, 1) + x = self.channelwise(x).permute(0, 3, 1, 2) + return x + x_res + + +# from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 +class GlobalResponseNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) + self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) + + def forward(self, x): + agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) + stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6) + return self.gamma * (x * stand_div_norm) + self.beta + x + + +class AttnBlock(nn.Module): + def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): + super().__init__() + + self.self_attn = self_attn + self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) + self.kv_mapper = nn.Sequential(nn.SiLU(), nn.Linear(c_cond, c)) + + def forward(self, x, kv): + kv = self.kv_mapper(kv) + norm_x = self.norm(x) + if self.self_attn: + batch_size, channel, _, _ = x.shape + kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) + x = x + self.attention(norm_x, encoder_hidden_states=kv) + return x diff --git a/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py b/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py new file mode 100644 index 0000000000000000000000000000000000000000..6c06cc0e7303f2ec72c6c2958b9f03eaba5f8c67 --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py @@ -0,0 +1,254 @@ +# Copyright (c) 2023 Dominic Rampas MIT License +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import numpy as np +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...models.modeling_utils import ModelMixin +from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm + + +class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): + @register_to_config + def __init__( + self, + c_in=4, + c_out=4, + c_r=64, + patch_size=2, + c_cond=1024, + c_hidden=[320, 640, 1280, 1280], + nhead=[-1, 10, 20, 20], + blocks=[4, 4, 14, 4], + level_config=["CT", "CTA", "CTA", "CTA"], + inject_effnet=[False, True, True, True], + effnet_embd=16, + clip_embd=1024, + kernel_size=3, + dropout=0.1, + ): + super().__init__() + self.c_r = c_r + self.c_cond = c_cond + if not isinstance(dropout, list): + dropout = [dropout] * len(c_hidden) + + # CONDITIONING + self.clip_mapper = nn.Linear(clip_embd, c_cond) + self.effnet_mappers = nn.ModuleList( + [ + nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None + for inject in inject_effnet + list(reversed(inject_effnet)) + ] + ) + self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) + + self.embedding = nn.Sequential( + nn.PixelUnshuffle(patch_size), + nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1), + WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), + ) + + def get_block(block_type, c_hidden, nhead, c_skip=0, dropout=0): + if block_type == "C": + return ResBlockStageB(c_hidden, c_skip, kernel_size=kernel_size, dropout=dropout) + elif block_type == "A": + return AttnBlock(c_hidden, c_cond, nhead, self_attn=True, dropout=dropout) + elif block_type == "T": + return TimestepBlock(c_hidden, c_r) + else: + raise ValueError(f"Block type {block_type} not supported") + + # BLOCKS + # -- down blocks + self.down_blocks = nn.ModuleList() + for i in range(len(c_hidden)): + down_block = nn.ModuleList() + if i > 0: + down_block.append( + nn.Sequential( + WuerstchenLayerNorm(c_hidden[i - 1], elementwise_affine=False, eps=1e-6), + nn.Conv2d(c_hidden[i - 1], c_hidden[i], kernel_size=2, stride=2), + ) + ) + for _ in range(blocks[i]): + for block_type in level_config[i]: + c_skip = c_cond if inject_effnet[i] else 0 + down_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) + self.down_blocks.append(down_block) + + # -- up blocks + self.up_blocks = nn.ModuleList() + for i in reversed(range(len(c_hidden))): + up_block = nn.ModuleList() + for j in range(blocks[i]): + for k, block_type in enumerate(level_config[i]): + c_skip = c_hidden[i] if i < len(c_hidden) - 1 and j == k == 0 else 0 + c_skip += c_cond if inject_effnet[i] else 0 + up_block.append(get_block(block_type, c_hidden[i], nhead[i], c_skip=c_skip, dropout=dropout[i])) + if i > 0: + up_block.append( + nn.Sequential( + WuerstchenLayerNorm(c_hidden[i], elementwise_affine=False, eps=1e-6), + nn.ConvTranspose2d(c_hidden[i], c_hidden[i - 1], kernel_size=2, stride=2), + ) 
+ ) + self.up_blocks.append(up_block) + + # OUTPUT + self.clf = nn.Sequential( + WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6), + nn.Conv2d(c_hidden[0], 2 * c_out * (patch_size**2), kernel_size=1), + nn.PixelShuffle(patch_size), + ) + + # --- WEIGHT INIT --- + self.apply(self._init_weights) + + def _init_weights(self, m): + # General init + if isinstance(m, (nn.Conv2d, nn.Linear)): + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + for mapper in self.effnet_mappers: + if mapper is not None: + nn.init.normal_(mapper.weight, std=0.02) # conditionings + nn.init.normal_(self.clip_mapper.weight, std=0.02) # conditionings + nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs + nn.init.constant_(self.clf[1].weight, 0) # outputs + + # blocks + for level_block in self.down_blocks + self.up_blocks: + for block in level_block: + if isinstance(block, ResBlockStageB): + block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks)) + elif isinstance(block, TimestepBlock): + nn.init.constant_(block.mapper.weight, 0) + + def gen_r_embedding(self, r, max_positions=10000): + r = r * max_positions + half_dim = self.c_r // 2 + emb = math.log(max_positions) / (half_dim - 1) + emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() + emb = r[:, None] * emb[None, :] + emb = torch.cat([emb.sin(), emb.cos()], dim=1) + if self.c_r % 2 == 1: # zero pad + emb = nn.functional.pad(emb, (0, 1), mode="constant") + return emb.to(dtype=r.dtype) + + def gen_c_embeddings(self, clip): + clip = self.clip_mapper(clip) + clip = self.seq_norm(clip) + return clip + + def _down_encode(self, x, r_embed, effnet, clip=None): + level_outputs = [] + for i, down_block in enumerate(self.down_blocks): + effnet_c = None + for block in down_block: + if isinstance(block, ResBlockStageB): + if effnet_c is None and self.effnet_mappers[i] is not None: + dtype = effnet.dtype + effnet_c = self.effnet_mappers[i]( + nn.functional.interpolate( + effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True + ).to(dtype) + ) + skip = effnet_c if self.effnet_mappers[i] is not None else None + x = block(x, skip) + elif isinstance(block, AttnBlock): + x = block(x, clip) + elif isinstance(block, TimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + level_outputs.insert(0, x) + return level_outputs + + def _up_decode(self, level_outputs, r_embed, effnet, clip=None): + x = level_outputs[0] + for i, up_block in enumerate(self.up_blocks): + effnet_c = None + for j, block in enumerate(up_block): + if isinstance(block, ResBlockStageB): + if effnet_c is None and self.effnet_mappers[len(self.down_blocks) + i] is not None: + dtype = effnet.dtype + effnet_c = self.effnet_mappers[len(self.down_blocks) + i]( + nn.functional.interpolate( + effnet.float(), size=x.shape[-2:], mode="bicubic", antialias=True, align_corners=True + ).to(dtype) + ) + skip = level_outputs[i] if j == 0 and i > 0 else None + if effnet_c is not None: + if skip is not None: + skip = torch.cat([skip, effnet_c], dim=1) + else: + skip = effnet_c + x = block(x, skip) + elif isinstance(block, AttnBlock): + x = block(x, clip) + elif isinstance(block, TimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + return x + + def forward(self, x, r, effnet, clip=None, x_cat=None, eps=1e-3, return_noise=True): + if x_cat is not None: + x = torch.cat([x, x_cat], dim=1) + # Process the conditioning embeddings + r_embed = self.gen_r_embedding(r) + if clip is not 
None: + clip = self.gen_c_embeddings(clip) + + # Model Blocks + x_in = x + x = self.embedding(x) + level_outputs = self._down_encode(x, r_embed, effnet, clip) + x = self._up_decode(level_outputs, r_embed, effnet, clip) + a, b = self.clf(x).chunk(2, dim=1) + b = b.sigmoid() * (1 - eps * 2) + eps + if return_noise: + return (x_in - a) / b + else: + return a, b + + +class ResBlockStageB(nn.Module): + def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): + super().__init__() + self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) + self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6) + self.channelwise = nn.Sequential( + nn.Linear(c + c_skip, c * 4), + nn.GELU(), + GlobalResponseNorm(c * 4), + nn.Dropout(dropout), + nn.Linear(c * 4, c), + ) + + def forward(self, x, x_skip=None): + x_res = x + x = self.norm(self.depthwise(x)) + if x_skip is not None: + x = torch.cat([x, x_skip], dim=1) + x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + x_res diff --git a/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_prior.py b/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..edb0c1ec45dee08ced9606f204f61d6f9a53dada --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/modeling_wuerstchen_prior.py @@ -0,0 +1,198 @@ +# Copyright (c) 2023 Dominic Rampas MIT License +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
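+
+# WuerstchenPrior is a stack of ResBlock/TimestepBlock/AttnBlock layers that denoises the compressed
+# image-embedding latents (`c_in` channels, 16 by default), conditioned on text embeddings projected
+# through `cond_mapper` (`c_cond`, 1024 by default). The forward pass returns the denoised estimate
+# (x_in - a) / ((1 - b).abs() + 1e-5), where `a` and `b` are the two channel halves of the output head.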
+ +import math +from typing import Dict, Union + +import torch +import torch.nn as nn + +from ...configuration_utils import ConfigMixin, register_to_config +from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin +from ...models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from ...models.modeling_utils import ModelMixin +from ...utils import is_torch_version +from .modeling_wuerstchen_common import AttnBlock, ResBlock, TimestepBlock, WuerstchenLayerNorm + + +class WuerstchenPrior(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin): + unet_name = "prior" + _supports_gradient_checkpointing = True + + @register_to_config + def __init__(self, c_in=16, c=1280, c_cond=1024, c_r=64, depth=16, nhead=16, dropout=0.1): + super().__init__() + + self.c_r = c_r + self.projection = nn.Conv2d(c_in, c, kernel_size=1) + self.cond_mapper = nn.Sequential( + nn.Linear(c_cond, c), + nn.LeakyReLU(0.2), + nn.Linear(c, c), + ) + + self.blocks = nn.ModuleList() + for _ in range(depth): + self.blocks.append(ResBlock(c, dropout=dropout)) + self.blocks.append(TimestepBlock(c, c_r)) + self.blocks.append(AttnBlock(c, c, nhead, self_attn=True, dropout=dropout)) + self.out = nn.Sequential( + WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6), + nn.Conv2d(c, c_in * 2, kernel_size=1), + ) + + self.gradient_checkpointing = False + self.set_default_attn_processor() + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor() + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
+ ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor) + + def _set_gradient_checkpointing(self, module, value=False): + self.gradient_checkpointing = value + + def gen_r_embedding(self, r, max_positions=10000): + r = r * max_positions + half_dim = self.c_r // 2 + emb = math.log(max_positions) / (half_dim - 1) + emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() + emb = r[:, None] * emb[None, :] + emb = torch.cat([emb.sin(), emb.cos()], dim=1) + if self.c_r % 2 == 1: # zero pad + emb = nn.functional.pad(emb, (0, 1), mode="constant") + return emb.to(dtype=r.dtype) + + def forward(self, x, r, c): + x_in = x + x = self.projection(x) + c_embed = self.cond_mapper(c) + r_embed = self.gen_r_embedding(r) + + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + for block in self.blocks: + if isinstance(block, AttnBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, c_embed, use_reentrant=False + ) + elif isinstance(block, TimestepBlock): + x = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), x, r_embed, use_reentrant=False + ) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) + else: + for block in self.blocks: + if isinstance(block, AttnBlock): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, c_embed) + elif isinstance(block, TimestepBlock): + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, r_embed) + else: + x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x) + else: + for block in self.blocks: + if isinstance(block, AttnBlock): + x = block(x, c_embed) + elif isinstance(block, TimestepBlock): + x = block(x, r_embed) + else: + x = block(x) + a, b = self.out(x).chunk(2, dim=1) + return (x_in - a) / ((1 - b).abs() + 1e-5) diff --git a/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen.py b/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen.py new file mode 100644 index 0000000000000000000000000000000000000000..b08421415b235bc3b4876e7a12149f946f4e5c06 --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen.py @@ -0,0 +1,438 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import deprecate, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from .modeling_paella_vq_model import PaellaVQModel +from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline + + >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained( + ... "warp-ai/wuerstchen-prior", torch_dtype=torch.float16 + ... ).to("cuda") + >>> gen_pipe = WuerstchenDecoderPipeline.from_pretrain("warp-ai/wuerstchen", torch_dtype=torch.float16).to( + ... "cuda" + ... ) + + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> prior_output = pipe(prompt) + >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt) + ``` +""" + + +class WuerstchenDecoderPipeline(DiffusionPipeline): + """ + Pipeline for generating images from the Wuerstchen model. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The CLIP tokenizer. + text_encoder (`CLIPTextModel`): + The CLIP text encoder. + decoder ([`WuerstchenDiffNeXt`]): + The WuerstchenDiffNeXt unet decoder. + vqgan ([`PaellaVQModel`]): + The VQGAN model. + scheduler ([`DDPMWuerstchenScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + latent_dim_scale (float, `optional`, defaults to 10.67): + Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are + height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and + width=int(24*10.67)=256 in order to match the training conditions. 
+ """ + + model_cpu_offload_seq = "text_encoder->decoder->vqgan" + _callback_tensor_inputs = [ + "latents", + "text_encoder_hidden_states", + "negative_prompt_embeds", + "image_embeddings", + ] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + decoder: WuerstchenDiffNeXt, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + latent_dim_scale: float = 10.67, + ) -> None: + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + ) + self.register_to_config(latent_dim_scale=latent_dim_scale) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + prompt, + device, + num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + ): + batch_size = len(prompt) if isinstance(prompt, list) else 1 + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) + text_encoder_hidden_states = text_encoder_output.last_hidden_state + text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + + uncond_text_encoder_hidden_states = None + if do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) + ) + + uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = uncond_text_encoder_hidden_states.shape[1] + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) + uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + # done duplicates + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + return text_encoder_hidden_states, uncond_text_encoder_hidden_states + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + image_embeddings: Union[torch.Tensor, List[torch.Tensor]], + prompt: Union[str, List[str]] = None, + num_inference_steps: int = 12, + timesteps: Optional[List[float]] = None, + guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + image_embedding (`torch.Tensor` or `List[torch.Tensor]`): + Image Embeddings either extracted from an image or generated by a Prior Model. + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + num_inference_steps (`int`, *optional*, defaults to 12): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). 
+ num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image + embeddings. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + # 0. Define commonly used variables + device = self._execution_device + dtype = self.decoder.dtype + self._guidance_scale = guidance_scale + + # 1. Check inputs. Raise error if not correct + if not isinstance(prompt, list): + if isinstance(prompt, str): + prompt = [prompt] + else: + raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") + + if self.do_classifier_free_guidance: + if negative_prompt is not None and not isinstance(negative_prompt, list): + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + else: + raise TypeError( + f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}." 
+ ) + + if isinstance(image_embeddings, list): + image_embeddings = torch.cat(image_embeddings, dim=0) + if isinstance(image_embeddings, np.ndarray): + image_embeddings = torch.Tensor(image_embeddings, device=device).to(dtype=dtype) + if not isinstance(image_embeddings, torch.Tensor): + raise TypeError( + f"'image_embeddings' must be of type 'torch.Tensor' or 'np.array', but got {type(image_embeddings)}." + ) + + if not isinstance(num_inference_steps, int): + raise TypeError( + f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ + In Case you want to provide explicit timesteps, please use the 'timesteps' argument." + ) + + # 2. Encode caption + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt, + device, + image_embeddings.size(0) * num_images_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + ) + text_encoder_hidden_states = ( + torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds + ) + effnet = ( + torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) + if self.do_classifier_free_guidance + else image_embeddings + ) + + # 3. Determine latent shape of latents + latent_height = int(image_embeddings.size(2) * self.config.latent_dim_scale) + latent_width = int(image_embeddings.size(3) * self.config.latent_dim_scale) + latent_features_shape = (image_embeddings.size(0) * num_images_per_prompt, 4, latent_height, latent_width) + + # 4. Prepare and set timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents(latent_features_shape, dtype, device, generator, latents, self.scheduler) + + # 6. Run denoising loop + self._num_timesteps = len(timesteps[:-1]) + for i, t in enumerate(self.progress_bar(timesteps[:-1])): + ratio = t.expand(latents.size(0)).to(dtype) + # 7. Denoise latents + predicted_latents = self.decoder( + torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, + effnet=effnet, + clip=text_encoder_hidden_states, + ) + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) + predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) + + # 9. 
Renoise latents to next timestep + latents = self.scheduler.step( + model_output=predicted_latents, + timestep=ratio, + sample=latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings) + text_encoder_hidden_states = callback_outputs.pop( + "text_encoder_hidden_states", text_encoder_hidden_states + ) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if output_type not in ["pt", "np", "pil", "latent"]: + raise ValueError( + f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" + ) + + if not output_type == "latent": + # 10. Scale and decode the image latents with vq-vae + latents = self.vqgan.config.scale_factor * latents + images = self.vqgan.decode(latents).sample.clamp(0, 1) + if output_type == "np": + images = images.permute(0, 2, 3, 1).cpu().float().numpy() + elif output_type == "pil": + images = images.permute(0, 2, 3, 1).cpu().float().numpy() + images = self.numpy_to_pil(images) + else: + images = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return images + return ImagePipelineOutput(images) diff --git a/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..7819c8c0a0efb6308c46b8c9f15b538a7e5ece34 --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen_combined.py @@ -0,0 +1,306 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Callable, Dict, List, Optional, Union + +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import deprecate, replace_example_docstring +from ..pipeline_utils import DiffusionPipeline +from .modeling_paella_vq_model import PaellaVQModel +from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt +from .modeling_wuerstchen_prior import WuerstchenPrior +from .pipeline_wuerstchen import WuerstchenDecoderPipeline +from .pipeline_wuerstchen_prior import WuerstchenPriorPipeline + + +TEXT2IMAGE_EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from diffusions import WuerstchenCombinedPipeline + + >>> pipe = WuerstchenCombinedPipeline.from_pretrained("warp-ai/Wuerstchen", torch_dtype=torch.float16).to( + ... "cuda" + ... 
) + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> images = pipe(prompt=prompt) + ``` +""" + + +class WuerstchenCombinedPipeline(DiffusionPipeline): + """ + Combined Pipeline for text-to-image generation using Wuerstchen + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + tokenizer (`CLIPTokenizer`): + The decoder tokenizer to be used for text inputs. + text_encoder (`CLIPTextModel`): + The decoder text encoder to be used for text inputs. + decoder (`WuerstchenDiffNeXt`): + The decoder model to be used for decoder image generation pipeline. + scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for decoder image generation pipeline. + vqgan (`PaellaVQModel`): + The VQGAN model to be used for decoder image generation pipeline. + prior_tokenizer (`CLIPTokenizer`): + The prior tokenizer to be used for text inputs. + prior_text_encoder (`CLIPTextModel`): + The prior text encoder to be used for text inputs. + prior_prior (`WuerstchenPrior`): + The prior model to be used for prior pipeline. + prior_scheduler (`DDPMWuerstchenScheduler`): + The scheduler to be used for prior pipeline. + """ + + _load_connected_pipes = True + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + decoder: WuerstchenDiffNeXt, + scheduler: DDPMWuerstchenScheduler, + vqgan: PaellaVQModel, + prior_tokenizer: CLIPTokenizer, + prior_text_encoder: CLIPTextModel, + prior_prior: WuerstchenPrior, + prior_scheduler: DDPMWuerstchenScheduler, + ): + super().__init__() + + self.register_modules( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + prior_prior=prior_prior, + prior_text_encoder=prior_text_encoder, + prior_tokenizer=prior_tokenizer, + prior_scheduler=prior_scheduler, + ) + self.prior_pipe = WuerstchenPriorPipeline( + prior=prior_prior, + text_encoder=prior_text_encoder, + tokenizer=prior_tokenizer, + scheduler=prior_scheduler, + ) + self.decoder_pipe = WuerstchenDecoderPipeline( + text_encoder=text_encoder, + tokenizer=tokenizer, + decoder=decoder, + scheduler=scheduler, + vqgan=vqgan, + ) + + def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): + self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) + + def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared + to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` + method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with + `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. + """ + self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) + + def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): + r""" + Offloads all models (`unet`, `text_encoder`, `vae`, and `safety checker` state dicts) to CPU using ๐Ÿค— + Accelerate, significantly reducing memory usage. 
Models are moved to a `torch.device('meta')` and loaded on a + GPU only when their specific submodule's `forward` method is called. Offloading happens on a submodule basis. + Memory savings are higher than using `enable_model_cpu_offload`, but performance is lower. + """ + self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) + + def progress_bar(self, iterable=None, total=None): + self.prior_pipe.progress_bar(iterable=iterable, total=total) + self.decoder_pipe.progress_bar(iterable=iterable, total=total) + + def set_progress_bar_config(self, **kwargs): + self.prior_pipe.set_progress_bar_config(**kwargs) + self.decoder_pipe.set_progress_bar_config(**kwargs) + + @torch.no_grad() + @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + height: int = 512, + width: int = 512, + prior_num_inference_steps: int = 60, + prior_timesteps: Optional[List[float]] = None, + prior_guidance_scale: float = 4.0, + num_inference_steps: int = 12, + decoder_timesteps: Optional[List[float]] = None, + decoder_guidance_scale: float = 0.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + num_images_per_prompt: int = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation for the prior and decoder. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings for the prior. Can be used to easily tweak text inputs, *e.g.* + prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` + input argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to 512): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 512): + The width in pixels of the generated image. + prior_guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `prior_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `prior_guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked + to the text `prompt`, usually at the expense of lower image quality. + prior_num_inference_steps (`Union[int, Dict[float, int]]`, *optional*, defaults to 60): + The number of prior denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. For more specific timestep spacing, you can pass customized + `prior_timesteps` + num_inference_steps (`int`, *optional*, defaults to 12): + The number of decoder denoising steps. More denoising steps usually lead to a higher quality image at + the expense of slower inference. For more specific timestep spacing, you can pass customized + `timesteps` + prior_timesteps (`List[float]`, *optional*): + Custom timesteps to use for the denoising process for the prior. If not defined, equal spaced + `prior_num_inference_steps` timesteps are used. Must be in descending order. + decoder_timesteps (`List[float]`, *optional*): + Custom timesteps to use for the denoising process for the decoder. If not defined, equal spaced + `num_inference_steps` timesteps are used. Must be in descending order. + decoder_guidance_scale (`float`, *optional*, defaults to 0.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + prior_callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: + int, callback_kwargs: Dict)`. + prior_callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the + list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in + the `._callback_tensor_inputs` attribute of your pipeline class. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + prior_kwargs = {} + if kwargs.get("prior_callback", None) is not None: + prior_kwargs["callback"] = kwargs.pop("prior_callback") + deprecate( + "prior_callback", + "1.0.0", + "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + if kwargs.get("prior_callback_steps", None) is not None: + deprecate( + "prior_callback_steps", + "1.0.0", + "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", + ) + prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps") + + prior_outputs = self.prior_pipe( + prompt=prompt if prompt_embeds is None else None, + height=height, + width=width, + num_inference_steps=prior_num_inference_steps, + timesteps=prior_timesteps, + guidance_scale=prior_guidance_scale, + negative_prompt=negative_prompt if negative_prompt_embeds is None else None, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + num_images_per_prompt=num_images_per_prompt, + generator=generator, + latents=latents, + output_type="pt", + return_dict=False, + callback_on_step_end=prior_callback_on_step_end, + callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, + **prior_kwargs, + ) + image_embeddings = prior_outputs[0] + + outputs = self.decoder_pipe( + image_embeddings=image_embeddings, + prompt=prompt if prompt is not None else "", + num_inference_steps=num_inference_steps, + timesteps=decoder_timesteps, + guidance_scale=decoder_guidance_scale, + negative_prompt=negative_prompt, + generator=generator, + output_type=output_type, + return_dict=return_dict, + callback_on_step_end=callback_on_step_end, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + **kwargs, + ) + + return outputs diff --git a/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..92223ce993a635f6603a6429305dfe2a67059321 --- /dev/null +++ b/diffusers3/pipelines/wuerstchen/pipeline_wuerstchen_prior.py @@ -0,0 +1,517 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
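# --- Illustrative usage sketch (editorial note, not part of the patch) ---------------
# WuerstchenCombinedPipeline.__call__ above chains two stages: the prior turns text into
# image embeddings, and the decoder (together with the VQGAN) turns those embeddings
# into pixels. A minimal sketch of doing that handoff manually with the two standalone
# pipelines is shown below. Assumptions: the model ids are the ones quoted in the
# example docstrings of these files, the import path is the upstream `diffusers`
# package (not the vendored `diffusers3` copy), and fp16 on CUDA plus the guidance
# scales shown are illustrative defaults, not values fixed by this patch.
import torch
from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline

prior_pipe = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
).to("cuda")
decoder_pipe = WuerstchenDecoderPipeline.from_pretrained(
    "warp-ai/wuerstchen", torch_dtype=torch.float16
).to("cuda")

prompt = "an image of a shiba inu, donning a spacesuit and helmet"

# Stage C (prior): text -> image embeddings; output_type defaults to "pt",
# so the result is a WuerstchenPriorPipelineOutput with an `image_embeddings` tensor.
prior_output = prior_pipe(prompt=prompt, height=1024, width=1024, guidance_scale=4.0)

# Stage B (decoder) + VQGAN: image embeddings -> PIL images.
images = decoder_pipe(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    guidance_scale=0.0,
).images
images[0].save("shiba_astronaut.png")
# --------------------------------------------------------------------------------------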
+ +from dataclasses import dataclass +from math import ceil +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer + +from ...loaders import StableDiffusionLoraLoaderMixin +from ...schedulers import DDPMWuerstchenScheduler +from ...utils import BaseOutput, deprecate, logging, replace_example_docstring +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .modeling_wuerstchen_prior import WuerstchenPrior + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +DEFAULT_STAGE_C_TIMESTEPS = list(np.linspace(1.0, 2 / 3, 20)) + list(np.linspace(2 / 3, 0.0, 11))[1:] + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import WuerstchenPriorPipeline + + >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained( + ... "warp-ai/wuerstchen-prior", torch_dtype=torch.float16 + ... ).to("cuda") + + >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" + >>> prior_output = pipe(prompt) + ``` +""" + + +@dataclass +class WuerstchenPriorPipelineOutput(BaseOutput): + """ + Output class for WuerstchenPriorPipeline. + + Args: + image_embeddings (`torch.Tensor` or `np.ndarray`) + Prior image embeddings for text prompt + + """ + + image_embeddings: Union[torch.Tensor, np.ndarray] + + +class WuerstchenPriorPipeline(DiffusionPipeline, StableDiffusionLoraLoaderMixin): + """ + Pipeline for generating image prior for Wuerstchen. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + The pipeline also inherits the following loading methods: + - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights + - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights + + Args: + prior ([`Prior`]): + The canonical unCLIP prior to approximate the image embedding from the text embedding. + text_encoder ([`CLIPTextModelWithProjection`]): + Frozen text-encoder. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + scheduler ([`DDPMWuerstchenScheduler`]): + A scheduler to be used in combination with `prior` to generate image embedding. + latent_mean ('float', *optional*, defaults to 42.0): + Mean value for latent diffusers. + latent_std ('float', *optional*, defaults to 1.0): + Standard value for latent diffusers. + resolution_multiple ('float', *optional*, defaults to 42.67): + Default resolution for multiple images generated. 
+ """ + + unet_name = "prior" + text_encoder_name = "text_encoder" + model_cpu_offload_seq = "text_encoder->prior" + _callback_tensor_inputs = ["latents", "text_encoder_hidden_states", "negative_prompt_embeds"] + _lora_loadable_modules = ["prior", "text_encoder"] + + def __init__( + self, + tokenizer: CLIPTokenizer, + text_encoder: CLIPTextModel, + prior: WuerstchenPrior, + scheduler: DDPMWuerstchenScheduler, + latent_mean: float = 42.0, + latent_std: float = 1.0, + resolution_multiple: float = 42.67, + ) -> None: + super().__init__() + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + prior=prior, + scheduler=scheduler, + ) + self.register_to_config( + latent_mean=latent_mean, latent_std=latent_std, resolution_multiple=resolution_multiple + ) + + # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): + if latents is None: + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + else: + if latents.shape != shape: + raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") + latents = latents.to(device) + + latents = latents * scheduler.init_noise_sigma + return latents + + def encode_prompt( + self, + device, + num_images_per_prompt, + do_classifier_free_guidance, + prompt=None, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + ): + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # get prompt text embeddings + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( + text_input_ids, untruncated_ids + ): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer.model_max_length} tokens: {removed_text}" + ) + text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] + attention_mask = attention_mask[:, : self.tokenizer.model_max_length] + + text_encoder_output = self.text_encoder( + text_input_ids.to(device), attention_mask=attention_mask.to(device) + ) + prompt_embeds = text_encoder_output.last_hidden_state + + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) + + if negative_prompt_embeds is None and do_classifier_free_guidance: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." 
+ ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." + ) + else: + uncond_tokens = negative_prompt + + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds_text_encoder_output = self.text_encoder( + uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) + ) + + negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.last_hidden_state + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) + negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + # done duplicates + + return prompt_embeds, negative_prompt_embeds + + def check_inputs( + self, + prompt, + negative_prompt, + num_inference_steps, + do_classifier_free_guidance, + prompt_embeds=None, + negative_prompt_embeds=None, + ): + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + if not isinstance(num_inference_steps, int): + raise TypeError( + f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ + In Case you want to provide explicit timesteps, please use the 'timesteps' argument." 
+ ) + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def do_classifier_free_guidance(self): + return self._guidance_scale > 1 + + @property + def num_timesteps(self): + return self._num_timesteps + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Optional[Union[str, List[str]]] = None, + height: int = 1024, + width: int = 1024, + num_inference_steps: int = 60, + timesteps: List[float] = None, + guidance_scale: float = 8.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + prompt_embeds: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pt", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to 1024): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to 1024): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 60): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 8.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting + `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely + linked to the text `prompt`, usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `decoder_guidance_scale` is less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` + (`np.array`) or `"pt"` (`torch.Tensor`). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. + + Examples: + + Returns: + [`~pipelines.WuerstchenPriorPipelineOutput`] or `tuple` [`~pipelines.WuerstchenPriorPipelineOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the + generated image embeddings. + """ + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + # 0. Define commonly used variables + device = self._execution_device + self._guidance_scale = guidance_scale + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # 1. Check inputs. Raise error if not correct + if prompt is not None and not isinstance(prompt, list): + if isinstance(prompt, str): + prompt = [prompt] + else: + raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") + + if self.do_classifier_free_guidance: + if negative_prompt is not None and not isinstance(negative_prompt, list): + if isinstance(negative_prompt, str): + negative_prompt = [negative_prompt] + else: + raise TypeError( + f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}." + ) + + self.check_inputs( + prompt, + negative_prompt, + num_inference_steps, + self.do_classifier_free_guidance, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # 2. 
Encode caption + prompt_embeds, negative_prompt_embeds = self.encode_prompt( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + ) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_encoder_hidden_states = ( + torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds + ) + + # 3. Determine latent shape of image embeddings + dtype = text_encoder_hidden_states.dtype + latent_height = ceil(height / self.config.resolution_multiple) + latent_width = ceil(width / self.config.resolution_multiple) + num_channels = self.prior.config.c_in + effnet_features_shape = (num_images_per_prompt * batch_size, num_channels, latent_height, latent_width) + + # 4. Prepare and set timesteps + if timesteps is not None: + self.scheduler.set_timesteps(timesteps=timesteps, device=device) + timesteps = self.scheduler.timesteps + num_inference_steps = len(timesteps) + else: + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latents + latents = self.prepare_latents(effnet_features_shape, dtype, device, generator, latents, self.scheduler) + + # 6. Run denoising loop + self._num_timesteps = len(timesteps[:-1]) + for i, t in enumerate(self.progress_bar(timesteps[:-1])): + ratio = t.expand(latents.size(0)).to(dtype) + + # 7. Denoise image embeddings + predicted_image_embedding = self.prior( + torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, + r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, + c=text_encoder_hidden_states, + ) + + # 8. Check for classifier free guidance and apply it + if self.do_classifier_free_guidance: + predicted_image_embedding_text, predicted_image_embedding_uncond = predicted_image_embedding.chunk(2) + predicted_image_embedding = torch.lerp( + predicted_image_embedding_uncond, predicted_image_embedding_text, self.guidance_scale + ) + + # 9. Renoise latents to next timestep + latents = self.scheduler.step( + model_output=predicted_image_embedding, + timestep=ratio, + sample=latents, + generator=generator, + ).prev_sample + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + text_encoder_hidden_states = callback_outputs.pop( + "text_encoder_hidden_states", text_encoder_hidden_states + ) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + # 10. 
Denormalize the latents + latents = latents * self.config.latent_mean - self.config.latent_std + + # Offload all models + self.maybe_free_model_hooks() + + if output_type == "np": + latents = latents.cpu().float().numpy() + + if not return_dict: + return (latents,) + + return WuerstchenPriorPipelineOutput(latents) diff --git a/diffusers3/py.typed b/diffusers3/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/diffusers3/schedulers/.ipynb_checkpoints/scheduling_euler_discrete-checkpoint.py b/diffusers3/schedulers/.ipynb_checkpoints/scheduling_euler_discrete-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..46e0e6baef811bceba8ee1021592fc3a5c8d8688 --- /dev/null +++ b/diffusers3/schedulers/.ipynb_checkpoints/scheduling_euler_discrete-checkpoint.py @@ -0,0 +1,672 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete +class EulerDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Euler scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + interpolation_type(`str`, defaults to `"linear"`, *optional*): + The interpolation type to compute intermediate sigmas for the scheduler denoising steps. Should be on of + `"linear"` or `"log_linear"`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. 
If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + interpolation_type: str = "linear", + use_karras_sigmas: Optional[bool] = False, + sigma_min: Optional[float] = None, + sigma_max: Optional[float] = None, + timestep_spacing: str = "linspace", + timestep_type: str = "discrete", # can be "discrete" or "continuous" + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + final_sigmas_type: str = "zero", # can be "zero" or "sigma_min" + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + sigmas = (((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5).flip(0) + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() + timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) + + # setable values + self.num_inference_steps = None + + # TODO: Support the full EDM scalings for all prediction types and timestep types + if timestep_type == "continuous" and prediction_type == "v_prediction": + self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas]) + else: + self.timesteps = timesteps + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + self.is_scale_input_called = False + self.use_karras_sigmas = use_karras_sigmas + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + max_sigma = max(self.sigmas) if isinstance(self.sigmas, list) else self.sigmas.max() + if self.config.timestep_spacing in ["linspace", "trailing"]: + return max_sigma + + return (max_sigma**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. 
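+
+        Note:
+            Pipelines call this on the latent input right before the UNet forward pass (e.g.
+            `latent_model_input = scheduler.scale_model_input(latent_model_input, t)`); `step` logs a warning if the
+            input was never scaled this way.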
+        """
+        if self.step_index is None:
+            self._init_step_index(timestep)
+
+        sigma = self.sigmas[self.step_index]
+        sample = sample / ((sigma**2 + 1) ** 0.5)
+
+        self.is_scale_input_called = True
+        return sample
+
+    def set_timesteps(
+        self,
+        num_inference_steps: int = None,
+        device: Union[str, torch.device] = None,
+        timesteps: Optional[List[int]] = None,
+        sigmas: Optional[List[float]] = None,
+    ):
+        """
+        Sets the discrete timesteps used for the diffusion chain (to be run before inference).
+
+        Args:
+            num_inference_steps (`int`):
+                The number of diffusion steps used when generating samples with a pre-trained model.
+            device (`str` or `torch.device`, *optional*):
+                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps used to support an arbitrary timestep schedule. If `None`, timesteps will be generated
+                based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` and `sigmas`
+                must be `None`, and the `timestep_spacing` attribute will be ignored.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas used to support an arbitrary timestep schedule. If `None`, timesteps and sigmas
+                will be generated based on the relevant scheduler attributes. If `sigmas` is passed,
+                `num_inference_steps` and `timesteps` must be `None`, and the timesteps will be generated based on the
+                custom sigmas schedule.
+        """
+
+        if timesteps is not None and sigmas is not None:
+            raise ValueError("Only one of `timesteps` or `sigmas` should be set.")
+        if num_inference_steps is None and timesteps is None and sigmas is None:
+            raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps` or `sigmas`.")
+        if num_inference_steps is not None and (timesteps is not None or sigmas is not None):
+            raise ValueError("Can only pass one of `num_inference_steps` or `timesteps` or `sigmas`.")
+        if timesteps is not None and self.config.use_karras_sigmas:
+            raise ValueError("Cannot set `timesteps` with `config.use_karras_sigmas = True`.")
+        if (
+            timesteps is not None
+            and self.config.timestep_type == "continuous"
+            and self.config.prediction_type == "v_prediction"
+        ):
+            raise ValueError(
+                "Cannot set `timesteps` with `config.timestep_type = 'continuous'` and `config.prediction_type = 'v_prediction'`."
+            )
+
+        if num_inference_steps is None:
+            num_inference_steps = len(timesteps) if timesteps is not None else len(sigmas) - 1
+        self.num_inference_steps = num_inference_steps
+
+        if sigmas is not None:
+            log_sigmas = np.log(np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5))
+            sigmas = np.array(sigmas).astype(np.float32)
+            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas[:-1]])
+
+        else:
+            if timesteps is not None:
+                timesteps = np.array(timesteps).astype(np.float32)
+            else:
+                # "linspace", "leading", "trailing" corresponds to annotation of Table 2.
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace( + 0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32 + )[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + ) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + ) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + if self.config.interpolation_type == "linear": + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + elif self.config.interpolation_type == "log_linear": + sigmas = torch.linspace(np.log(sigmas[-1]), np.log(sigmas[0]), num_inference_steps + 1).exp().numpy() + else: + raise ValueError( + f"{self.config.interpolation_type} is not implemented. Please specify interpolation_type to either" + " 'linear' or 'log_linear'" + ) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + + # TODO: Support the full EDM scalings for all prediction types and timestep types + if self.config.timestep_type == "continuous" and self.config.prediction_type == "v_prediction": + self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas[:-1]]).to(device=device) + else: + self.timesteps = torch.from_numpy(timesteps.astype(np.float32)).to(device=device) + + self._step_index = None + self._begin_index = None + self.sigmas = sigmas.to("cpu") # to avoid too much CPU/GPU communication + + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from 
https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17 + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EulerDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. 
Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + sigma = self.sigmas[self.step_index] + + gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 + + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + + eps = noise * s_noise + sigma_hat = sigma * (gamma + 1) + + if gamma > 0: + sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + # NOTE: "original_sample" should not be an expected prediction_type but is left in for + # backwards compatibility + if self.config.prediction_type == "original_sample" or self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma_hat * model_output + elif self.config.prediction_type == "v_prediction": + # denoised = model_output * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma_hat + + dt = self.sigmas[self.step_index + 1] - sigma_hat + + prev_sample = sample + derivative * dt + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EulerDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = 
sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: + if ( + isinstance(timesteps, int) + or isinstance(timesteps, torch.IntTensor) + or isinstance(timesteps, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.get_velocity()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if sample.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32) + timesteps = timesteps.to(sample.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(sample.device) + timesteps = timesteps.to(sample.device) + + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + alphas_cumprod = self.alphas_cumprod.to(sample) + sqrt_alpha_prod = alphas_cumprod[step_indices] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[step_indices]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/.ipynb_checkpoints/scheduling_utils-checkpoint.py b/diffusers3/schedulers/.ipynb_checkpoints/scheduling_utils-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..f20224b19009eed6c7e34cd308f4978210f5c466 --- /dev/null +++ b/diffusers3/schedulers/.ipynb_checkpoints/scheduling_utils-checkpoint.py @@ -0,0 +1,193 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional, Union + +import torch +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import BaseOutput, PushToHubMixin + + +SCHEDULER_CONFIG_NAME = "scheduler_config.json" + + +# NOTE: We make this type an enum because it simplifies usage in docs and prevents +# circular imports when used for `_compatibles` within the schedulers module. +# When it's used as a type in pipelines, it really is a Union because the actual +# scheduler instance is passed in. 
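+# Concrete schedulers typically reference this enum through `_compatibles = [e.name for e in KarrasDiffusionSchedulers]`,
+# so only the member names are consumed; the integer values just keep the members distinct.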
+class KarrasDiffusionSchedulers(Enum): + DDIMScheduler = 1 + DDPMScheduler = 2 + PNDMScheduler = 3 + LMSDiscreteScheduler = 4 + EulerDiscreteScheduler = 5 + HeunDiscreteScheduler = 6 + EulerAncestralDiscreteScheduler = 7 + DPMSolverMultistepScheduler = 8 + DPMSolverSinglestepScheduler = 9 + KDPM2DiscreteScheduler = 10 + KDPM2AncestralDiscreteScheduler = 11 + DEISMultistepScheduler = 12 + UniPCMultistepScheduler = 13 + DPMSolverSDEScheduler = 14 + EDMEulerScheduler = 15 + + +AysSchedules = { + "StableDiffusionTimesteps": [999, 850, 736, 645, 545, 455, 343, 233, 124, 24], + "StableDiffusionSigmas": [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.0], + "StableDiffusionXLTimesteps": [999, 845, 730, 587, 443, 310, 193, 116, 53, 13], + "StableDiffusionXLSigmas": [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.0], + "StableDiffusionVideoSigmas": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.0], +} + + +@dataclass +class SchedulerOutput(BaseOutput): + """ + Base class for the output of a scheduler's `step` function. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.Tensor + + +class SchedulerMixin(PushToHubMixin): + """ + Base class for all schedulers. + + [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving + functionalities. + + [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to + the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`. + + Class attributes: + - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler + class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden + by parent class). + """ + + config_name = SCHEDULER_CONFIG_NAME + _compatibles = [] + has_compatibles = True + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + subfolder: Optional[str] = None, + return_unused_kwargs=False, + **kwargs, + ): + r""" + Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the scheduler + configuration saved with [`~SchedulerMixin.save_pretrained`]. + subfolder (`str`, *optional*): + The subfolder location of a model file within a larger model repository on the Hub or locally. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. 
+ + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. You can also activate the special + ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a + firewalled environment. + + + + """ + config, kwargs, commit_hash = cls.load_config( + pretrained_model_name_or_path=pretrained_model_name_or_path, + subfolder=subfolder, + return_unused_kwargs=True, + return_commit_hash=True, + **kwargs, + ) + return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs) + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a scheduler configuration object to a directory so that it can be reloaded using the + [`~SchedulerMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) + + @property + def compatibles(self): + """ + Returns all schedulers that are compatible with this scheduler + + Returns: + `List[SchedulerMixin]`: List of compatible schedulers + """ + return self._get_compatibles() + + @classmethod + def _get_compatibles(cls): + compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) + diffusers_library = importlib.import_module(__name__.split(".")[0]) + compatible_classes = [ + getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) + ] + return compatible_classes diff --git a/diffusers3/schedulers/README.md b/diffusers3/schedulers/README.md new file mode 100644 index 0000000000000000000000000000000000000000..31ad27793e34783faabc222adf98691fb396a0d8 --- /dev/null +++ b/diffusers3/schedulers/README.md @@ -0,0 +1,3 @@ +# Schedulers + +For more information on the schedulers, please refer to the [docs](https://huggingface.co/docs/diffusers/api/schedulers/overview). 
\ No newline at end of file diff --git a/diffusers3/schedulers/__init__.py b/diffusers3/schedulers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bb9088538653721f16938894cc694e55fad0b5e8 --- /dev/null +++ b/diffusers3/schedulers/__init__.py @@ -0,0 +1,221 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ..utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_scipy_available, + is_torch_available, + is_torchsde_available, +) + + +_dummy_modules = {} +_import_structure = {} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_pt_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_pt_objects)) + +else: + _import_structure["deprecated"] = ["KarrasVeScheduler", "ScoreSdeVpScheduler"] + _import_structure["scheduling_amused"] = ["AmusedScheduler"] + _import_structure["scheduling_consistency_decoder"] = ["ConsistencyDecoderScheduler"] + _import_structure["scheduling_consistency_models"] = ["CMStochasticIterativeScheduler"] + _import_structure["scheduling_ddim"] = ["DDIMScheduler"] + _import_structure["scheduling_ddim_cogvideox"] = ["CogVideoXDDIMScheduler"] + _import_structure["scheduling_ddim_inverse"] = ["DDIMInverseScheduler"] + _import_structure["scheduling_ddim_parallel"] = ["DDIMParallelScheduler"] + _import_structure["scheduling_ddpm"] = ["DDPMScheduler"] + _import_structure["scheduling_ddpm_parallel"] = ["DDPMParallelScheduler"] + _import_structure["scheduling_ddpm_wuerstchen"] = ["DDPMWuerstchenScheduler"] + _import_structure["scheduling_deis_multistep"] = ["DEISMultistepScheduler"] + _import_structure["scheduling_dpm_cogvideox"] = ["CogVideoXDPMScheduler"] + _import_structure["scheduling_dpmsolver_multistep"] = ["DPMSolverMultistepScheduler"] + _import_structure["scheduling_dpmsolver_multistep_inverse"] = ["DPMSolverMultistepInverseScheduler"] + _import_structure["scheduling_dpmsolver_singlestep"] = ["DPMSolverSinglestepScheduler"] + _import_structure["scheduling_edm_dpmsolver_multistep"] = ["EDMDPMSolverMultistepScheduler"] + _import_structure["scheduling_edm_euler"] = ["EDMEulerScheduler"] + _import_structure["scheduling_euler_ancestral_discrete"] = ["EulerAncestralDiscreteScheduler"] + _import_structure["scheduling_euler_discrete"] = ["EulerDiscreteScheduler"] + _import_structure["scheduling_flow_match_euler_discrete"] = ["FlowMatchEulerDiscreteScheduler"] + _import_structure["scheduling_flow_match_heun_discrete"] = ["FlowMatchHeunDiscreteScheduler"] + _import_structure["scheduling_heun_discrete"] = ["HeunDiscreteScheduler"] + _import_structure["scheduling_ipndm"] = ["IPNDMScheduler"] + _import_structure["scheduling_k_dpm_2_ancestral_discrete"] = ["KDPM2AncestralDiscreteScheduler"] + _import_structure["scheduling_k_dpm_2_discrete"] = 
["KDPM2DiscreteScheduler"] + _import_structure["scheduling_lcm"] = ["LCMScheduler"] + _import_structure["scheduling_pndm"] = ["PNDMScheduler"] + _import_structure["scheduling_repaint"] = ["RePaintScheduler"] + _import_structure["scheduling_sasolver"] = ["SASolverScheduler"] + _import_structure["scheduling_sde_ve"] = ["ScoreSdeVeScheduler"] + _import_structure["scheduling_tcd"] = ["TCDScheduler"] + _import_structure["scheduling_unclip"] = ["UnCLIPScheduler"] + _import_structure["scheduling_unipc_multistep"] = ["UniPCMultistepScheduler"] + _import_structure["scheduling_utils"] = ["AysSchedules", "KarrasDiffusionSchedulers", "SchedulerMixin"] + _import_structure["scheduling_vq_diffusion"] = ["VQDiffusionScheduler"] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_flax_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_flax_objects)) + +else: + _import_structure["scheduling_ddim_flax"] = ["FlaxDDIMScheduler"] + _import_structure["scheduling_ddpm_flax"] = ["FlaxDDPMScheduler"] + _import_structure["scheduling_dpmsolver_multistep_flax"] = ["FlaxDPMSolverMultistepScheduler"] + _import_structure["scheduling_euler_discrete_flax"] = ["FlaxEulerDiscreteScheduler"] + _import_structure["scheduling_karras_ve_flax"] = ["FlaxKarrasVeScheduler"] + _import_structure["scheduling_lms_discrete_flax"] = ["FlaxLMSDiscreteScheduler"] + _import_structure["scheduling_pndm_flax"] = ["FlaxPNDMScheduler"] + _import_structure["scheduling_sde_ve_flax"] = ["FlaxScoreSdeVeScheduler"] + _import_structure["scheduling_utils_flax"] = [ + "FlaxKarrasDiffusionSchedulers", + "FlaxSchedulerMixin", + "FlaxSchedulerOutput", + "broadcast_to_shape_from_left", + ] + + +try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_scipy_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_torch_and_scipy_objects)) + +else: + _import_structure["scheduling_lms_discrete"] = ["LMSDiscreteScheduler"] + +try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ..utils import dummy_torch_and_torchsde_objects # noqa F403 + + _dummy_modules.update(get_objects_from_module(dummy_torch_and_torchsde_objects)) + +else: + _import_structure["scheduling_cosine_dpmsolver_multistep"] = ["CosineDPMSolverMultistepScheduler"] + _import_structure["scheduling_dpmsolver_sde"] = ["DPMSolverSDEScheduler"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + from ..utils import ( + OptionalDependencyNotAvailable, + is_flax_available, + is_scipy_available, + is_torch_available, + is_torchsde_available, + ) + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_pt_objects import * # noqa F403 + else: + from .deprecated import KarrasVeScheduler, ScoreSdeVpScheduler + from .scheduling_amused import AmusedScheduler + from .scheduling_consistency_decoder import ConsistencyDecoderScheduler + from .scheduling_consistency_models import CMStochasticIterativeScheduler + from .scheduling_ddim import DDIMScheduler + from .scheduling_ddim_cogvideox import CogVideoXDDIMScheduler + from .scheduling_ddim_inverse import DDIMInverseScheduler + from .scheduling_ddim_parallel import DDIMParallelScheduler + from .scheduling_ddpm import 
DDPMScheduler + from .scheduling_ddpm_parallel import DDPMParallelScheduler + from .scheduling_ddpm_wuerstchen import DDPMWuerstchenScheduler + from .scheduling_deis_multistep import DEISMultistepScheduler + from .scheduling_dpm_cogvideox import CogVideoXDPMScheduler + from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler + from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler + from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler + from .scheduling_edm_dpmsolver_multistep import EDMDPMSolverMultistepScheduler + from .scheduling_edm_euler import EDMEulerScheduler + from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler + from .scheduling_euler_discrete import EulerDiscreteScheduler + from .scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler + from .scheduling_flow_match_heun_discrete import FlowMatchHeunDiscreteScheduler + from .scheduling_heun_discrete import HeunDiscreteScheduler + from .scheduling_ipndm import IPNDMScheduler + from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler + from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler + from .scheduling_lcm import LCMScheduler + from .scheduling_pndm import PNDMScheduler + from .scheduling_repaint import RePaintScheduler + from .scheduling_sasolver import SASolverScheduler + from .scheduling_sde_ve import ScoreSdeVeScheduler + from .scheduling_tcd import TCDScheduler + from .scheduling_unclip import UnCLIPScheduler + from .scheduling_unipc_multistep import UniPCMultistepScheduler + from .scheduling_utils import AysSchedules, KarrasDiffusionSchedulers, SchedulerMixin + from .scheduling_vq_diffusion import VQDiffusionScheduler + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_flax_objects import * # noqa F403 + else: + from .scheduling_ddim_flax import FlaxDDIMScheduler + from .scheduling_ddpm_flax import FlaxDDPMScheduler + from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler + from .scheduling_euler_discrete_flax import FlaxEulerDiscreteScheduler + from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler + from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler + from .scheduling_pndm_flax import FlaxPNDMScheduler + from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler + from .scheduling_utils_flax import ( + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + broadcast_to_shape_from_left, + ) + + try: + if not (is_torch_available() and is_scipy_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 + else: + from .scheduling_lms_discrete import LMSDiscreteScheduler + + try: + if not (is_torch_available() and is_torchsde_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 + else: + from .scheduling_cosine_dpmsolver_multistep import CosineDPMSolverMultistepScheduler + from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) + for name, value in _dummy_modules.items(): + setattr(sys.modules[__name__], name, value) diff --git 
a/diffusers3/schedulers/__pycache__/__init__.cpython-310.pyc b/diffusers3/schedulers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b3371664ccfd298abecf66f1ba6d152de91386d Binary files /dev/null and b/diffusers3/schedulers/__pycache__/__init__.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/__init__.cpython-38.pyc b/diffusers3/schedulers/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5003c54d3b8574b527a247b2f17cf7ed64af3b88 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/__init__.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_consistency_decoder.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_consistency_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcfe2053f20c870ac7f97e9263fe9b26ccbf5d5f Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_consistency_decoder.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_consistency_decoder.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_consistency_decoder.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bafd157bb5ed335fae5fa9849f92da43ace68fd Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_consistency_decoder.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_ddim.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_ddim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8974715af55823dd3835f0e4a5409a52387fab6 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_ddim.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_ddim.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_ddim.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13de75f9c31d7c16d469c1ccafd6b99b207f92e5 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_ddim.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..647b7f8fe87d3548304095f492602fea27224b5f Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93def155e07309332bfc6da262b3d6bd9c80a74a Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_dpmsolver_multistep.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_edm_dpmsolver_multistep.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_edm_dpmsolver_multistep.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dd6a55411ab4c7d45e71f874758be5e2d8ca8fe Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_edm_dpmsolver_multistep.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_edm_dpmsolver_multistep.cpython-38.pyc 
b/diffusers3/schedulers/__pycache__/scheduling_edm_dpmsolver_multistep.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ec11817fa2077a119072fcbb02178ae13cb4332 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_edm_dpmsolver_multistep.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeb3d5a3a693d95fd70f73a062717f99646c8005 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53b87c4798f98934d85c65fb6c741443f531bbe1 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_euler_ancestral_discrete.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_euler_discrete.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_euler_discrete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb105c3b8ba4827d67df44f61a5e569372258cec Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_euler_discrete.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_euler_discrete.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_euler_discrete.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b188a064b7c46db7f3c895d3d1cbbfc7a5762a04 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_euler_discrete.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_flow_match_euler_discrete.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_flow_match_euler_discrete.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa120b6ded0e87c1c2acc736902fd25e6fc3959d Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_flow_match_euler_discrete.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_heun_discrete.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_heun_discrete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d1a762c994de889c66503980b0aae5adb81dec5 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_heun_discrete.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_heun_discrete.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_heun_discrete.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de1c07abea8d82e91548b2f613e5da9a5b5e55ec Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_heun_discrete.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_lms_discrete.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_lms_discrete.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b5824e661f474c738b2b1645e1282ddf3633fa3 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_lms_discrete.cpython-38.pyc differ diff --git 
a/diffusers3/schedulers/__pycache__/scheduling_pndm.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_pndm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..277f6a609648a5bda383b370dae1f4c85070e9f5 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_pndm.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_pndm.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_pndm.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14475876c1c1baaa89d108863a9b43e72c2ea10f Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_pndm.cpython-38.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_utils.cpython-310.pyc b/diffusers3/schedulers/__pycache__/scheduling_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06f928c405518fa6392748f10712aacbd33607e1 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_utils.cpython-310.pyc differ diff --git a/diffusers3/schedulers/__pycache__/scheduling_utils.cpython-38.pyc b/diffusers3/schedulers/__pycache__/scheduling_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e68e929056bd0a4107be4cd489a2cd906b0d6d63 Binary files /dev/null and b/diffusers3/schedulers/__pycache__/scheduling_utils.cpython-38.pyc differ diff --git a/diffusers3/schedulers/deprecated/__init__.py b/diffusers3/schedulers/deprecated/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..479cf9bd568be320b60e87a831bf80835e9943f2 --- /dev/null +++ b/diffusers3/schedulers/deprecated/__init__.py @@ -0,0 +1,50 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_pt_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) +else: + _import_structure["scheduling_karras_ve"] = ["KarrasVeScheduler"] + _import_structure["scheduling_sde_vp"] = ["ScoreSdeVpScheduler"] + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ...utils.dummy_pt_objects import * # noqa F403 + else: + from .scheduling_karras_ve import KarrasVeScheduler + from .scheduling_sde_vp import ScoreSdeVpScheduler + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/diffusers3/schedulers/deprecated/scheduling_karras_ve.py b/diffusers3/schedulers/deprecated/scheduling_karras_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..f5f9bd256c2e57f03176d93d66461a3a43edecae --- /dev/null +++ b/diffusers3/schedulers/deprecated/scheduling_karras_ve.py @@ -0,0 +1,243 @@ +# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils import BaseOutput +from ...utils.torch_utils import randn_tensor +from ..scheduling_utils import SchedulerMixin + + +@dataclass +class KarrasVeOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + derivative (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Derivative of predicted original image sample (x_0). + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample (x_{0}) based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + derivative: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +class KarrasVeScheduler(SchedulerMixin, ConfigMixin): + """ + A stochastic scheduler tailored to variance-expanding models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + + + For more details on the parameters, see [Appendix E](https://arxiv.org/abs/2206.00364). The grid search values used + to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of the paper. + + + + Args: + sigma_min (`float`, defaults to 0.02): + The minimum noise magnitude. + sigma_max (`float`, defaults to 100): + The maximum noise magnitude. + s_noise (`float`, defaults to 1.007): + The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, + 1.011]. + s_churn (`float`, defaults to 80): + The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. + s_min (`float`, defaults to 0.05): + The start value of the sigma range to add noise (enable stochasticity). A reasonable range is [0, 10]. + s_max (`float`, defaults to 50): + The end value of the sigma range to add noise. A reasonable range is [0.2, 80]. 
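+
+    The intended sampling loop (a sketch; the exact call pattern lives in the deprecated Karras-VE pipeline) is:
+    obtain `sample_hat`/`sigma_hat` from `add_noise_to_input`, evaluate the model on `sample_hat`, advance with
+    `step`, and, when `sigma_prev` is non-zero, run a second model evaluation and refine the result with
+    `step_correct`.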
+ """ + + order = 2 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.02, + sigma_max: float = 100, + s_noise: float = 1.007, + s_churn: float = 80, + s_min: float = 0.05, + s_max: float = 50, + ): + # standard deviation of the initial noise distribution + self.init_noise_sigma = sigma_max + + # setable values + self.num_inference_steps: int = None + self.timesteps: np.IntTensor = None + self.schedule: torch.Tensor = None # sigma(t_i) + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps).to(device) + schedule = [ + ( + self.config.sigma_max**2 + * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) + ) + for i in self.timesteps + ] + self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) + + def add_noise_to_input( + self, sample: torch.Tensor, sigma: float, generator: Optional[torch.Generator] = None + ) -> Tuple[torch.Tensor, float]: + """ + Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i โ‰ฅ 0` to reach a + higher noise level `sigma_hat = sigma_i + gamma_i*sigma_i`. + + Args: + sample (`torch.Tensor`): + The input sample. + sigma (`float`): + generator (`torch.Generator`, *optional*): + A random number generator. + """ + if self.config.s_min <= sigma <= self.config.s_max: + gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) + else: + gamma = 0 + + # sample eps ~ N(0, S_noise^2 * I) + eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) + sigma_hat = sigma + gamma * sigma + sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) + + return sample_hat, sigma_hat + + def step( + self, + model_output: torch.Tensor, + sigma_hat: float, + sigma_prev: float, + sample_hat: torch.Tensor, + return_dict: bool = True, + ) -> Union[KarrasVeOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + sigma_hat (`float`): + sigma_prev (`float`): + sample_hat (`torch.Tensor`): + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + + """ + + pred_original_sample = sample_hat + sigma_hat * model_output + derivative = (sample_hat - pred_original_sample) / sigma_hat + sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative + + if not return_dict: + return (sample_prev, derivative) + + return KarrasVeOutput( + prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample + ) + + def step_correct( + self, + model_output: torch.Tensor, + sigma_hat: float, + sigma_prev: float, + sample_hat: torch.Tensor, + sample_prev: torch.Tensor, + derivative: torch.Tensor, + return_dict: bool = True, + ) -> Union[KarrasVeOutput, Tuple]: + """ + Corrects the predicted sample based on the `model_output` of the network. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + sigma_hat (`float`): TODO + sigma_prev (`float`): TODO + sample_hat (`torch.Tensor`): TODO + sample_prev (`torch.Tensor`): TODO + derivative (`torch.Tensor`): TODO + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. + + Returns: + prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO + + """ + pred_original_sample = sample_prev + sigma_prev * model_output + derivative_corr = (sample_prev - pred_original_sample) / sigma_prev + sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) + + if not return_dict: + return (sample_prev, derivative) + + return KarrasVeOutput( + prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample + ) + + def add_noise(self, original_samples, noise, timesteps): + raise NotImplementedError() diff --git a/diffusers3/schedulers/deprecated/scheduling_sde_vp.py b/diffusers3/schedulers/deprecated/scheduling_sde_vp.py new file mode 100644 index 0000000000000000000000000000000000000000..09b02cadc4008b1d80184a355a06201a85f97e8f --- /dev/null +++ b/diffusers3/schedulers/deprecated/scheduling_sde_vp.py @@ -0,0 +1,109 @@ +# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch + +import math +from typing import Union + +import torch + +from ...configuration_utils import ConfigMixin, register_to_config +from ...utils.torch_utils import randn_tensor +from ..scheduling_utils import SchedulerMixin + + +class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): + """ + `ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. 
Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 2000): + The number of diffusion steps to train the model. + beta_min (`int`, defaults to 0.1): + beta_max (`int`, defaults to 20): + sampling_eps (`int`, defaults to 1e-3): + The end value of sampling where timesteps decrease progressively from 1 to epsilon. + """ + + order = 1 + + @register_to_config + def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3): + self.sigmas = None + self.discrete_sigmas = None + self.timesteps = None + + def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None): + """ + Sets the continuous timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device) + + def step_pred(self, score, x, t, generator=None): + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + score (): + x (): + t (): + generator (`torch.Generator`, *optional*): + A random number generator. + """ + if self.timesteps is None: + raise ValueError( + "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + # TODO(Patrick) better comments + non-PyTorch + # postprocess model score + log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min + std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) + std = std.flatten() + while len(std.shape) < len(score.shape): + std = std.unsqueeze(-1) + score = -score / std + + # compute + dt = -1.0 / len(self.timesteps) + + beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) + beta_t = beta_t.flatten() + while len(beta_t.shape) < len(x.shape): + beta_t = beta_t.unsqueeze(-1) + drift = -0.5 * beta_t * x + + diffusion = torch.sqrt(beta_t) + drift = drift - diffusion**2 * score + x_mean = x + drift * dt + + # add noise + noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype) + x = x_mean + diffusion * math.sqrt(-dt) * noise + + return x, x_mean + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_amused.py b/diffusers3/schedulers/scheduling_amused.py new file mode 100644 index 0000000000000000000000000000000000000000..238b8d8691715a83105df815c0ebcad748092ec1 --- /dev/null +++ b/diffusers3/schedulers/scheduling_amused.py @@ -0,0 +1,162 @@ +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import SchedulerMixin + + +def gumbel_noise(t, generator=None): + device = generator.device if generator is not None else t.device + noise = torch.zeros_like(t, device=device).uniform_(0, 1, generator=generator).to(t.device) + return -torch.log((-torch.log(noise.clamp(1e-20))).clamp(1e-20)) + + +def 
mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None): + confidence = torch.log(probs.clamp(1e-20)) + temperature * gumbel_noise(probs, generator=generator) + sorted_confidence = torch.sort(confidence, dim=-1).values + cut_off = torch.gather(sorted_confidence, 1, mask_len.long()) + masking = confidence < cut_off + return masking + + +@dataclass +class AmusedSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: torch.Tensor = None + + +class AmusedScheduler(SchedulerMixin, ConfigMixin): + order = 1 + + temperatures: torch.Tensor + + @register_to_config + def __init__( + self, + mask_token_id: int, + masking_schedule: str = "cosine", + ): + self.temperatures = None + self.timesteps = None + + def set_timesteps( + self, + num_inference_steps: int, + temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), + device: Union[str, torch.device] = None, + ): + self.timesteps = torch.arange(num_inference_steps, device=device).flip(0) + + if isinstance(temperature, (tuple, list)): + self.temperatures = torch.linspace(temperature[0], temperature[1], num_inference_steps, device=device) + else: + self.temperatures = torch.linspace(temperature, 0.01, num_inference_steps, device=device) + + def step( + self, + model_output: torch.Tensor, + timestep: torch.long, + sample: torch.LongTensor, + starting_mask_ratio: int = 1, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[AmusedSchedulerOutput, Tuple]: + two_dim_input = sample.ndim == 3 and model_output.ndim == 4 + + if two_dim_input: + batch_size, codebook_size, height, width = model_output.shape + sample = sample.reshape(batch_size, height * width) + model_output = model_output.reshape(batch_size, codebook_size, height * width).permute(0, 2, 1) + + unknown_map = sample == self.config.mask_token_id + + probs = model_output.softmax(dim=-1) + + device = probs.device + probs_ = probs.to(generator.device) if generator is not None else probs # handles when generator is on CPU + if probs_.device.type == "cpu" and probs_.dtype != torch.float32: + probs_ = probs_.float() # multinomial is not implemented for cpu half precision + probs_ = probs_.reshape(-1, probs.size(-1)) + pred_original_sample = torch.multinomial(probs_, 1, generator=generator).to(device=device) + pred_original_sample = pred_original_sample[:, 0].view(*probs.shape[:-1]) + pred_original_sample = torch.where(unknown_map, pred_original_sample, sample) + + if timestep == 0: + prev_sample = pred_original_sample + else: + seq_len = sample.shape[1] + step_idx = (self.timesteps == timestep).nonzero() + ratio = (step_idx + 1) / len(self.timesteps) + + if self.config.masking_schedule == "cosine": + mask_ratio = torch.cos(ratio * math.pi / 2) + elif self.config.masking_schedule == "linear": + mask_ratio = 1 - ratio + else: + raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") + + mask_ratio = starting_mask_ratio * mask_ratio + + 
mask_len = (seq_len * mask_ratio).floor() + # do not mask more than amount previously masked + mask_len = torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len) + # mask at least one + mask_len = torch.max(torch.tensor([1], device=model_output.device), mask_len) + + selected_probs = torch.gather(probs, -1, pred_original_sample[:, :, None])[:, :, 0] + # Ignores the tokens given in the input by overwriting their confidence. + selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) + + masking = mask_by_random_topk(mask_len, selected_probs, self.temperatures[step_idx], generator) + + # Masks tokens with lower confidence. + prev_sample = torch.where(masking, self.config.mask_token_id, pred_original_sample) + + if two_dim_input: + prev_sample = prev_sample.reshape(batch_size, height, width) + pred_original_sample = pred_original_sample.reshape(batch_size, height, width) + + if not return_dict: + return (prev_sample, pred_original_sample) + + return AmusedSchedulerOutput(prev_sample, pred_original_sample) + + def add_noise(self, sample, timesteps, generator=None): + step_idx = (self.timesteps == timesteps).nonzero() + ratio = (step_idx + 1) / len(self.timesteps) + + if self.config.masking_schedule == "cosine": + mask_ratio = torch.cos(ratio * math.pi / 2) + elif self.config.masking_schedule == "linear": + mask_ratio = 1 - ratio + else: + raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") + + mask_indices = ( + torch.rand( + sample.shape, device=generator.device if generator is not None else sample.device, generator=generator + ).to(sample.device) + < mask_ratio + ) + + masked_sample = sample.clone() + + masked_sample[mask_indices] = self.config.mask_token_id + + return masked_sample diff --git a/diffusers3/schedulers/scheduling_consistency_decoder.py b/diffusers3/schedulers/scheduling_consistency_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..d7af018b284a59878c3cae86cec57e02d344e227 --- /dev/null +++ b/diffusers3/schedulers/scheduling_consistency_decoder.py @@ -0,0 +1,180 @@ +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +@dataclass +class ConsistencyDecoderSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.Tensor + + +class ConsistencyDecoderScheduler(SchedulerMixin, ConfigMixin): + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1024, + sigma_data: float = 0.5, + ): + betas = betas_for_alpha_bar(num_train_timesteps) + + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + + self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod) + + sigmas = torch.sqrt(1.0 / alphas_cumprod - 1) + + sqrt_recip_alphas_cumprod = torch.sqrt(1.0 / alphas_cumprod) + + self.c_skip = sqrt_recip_alphas_cumprod * sigma_data**2 / (sigmas**2 + sigma_data**2) + self.c_out = sigmas * sigma_data / (sigmas**2 + sigma_data**2) ** 0.5 + self.c_in = sqrt_recip_alphas_cumprod / (sigmas**2 + sigma_data**2) ** 0.5 + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + ): + if num_inference_steps != 2: + raise ValueError("Currently more than 2 inference steps are not supported.") + + self.timesteps = torch.tensor([1008, 512], dtype=torch.long, device=device) + self.sqrt_alphas_cumprod = self.sqrt_alphas_cumprod.to(device) + self.sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod.to(device) + self.c_skip = self.c_skip.to(device) + self.c_out = self.c_out.to(device) + self.c_in = self.c_in.to(device) + + @property + def init_noise_sigma(self): + return self.sqrt_one_minus_alphas_cumprod[self.timesteps[0]] + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample * self.c_in[timestep] + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[ConsistencyDecoderSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). 
+ + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + timestep (`float`): + The current timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a + [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] or `tuple`: + If return_dict is `True`, + [`~schedulers.scheduling_consistency_models.ConsistencyDecoderSchedulerOutput`] is returned, otherwise + a tuple is returned where the first element is the sample tensor. + """ + x_0 = self.c_out[timestep] * model_output + self.c_skip[timestep] * sample + + timestep_idx = torch.where(self.timesteps == timestep)[0] + + if timestep_idx == len(self.timesteps) - 1: + prev_sample = x_0 + else: + noise = randn_tensor(x_0.shape, generator=generator, dtype=x_0.dtype, device=x_0.device) + prev_sample = ( + self.sqrt_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * x_0 + + self.sqrt_one_minus_alphas_cumprod[self.timesteps[timestep_idx + 1]].to(x_0.dtype) * noise + ) + + if not return_dict: + return (prev_sample,) + + return ConsistencyDecoderSchedulerOutput(prev_sample=prev_sample) diff --git a/diffusers3/schedulers/scheduling_consistency_models.py b/diffusers3/schedulers/scheduling_consistency_models.py new file mode 100644 index 0000000000000000000000000000000000000000..653171638ccfe53ce55913d7c2eba454b305a63d --- /dev/null +++ b/diffusers3/schedulers/scheduling_consistency_models.py @@ -0,0 +1,446 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class CMStochasticIterativeSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.Tensor + + +class CMStochasticIterativeScheduler(SchedulerMixin, ConfigMixin): + """ + Multistep and onestep sampling for consistency models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. 
+ + Args: + num_train_timesteps (`int`, defaults to 40): + The number of diffusion steps to train the model. + sigma_min (`float`, defaults to 0.002): + Minimum noise magnitude in the sigma schedule. Defaults to 0.002 from the original implementation. + sigma_max (`float`, defaults to 80.0): + Maximum noise magnitude in the sigma schedule. Defaults to 80.0 from the original implementation. + sigma_data (`float`, defaults to 0.5): + The standard deviation of the data distribution from the EDM + [paper](https://huggingface.co/papers/2206.00364). Defaults to 0.5 from the original implementation. + s_noise (`float`, defaults to 1.0): + The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, + 1.011]. Defaults to 1.0 from the original implementation. + rho (`float`, defaults to 7.0): + The parameter for calculating the Karras sigma schedule from the EDM + [paper](https://huggingface.co/papers/2206.00364). Defaults to 7.0 from the original implementation. + clip_denoised (`bool`, defaults to `True`): + Whether to clip the denoised outputs to `(-1, 1)`. + timesteps (`List` or `np.ndarray` or `torch.Tensor`, *optional*): + An explicit timestep schedule that can be optionally specified. The timesteps are expected to be in + increasing order. + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 40, + sigma_min: float = 0.002, + sigma_max: float = 80.0, + sigma_data: float = 0.5, + s_noise: float = 1.0, + rho: float = 7.0, + clip_denoised: bool = True, + ): + # standard deviation of the initial noise distribution + self.init_noise_sigma = sigma_max + + ramp = np.linspace(0, 1, num_train_timesteps) + sigmas = self._convert_to_karras(ramp) + timesteps = self.sigma_to_t(sigmas) + + # setable values + self.num_inference_steps = None + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps) + self.custom_timesteps = False + self.is_scale_input_called = False + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Scales the consistency model input by `(sigma**2 + sigma_data**2) ** 0.5`. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`float` or `torch.Tensor`): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. 
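+
+ Example (minimal sketch; `consistency_model` is a hypothetical callable standing in for the real denoiser, so the model call is left commented out):
+
+ ```py
+ >>> import torch
+ >>> from diffusers3.schedulers.scheduling_consistency_models import CMStochasticIterativeScheduler
+
+ >>> scheduler = CMStochasticIterativeScheduler()
+ >>> scheduler.set_timesteps(num_inference_steps=2)
+ >>> sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
+ >>> for t in scheduler.timesteps:
+ ...     scaled = scheduler.scale_model_input(sample, t)  # divides by (sigma**2 + sigma_data**2) ** 0.5
+ ...     # model_output = consistency_model(scaled, t)
+ ...     # sample = scheduler.step(model_output, t, sample).prev_sample
+ ```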
+ """ + # Get sigma corresponding to timestep + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + + sample = sample / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + + self.is_scale_input_called = True + return sample + + def sigma_to_t(self, sigmas: Union[float, np.ndarray]): + """ + Gets scaled timesteps from the Karras sigmas for input to the consistency model. + + Args: + sigmas (`float` or `np.ndarray`): + A single Karras sigma or an array of Karras sigmas. + + Returns: + `float` or `np.ndarray`: + A scaled input timestep or scaled input timestep array. + """ + if not isinstance(sigmas, np.ndarray): + sigmas = np.array(sigmas, dtype=np.float64) + + timesteps = 1000 * 0.25 * np.log(sigmas + 1e-44) + + return timesteps + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, + `num_inference_steps` must be `None`. + """ + if num_inference_steps is None and timesteps is None: + raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") + + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `timesteps`.") + + # Follow DDPMScheduler custom timesteps logic + if timesteps is not None: + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`timesteps` must be in descending order.") + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.custom_timesteps = True + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." 
+ ) + + self.num_inference_steps = num_inference_steps + + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + self.custom_timesteps = False + + # Map timesteps to Karras sigmas directly for multistep sampling + # See https://github.com/openai/consistency_models/blob/main/cm/karras_diffusion.py#L675 + num_train_timesteps = self.config.num_train_timesteps + ramp = timesteps[::-1].copy() + ramp = ramp / (num_train_timesteps - 1) + sigmas = self._convert_to_karras(ramp) + timesteps = self.sigma_to_t(sigmas) + + sigmas = np.concatenate([sigmas, [self.config.sigma_min]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas).to(device=device) + + if str(device).startswith("mps"): + # mps does not support float64 + self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) + else: + self.timesteps = torch.from_numpy(timesteps).to(device=device) + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Modified _convert_to_karras implementation that takes in ramp as argument + def _convert_to_karras(self, ramp): + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min: float = self.config.sigma_min + sigma_max: float = self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def get_scalings(self, sigma): + sigma_data = self.config.sigma_data + + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + return c_skip, c_out + + def get_scalings_for_boundary_condition(self, sigma): + """ + Gets the scalings used in the consistency model parameterization (from Appendix C of the + [paper](https://huggingface.co/papers/2303.01469)) to enforce boundary condition. + + + + `epsilon` in the equations for `c_skip` and `c_out` is set to `sigma_min`. + + + + Args: + sigma (`torch.Tensor`): + The current sigma in the Karras sigma schedule. + + Returns: + `tuple`: + A two-element tuple where `c_skip` (which weights the current sample) is the first element and `c_out` + (which weights the consistency model output) is the second element. + """ + sigma_min = self.config.sigma_min + sigma_data = self.config.sigma_data + + c_skip = sigma_data**2 / ((sigma - sigma_min) ** 2 + sigma_data**2) + c_out = (sigma - sigma_min) * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + return c_skip, c_out + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[CMStochasticIterativeSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + timestep (`float`): + The current timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a + [`~schedulers.scheduling_consistency_models.CMStochasticIterativeSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_consistency_models.CMStochasticIterativeSchedulerOutput`] or `tuple`: + If return_dict is `True`, + [`~schedulers.scheduling_consistency_models.CMStochasticIterativeSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + """ + + if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + f" `{self.__class__}.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + sigma_min = self.config.sigma_min + sigma_max = self.config.sigma_max + + if self.step_index is None: + self._init_step_index(timestep) + + # sigma_next corresponds to next_t in original implementation + sigma = self.sigmas[self.step_index] + if self.step_index + 1 < self.config.num_train_timesteps: + sigma_next = self.sigmas[self.step_index + 1] + else: + # Set sigma_next to sigma_min + sigma_next = self.sigmas[-1] + + # Get scalings for boundary conditions + c_skip, c_out = self.get_scalings_for_boundary_condition(sigma) + + # 1. Denoise model output using boundary conditions + denoised = c_out * model_output + c_skip * sample + if self.config.clip_denoised: + denoised = denoised.clamp(-1, 1) + + # 2. Sample z ~ N(0, s_noise^2 * I) + # Noise is not used for onestep sampling. + if len(self.timesteps) > 1: + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + else: + noise = torch.zeros_like(model_output) + z = noise * self.config.s_noise + + sigma_hat = sigma_next.clamp(min=sigma_min, max=sigma_max) + + # 3. 
Return noisy sample + # tau = sigma_hat, eps = sigma_min + prev_sample = denoised + z * (sigma_hat**2 - sigma_min**2) ** 0.5 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return CMStochasticIterativeSchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_cosine_dpmsolver_multistep.py b/diffusers3/schedulers/scheduling_cosine_dpmsolver_multistep.py new file mode 100644 index 0000000000000000000000000000000000000000..ab56650dbac526f72246d720fe015e62c327a558 --- /dev/null +++ b/diffusers3/schedulers/scheduling_cosine_dpmsolver_multistep.py @@ -0,0 +1,572 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver and https://github.com/NVlabs/edm + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_dpmsolver_sde import BrownianTreeNoiseSampler +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +class CosineDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + Implements a variant of `DPMSolverMultistepScheduler` with cosine schedule, proposed by Nichol and Dhariwal (2021). 
+ This scheduler was used in Stable Audio Open [1]. + + [1] Evans, Parker, et al. "Stable Audio Open" https://arxiv.org/abs/2407.14358 + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + sigma_min (`float`, *optional*, defaults to 0.3): + Minimum noise magnitude in the sigma schedule. This was set to 0.3 in Stable Audio Open [1]. + sigma_max (`float`, *optional*, defaults to 500): + Maximum noise magnitude in the sigma schedule. This was set to 500 in Stable Audio Open [1]. + sigma_data (`float`, *optional*, defaults to 1.0): + The standard deviation of the data distribution. This is set to 1.0 in Stable Audio Open [1]. + sigma_schedule (`str`, *optional*, defaults to `exponential`): + Sigma schedule to compute the `sigmas`. By default, we the schedule introduced in the EDM paper + (https://arxiv.org/abs/2206.00364). Other acceptable value is "exponential". The exponential schedule was + incorporated in this model: https://huggingface.co/stabilityai/cosxl. + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2`. It is recommended to use `solver_order=2`. + prediction_type (`str`, defaults to `v_prediction`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. 
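+
+ Example (minimal usage sketch; `denoiser` is a hypothetical v-prediction model and the latent shape is illustrative, so the model call is left commented out):
+
+ ```py
+ >>> import torch
+ >>> from diffusers3.schedulers.scheduling_cosine_dpmsolver_multistep import CosineDPMSolverMultistepScheduler
+
+ >>> scheduler = CosineDPMSolverMultistepScheduler()
+ >>> scheduler.set_timesteps(num_inference_steps=50)
+ >>> sample = torch.randn(1, 64, 1024) * scheduler.init_noise_sigma
+ >>> for t in scheduler.timesteps:
+ ...     scaled = scheduler.scale_model_input(sample, t)
+ ...     # model_output = denoiser(scaled, t)
+ ...     # sample = scheduler.step(model_output, t, sample).prev_sample
+ ```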
+ """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.3, + sigma_max: float = 500, + sigma_data: float = 1.0, + sigma_schedule: str = "exponential", + num_train_timesteps: int = 1000, + solver_order: int = 2, + prediction_type: str = "v_prediction", + rho: float = 7.0, + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + ): + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + ramp = torch.linspace(0, 1, num_train_timesteps) + if sigma_schedule == "karras": + sigmas = self._compute_karras_sigmas(ramp) + elif sigma_schedule == "exponential": + sigmas = self._compute_exponential_sigmas(ramp) + + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + # setable values + self.num_inference_steps = None + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + return (self.config.sigma_max**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. 
+ """ + self._begin_index = begin_index + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_inputs + def precondition_inputs(self, sample, sigma): + c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + scaled_sample = sample * c_in + return scaled_sample + + def precondition_noise(self, sigma): + if not isinstance(sigma, torch.Tensor): + sigma = torch.tensor([sigma]) + + return sigma.atan() / math.pi * 2 + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_outputs + def precondition_outputs(self, sample, model_output, sigma): + sigma_data = self.config.sigma_data + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + + if self.config.prediction_type == "epsilon": + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + elif self.config.prediction_type == "v_prediction": + c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + else: + raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.") + + denoised = c_skip * sample + c_out * model_output + + return denoised + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.scale_model_input + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = self.precondition_inputs(sample, sigma) + + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
+ """ + + self.num_inference_steps = num_inference_steps + + ramp = torch.linspace(0, 1, self.num_inference_steps) + if self.config.sigma_schedule == "karras": + sigmas = self._compute_karras_sigmas(ramp) + elif self.config.sigma_schedule == "exponential": + sigmas = self._compute_exponential_sigmas(ramp) + + sigmas = sigmas.to(dtype=torch.float32, device=device) + self.timesteps = self.precondition_noise(sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = self.config.sigma_min + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + self.sigmas = torch.cat([sigmas, torch.tensor([sigma_last], dtype=torch.float32, device=device)]) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # if a noise sampler is used, reinitialise it + self.noise_sampler = None + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._compute_karras_sigmas + def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._compute_exponential_sigmas + def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: + """Implementation closely follows k-diffusion. + + https://github.com/crowsonkb/k-diffusion/blob/6ab5146d4a5ef63901326489f31f1d8e7dd36b48/k_diffusion/sampling.py#L26 + """ + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0) + return sigmas + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = torch.tensor(1) # Inputs are pre-scaled before going into unet, so alpha_t = 1 + sigma_t = sigma + + return alpha_t, sigma_t + + def convert_model_output( + self, + model_output: torch.Tensor, + sample: torch.Tensor = None, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. 
DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + sigma = self.sigmas[self.step_index] + x0_pred = self.precondition_outputs(sample, model_output, sigma) + + return x0_pred + + def dpm_solver_first_order_update( + self, + model_output: torch.Tensor, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + + return x_t + + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.Tensor], + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
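+
+ For the default `midpoint` solver type this evaluates the SDE-DPM-Solver++ update
+ `x_t = (sigma_t / sigma_s0) * exp(-h) * x_s0 + alpha_t * (1 - exp(-2h)) * (D0 + 0.5 * D1) + sigma_t * sqrt(1 - exp(-2h)) * noise`,
+ where `h = lambda_t - lambda_s0`, `D0` is the most recent converted model output, and `D1 = (D0 - D_prev) / r0` is a
+ finite-difference estimate of its derivative from the previous model output.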
+ """ + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + + # sde-dpmsolver++ + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final + or (self.config.lower_order_final and len(self.timesteps) < 15) + or self.config.final_sigmas_type == "zero" + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.noise_sampler is None: + seed = None + if generator is not None: + seed = ( + [g.initial_seed() for g in generator] if isinstance(generator, list) else generator.initial_seed() + ) + self.noise_sampler = BrownianTreeNoiseSampler( + model_output, sigma_min=self.config.sigma_min, sigma_max=self.config.sigma_max, seed=seed + ) + noise = self.noise_sampler(self.sigmas[self.step_index], self.sigmas[self.step_index + 1]).to( + model_output.device + ) + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * 
timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddim.py b/diffusers3/schedulers/scheduling_ddim.py new file mode 100644 index 0000000000000000000000000000000000000000..14356eafdaea8cc7f4e165bcb4e8840468fa932c --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddim.py @@ -0,0 +1,518 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM +class DDIMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDIMScheduler(SchedulerMixin, ConfigMixin): + """ + `DDIMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with + non-Markovian guidance. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
+ prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. 
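+        # For example, with `set_alpha_to_one=True` and `eta=0`, the final denoising step returns the
+        # predicted x_0 directly, because alpha_prod_t_prev == 1.0 makes the noise-direction term vanish.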
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def _get_variance(self, timestep, prev_timestep): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
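+                As an illustration, with the default configuration (`num_train_timesteps=1000`,
+                `steps_offset=0`) and `"leading"` spacing, `num_inference_steps=50` produces the
+                timesteps `[980, 960, ..., 20, 0]`.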
+ """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[DDIMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + The weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`, defaults to `False`): + If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary + because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no + clipping has happened, "corrected" `model_output` would coincide with the one provided as input and + `use_clipped_model_output` has no effect. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> ฮท + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = self._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + if use_clipped_model_output: + # the pred_epsilon is always re-derived from the clipped x_0 in Glide + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if eta > 0: + if variance_noise is not None and generator is not None: + raise ValueError( + "Cannot pass both generator and variance_noise. Please make sure that either `generator` or" + " `variance_noise` stays `None`." 
+ ) + + if variance_noise is None: + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + variance = std_dev_t * variance_noise + + prev_sample = prev_sample + variance + + if not return_dict: + return (prev_sample,) + + return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddim_cogvideox.py b/diffusers3/schedulers/scheduling_ddim_cogvideox.py new file mode 100644 index 0000000000000000000000000000000000000000..ec5c5f3e1c5dd620c2b51d222e681b93680afccf --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddim_cogvideox.py @@ -0,0 +1,449 @@ +# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM +class DDIMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +def rescale_zero_terminal_snr(alphas_cumprod): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. 
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + + return alphas_bar + + +class CogVideoXDDIMScheduler(SchedulerMixin, ConfigMixin): + """ + `DDIMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with + non-Markovian guidance. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
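+        snr_shift_scale (`float`, defaults to 3.0):
+            Scale factor for the SNR shift applied to `alphas_cumprod` at initialization (following SD3),
+            as used by CogVideoX.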
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, + beta_end: float = 0.0120, + beta_schedule: str = "scaled_linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + snr_shift_scale: float = 3.0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float64) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # Modify: SNR shift following SD3 + self.alphas_cumprod = self.alphas_cumprod / (snr_shift_scale + (1 - snr_shift_scale) * self.alphas_cumprod) + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.alphas_cumprod = rescale_zero_terminal_snr(self.alphas_cumprod) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + + def _get_variance(self, timestep, prev_timestep): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
+ """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[DDIMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + The weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`, defaults to `False`): + If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary + because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no + clipping has happened, "corrected" `model_output` would coincide with the one provided as input and + `use_clipped_model_output` has no effect. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> ฮท + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + # To make style tests pass, commented out `pred_epsilon` as it is an unused variable + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + # pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + # pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + # pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + a_t = ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** 0.5 + b_t = alpha_prod_t_prev**0.5 - alpha_prod_t**0.5 * a_t + + prev_sample = a_t * sample + b_t * pred_original_sample + + if not return_dict: + return (prev_sample,) + + return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = 
sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddim_flax.py b/diffusers3/schedulers/scheduling_ddim_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..23c71a61452a9bf1bb1e17e7c0d8eb82daee4616 --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddim_flax.py @@ -0,0 +1,314 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, + get_velocity_common, +) + + +@flax.struct.dataclass +class DDIMSchedulerState: + common: CommonSchedulerState + final_alpha_cumprod: jnp.ndarray + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + + @classmethod + def create( + cls, + common: CommonSchedulerState, + final_alpha_cumprod: jnp.ndarray, + init_noise_sigma: jnp.ndarray, + timesteps: jnp.ndarray, + ): + return cls( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + +@dataclass +class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput): + state: DDIMSchedulerState + + +class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising + diffusion probabilistic models (DDPMs) with non-Markovian guidance. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2010.02502 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + clip_sample (`bool`, default `True`): + option to clip predicted sample between for numerical stability. The clip range is determined by + `clip_sample_range`. + clip_sample_range (`float`, default `1.0`): + the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, default `True`): + each diffusion step uses the value of alphas product at that step and at the previous one. For the final + step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the value of alpha at step 0. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, default `epsilon`): + indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`. + `v-prediction` is not supported for this scheduler. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. 
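+    Unlike the PyTorch `DDIMScheduler`, mutable values such as the timestep schedule are not stored on the
+    scheduler itself but in an explicit `DDIMSchedulerState` created by `create_state` and passed through
+    `set_timesteps` and `step`.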
+ """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + clip_sample: bool = True, + clip_sample_range: float = 1.0, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDIMSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + final_alpha_cumprod = ( + jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] + ) + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return DDIMSchedulerState.create( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def scale_model_input( + self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def set_timesteps( + self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> DDIMSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`DDIMSchedulerState`): + the `FlaxDDIMScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + """ + step_ratio = self.config.num_train_timesteps // num_inference_steps + # creates integer timesteps by multiplying by ratio + # rounding to avoid issues when num_inference_step is power of 3 + timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + self.config.steps_offset + + return state.replace( + num_inference_steps=num_inference_steps, + timesteps=timesteps, + ) + + def _get_variance(self, state: DDIMSchedulerState, timestep, prev_timestep): + alpha_prod_t = state.common.alphas_cumprod[timestep] + alpha_prod_t_prev = jnp.where( + prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod + ) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def step( + self, + state: DDIMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + eta: float = 0.0, + return_dict: bool = True, + ) -> Union[FlaxDDIMSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. 
Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class + + Returns: + [`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> ฮท + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps + + alphas_cumprod = state.common.alphas_cumprod + final_alpha_cumprod = state.final_alpha_cumprod + + # 2. compute alphas, betas + alpha_prod_t = alphas_cumprod[timestep] + alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], final_alpha_cumprod) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.clip_sample: + pred_original_sample = pred_original_sample.clip( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = self._get_variance(state, timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 6. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if not return_dict: + return (prev_sample, state) + + return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state) + + def add_noise( + self, + state: DDIMSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def get_velocity( + self, + state: DDIMSchedulerState, + sample: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return get_velocity_common(state.common, sample, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddim_inverse.py b/diffusers3/schedulers/scheduling_ddim_inverse.py new file mode 100644 index 0000000000000000000000000000000000000000..6c2352f2c828e1fc92e8a9e0403d621fe841fe8d --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddim_inverse.py @@ -0,0 +1,374 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.schedulers.scheduling_utils import SchedulerMixin +from diffusers.utils import BaseOutput, deprecate + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM +class DDIMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. 
+ + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDIMInverseScheduler(SchedulerMixin, ConfigMixin): + """ + `DDIMInverseScheduler` is the reverse scheduler of [`DDIMScheduler`]. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. 
When this option is `True` the previous alpha product is fixed to 0, otherwise + it uses the alpha value at step `num_train_timesteps - 1`. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + order = 1 + ignore_for_config = ["kwargs"] + _deprecated_kwargs = ["set_alpha_to_zero"] + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + clip_sample_range: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + **kwargs, + ): + if kwargs.get("set_alpha_to_zero", None) is not None: + deprecation_message = ( + "The `set_alpha_to_zero` argument is deprecated. Please use `set_alpha_to_one` instead." + ) + deprecate("set_alpha_to_zero", "1.0.0", deprecation_message, standard_warn=False) + set_alpha_to_one = kwargs["set_alpha_to_zero"] + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in inverted ddim, we are looking into the next alphas_cumprod + # For the initial step, there is no current alphas_cumprod, and the index is out of bounds + # `set_alpha_to_one` decides whether we set this parameter simply to one + # in this case, self.step() just output the predicted noise + # or whether we use the initial alpha used in training the diffusion model. 
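+        # Concretely, for `epsilon` prediction with alpha_prod_t == 1.0 the predicted x_0 equals the input
+        # sample, so the first inversion step simply re-noises the input toward the first target timestep.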
+ self.initial_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64)) + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "leading" and "trailing" corresponds to annotation of Table 1. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)[::-1]).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[DDIMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + The weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`, defaults to `False`): + If `True`, computes "corrected" `model_output` from the clipped predicted original sample. 
Necessary + because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no + clipping has happened, "corrected" `model_output` would coincide with the one provided as input and + `use_clipped_model_output` has no effect. + variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] or + `tuple`. + + Returns: + [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim_inverse.DDIMInverseSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + + """ + # 1. get previous step value (=t+1) + prev_timestep = timestep + timestep = min( + timestep - self.config.num_train_timesteps // self.num_inference_steps, self.config.num_train_timesteps - 1 + ) + + # 2. compute alphas, betas + # change original implementation to exactly match noise levels for analogous forward process + alpha_prod_t = self.alphas_cumprod[timestep] if timestep >= 0 else self.initial_alpha_cumprod + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * pred_epsilon + + # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if not return_dict: + return (prev_sample, pred_original_sample) + return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddim_parallel.py b/diffusers3/schedulers/scheduling_ddim_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf84b694db51c3021ea51e542fc3cb103d04882 --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddim_parallel.py @@ -0,0 +1,643 @@ +# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput +class DDIMParallelSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDIMParallelScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising + diffusion probabilistic models (DDPMs) with non-Markovian guidance. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2010.02502 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + clip_sample (`bool`, default `True`): + option to clip predicted sample for numerical stability. + clip_sample_range (`float`, default `1.0`): + the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, default `True`): + each diffusion step uses the value of alphas product at that step and at the previous one. For the final + step there is no previous alpha. 
When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the value of alpha at step 0. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + thresholding (`bool`, default `False`): + whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). + Note that the thresholding method is unsuitable for latent-space diffusion models (such as + stable-diffusion). + dynamic_thresholding_ratio (`float`, default `0.995`): + the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen + (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. + sample_max_value (`float`, default `1.0`): + the threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, default `"leading"`): + The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample + Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, default `False`): + whether to rescale the betas to have zero terminal SNR (proposed by https://arxiv.org/pdf/2305.08891.pdf). + This can enable the model to generate very bright and dark samples instead of limiting it to samples with + medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + _is_ode_scheduler = True + + @register_to_config + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.__init__ + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
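As a quick, illustrative check of the `rescale_zero_terminal_snr` helper defined a few hunks above (this snippet is not part of the patch), shifting and rescaling the square roots of the cumulative alphas forces the terminal signal-to-noise ratio to zero while leaving the first step unchanged:

```python
# Illustrative check of Algorithm 1 (zero terminal SNR), not part of the diff.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_bar_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()
a0, aT = alphas_bar_sqrt[0].clone(), alphas_bar_sqrt[-1].clone()
alphas_bar_sqrt = (alphas_bar_sqrt - aT) * a0 / (a0 - aT)   # shift last to 0, rescale first back

alphas_bar = alphas_bar_sqrt ** 2
print(alphas_bar[-1])   # 0.0 -> terminal SNR = alpha_bar_T / (1 - alpha_bar_T) = 0
print(alphas_bar[0])    # unchanged first step (~1 - beta_0)
```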
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def _get_variance(self, timestep, prev_timestep=None): + if prev_timestep is None: + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def _batch_get_variance(self, t, prev_t): + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] + alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.set_timesteps + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[DDIMParallelSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. 
Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + current instance of sample being created by diffusion process. + eta (`float`): weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped + predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when + `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would + coincide with the one provided as input and `use_clipped_model_output` will have not effect. + generator: random number generator. + variance_noise (`torch.Tensor`): instead of generating noise for the variance using `generator`, we + can directly provide the noise for the variance itself. This is useful for methods such as + CycleDiffusion. (https://arxiv.org/abs/2210.05559) + return_dict (`bool`): option for returning tuple rather than DDIMParallelSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> ฮท + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. 
Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = self._get_variance(timestep, prev_timestep) + std_dev_t = eta * variance ** (0.5) + + if use_clipped_model_output: + # the pred_epsilon is always re-derived from the clipped x_0 in Glide + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + if eta > 0: + if variance_noise is not None and generator is not None: + raise ValueError( + "Cannot pass both generator and variance_noise. Please make sure that either `generator` or" + " `variance_noise` stays `None`." + ) + + if variance_noise is None: + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + variance = std_dev_t * variance_noise + + prev_sample = prev_sample + variance + + if not return_dict: + return (prev_sample,) + + return DDIMParallelSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def batch_step_no_noise( + self, + model_output: torch.Tensor, + timesteps: List[int], + sample: torch.Tensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + ) -> torch.Tensor: + """ + Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once. + Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise + is pre-sampled by the pipeline. + + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): direct output from learned diffusion model. + timesteps (`List[int]`): + current discrete timesteps in the diffusion chain. This is now a list of integers. + sample (`torch.Tensor`): + current instance of sample being created by diffusion process. + eta (`float`): weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped + predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when + `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would + coincide with the one provided as input and `use_clipped_model_output` will have not effect. + + Returns: + `torch.Tensor`: sample tensor at previous timestep. 
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + assert eta == 0.0 + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> ฮท + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + t = timesteps + prev_t = t - self.config.num_train_timesteps // self.num_inference_steps + + t = t.view(-1, *([1] * (model_output.ndim - 1))) + prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) + + # 1. compute alphas, betas + self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) + self.final_alpha_cumprod = self.final_alpha_cumprod.to(model_output.device) + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] + alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + # 4. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 5. compute variance: "sigma_t(ฮท)" -> see formula (16) + # ฯƒ_t = sqrt((1 โˆ’ ฮฑ_tโˆ’1)/(1 โˆ’ ฮฑ_t)) * sqrt(1 โˆ’ ฮฑ_t/ฮฑ_tโˆ’1) + variance = self._batch_get_variance(t, prev_t).to(model_output.device).view(*alpha_prod_t_prev.shape) + std_dev_t = eta * variance ** (0.5) + + if use_clipped_model_output: + # the pred_epsilon is always re-derived from the clipped x_0 in Glide + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + + # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon + + # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction + + return prev_sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddpm.py b/diffusers3/schedulers/scheduling_ddpm.py new file mode 100644 index 0000000000000000000000000000000000000000..81a770edf635aa41e6a7bd8b6532f378845eee1e --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddpm.py @@ -0,0 +1,560 @@ +# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
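Both the parallel DDIM scheduler that closes above and the DDPM scheduler introduced in this file expose the same `add_noise` helper, which implements the closed-form forward process q(x_t | x_0). A minimal, self-contained sketch (shapes chosen purely for illustration, not part of the patch):

```python
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise   (illustrative)
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(1, 4, 64, 64)          # a "clean" latent; the shape is arbitrary here
noise = torch.randn_like(x0)
t = torch.tensor([500])

sqrt_ab = (alphas_cumprod[t] ** 0.5).view(-1, 1, 1, 1)
sqrt_one_minus_ab = ((1 - alphas_cumprod[t]) ** 0.5).view(-1, 1, 1, 1)
x_t = sqrt_ab * x0 + sqrt_one_minus_ab * noise   # same update as add_noise above
```

The same coefficients reappear in `get_velocity`, just with the roles of sample and noise swapped.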
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +class DDPMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. 
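A small, illustrative check (not part of the patch) of the `squaredcos_cap_v2` ("Glide cosine") schedule built by `betas_for_alpha_bar` above, with the default `max_beta=0.999` cap:

```python
# Recomputes the cosine alpha_bar schedule exactly as in betas_for_alpha_bar above.
import math
import torch

def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

T = 1000
betas = torch.tensor(
    [min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999) for i in range(T)]
)
print(betas[0], betas[-1])   # very small at t = 0, capped at 0.999 at the final step
```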
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDPMScheduler(SchedulerMixin, ConfigMixin): + """ + `DDPMScheduler` explores the connections between denoising score matching and Langevin dynamics sampling. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + An array of betas to pass directly to the constructor without using `beta_start` and `beta_end`. + variance_type (`str`, defaults to `"fixed_small"`): + Clip the variance when adding noise to the denoised sample. Choose from `fixed_small`, `fixed_small_log`, + `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + elif beta_schedule == "sigmoid": + # GeoDiff sigmoid schedule + betas = torch.linspace(-6, 6, num_train_timesteps) + self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.custom_timesteps = False + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.variance_type = variance_type + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, + `num_inference_steps` must be `None`. 
+ + """ + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + if timesteps is not None: + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.custom_timesteps = True + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + self.custom_timesteps = False + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def _get_variance(self, t, predicted_variance=None, variance_type=None): + prev_t = self.previous_timestep(t) + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev + + # For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t + + # we always take the log of variance, so clamp it to ensure it's not 0 + variance = torch.clamp(variance, min=1e-20) + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small": + variance = variance + # for rl-diffuser https://arxiv.org/abs/2205.09991 + elif variance_type == "fixed_small_log": + variance = torch.log(variance) + variance = torch.exp(0.5 * variance) + elif variance_type == "fixed_large": + variance = current_beta_t + elif variance_type == "fixed_large_log": + # Glide max_log + variance = torch.log(current_beta_t) + elif variance_type == "learned": + return predicted_variance + elif variance_type == "learned_range": + min_log = torch.log(variance) + max_log = torch.log(current_beta_t) + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[DDPMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. 
This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + t = timestep + + prev_t = self.previous_timestep(t) + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + current_alpha_t = alpha_prod_t / alpha_prod_t_prev + current_beta_t = 1 - current_alpha_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for the DDPMScheduler." + ) + + # 3. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t + current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample ยต_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. 
Add noise + variance = 0 + if t > 0: + device = model_output.device + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=device, dtype=model_output.dtype + ) + if self.variance_type == "fixed_small_log": + variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise + elif self.variance_type == "learned_range": + variance = self._get_variance(t, predicted_variance=predicted_variance) + variance = torch.exp(0.5 * variance) * variance_noise + else: + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git a/diffusers3/schedulers/scheduling_ddpm_flax.py 
b/diffusers3/schedulers/scheduling_ddpm_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..d06a171159ee765ff23e50ea2baa94ae62e24dd4 --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddpm_flax.py @@ -0,0 +1,303 @@ +# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, + get_velocity_common, +) + + +@flax.struct.dataclass +class DDPMSchedulerState: + common: CommonSchedulerState + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + + @classmethod + def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray): + return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps) + + +@dataclass +class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput): + state: DDPMSchedulerState + + +class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, + `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. + clip_sample (`bool`, default `True`): + option to clip predicted sample between -1 and 1 for numerical stability. + prediction_type (`str`, default `epsilon`): + indicates whether the model predicts the noise (epsilon), or the samples. One of `epsilon`, `sample`. 
+ `v-prediction` is not supported for this scheduler. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. + """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return DDPMSchedulerState.create( + common=common, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def scale_model_input( + self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def set_timesteps( + self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> DDPMSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`DDIMSchedulerState`): + the `FlaxDDPMScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
+ """ + + step_ratio = self.config.num_train_timesteps // num_inference_steps + # creates integer timesteps by multiplying by ratio + # rounding to avoid issues when num_inference_step is power of 3 + timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1] + + return state.replace( + num_inference_steps=num_inference_steps, + timesteps=timesteps, + ) + + def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None): + alpha_prod_t = state.common.alphas_cumprod[t] + alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) + + # For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small": + variance = jnp.clip(variance, a_min=1e-20) + # for rl-diffuser https://arxiv.org/abs/2205.09991 + elif variance_type == "fixed_small_log": + variance = jnp.log(jnp.clip(variance, a_min=1e-20)) + elif variance_type == "fixed_large": + variance = state.common.betas[t] + elif variance_type == "fixed_large_log": + # Glide max_log + variance = jnp.log(state.common.betas[t]) + elif variance_type == "learned": + return predicted_variance + elif variance_type == "learned_range": + min_log = variance + max_log = state.common.betas[t] + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + def step( + self, + state: DDPMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + key: Optional[jax.Array] = None, + return_dict: bool = True, + ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`DDPMSchedulerState`): the `FlaxDDPMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + key (`jax.Array`): a PRNG key. + return_dict (`bool`): option for returning tuple rather than FlaxDDPMSchedulerOutput class + + Returns: + [`FlaxDDPMSchedulerOutput`] or `tuple`: [`FlaxDDPMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + if key is None: + key = jax.random.key(0) + + if ( + len(model_output.shape) > 1 + and model_output.shape[1] == sample.shape[1] * 2 + and self.config.variance_type in ["learned", "learned_range"] + ): + model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = state.common.alphas_cumprod[t] + alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype)) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # 2. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` " + " for the FlaxDDPMScheduler." + ) + + # 3. Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = jnp.clip(pred_original_sample, -1, 1) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * state.common.betas[t]) / beta_prod_t + current_sample_coeff = state.common.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample ยต_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + def random_variance(): + split_key = jax.random.split(key, num=1)[0] + noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype) + return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise + + variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype)) + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample, state) + + return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state) + + def add_noise( + self, + state: DDPMSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def get_velocity( + self, + state: DDPMSchedulerState, + sample: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return get_velocity_common(state.common, sample, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ddpm_parallel.py b/diffusers3/schedulers/scheduling_ddpm_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..5dfcf3c17a2fcda2abd9916febe1aad65fed36f7 --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddpm_parallel.py @@ -0,0 +1,651 @@ +# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
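# --- Editor's illustrative sketch (not part of this patch) ---------------------
# A minimal usage example for the FlaxDDPMScheduler defined above in
# scheduling_ddpm_flax.py: create the immutable scheduler state, set the
# inference timesteps, and run the reverse-diffusion loop. The `apply_unet`
# stand-in below is hypothetical (a real pipeline would call a trained Flax
# UNet); the import path assumes this repository's `diffusers3` package layout.
import jax
import jax.numpy as jnp

from diffusers3.schedulers.scheduling_ddpm_flax import FlaxDDPMScheduler


def apply_unet(x, t):
    # Hypothetical stand-in for a trained denoiser; "predicts" epsilon for sample x at step t.
    return jnp.zeros_like(x)


scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 64, 64))   # start the chain from pure noise

for t in state.timesteps:
    model_output = apply_unet(sample, t)
    key, step_key = jax.random.split(key)          # fresh PRNG key for this step's noise
    out = scheduler.step(state, model_output, t, sample, key=step_key)
    sample, state = out.prev_sample, out.state
# -------------------------------------------------------------------------------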
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput +class DDPMParallelSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. 
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DDPMParallelScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, `squaredcos_cap_v2` or `sigmoid`. + trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, + `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. + clip_sample (`bool`, default `True`): + option to clip predicted sample for numerical stability. + clip_sample_range (`float`, default `1.0`): + the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + thresholding (`bool`, default `False`): + whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). + Note that the thresholding method is unsuitable for latent-space diffusion models (such as + stable-diffusion). + dynamic_thresholding_ratio (`float`, default `0.995`): + the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen + (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. + sample_max_value (`float`, default `1.0`): + the threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, default `"leading"`): + The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample + Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. 
Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + _is_ode_scheduler = False + + @register_to_config + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.__init__ + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + variance_type: str = "fixed_small", + clip_sample: bool = True, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + elif beta_schedule == "sigmoid": + # GeoDiff sigmoid schedule + betas = torch.linspace(-6, 6, num_train_timesteps) + self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.custom_timesteps = False + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.variance_type = variance_type + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
+ timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, + `num_inference_steps` must be `None`. + + """ + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + if timesteps is not None: + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.custom_timesteps = True + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + self.custom_timesteps = False + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._get_variance + def _get_variance(self, t, predicted_variance=None, variance_type=None): + prev_t = self.previous_timestep(t) + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev + + # For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t + + # we always take the log of variance, so clamp it to ensure it's not 0 + variance = torch.clamp(variance, min=1e-20) + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small": + variance = variance + # for rl-diffuser https://arxiv.org/abs/2205.09991 + elif variance_type == "fixed_small_log": + variance = torch.log(variance) + variance = torch.exp(0.5 * variance) + elif variance_type == "fixed_large": + variance = current_beta_t + elif variance_type == "fixed_large_log": + # Glide max_log + variance = torch.log(current_beta_t) + elif variance_type == "learned": + return predicted_variance + elif variance_type == "learned_range": + min_log = torch.log(variance) + max_log = torch.log(current_beta_t) + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[DDPMParallelSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than DDPMParallelSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.DDPMParallelSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.DDPMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. + When returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + prev_t = self.previous_timestep(t) + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + current_alpha_t = alpha_prod_t / alpha_prod_t_prev + current_beta_t = 1 - current_alpha_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for the DDPMScheduler." + ) + + # 3. 
Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t + current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample ยต_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + variance = 0 + if t > 0: + device = model_output.device + variance_noise = randn_tensor( + model_output.shape, generator=generator, device=device, dtype=model_output.dtype + ) + if self.variance_type == "fixed_small_log": + variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise + elif self.variance_type == "learned_range": + variance = self._get_variance(t, predicted_variance=predicted_variance) + variance = torch.exp(0.5 * variance) * variance_noise + else: + variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return DDPMParallelSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def batch_step_no_noise( + self, + model_output: torch.Tensor, + timesteps: List[int], + sample: torch.Tensor, + ) -> torch.Tensor: + """ + Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once. + Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise + is pre-sampled by the pipeline. + + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): direct output from learned diffusion model. + timesteps (`List[int]`): + current discrete timesteps in the diffusion chain. This is now a list of integers. + sample (`torch.Tensor`): + current instance of sample being created by diffusion process. + + Returns: + `torch.Tensor`: sample tensor at previous timestep. + """ + t = timesteps + num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + prev_t = t - self.config.num_train_timesteps // num_inference_steps + + t = t.view(-1, *([1] * (model_output.ndim - 1))) + prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + pass + + # 1. 
compute alphas, betas + self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] + alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) + + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + current_alpha_t = alpha_prod_t / alpha_prod_t_prev + current_beta_t = 1 - current_alpha_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for the DDPMParallelScheduler." + ) + + # 3. Clip or threshold "predicted x_0" + if self.config.thresholding: + pred_original_sample = self._threshold_sample(pred_original_sample) + elif self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t + current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample ยต_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + return pred_prev_sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = 
self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git a/diffusers3/schedulers/scheduling_ddpm_wuerstchen.py b/diffusers3/schedulers/scheduling_ddpm_wuerstchen.py new file mode 100644 index 0000000000000000000000000000000000000000..71b5669b052864647998959d2dc48453dfdcbb73 --- /dev/null +++ b/diffusers3/schedulers/scheduling_ddpm_wuerstchen.py @@ -0,0 +1,230 @@ +# Copyright (c) 2022 Pablo Pernรญas MIT License +# Copyright 2024 UC Berkeley Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +@dataclass +class DDPMWuerstchenSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.Tensor + + +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. 
+ + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DDPMWuerstchenScheduler(SchedulerMixin, ConfigMixin): + """ + Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and + Langevin dynamics sampling. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2006.11239 + + Args: + scaler (`float`): .... + s (`float`): .... + """ + + @register_to_config + def __init__( + self, + scaler: float = 1.0, + s: float = 0.008, + ): + self.scaler = scaler + self.s = torch.tensor([s]) + self._init_alpha_cumprod = torch.cos(self.s / (1 + self.s) * torch.pi * 0.5) ** 2 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + def _alpha_cumprod(self, t, device): + if self.scaler > 1: + t = 1 - (1 - t) ** self.scaler + elif self.scaler < 1: + t = t**self.scaler + alpha_cumprod = torch.cos( + (t + self.s.to(device)) / (1 + self.s.to(device)) * torch.pi * 0.5 + ) ** 2 / self._init_alpha_cumprod.to(device) + return alpha_cumprod.clamp(0.0001, 0.9999) + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): input sample + timestep (`int`, optional): current timestep + + Returns: + `torch.Tensor`: scaled input sample + """ + return sample + + def set_timesteps( + self, + num_inference_steps: int = None, + timesteps: Optional[List[int]] = None, + device: Union[str, torch.device] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + num_inference_steps (`Dict[float, int]`): + the number of diffusion steps used when generating samples with a pre-trained model. If passed, then + `timesteps` must be `None`. + device (`str` or `torch.device`, optional): + the device to which the timesteps are moved to. 
{2 / 3: 20, 0.0: 10} + """ + if timesteps is None: + timesteps = torch.linspace(1.0, 0.0, num_inference_steps + 1, device=device) + if not isinstance(timesteps, torch.Tensor): + timesteps = torch.Tensor(timesteps).to(device) + self.timesteps = timesteps + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[DDPMWuerstchenSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than DDPMWuerstchenSchedulerOutput class + + Returns: + [`DDPMWuerstchenSchedulerOutput`] or `tuple`: [`DDPMWuerstchenSchedulerOutput`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + dtype = model_output.dtype + device = model_output.device + t = timestep + + prev_t = self.previous_timestep(t) + + alpha_cumprod = self._alpha_cumprod(t, device).view(t.size(0), *[1 for _ in sample.shape[1:]]) + alpha_cumprod_prev = self._alpha_cumprod(prev_t, device).view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) + alpha = alpha_cumprod / alpha_cumprod_prev + + mu = (1.0 / alpha).sqrt() * (sample - (1 - alpha) * model_output / (1 - alpha_cumprod).sqrt()) + + std_noise = randn_tensor(mu.shape, generator=generator, device=model_output.device, dtype=model_output.dtype) + std = ((1 - alpha) * (1.0 - alpha_cumprod_prev) / (1.0 - alpha_cumprod)).sqrt() * std_noise + pred = mu + std * (prev_t != 0).float().view(prev_t.size(0), *[1 for _ in sample.shape[1:]]) + + if not return_dict: + return (pred.to(dtype),) + + return DDPMWuerstchenSchedulerOutput(prev_sample=pred.to(dtype)) + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + device = original_samples.device + dtype = original_samples.dtype + alpha_cumprod = self._alpha_cumprod(timesteps, device=device).view( + timesteps.size(0), *[1 for _ in original_samples.shape[1:]] + ) + noisy_samples = alpha_cumprod.sqrt() * original_samples + (1 - alpha_cumprod).sqrt() * noise + return noisy_samples.to(dtype=dtype) + + def __len__(self): + return self.config.num_train_timesteps + + def previous_timestep(self, timestep): + index = (self.timesteps - timestep[0]).abs().argmin().item() + prev_t = self.timesteps[index + 1][None].expand(timestep.shape[0]) + return prev_t diff --git a/diffusers3/schedulers/scheduling_deis_multistep.py b/diffusers3/schedulers/scheduling_deis_multistep.py new file mode 100644 index 0000000000000000000000000000000000000000..11073ce491d32c81f323399b061e524aaf7eb693 --- /dev/null +++ b/diffusers3/schedulers/scheduling_deis_multistep.py @@ -0,0 +1,790 @@ +# Copyright 2024 FLAIR Lab and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
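# --- Editor's illustrative sketch (not part of this patch) ---------------------
# A minimal usage example for the continuous-time DDPMWuerstchenScheduler defined
# above in scheduling_ddpm_wuerstchen.py: its timesteps live in [0, 1] and `step`
# expects the timestep expanded to the batch size. The `denoiser` stand-in is
# hypothetical (a real pipeline would call the Wuerstchen prior/decoder); the
# import path assumes this repository's `diffusers3` package layout.
import torch

from diffusers3.schedulers.scheduling_ddpm_wuerstchen import DDPMWuerstchenScheduler


def denoiser(x, t):
    # Hypothetical stand-in for the trained model; "predicts" epsilon for sample x at ratio t.
    return torch.zeros_like(x)


scheduler = DDPMWuerstchenScheduler(scaler=1.0, s=0.008)
scheduler.set_timesteps(num_inference_steps=12, device="cpu")

sample = torch.randn(1, 16, 24, 24)               # start the chain from pure noise
for t in scheduler.timesteps[:-1]:                # the final entry is t == 0.0
    ratio = t.expand(sample.size(0))              # step() indexes timestep[0], so pass a batched timestep
    model_output = denoiser(sample, ratio)
    sample = scheduler.step(model_output, ratio, sample).prev_sample
# -------------------------------------------------------------------------------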
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: check https://arxiv.org/abs/2204.13902 and https://github.com/qsh-zh/deis for more info +# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DEISMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `DEISMultistepScheduler` is a fast high order solver for diffusion ordinary differential equations (ODEs). + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DEIS order which can be `1` or `2` or `3`. 
It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + algorithm_type (`str`, defaults to `deis`): + The algorithm type for the solver. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[np.ndarray] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "deis", + solver_type: str = "logrho", + lower_order_final: bool = True, + use_karras_sigmas: Optional[bool] = False, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DEIS + if algorithm_type not in ["deis"]: + if algorithm_type in ["dpmsolver", "dpmsolver++"]: + self.register_to_config(algorithm_type="deis") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + if solver_type not in ["logrho"]: + if solver_type in ["midpoint", "heun", "bh1", "bh2"]: + self.register_to_config(solver_type="logrho") + else: + raise NotImplementedError(f"solver type {solver_type} is not implemented for {self.__class__}") + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DEIS algorithm needs. + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. 
+ """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + if self.config.prediction_type == "epsilon": + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DEISMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + if self.config.algorithm_type == "deis": + return (sample - alpha_t * x0_pred) / sigma_t + else: + raise NotImplementedError("only support log-rho multistep deis now") + + def deis_first_order_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the first-order DEIS (equivalent to DDIM). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "deis": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + else: + raise NotImplementedError("only support log-rho multistep deis now") + return x_t + + def multistep_deis_second_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the second-order multistep DEIS. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. 
+ sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + rho_t, rho_s0, rho_s1 = sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1 + + if self.config.algorithm_type == "deis": + + def ind_fn(t, b, c): + # Integrate[(log(t) - log(c)) / (log(b) - log(c)), {t}] + return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c)) + + coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1) + coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0) + + x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1) + return x_t + else: + raise NotImplementedError("only support log-rho multistep deis now") + + def multistep_deis_third_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the third-order multistep DEIS. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
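# --- Illustrative check (standalone, not part of the scheduler file above): ind_fn in the
# second-order DEIS update is the closed-form antiderivative of the log-space interpolation
# weight (log t - log c) / (log b - log c). The b, c values below are hypothetical rho values.
import numpy as np

def ind_fn(t, b, c):
    return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c))

b, c = 0.8, 1.5
t = np.linspace(0.5, 2.0, 10001)
weight = (np.log(t) - np.log(c)) / (np.log(b) - np.log(c))
numeric_derivative = np.gradient(ind_fn(t, b, c), t)
print(np.abs(numeric_derivative[1:-1] - weight[1:-1]).max())  # ~1e-7: the two agree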
+ """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + rho_t, rho_s0, rho_s1, rho_s2 = ( + sigma_t / alpha_t, + sigma_s0 / alpha_s0, + sigma_s1 / alpha_s1, + sigma_s2 / alpha_s2, + ) + + if self.config.algorithm_type == "deis": + + def ind_fn(t, b, c, d): + # Integrate[(log(t) - log(c))(log(t) - log(d)) / (log(b) - log(c))(log(b) - log(d)), {t}] + numerator = t * ( + np.log(c) * (np.log(d) - np.log(t) + 1) + - np.log(d) * np.log(t) + + np.log(d) + + np.log(t) ** 2 + - 2 * np.log(t) + + 2 + ) + denominator = (np.log(b) - np.log(c)) * (np.log(b) - np.log(d)) + return numerator / denominator + + coef1 = ind_fn(rho_t, rho_s0, rho_s1, rho_s2) - ind_fn(rho_s0, rho_s0, rho_s1, rho_s2) + coef2 = ind_fn(rho_t, rho_s1, rho_s2, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s2, rho_s0) + coef3 = ind_fn(rho_t, rho_s2, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s2, rho_s0, rho_s1) + + x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1 + coef3 * m2) + + return x_t + else: + raise NotImplementedError("only support log-rho multistep deis now") + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. 
+ """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DEIS. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + lower_order_final = ( + (self.step_index == len(self.timesteps) - 1) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.deis_first_order_update(model_output, sample=sample) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_deis_second_order_update(self.model_outputs, sample=sample) + else: + prev_sample = self.multistep_deis_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_dpm_cogvideox.py b/diffusers3/schedulers/scheduling_dpm_cogvideox.py new file mode 100644 index 0000000000000000000000000000000000000000..1a2c7be7115bc1188de1a6dc399e91668202681c --- /dev/null +++ b/diffusers3/schedulers/scheduling_dpm_cogvideox.py @@ -0,0 +1,489 @@ +# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM +class DDIMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. 
+ + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +def rescale_zero_terminal_snr(alphas_cumprod): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + + return alphas_bar + + +class CogVideoXDPMScheduler(SchedulerMixin, ConfigMixin): + """ + `DDIMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with + non-Markovian guidance. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. 
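# --- Illustrative check (standalone, not part of the scheduler file above): this variant of
# rescale_zero_terminal_snr rescales alphas_cumprod so the last value becomes exactly zero
# (terminal SNR = 0, i.e. the last timestep is pure noise) while the first value is preserved.
# The beta schedule below mirrors the scaled_linear defaults used further down in this file.
import torch

def rescale_zero_terminal_snr(alphas_cumprod: torch.Tensor) -> torch.Tensor:
    alphas_bar_sqrt = alphas_cumprod.sqrt()
    a0, aT = alphas_bar_sqrt[0].clone(), alphas_bar_sqrt[-1].clone()
    alphas_bar_sqrt = (alphas_bar_sqrt - aT) * a0 / (a0 - aT)
    return alphas_bar_sqrt**2

betas = torch.linspace(0.00085**0.5, 0.0120**0.5, 1000) ** 2
ac = torch.cumprod(1.0 - betas, dim=0)
ac_rescaled = rescale_zero_terminal_snr(ac)
print(ac[-1].item(), ac_rescaled[-1].item())   # tiny but nonzero vs exactly 0.0
print(torch.allclose(ac[0], ac_rescaled[0]))   # True: first step unchanged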
+ beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, + beta_end: float = 0.0120, + beta_schedule: str = "scaled_linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + clip_sample: bool = True, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + clip_sample_range: float = 1.0, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + rescale_betas_zero_snr: bool = False, + snr_shift_scale: float = 3.0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float64) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # Modify: SNR shift following SD3 + self.alphas_cumprod = self.alphas_cumprod / (snr_shift_scale + (1 - snr_shift_scale) * self.alphas_cumprod) + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.alphas_cumprod = rescale_zero_terminal_snr(self.alphas_cumprod) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + + def _get_variance(self, timestep, prev_timestep): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) + .round()[::-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'." + ) + + self.timesteps = torch.from_numpy(timesteps).to(device) + + def get_variables(self, alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back=None): + lamb = ((alpha_prod_t / (1 - alpha_prod_t)) ** 0.5).log() + lamb_next = ((alpha_prod_t_prev / (1 - alpha_prod_t_prev)) ** 0.5).log() + h = lamb_next - lamb + + if alpha_prod_t_back is not None: + lamb_previous = ((alpha_prod_t_back / (1 - alpha_prod_t_back)) ** 0.5).log() + h_last = lamb - lamb_previous + r = h_last / h + return h, r, lamb, lamb_next + else: + return h, None, lamb, lamb_next + + def get_mult(self, h, r, alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back): + mult1 = ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** 0.5 * (-h).exp() + mult2 = (-2 * h).expm1() * alpha_prod_t_prev**0.5 + + if alpha_prod_t_back is not None: + mult3 = 1 + 1 / (2 * r) + mult4 = 1 / (2 * r) + return mult1, mult2, mult3, mult4 + else: + return mult1, mult2 + + def step( + self, + model_output: torch.Tensor, + old_pred_original_sample: torch.Tensor, + timestep: int, + timestep_back: int, + sample: torch.Tensor, + eta: float = 0.0, + use_clipped_model_output: bool = False, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = False, + ) -> Union[DDIMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + The weight of noise for added noise in diffusion step. + use_clipped_model_output (`bool`, defaults to `False`): + If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary + because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no + clipping has happened, "corrected" `model_output` would coincide with the one provided as input and + `use_clipped_model_output` has no effect. + generator (`torch.Generator`, *optional*): + A random number generator. 
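# --- Illustrative sketch (standalone, not part of the scheduler file above): get_variables
# works in half-log-SNR space, lamb = log(sqrt(alpha_bar / (1 - alpha_bar))), and h is the
# gap between the next and current values. The alpha_prod values below are hypothetical.
import torch

def half_log_snr(alpha_bar: torch.Tensor) -> torch.Tensor:
    return ((alpha_bar / (1 - alpha_bar)) ** 0.5).log()

alpha_prod_t = torch.tensor(0.20)        # current step (noisier)
alpha_prod_t_prev = torch.tensor(0.60)   # previous-in-time / next-denoised step (cleaner)

lamb = half_log_snr(alpha_prod_t)
lamb_next = half_log_snr(alpha_prod_t_prev)
h = lamb_next - lamb                     # positive: each step increases the SNR
print(lamb.item(), lamb_next.item(), h.item())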
+ variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf + # Ideally, read DDIM paper in-detail understanding + + # Notation ( -> + # - pred_noise_t -> e_theta(x_t, t) + # - pred_original_sample -> f_theta(x_t, t) or x_0 + # - std_dev_t -> sigma_t + # - eta -> ฮท + # - pred_sample_direction -> "direction pointing to x_t" + # - pred_prev_sample -> "x_t-1" + + # 1. get previous step value (=t-1) + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + alpha_prod_t_back = self.alphas_cumprod[timestep_back] if timestep_back is not None else None + + beta_prod_t = 1 - alpha_prod_t + + # 3. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + # To make style tests pass, commented out `pred_epsilon` as it is an unused variable + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + # pred_epsilon = model_output + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + # pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + elif self.config.prediction_type == "v_prediction": + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + # pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction`" + ) + + h, r, lamb, lamb_next = self.get_variables(alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back) + mult = list(self.get_mult(h, r, alpha_prod_t, alpha_prod_t_prev, alpha_prod_t_back)) + mult_noise = (1 - alpha_prod_t_prev) ** 0.5 * (1 - (-2 * h).exp()) ** 0.5 + + noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) + prev_sample = mult[0] * sample - mult[1] * pred_original_sample + mult_noise * noise + + if old_pred_original_sample is None or prev_timestep < 0: + # Save a network evaluation if all noise levels are 0 or on the first step + return prev_sample, pred_original_sample + else: + denoised_d = mult[2] * pred_original_sample - mult[3] * old_pred_original_sample + noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) + x_advanced = mult[0] * sample - mult[1] * denoised_d + mult_noise * noise + + prev_sample = x_advanced + + if not return_dict: + return (prev_sample, 
pred_original_sample) + + return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_dpmsolver_multistep.py b/diffusers3/schedulers/scheduling_dpmsolver_multistep.py new file mode 100644 index 0000000000000000000000000000000000000000..4472a06c342843a95b4c317b43b9f684e50371ed --- /dev/null +++ b/diffusers3/schedulers/scheduling_dpmsolver_multistep.py @@ -0,0 +1,1059 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
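# --- Illustrative check (standalone, not part of the files above or below): get_velocity
# builds the v-prediction target v = sqrt(alpha_bar)*noise - sqrt(1-alpha_bar)*x0; together
# with add_noise, x0 is recovered as sqrt(alpha_bar)*x_t - sqrt(1-alpha_bar)*v, which is the
# v_prediction branch used in step(). Tensors below are hypothetical.
import torch

alpha_bar = torch.tensor(0.7)
x0 = torch.randn(2, 4, 8, 8)
noise = torch.randn_like(x0)

sqrt_a, sqrt_1ma = alpha_bar.sqrt(), (1 - alpha_bar).sqrt()
x_t = sqrt_a * x0 + sqrt_1ma * noise      # add_noise
v = sqrt_a * noise - sqrt_1ma * x0        # get_velocity

x0_rec = sqrt_a * x_t - sqrt_1ma * v
print(torch.allclose(x0_rec, x0, atol=1e-6))  # True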
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. 
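# --- Illustrative check (standalone, not part of this file): betas_for_alpha_bar above
# discretizes a continuous alpha_bar(t); with the default cosine transform, the cumulative
# product of (1 - beta) tracks cos((t + 0.008)/1.008 * pi/2)**2 at the grid points
# (up to the max_beta clamp near t = 1).
import math
import torch

def alpha_bar_fn(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

T = 1000
betas = torch.tensor(
    [min(1 - alpha_bar_fn((i + 1) / T) / alpha_bar_fn(i / T), 0.999) for i in range(T)]
)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
print(alphas_cumprod[499].item(), alpha_bar_fn(500 / T))  # nearly equal mid-schedule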
+ beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The + `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) + paper, and the `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + use_lu_lambdas (`bool`, *optional*, defaults to `False`): + Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during + the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of + `lambda(t)`. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. 
If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + use_lu_lambdas: Optional[bool] = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." + ) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps( + self, + num_inference_steps: int = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary timesteps schedule. 
If `None`, timesteps will be generated + based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` and `sigmas` + must be `None`, and `timestep_spacing` attribute will be ignored. + """ + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.") + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + if timesteps is not None and self.config.use_karras_sigmas: + raise ValueError("Cannot use `timesteps` with `config.use_karras_sigmas = True`") + if timesteps is not None and self.config.use_lu_lambdas: + raise ValueError("Cannot use `timesteps` with `config.use_lu_lambdas = True`") + + if timesteps is not None: + timesteps = np.array(timesteps).astype(np.int64) + else: + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, last_timestep - 1, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + ) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + if self.config.use_karras_sigmas: + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + elif self.config.use_lu_lambdas: + lambdas = np.flip(log_sigmas.copy()) + lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) + sigmas = np.exp(lambdas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def _convert_to_lu(self, in_lambdas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Lu et al. 
(2022).""" + + lambda_min: float = in_lambdas[-1].item() + lambda_max: float = in_lambdas[0].item() + + rho = 1.0 # 1.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = lambda_min ** (1 / rho) + max_inv_rho = lambda_max ** (1 / rho) + lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return lambdas + + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. 
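                # Editor's note (illustrative annotation, not upstream code): both branches
                # of this method rely on the VP parameterization
                #     x_t = alpha_t * x0 + sigma_t * eps,   with alpha_t**2 + sigma_t**2 == 1,
                # so the two model-output types are interchangeable:
                #     x0  = (x_t - sigma_t * eps) / alpha_t   # data prediction (DPM-Solver++)
                #     eps = (x_t - alpha_t * x0) / sigma_t    # noise prediction (DPM-Solver)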
+ if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + def dpm_solver_first_order_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + x_t = ( + (alpha_t / alpha_s) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + noise: 
Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + if self.config.solver_type 
== "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * (torch.exp(h) - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + 
schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`LEdits++`]. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
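        Example (editor's sketch, not part of the upstream docstring; `unet` is assumed to
        be a noise-prediction model and `latents` an initial noise tensor):

            scheduler.set_timesteps(num_inference_steps=20, device=latents.device)
            sample = latents * scheduler.init_noise_sigma
            for t in scheduler.timesteps:
                model_input = scheduler.scale_model_input(sample, t)
                model_output = unet(model_input, t).sample
                sample = scheduler.step(model_output, t, sample).prev_sample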
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final + or (self.config.lower_order_final and len(self.timesteps) < 15) + or self.config.final_sigmas_type == "zero" + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32 + ) + elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: + noise = variance_noise.to(device=model_output.device, dtype=torch.float32) + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # Cast sample back to expected dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
+ """ + return sample + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_dpmsolver_multistep_flax.py b/diffusers3/schedulers/scheduling_dpmsolver_multistep_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..3f48066455fb2328ee3acd882157807f684e8917 --- /dev/null +++ b/diffusers3/schedulers/scheduling_dpmsolver_multistep_flax.py @@ -0,0 +1,643 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, +) + + +@flax.struct.dataclass +class DPMSolverMultistepSchedulerState: + common: CommonSchedulerState + alpha_t: jnp.ndarray + sigma_t: jnp.ndarray + lambda_t: jnp.ndarray + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + + # running values + model_outputs: Optional[jnp.ndarray] = None + lower_order_nums: Optional[jnp.int32] = None + prev_timestep: Optional[jnp.int32] = None + cur_sample: Optional[jnp.ndarray] = None + + @classmethod + def create( + cls, + common: CommonSchedulerState, + alpha_t: jnp.ndarray, + sigma_t: jnp.ndarray, + lambda_t: jnp.ndarray, + init_noise_sigma: jnp.ndarray, + timesteps: jnp.ndarray, + ): + return cls( + common=common, + alpha_t=alpha_t, + sigma_t=sigma_t, + lambda_t=lambda_t, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + +@dataclass +class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput): + state: DPMSolverMultistepSchedulerState + + +class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with + the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality + samples, and it can generate quite good samples even in only 10 steps. + + For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095 + + Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We + recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. + + We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space + diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic + thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as + stable-diffusion). + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. 
+ trained_betas (`np.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + solver_order (`int`, default `2`): + the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, default `epsilon`): + indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`, + or `v-prediction`. + thresholding (`bool`, default `False`): + whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). + For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to + use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion + models (such as stable-diffusion). + dynamic_thresholding_ratio (`float`, default `0.995`): + the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen + (https://arxiv.org/abs/2205.11487). + sample_max_value (`float`, default `1.0`): + the threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++`. + algorithm_type (`str`, default `dpmsolver++`): + the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the + algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in + https://arxiv.org/abs/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided + sampling (e.g. stable-diffusion). + solver_type (`str`, default `midpoint`): + the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects + the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are + slightly better, so we recommend to use the `midpoint` type. + lower_order_final (`bool`, default `True`): + whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically + find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. 
+ """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + timestep_spacing: str = "linspace", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # Currently we only support VP-type noise schedule + alpha_t = jnp.sqrt(common.alphas_cumprod) + sigma_t = jnp.sqrt(1 - common.alphas_cumprod) + lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t) + + # settings for DPM-Solver + if self.config.algorithm_type not in ["dpmsolver", "dpmsolver++"]: + raise NotImplementedError(f"{self.config.algorithm_type} is not implemented for {self.__class__}") + if self.config.solver_type not in ["midpoint", "heun"]: + raise NotImplementedError(f"{self.config.solver_type} is not implemented for {self.__class__}") + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return DPMSolverMultistepSchedulerState.create( + common=common, + alpha_t=alpha_t, + sigma_t=sigma_t, + lambda_t=lambda_t, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def set_timesteps( + self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple + ) -> DPMSolverMultistepSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`DPMSolverMultistepSchedulerState`): + the `FlaxDPMSolverMultistepScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + shape (`Tuple`): + the shape of the samples to be generated. + """ + last_timestep = self.config.num_train_timesteps + if self.config.timestep_spacing == "linspace": + timesteps = ( + jnp.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].astype(jnp.int32) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (jnp.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(jnp.int32) + ) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = jnp.arange(last_timestep, 0, -step_ratio).round().copy().astype(jnp.int32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + # initial running values + + model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype) + lower_order_nums = jnp.int32(0) + prev_timestep = jnp.int32(-1) + cur_sample = jnp.zeros(shape, dtype=self.dtype) + + return state.replace( + num_inference_steps=num_inference_steps, + timesteps=timesteps, + model_outputs=model_outputs, + lower_order_nums=lower_order_nums, + prev_timestep=prev_timestep, + cur_sample=cur_sample, + ) + + def convert_model_output( + self, + state: DPMSolverMultistepSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs. + + DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to + discretize an integral of the data prediction model. So we need to first convert the model output to the + corresponding type to match the algorithm. + + Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or + DPM-Solver++ for both noise prediction model and data prediction model. + + Args: + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the converted model output. + """ + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type == "dpmsolver++": + if self.config.prediction_type == "epsilon": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " + " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + # Dynamic thresholding in https://arxiv.org/abs/2205.11487 + dynamic_max_val = jnp.percentile( + jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim)) + ) + dynamic_max_val = jnp.maximum( + dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val) + ) + x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val + return x0_pred + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type == "dpmsolver": + if self.config.prediction_type == "epsilon": + return model_output + elif self.config.prediction_type == "sample": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + epsilon = (sample - alpha_t * model_output) / sigma_t + return epsilon + elif self.config.prediction_type == "v_prediction": + alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] + epsilon = alpha_t * model_output + sigma_t * sample + return epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " + " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." 
+ ) + + def dpm_solver_first_order_update( + self, + state: DPMSolverMultistepSchedulerState, + model_output: jnp.ndarray, + timestep: int, + prev_timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + One step for the first-order DPM-Solver (equivalent to DDIM). + + See https://arxiv.org/abs/2206.00927 for the detailed derivation. + + Args: + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + prev_timestep (`int`): previous discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the sample tensor at the previous timestep. + """ + t, s0 = prev_timestep, timestep + m0 = model_output + lambda_t, lambda_s = state.lambda_t[t], state.lambda_t[s0] + alpha_t, alpha_s = state.alpha_t[t], state.alpha_t[s0] + sigma_t, sigma_s = state.sigma_t[t], state.sigma_t[s0] + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * m0 + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * m0 + return x_t + + def multistep_dpm_solver_second_order_update( + self, + state: DPMSolverMultistepSchedulerState, + model_output_list: jnp.ndarray, + timestep_list: List[int], + prev_timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + One step for the second-order multistep DPM-Solver. + + Args: + model_output_list (`List[jnp.ndarray]`): + direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): previous discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the sample tensor at the previous timestep. 
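        In brief (editor's note): writing lambda = log(alpha) - log(sigma) for the log-SNR,
        the update below uses h = lambda_t - lambda_s0, r0 = h_0 / h, D0 = m0 and
        D1 = (m0 - m1) / r0; for example, the "midpoint" DPM-Solver++ rule is
        x_t = (sigma_t / sigma_s0) * sample - alpha_t * (exp(-h) - 1) * (D0 + 0.5 * D1).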
+ """ + t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2] + m0, m1 = model_output_list[-1], model_output_list[-2] + lambda_t, lambda_s0, lambda_s1 = state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1] + alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] + sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (jnp.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (jnp.exp(h) - 1.0)) * D0 + - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 + ) + return x_t + + def multistep_dpm_solver_third_order_update( + self, + state: DPMSolverMultistepSchedulerState, + model_output_list: jnp.ndarray, + timestep_list: List[int], + prev_timestep: int, + sample: jnp.ndarray, + ) -> jnp.ndarray: + """ + One step for the third-order multistep DPM-Solver. + + Args: + model_output_list (`List[jnp.ndarray]`): + direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): previous discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + + Returns: + `jnp.ndarray`: the sample tensor at the previous timestep. 
+ """ + t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3] + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + lambda_t, lambda_s0, lambda_s1, lambda_s2 = ( + state.lambda_t[t], + state.lambda_t[s0], + state.lambda_t[s1], + state.lambda_t[s2], + ) + alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] + sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((jnp.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (jnp.exp(h) - 1.0)) * D0 + - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((jnp.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def step( + self, + state: DPMSolverMultistepSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by DPM-Solver. Core function to propagate the diffusion process + from the learned model outputs (most often the predicted noise). + + Args: + state (`DPMSolverMultistepSchedulerState`): + the `FlaxDPMSolverMultistepScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxDPMSolverMultistepSchedulerOutput class + + Returns: + [`FlaxDPMSolverMultistepSchedulerOutput`] or `tuple`: [`FlaxDPMSolverMultistepSchedulerOutput`] if + `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
+ + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1]) + + model_output = self.convert_model_output(state, model_output, timestep, sample) + + model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0) + model_outputs_new = model_outputs_new.at[-1].set(model_output) + state = state.replace( + model_outputs=model_outputs_new, + prev_timestep=prev_timestep, + cur_sample=sample, + ) + + def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + return self.dpm_solver_first_order_update( + state, + state.model_outputs[-1], + state.timesteps[step_index], + state.prev_timestep, + state.cur_sample, + ) + + def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]]) + return self.multistep_dpm_solver_second_order_update( + state, + state.model_outputs, + timestep_list, + state.prev_timestep, + state.cur_sample, + ) + + def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: + timestep_list = jnp.array( + [ + state.timesteps[step_index - 2], + state.timesteps[step_index - 1], + state.timesteps[step_index], + ] + ) + return self.multistep_dpm_solver_third_order_update( + state, + state.model_outputs, + timestep_list, + state.prev_timestep, + state.cur_sample, + ) + + step_2_output = step_2(state) + step_3_output = step_3(state) + + if self.config.solver_order == 2: + return step_2_output + elif self.config.lower_order_final and len(state.timesteps) < 15: + return jax.lax.select( + state.lower_order_nums < 2, + step_2_output, + jax.lax.select( + step_index == len(state.timesteps) - 2, + step_2_output, + step_3_output, + ), + ) + else: + return jax.lax.select( + state.lower_order_nums < 2, + step_2_output, + step_3_output, + ) + + step_1_output = step_1(state) + step_23_output = step_23(state) + + if self.config.solver_order == 1: + prev_sample = step_1_output + + elif self.config.lower_order_final and len(state.timesteps) < 15: + prev_sample = jax.lax.select( + state.lower_order_nums < 1, + step_1_output, + jax.lax.select( + step_index == len(state.timesteps) - 1, + step_1_output, + step_23_output, + ), + ) + + else: + prev_sample = jax.lax.select( + state.lower_order_nums < 1, + step_1_output, + step_23_output, + ) + + state = state.replace( + lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order), + ) + + if not return_dict: + return (prev_sample, state) + + return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state) + + def scale_model_input( + self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + state (`DPMSolverMultistepSchedulerState`): + the `FlaxDPMSolverMultistepScheduler` state data class instance. 
+ sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def add_noise( + self, + state: DPMSolverMultistepSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_dpmsolver_multistep_inverse.py b/diffusers3/schedulers/scheduling_dpmsolver_multistep_inverse.py new file mode 100644 index 0000000000000000000000000000000000000000..6628a92ba034006e39f76053ac1b42c307a067c2 --- /dev/null +++ b/diffusers3/schedulers/scheduling_dpmsolver_multistep_inverse.py @@ -0,0 +1,921 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DPMSolverMultistepInverseScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverMultistepInverseScheduler` is the reverse scheduler of [`DPMSolverMultistepScheduler`]. 
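    Editor's note: "inverse" here means the timestep schedule runs forward in noise level,
    from t = 0 up to the noisiest (clipped) timestep, so `step` moves a clean sample toward
    noise rather than away from it; with the default "linspace" spacing, for example,
    `set_timesteps(4)` produces timesteps of roughly [0, 250, 500, 749]. This is typically
    used to invert images back into latents before editing.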
+ + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The + `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) + paper, and the `dpmsolver++` type implements the algorithms in the + [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or + `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. 
+ lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32).copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.use_karras_sigmas = use_karras_sigmas + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped).item() + self.noisiest_timestep = self.config.num_train_timesteps - 1 - clipped_idx + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.noisiest_timestep, num_inference_steps + 1).round()[:-1].copy().astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = (self.noisiest_timestep + 1) // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(self.noisiest_timestep + 1, 0, -step_ratio).round()[::-1].copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', " + "'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + timesteps = timesteps.copy().astype(np.int64) + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_max = ( + (1 - self.alphas_cumprod[self.noisiest_timestep]) / self.alphas_cumprod[self.noisiest_timestep] + ) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_max]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + + # when num_inference_steps == num_train_timesteps, we can end up with + # duplicates in timesteps. + _, unique_indices = np.unique(timesteps, return_index=True) + timesteps = timesteps[np.sort(unique_indices)] + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.convert_model_output + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. 
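+
+        Concretely, when `algorithm_type` is `dpmsolver++` or `sde-dpmsolver++`, the data-prediction form computed
+        below is `x0_pred = (sample - sigma_t * model_output) / alpha_t` for `prediction_type="epsilon"` and
+        `x0_pred = alpha_t * sample - sigma_t * model_output` for `prediction_type="v_prediction"`, with `alpha_t`
+        and `sigma_t` derived from `self.sigmas[self.step_index]` via `_sigma_to_alpha_sigma_t`.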
+ + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverMultistepScheduler." 
+ ) + + if self.config.thresholding: + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.dpm_solver_first_order_update + def dpm_solver_first_order_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + x_t = ( + (alpha_t / alpha_s) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.multistep_dpm_solver_second_order_update + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
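+
+        Notes:
+            For the default `algorithm_type="dpmsolver++"` with `solver_type="midpoint"`, the update implemented
+            below is (writing `m0 = model_output_list[-1]`, `m1 = model_output_list[-2]`, `h = lambda_t - lambda_s0`,
+            `r0 = (lambda_s0 - lambda_s1) / h`, `D0 = m0`, `D1 = (m0 - m1) / r0`):
+
+                x_t = (sigma_t / sigma_s0) * sample
+                      - alpha_t * (exp(-h) - 1) * D0
+                      - 0.5 * alpha_t * (exp(-h) - 1) * D1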
+ """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.algorithm_type == "sde-dpmsolver": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * (torch.exp(h) - 1.0)) * D1 + + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s0) * sample + - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + + sigma_t * 
torch.sqrt(torch.exp(2 * h) - 1.0) * noise + ) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.multistep_dpm_solver_third_order_update + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (alpha_t / alpha_s0) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def _init_step_index(self, timestep): + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + + index_candidates = (self.timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is 
only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + self._step_index = step_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + generator=None, + variance_noise: Optional[torch.Tensor] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + variance_noise (`torch.Tensor`): + Alternative to generating noise with `generator` by directly providing the noise for the variance + itself. Useful for methods such as [`CycleDiffusion`]. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: + noise = variance_noise + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from 
diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.scale_model_input + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + step_indices = [] + for timestep in timesteps: + index_candidates = (schedule_timesteps == timestep).nonzero() + if len(index_candidates) == 0: + step_index = len(schedule_timesteps) - 1 + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + step_indices.append(step_index) + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_dpmsolver_sde.py b/diffusers3/schedulers/scheduling_dpmsolver_sde.py new file mode 100644 index 0000000000000000000000000000000000000000..7f2dd081577b631e2fb5940d2fed4697b17ebcbc --- /dev/null +++ b/diffusers3/schedulers/scheduling_dpmsolver_sde.py @@ -0,0 +1,574 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torchsde + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +class BatchedBrownianTree: + """A wrapper around torchsde.BrownianTree that enables batches of entropy.""" + + def __init__(self, x, t0, t1, seed=None, **kwargs): + t0, t1, self.sign = self.sort(t0, t1) + w0 = kwargs.get("w0", torch.zeros_like(x)) + if seed is None: + seed = torch.randint(0, 2**63 - 1, []).item() + self.batched = True + try: + assert len(seed) == x.shape[0] + w0 = w0[0] + except TypeError: + seed = [seed] + self.batched = False + self.trees = [ + torchsde.BrownianInterval( + t0=t0, + t1=t1, + size=w0.shape, + dtype=w0.dtype, + device=w0.device, + entropy=s, + tol=1e-6, + pool_size=24, + halfway_tree=True, + ) + for s in seed + ] + + @staticmethod + def sort(a, b): + return (a, b, 1) if a < b else (b, a, -1) + + def __call__(self, t0, t1): + t0, t1, sign = self.sort(t0, t1) + w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign) + return w if self.batched else w[0] + + +class BrownianTreeNoiseSampler: + """A noise sampler backed by a torchsde.BrownianTree. + + Args: + x (Tensor): The tensor whose shape, device and dtype to use to generate + random samples. + sigma_min (float): The low end of the valid interval. + sigma_max (float): The high end of the valid interval. + seed (int or List[int]): The random seed. If a list of seeds is + supplied instead of a single integer, then the noise sampler will use one BrownianTree per batch item, each + with its own seed. + transform (callable): A function that maps sigma to the sampler's + internal timestep. + """ + + def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x): + self.transform = transform + t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max)) + self.tree = BatchedBrownianTree(x, t0, t1, seed) + + def __call__(self, sigma, sigma_next): + t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next)) + return self.tree(t0, t1) / (t1 - t0).abs().sqrt() + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DPMSolverSDEScheduler(SchedulerMixin, ConfigMixin): + """ + DPMSolverSDEScheduler implements the stochastic sampler from the [Elucidating the Design Space of Diffusion-Based + Generative Models](https://huggingface.co/papers/2206.00364) paper. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.00085): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.012): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + noise_sampler_seed (`int`, *optional*, defaults to `None`): + The random seed to use for the noise sampler. If `None`, a random seed is generated. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
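+
+    Example:
+        A minimal sketch of the intended call pattern (the denoising model `unet` and the initial `latents` are
+        assumed to come from the calling pipeline and are not part of this module):
+
+        ```py
+        scheduler = DPMSolverSDEScheduler(use_karras_sigmas=True, noise_sampler_seed=0)
+        scheduler.set_timesteps(num_inference_steps=25, device="cuda")
+        for t in scheduler.timesteps:
+            model_input = scheduler.scale_model_input(latents, t)
+            noise_pred = unet(model_input, t).sample
+            latents = scheduler.step(noise_pred, t, latents).prev_sample
+        ```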
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + use_karras_sigmas: Optional[bool] = False, + noise_sampler_seed: Optional[int] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + self.use_karras_sigmas = use_karras_sigmas + self.noise_sampler = None + self.noise_sampler_seed = noise_sampler_seed + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. 
+ + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.Tensor, + timestep: Union[float, torch.Tensor], + ) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sigma_input = sigma if self.state_in_first_order else self.mid_point_sigma + sample = sample / ((sigma_input**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + second_order_timesteps = self._second_order_timesteps(sigmas, log_sigmas) + + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) + + timesteps = torch.from_numpy(timesteps) + second_order_timesteps = torch.from_numpy(second_order_timesteps) + timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) + timesteps[1::2] = second_order_timesteps + + if str(device).startswith("mps"): + # mps does not support float64 + self.timesteps = timesteps.to(device, dtype=torch.float32) + else: + self.timesteps = timesteps.to(device=device) + + # empty first order variables + self.sample = None + self.mid_point_sigma = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.noise_sampler = None + + def _second_order_timesteps(self, sigmas, log_sigmas): + def sigma_fn(_t): + return np.exp(-_t) + + def t_fn(_sigma): + return -np.log(_sigma) + + midpoint_ratio = 0.5 + t = t_fn(sigmas) + delta_time = np.diff(t) + t_proposed = t[:-1] + delta_time * midpoint_ratio + sig_proposed = sigma_fn(t_proposed) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sig_proposed]) + return timesteps + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min: float = in_sigmas[-1].item() + sigma_max: float = in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, self.num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + @property + def state_in_first_order(self): + return self.sample is None + + def step( + self, + model_output: Union[torch.Tensor, np.ndarray], + timestep: Union[float, torch.Tensor], + sample: Union[torch.Tensor, np.ndarray], + return_dict: bool = True, + s_noise: float = 1.0, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). 
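+        Each timestep is processed in two calls: in the first-order state the incoming sample is cached in
+        `self.sample`, the update only advances to a proposed midpoint sigma, and `self.mid_point_sigma` is stored;
+        in the following call the cached sample is integrated the rest of the way to the next sigma. Whenever the
+        target sigma is non-zero, the update is split into `sigma_down`/`sigma_up` and noise from the
+        `BrownianTreeNoiseSampler`, scaled by `s_noise * sigma_up`, is added.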
+ + Args: + model_output (`torch.Tensor` or `np.ndarray`): + The direct output from learned diffusion model. + timestep (`float` or `torch.Tensor`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor` or `np.ndarray`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + s_noise (`float`, *optional*, defaults to 1.0): + Scaling factor for noise added to the sample. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.step_index is None: + self._init_step_index(timestep) + + # Create a noise sampler if it hasn't been created yet + if self.noise_sampler is None: + min_sigma, max_sigma = self.sigmas[self.sigmas > 0].min(), self.sigmas.max() + self.noise_sampler = BrownianTreeNoiseSampler(sample, min_sigma, max_sigma, self.noise_sampler_seed) + + # Define functions to compute sigma and t from each other + def sigma_fn(_t: torch.Tensor) -> torch.Tensor: + return _t.neg().exp() + + def t_fn(_sigma: torch.Tensor) -> torch.Tensor: + return _sigma.log().neg() + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order + sigma = self.sigmas[self.step_index - 1] + sigma_next = self.sigmas[self.step_index] + + # Set the midpoint and step size for the current step + midpoint_ratio = 0.5 + t, t_next = t_fn(sigma), t_fn(sigma_next) + delta_time = t_next - t + t_proposed = t + delta_time * midpoint_ratio + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma if self.state_in_first_order else sigma_fn(t_proposed) + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma if self.state_in_first_order else sigma_fn(t_proposed) + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if sigma_next == 0: + derivative = (sample - pred_original_sample) / sigma + dt = sigma_next - sigma + prev_sample = sample + derivative * dt + else: + if self.state_in_first_order: + t_next = t_proposed + else: + sample = self.sample + + sigma_from = sigma_fn(t) + sigma_to = sigma_fn(t_next) + sigma_up = min(sigma_to, (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5) + sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5 + ancestral_t = t_fn(sigma_down) + prev_sample = (sigma_fn(ancestral_t) / sigma_fn(t)) * sample - ( + t - ancestral_t + ).expm1() * pred_original_sample + prev_sample = prev_sample + self.noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * sigma_up + + if self.state_in_first_order: + # store for 2nd order step + self.sample = sample + self.mid_point_sigma = sigma_fn(t_next) + else: + # free for "first order mode" + self.sample = None + self.mid_point_sigma = None + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_dpmsolver_singlestep.py 
b/diffusers3/schedulers/scheduling_dpmsolver_singlestep.py new file mode 100644 index 0000000000000000000000000000000000000000..1a10fff043fb40035a54c4ca98987d94001e5636 --- /dev/null +++ b/diffusers3/schedulers/scheduling_dpmsolver_singlestep.py @@ -0,0 +1,1049 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin): + """ + `DPMSolverSinglestepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. 
+ beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver` or `dpmsolver++` or `sde-dpmsolver++`. The `dpmsolver` + type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the + `dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) + paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided + sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + final_sigmas_type (`str`, *optional*, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. 
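+
+    Example:
+        A minimal sketch of the intended call pattern, assuming the standard scheduler `step` API shared by the
+        other schedulers in this directory (`unet` and `latents` are provided by the calling pipeline):
+
+        ```py
+        scheduler = DPMSolverSinglestepScheduler(solver_order=2, use_karras_sigmas=True)
+        scheduler.set_timesteps(num_inference_steps=25, device="cuda")
+        for t in scheduler.timesteps:
+            noise_pred = unet(latents, t).sample
+            latents = scheduler.step(noise_pred, t, latents).prev_sample
+        ```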
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[np.ndarray] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = False, + use_karras_sigmas: Optional[bool] = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + ): + if algorithm_type == "dpmsolver": + deprecation_message = "algorithm_type `dpmsolver` is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" + deprecate("algorithm_types=dpmsolver", "1.0.0", deprecation_message) + + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please chooose `sigma_min` instead." 
+ ) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.sample = None + self.order_list = self.get_order_list(num_train_timesteps) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + def get_order_list(self, num_inference_steps: int) -> List[int]: + """ + Computes the solver order at each time step. + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + """ + steps = num_inference_steps + order = self.config.solver_order + if order > 3: + raise ValueError("Order > 3 is not supported by this scheduler") + if self.config.lower_order_final: + if order == 3: + if steps % 3 == 0: + orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1] + elif steps % 3 == 1: + orders = [1, 2, 3] * (steps // 3) + [1] + else: + orders = [1, 2, 3] * (steps // 3) + [1, 2] + elif order == 2: + if steps % 2 == 0: + orders = [1, 2] * (steps // 2 - 1) + [1, 1] + else: + orders = [1, 2] * (steps // 2) + [1] + elif order == 1: + orders = [1] * steps + else: + if order == 3: + orders = [1, 2, 3] * (steps // 3) + elif order == 2: + orders = [1, 2] * (steps // 2) + elif order == 1: + orders = [1] * steps + return orders + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps( + self, + num_inference_steps: int = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps schedule is used. If `timesteps` is + passed, `num_inference_steps` must be `None`. 
+ """ + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.") + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.") + if timesteps is not None and self.config.use_karras_sigmas: + raise ValueError("Cannot use `timesteps` when `config.use_karras_sigmas=True`.") + + num_inference_steps = num_inference_steps or len(timesteps) + self.num_inference_steps = num_inference_steps + + if timesteps is not None: + timesteps = np.array(timesteps).astype(np.int64) + else: + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1 - clipped_idx, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f" `final_sigmas_type` must be one of `sigma_min` or `zero`, but got {self.config.final_sigmas_type}" + ) + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas).to(device=device) + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + self.model_outputs = [None] * self.config.solver_order + self.sample = None + + if not self.config.lower_order_final and num_inference_steps % self.config.solver_order != 0: + logger.warning( + "Changing scheduler {self.config} to have `lower_order_final` set to True to handle uneven amount of inference steps. Please make sure to always use an even number of `num_inference steps when using `lower_order_final=False`." + ) + self.register_to_config(lower_order_final=True) + + if not self.config.lower_order_final and self.config.final_sigmas_type == "zero": + logger.warning( + " `last_sigmas_type='zero'` is not supported for `lower_order_final=False`. Changing scheduler {self.config} to have `lower_order_final` set to True." + ) + self.register_to_config(lower_order_final=True) + + self.order_list = self.get_order_list(num_inference_steps) + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. 
Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. 
DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + # DPM-Solver++ needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverSinglestepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # DPM-Solver needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type == "dpmsolver": + if self.config.prediction_type == "epsilon": + # DPM-Solver and DPM-Solver++ only need the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the DPMSolverSinglestepScheduler." + ) + + if self.config.thresholding: + alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep] + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + def dpm_solver_first_order_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). 
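+        For the default `dpmsolver++` algorithm type this computes, with `h = lambda_t - lambda_s`,
+        `x_t = (sigma_t / sigma_s) * sample - alpha_t * (exp(-h) - 1.0) * model_output`, where `model_output` is
+        the converted data (`x0`) prediction.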
+ + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "dpmsolver": + x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + return x_t + + def singlestep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the second-order singlestep DPMSolver that computes the solution at time `prev_timestep` from the + time `timestep_list[-2]`. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): + The current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
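+
+        The update combines the two most recent converted model outputs, `m0` and `m1`, through the finite
+        difference `D1 = (m0 - m1) / r0` with `r0 = h_0 / h`, and applies either the `midpoint` or the `heun`
+        rule selected by `config.solver_type`.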
+ """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s1, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m1, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s1) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s1) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s1) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s1) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s1 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s1 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + return x_t + + def singlestep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the third-order singlestep DPMSolver that computes the solution at time `prev_timestep` from the + time `timestep_list[-3]`. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. 
+ timestep (`int`): + The current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s2, lambda_s0 - lambda_s2, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m2 + D1_0, D1_1 = (1.0 / r1) * (m1 - m2), (1.0 / r0) * (m0 - m2) + D1 = (r0 * D1_0 - r1 * D1_1) / (r0 - r1) + D2 = 2.0 * (D1_1 - D1_0) / (r0 - r1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s2) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1_1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s2) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + elif self.config.algorithm_type == "dpmsolver": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (alpha_t / alpha_s2) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1_1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (alpha_t / alpha_s2) * sample + - (sigma_t * (torch.exp(h) - 1.0)) * D0 + - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 + ) + return x_t + + def singlestep_dpm_solver_update( + self, + model_output_list: List[torch.Tensor], + *args, + sample: torch.Tensor = None, + order: int = None, + noise: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the singlestep DPMSolver. 
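+        Depending on `order`, this dispatches to the first-, second-, or third-order singlestep update defined
+        above; for the `sde-dpmsolver++` variant, the supplied `noise` is forwarded to the first- and second-order
+        updates.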
+ + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + timestep (`int`): + The current and latter discrete timestep in the diffusion chain. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. + order (`int`): + The solver order at this step. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) + prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 2: + sample = args[2] + else: + raise ValueError(" missing`sample` as a required keyward argument") + if order is None: + if len(args) > 3: + order = args[3] + else: + raise ValueError(" missing `order` as a required keyward argument") + if timestep_list is not None: + deprecate( + "timestep_list", + "1.0.0", + "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + if order == 1: + return self.dpm_solver_first_order_update(model_output_list[-1], sample=sample, noise=noise) + elif order == 2: + return self.singlestep_dpm_solver_second_order_update(model_output_list, sample=sample, noise=noise) + elif order == 3: + return self.singlestep_dpm_solver_third_order_update(model_output_list, sample=sample) + else: + raise ValueError(f"Order must be 1, 2, 3, got {order}") + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the singlestep DPMSolver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. 
+            timestep (`int`):
+                The current discrete timestep in the diffusion chain.
+            sample (`torch.Tensor`):
+                A current instance of a sample created by the diffusion process.
+            return_dict (`bool`):
+                Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`.
+
+        Returns:
+            [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`:
+                If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a
+                tuple is returned where the first element is the sample tensor.
+
+        """
+        if self.num_inference_steps is None:
+            raise ValueError(
+                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
+            )
+
+        if self.step_index is None:
+            self._init_step_index(timestep)
+
+        model_output = self.convert_model_output(model_output, sample=sample)
+        for i in range(self.config.solver_order - 1):
+            self.model_outputs[i] = self.model_outputs[i + 1]
+        self.model_outputs[-1] = model_output
+
+        if self.config.algorithm_type == "sde-dpmsolver++":
+            noise = randn_tensor(
+                model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype
+            )
+        else:
+            noise = None
+
+        order = self.order_list[self.step_index]
+
+        # For img2img, denoising might start with order > 1, which is not possible.
+        # In this case, make sure that the first two steps are both order=1.
+        while self.model_outputs[-order] is None:
+            order -= 1
+
+        # For single-step solvers, we use the initial value at each time with order = 1.
+        if order == 1:
+            self.sample = sample
+
+        prev_sample = self.singlestep_dpm_solver_update(
+            self.model_outputs, sample=self.sample, order=order, noise=noise
+        )
+
+        # upon completion increase step index by one
+        self._step_index += 1
+
+        if not return_dict:
+            return (prev_sample,)
+
+        return SchedulerOutput(prev_sample=prev_sample)
+
+    def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor:
+        """
+        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
+        current timestep.
+
+        Args:
+            sample (`torch.Tensor`):
+                The input sample.
+
+        Returns:
+            `torch.Tensor`:
+                A scaled input sample.
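+
+        This scheduler does not rescale the model input, so the sample is returned unchanged.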
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_edm_dpmsolver_multistep.py b/diffusers3/schedulers/scheduling_edm_dpmsolver_multistep.py new file mode 100644 index 0000000000000000000000000000000000000000..c49e8e9a191a85040250a428c817b7201433ec87 --- /dev/null +++ b/diffusers3/schedulers/scheduling_edm_dpmsolver_multistep.py @@ -0,0 +1,707 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver and https://github.com/NVlabs/edm + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +class EDMDPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + Implements DPMSolverMultistepScheduler in EDM formulation as presented in Karras et al. 2022 [1]. + `EDMDPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. + + [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." 
+ https://arxiv.org/abs/2206.00364 + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + sigma_min (`float`, *optional*, defaults to 0.002): + Minimum noise magnitude in the sigma schedule. This was set to 0.002 in the EDM paper [1]; a reasonable + range is [0, 10]. + sigma_max (`float`, *optional*, defaults to 80.0): + Maximum noise magnitude in the sigma schedule. This was set to 80.0 in the EDM paper [1]; a reasonable + range is [0.2, 80.0]. + sigma_data (`float`, *optional*, defaults to 0.5): + The standard deviation of the data distribution. This is set to 0.5 in the EDM paper [1]. + sigma_schedule (`str`, *optional*, defaults to `karras`): + Sigma schedule to compute the `sigmas`. By default, we the schedule introduced in the EDM paper + (https://arxiv.org/abs/2206.00364). Other acceptable value is "exponential". The exponential schedule was + incorporated in this model: https://huggingface.co/stabilityai/cosxl. + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + solver_order (`int`, defaults to 2): + The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided + sampling, and `solver_order=3` for unconditional sampling. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `dpmsolver++`): + Algorithm type for the solver; can be `dpmsolver++` or `sde-dpmsolver++`. The `dpmsolver++` type implements + the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to + use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. + solver_type (`str`, defaults to `midpoint`): + Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the + sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + euler_at_final (`bool`, defaults to `False`): + Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail + richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference + steps, but sometimes may result in blurring. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. 
If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.002, + sigma_max: float = 80.0, + sigma_data: float = 0.5, + sigma_schedule: str = "karras", + num_train_timesteps: int = 1000, + prediction_type: str = "epsilon", + rho: float = 7.0, + solver_order: int = 2, + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "dpmsolver++", + solver_type: str = "midpoint", + lower_order_final: bool = True, + euler_at_final: bool = False, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + ): + # settings for DPM-Solver + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"]: + if algorithm_type == "deis": + self.register_to_config(algorithm_type="dpmsolver++") + else: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + if solver_type not in ["midpoint", "heun"]: + if solver_type in ["logrho", "bh1", "bh2"]: + self.register_to_config(solver_type="midpoint") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": + raise ValueError( + f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." + ) + + ramp = torch.linspace(0, 1, num_train_timesteps) + if sigma_schedule == "karras": + sigmas = self._compute_karras_sigmas(ramp) + elif sigma_schedule == "exponential": + sigmas = self._compute_exponential_sigmas(ramp) + + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + # setable values + self.num_inference_steps = None + self.model_outputs = [None] * solver_order + self.lower_order_nums = 0 + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + return (self.config.sigma_max**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. 
+ """ + self._begin_index = begin_index + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_inputs + def precondition_inputs(self, sample, sigma): + c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + scaled_sample = sample * c_in + return scaled_sample + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_noise + def precondition_noise(self, sigma): + if not isinstance(sigma, torch.Tensor): + sigma = torch.tensor([sigma]) + + c_noise = 0.25 * torch.log(sigma) + + return c_noise + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.precondition_outputs + def precondition_outputs(self, sample, model_output, sigma): + sigma_data = self.config.sigma_data + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + + if self.config.prediction_type == "epsilon": + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + elif self.config.prediction_type == "v_prediction": + c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + else: + raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.") + + denoised = c_skip * sample + c_out * model_output + + return denoised + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler.scale_model_input + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = self.precondition_inputs(sample, sigma) + + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
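+
+        Example (an illustrative sketch of standalone use; values are only for demonstration):
+
+        ```py
+        >>> from diffusers3.schedulers.scheduling_edm_dpmsolver_multistep import EDMDPMSolverMultistepScheduler
+
+        >>> scheduler = EDMDPMSolverMultistepScheduler()
+        >>> scheduler.set_timesteps(num_inference_steps=25, device="cpu")
+        >>> len(scheduler.sigmas)  # num_inference_steps sigmas plus the appended final sigma
+        26
+        ```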
+ """ + + self.num_inference_steps = num_inference_steps + + ramp = torch.linspace(0, 1, self.num_inference_steps) + if self.config.sigma_schedule == "karras": + sigmas = self._compute_karras_sigmas(ramp) + elif self.config.sigma_schedule == "exponential": + sigmas = self._compute_exponential_sigmas(ramp) + + sigmas = sigmas.to(dtype=torch.float32, device=device) + self.timesteps = self.precondition_noise(sigmas) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = self.config.sigma_min + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + self.sigmas = torch.cat([sigmas, torch.tensor([sigma_last], dtype=torch.float32, device=device)]) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._compute_karras_sigmas + def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + # Copied from diffusers.schedulers.scheduling_edm_euler.EDMEulerScheduler._compute_exponential_sigmas + def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: + """Implementation closely follows k-diffusion. + + https://github.com/crowsonkb/k-diffusion/blob/6ab5146d4a5ef63901326489f31f1d8e7dd36b48/k_diffusion/sampling.py#L26 + """ + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0) + return sigmas + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = torch.tensor(1) # Inputs are pre-scaled before going into unet, so alpha_t = 1 + sigma_t = sigma + + return alpha_t, sigma_t + + def convert_model_output( + self, + model_output: torch.Tensor, + sample: torch.Tensor = None, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is + designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an + integral of the data prediction model. + + + + The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise + prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + sigma = self.sigmas[self.step_index] + x0_pred = self.precondition_outputs(sample, model_output, sigma) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + def dpm_solver_first_order_update( + self, + model_output: torch.Tensor, + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + One step for the first-order DPMSolver (equivalent to DDIM). + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
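+
+        In the EDM parameterization used here `alpha_t = 1`, so `lambda = -log(sigma)` and, for `dpmsolver++`,
+        the update reduces to `x_t = (sigma_t / sigma_s) * sample - (exp(-h) - 1.0) * model_output` with
+        `h = lambda_t - lambda_s`.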
+ """ + sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s = torch.log(alpha_s) - torch.log(sigma_s) + + h = lambda_t - lambda_s + if self.config.algorithm_type == "dpmsolver++": + x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + x_t = ( + (sigma_t / sigma_s * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + + return x_t + + def multistep_dpm_solver_second_order_update( + self, + model_output_list: List[torch.Tensor], + sample: torch.Tensor = None, + noise: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + One step for the second-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + sigma_t, sigma_s0, sigma_s1 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + + m0, m1 = model_output_list[-1], model_output_list[-2] + + h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 + r0 = h_0 / h + D0, D1 = m0, (1.0 / r0) * (m0 - m1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2211.01095 for detailed derivations + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + ) + elif self.config.algorithm_type == "sde-dpmsolver++": + assert noise is not None + if self.config.solver_type == "midpoint": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + elif self.config.solver_type == "heun": + x_t = ( + (sigma_t / sigma_s0 * torch.exp(-h)) * sample + + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise + ) + + return x_t + + def multistep_dpm_solver_third_order_update( + self, + model_output_list: List[torch.Tensor], + sample: torch.Tensor = None, + ) -> torch.Tensor: + """ + One step for the third-order multistep DPMSolver. + + Args: + model_output_list (`List[torch.Tensor]`): + The direct outputs from learned diffusion model at current and latter timesteps. + sample (`torch.Tensor`): + A current instance of a sample created by diffusion process. 
+ + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. + """ + sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + self.sigmas[self.step_index - 2], + ) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) + alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) + lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) + + m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] + + h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 + r0, r1 = h_0 / h, h_1 / h + D0 = m0 + D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.config.algorithm_type == "dpmsolver++": + # See https://arxiv.org/abs/2206.00927 for detailed derivations + x_t = ( + (sigma_t / sigma_s0) * sample + - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 + - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 + ) + + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep DPMSolver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. 
+ + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Improve numerical stability for small number of steps + lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( + self.config.euler_at_final + or (self.config.lower_order_final and len(self.timesteps) < 15) + or self.config.final_sigmas_type == "zero" + ) + lower_order_second = ( + (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 + ) + + model_output = self.convert_model_output(model_output, sample=sample) + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.model_outputs[-1] = model_output + + if self.config.algorithm_type == "sde-dpmsolver++": + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype + ) + else: + noise = None + + if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: + prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) + elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: + prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) + else: + prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = 
original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_edm_euler.py b/diffusers3/schedulers/scheduling_edm_euler.py new file mode 100644 index 0000000000000000000000000000000000000000..4b823c0d281b3eba295072c5cc4b48e2e90b12c0 --- /dev/null +++ b/diffusers3/schedulers/scheduling_edm_euler.py @@ -0,0 +1,402 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete +class EDMEulerSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +class EDMEulerScheduler(SchedulerMixin, ConfigMixin): + """ + Implements the Euler scheduler in EDM formulation as presented in Karras et al. 2022 [1]. + + [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." + https://arxiv.org/abs/2206.00364 + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + sigma_min (`float`, *optional*, defaults to 0.002): + Minimum noise magnitude in the sigma schedule. This was set to 0.002 in the EDM paper [1]; a reasonable + range is [0, 10]. + sigma_max (`float`, *optional*, defaults to 80.0): + Maximum noise magnitude in the sigma schedule. This was set to 80.0 in the EDM paper [1]; a reasonable + range is [0.2, 80.0]. + sigma_data (`float`, *optional*, defaults to 0.5): + The standard deviation of the data distribution. This is set to 0.5 in the EDM paper [1]. + sigma_schedule (`str`, *optional*, defaults to `karras`): + Sigma schedule to compute the `sigmas`. By default, we the schedule introduced in the EDM paper + (https://arxiv.org/abs/2206.00364). Other acceptable value is "exponential". 
The exponential schedule was + incorporated in this model: https://huggingface.co/stabilityai/cosxl. + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + rho (`float`, *optional*, defaults to 7.0): + The rho parameter used for calculating the Karras sigma schedule, which is set to 7.0 in the EDM paper [1]. + """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + sigma_min: float = 0.002, + sigma_max: float = 80.0, + sigma_data: float = 0.5, + sigma_schedule: str = "karras", + num_train_timesteps: int = 1000, + prediction_type: str = "epsilon", + rho: float = 7.0, + ): + if sigma_schedule not in ["karras", "exponential"]: + raise ValueError(f"Wrong value for provided for `{sigma_schedule=}`.`") + + # setable values + self.num_inference_steps = None + + ramp = torch.linspace(0, 1, num_train_timesteps) + if sigma_schedule == "karras": + sigmas = self._compute_karras_sigmas(ramp) + elif sigma_schedule == "exponential": + sigmas = self._compute_exponential_sigmas(ramp) + + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + self.is_scale_input_called = False + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + return (self.config.sigma_max**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. 
+ """ + self._begin_index = begin_index + + def precondition_inputs(self, sample, sigma): + c_in = 1 / ((sigma**2 + self.config.sigma_data**2) ** 0.5) + scaled_sample = sample * c_in + return scaled_sample + + def precondition_noise(self, sigma): + if not isinstance(sigma, torch.Tensor): + sigma = torch.tensor([sigma]) + + c_noise = 0.25 * torch.log(sigma) + + return c_noise + + def precondition_outputs(self, sample, model_output, sigma): + sigma_data = self.config.sigma_data + c_skip = sigma_data**2 / (sigma**2 + sigma_data**2) + + if self.config.prediction_type == "epsilon": + c_out = sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + elif self.config.prediction_type == "v_prediction": + c_out = -sigma * sigma_data / (sigma**2 + sigma_data**2) ** 0.5 + else: + raise ValueError(f"Prediction type {self.config.prediction_type} is not supported.") + + denoised = c_skip * sample + c_out * model_output + + return denoised + + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = self.precondition_inputs(sample, sigma) + + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + ramp = torch.linspace(0, 1, self.num_inference_steps) + if self.config.sigma_schedule == "karras": + sigmas = self._compute_karras_sigmas(ramp) + elif self.config.sigma_schedule == "exponential": + sigmas = self._compute_exponential_sigmas(ramp) + + sigmas = sigmas.to(dtype=torch.float32, device=device) + self.timesteps = self.precondition_noise(sigmas) + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Taken from https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17 + def _compute_karras_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + + rho = self.config.rho + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def _compute_exponential_sigmas(self, ramp, sigma_min=None, sigma_max=None) -> torch.Tensor: + """Implementation closely follows k-diffusion. 
+ + https://github.com/crowsonkb/k-diffusion/blob/6ab5146d4a5ef63901326489f31f1d8e7dd36b48/k_diffusion/sampling.py#L26 + """ + sigma_min = sigma_min or self.config.sigma_min + sigma_max = sigma_max or self.config.sigma_max + sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), len(ramp)).exp().flip(0) + return sigmas + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EDMEulerSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EDMEulerSchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EDMEulerSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EDMEulerSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EDMEulerScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." 
+ ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + sigma = self.sigmas[self.step_index] + + gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 + + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + + eps = noise * s_noise + sigma_hat = sigma * (gamma + 1) + + if gamma > 0: + sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + pred_original_sample = self.precondition_outputs(sample, model_output, sigma_hat) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma_hat + + dt = self.sigmas[self.step_index + 1] - sigma_hat + + prev_sample = sample + derivative * dt + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EDMEulerSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_euler_ancestral_discrete.py b/diffusers3/schedulers/scheduling_euler_ancestral_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..485e919e9cc548f0a9be6b9ddc04c210a22545cb --- /dev/null +++ b/diffusers3/schedulers/scheduling_euler_ancestral_discrete.py @@ -0,0 +1,479 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerAncestralDiscrete +class EulerAncestralDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. 
+ + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class EulerAncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Ancestral sampling with Euler method steps. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas) + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.is_scale_input_called = False + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. 
+ """ + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + self.is_scale_input_called = True + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[ + ::-1 + ].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas).to(device=device) + + self.timesteps = torch.from_numpy(timesteps).to(device=device) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a + [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, + [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + + """ + + if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + sigma_from = self.sigmas[self.step_index] + sigma_to = self.sigmas[self.step_index + 1] + sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5 + sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5 + + # 2. 
Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + + dt = sigma_down - sigma + + prev_sample = sample + derivative * dt + + device = model_output.device + noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) + + prev_sample = prev_sample + noise * sigma_up + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EulerAncestralDiscreteSchedulerOutput( + prev_sample=prev_sample, pred_original_sample=pred_original_sample + ) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_euler_discrete.py b/diffusers3/schedulers/scheduling_euler_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..46e0e6baef811bceba8ee1021592fc3a5c8d8688 --- /dev/null +++ b/diffusers3/schedulers/scheduling_euler_discrete.py @@ -0,0 +1,672 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->EulerDiscrete +class EulerDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. 
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class EulerDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Euler scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + interpolation_type(`str`, defaults to `"linear"`, *optional*): + The interpolation type to compute intermediate sigmas for the scheduler denoising steps. Should be on of + `"linear"` or `"log_linear"`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + interpolation_type: str = "linear", + use_karras_sigmas: Optional[bool] = False, + sigma_min: Optional[float] = None, + sigma_max: Optional[float] = None, + timestep_spacing: str = "linspace", + timestep_type: str = "discrete", # can be "discrete" or "continuous" + steps_offset: int = 0, + rescale_betas_zero_snr: bool = False, + final_sigmas_type: str = "zero", # can be "zero" or "sigma_min" + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + sigmas = (((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5).flip(0) + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() + timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) + + # setable values + self.num_inference_steps = None + + # TODO: Support the full EDM scalings for all prediction types and timestep types + if timestep_type == "continuous" and prediction_type == "v_prediction": + self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas]) + else: + self.timesteps = timesteps + + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + self.is_scale_input_called = False + self.use_karras_sigmas = use_karras_sigmas + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + max_sigma = max(self.sigmas) if isinstance(self.sigmas, list) else self.sigmas.max() + if self.config.timestep_spacing in ["linspace", "trailing"]: + return max_sigma + + return (max_sigma**2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. 
This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + + self.is_scale_input_called = True + return sample + + def set_timesteps( + self, + num_inference_steps: int = None, + device: Union[str, torch.device] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary timesteps schedule. If `None`, timesteps will be generated + based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` and `sigmas` + must be `None`, and `timestep_spacing` attribute will be ignored. + sigmas (`List[float]`, *optional*): + Custom sigmas used to support arbitrary timesteps schedule schedule. If `None`, timesteps and sigmas + will be generated based on the relevant scheduler attributes. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`, and the timesteps will be generated based on the + custom sigmas schedule. + """ + + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` should be set.") + if num_inference_steps is None and timesteps is None and sigmas is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps` or `sigmas.") + if num_inference_steps is not None and (timesteps is not None or sigmas is not None): + raise ValueError("Can only pass one of `num_inference_steps` or `timesteps` or `sigmas`.") + if timesteps is not None and self.config.use_karras_sigmas: + raise ValueError("Cannot set `timesteps` with `config.use_karras_sigmas = True`.") + if ( + timesteps is not None + and self.config.timestep_type == "continuous" + and self.config.prediction_type == "v_prediction" + ): + raise ValueError( + "Cannot set `timesteps` with `config.timestep_type = 'continuous'` and `config.prediction_type = 'v_prediction'`." 
+ ) + + if num_inference_steps is None: + num_inference_steps = len(timesteps) if timesteps is not None else len(sigmas) - 1 + self.num_inference_steps = num_inference_steps + + if sigmas is not None: + log_sigmas = np.log(np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)) + sigmas = np.array(sigmas).astype(np.float32) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas[:-1]]) + + else: + if timesteps is not None: + timesteps = np.array(timesteps).astype(np.float32) + else: + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace( + 0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32 + )[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + ) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = ( + (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + ) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + if self.config.interpolation_type == "linear": + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + elif self.config.interpolation_type == "log_linear": + sigmas = torch.linspace(np.log(sigmas[-1]), np.log(sigmas[0]), num_inference_steps + 1).exp().numpy() + else: + raise ValueError( + f"{self.config.interpolation_type} is not implemented. 
Please specify interpolation_type to either" + " 'linear' or 'log_linear'" + ) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + + # TODO: Support the full EDM scalings for all prediction types and timestep types + if self.config.timestep_type == "continuous" and self.config.prediction_type == "v_prediction": + self.timesteps = torch.Tensor([0.25 * sigma.log() for sigma in sigmas[:-1]]).to(device=device) + else: + self.timesteps = torch.from_numpy(timesteps.astype(np.float32)).to(device=device) + + self._step_index = None + self._begin_index = None + self.sigmas = sigmas.to("cpu") # to avoid too much CPU/GPU communication + + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from https://github.com/crowsonkb/k-diffusion/blob/686dbad0f39640ea25c8a8c6a6e56bb40eacefa2/k_diffusion/sampling.py#L17 + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[EulerDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if not self.is_scale_input_called: + logger.warning( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + sigma = self.sigmas[self.step_index] + + gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 + + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + + eps = noise * s_noise + sigma_hat = sigma * (gamma + 1) + + if gamma > 0: + sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + # NOTE: "original_sample" should not be an expected prediction_type but is left in for + # backwards compatibility + if self.config.prediction_type == "original_sample" or self.config.prediction_type == "sample": + pred_original_sample = model_output + elif self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma_hat * model_output + elif self.config.prediction_type == "v_prediction": + # denoised = model_output * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma_hat + + dt = self.sigmas[self.step_index + 1] - sigma_hat + + prev_sample = sample + derivative * dt + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return EulerDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor: + if ( + isinstance(timesteps, int) + or isinstance(timesteps, torch.IntTensor) + or isinstance(timesteps, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.get_velocity()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." 
+ ), + ) + + if sample.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32) + timesteps = timesteps.to(sample.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(sample.device) + timesteps = timesteps.to(sample.device) + + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + alphas_cumprod = self.alphas_cumprod.to(sample) + sqrt_alpha_prod = alphas_cumprod[step_indices] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[step_indices]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_euler_discrete_flax.py b/diffusers3/schedulers/scheduling_euler_discrete_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..55b0c2460a81d1d9ca01fa93a0f0061066a7fe63 --- /dev/null +++ b/diffusers3/schedulers/scheduling_euler_discrete_flax.py @@ -0,0 +1,265 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + broadcast_to_shape_from_left, +) + + +@flax.struct.dataclass +class EulerDiscreteSchedulerState: + common: CommonSchedulerState + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + sigmas: jnp.ndarray + num_inference_steps: Optional[int] = None + + @classmethod + def create( + cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray + ): + return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) + + +@dataclass +class FlaxEulerDiscreteSchedulerOutput(FlaxSchedulerOutput): + state: EulerDiscreteSchedulerState + + +class FlaxEulerDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Euler scheduler (Algorithm 2) from Karras et al. (2022) https://arxiv.org/abs/2206.00364. . 
Based on the original + k-diffusion implementation by Katherine Crowson: + https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L51 + + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. + """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> EulerDiscreteSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 + sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas) + sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) + + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + init_noise_sigma = sigmas.max() + else: + init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5 + + return EulerDiscreteSchedulerState.create( + common=common, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + sigmas=sigmas, + ) + + def scale_model_input(self, state: EulerDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: + """ + Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the Euler algorithm. + + Args: + state (`EulerDiscreteSchedulerState`): + the `FlaxEulerDiscreteScheduler` state data class instance. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + timestep (`int`): + current discrete timestep in the diffusion chain. 
+ + Returns: + `jnp.ndarray`: scaled input sample + """ + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + sigma = state.sigmas[step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, state: EulerDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> EulerDiscreteSchedulerState: + """ + Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`EulerDiscreteSchedulerState`): + the `FlaxEulerDiscreteScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + """ + + if self.config.timestep_spacing == "linspace": + timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // num_inference_steps + timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) + timesteps += 1 + else: + raise ValueError( + f"timestep_spacing must be one of ['linspace', 'leading'], got {self.config.timestep_spacing}" + ) + + sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 + sigmas = jnp.interp(timesteps, jnp.arange(0, len(sigmas)), sigmas) + sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) + + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + init_noise_sigma = sigmas.max() + else: + init_noise_sigma = (sigmas.max() ** 2 + 1) ** 0.5 + + return state.replace( + timesteps=timesteps, + sigmas=sigmas, + num_inference_steps=num_inference_steps, + init_noise_sigma=init_noise_sigma, + ) + + def step( + self, + state: EulerDiscreteSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxEulerDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`EulerDiscreteSchedulerState`): + the `FlaxEulerDiscreteScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + order: coefficient for multi-step inference. + return_dict (`bool`): option for returning tuple rather than FlaxEulerDiscreteScheduler class + + Returns: + [`FlaxEulerDiscreteScheduler`] or `tuple`: [`FlaxEulerDiscreteScheduler`] if `return_dict` is True, + otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + sigma = state.sigmas[step_index] + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + + # dt = sigma_down - sigma + dt = state.sigmas[step_index + 1] - sigma + + prev_sample = sample + derivative * dt + + if not return_dict: + return (prev_sample, state) + + return FlaxEulerDiscreteSchedulerOutput(prev_sample=prev_sample, state=state) + + def add_noise( + self, + state: EulerDiscreteSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + sigma = state.sigmas[timesteps].flatten() + sigma = broadcast_to_shape_from_left(sigma, noise.shape) + + noisy_samples = original_samples + noise * sigma + + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_flow_match_euler_discrete.py b/diffusers3/schedulers/scheduling_flow_match_euler_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..937cae2e47f5af731de7b4af1929aff29cd29a54 --- /dev/null +++ b/diffusers3/schedulers/scheduling_flow_match_euler_discrete.py @@ -0,0 +1,311 @@ +# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class FlowMatchEulerDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.FloatTensor + + +class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Euler scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. 
Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + shift (`float`, defaults to 1.0): + The shift value for the timestep schedule. + """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + shift: float = 1.0, + use_dynamic_shifting=False, + base_shift: Optional[float] = 0.5, + max_shift: Optional[float] = 1.15, + base_image_seq_len: Optional[int] = 256, + max_image_seq_len: Optional[int] = 4096, + ): + timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy() + timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) + + sigmas = timesteps / num_train_timesteps + if not use_dynamic_shifting: + # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution + sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) + + self.timesteps = sigmas * num_train_timesteps + + self._step_index = None + self._begin_index = None + + self.sigmas = sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.sigma_min = self.sigmas[-1].item() + self.sigma_max = self.sigmas[0].item() + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_noise( + self, + sample: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + noise: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + Forward process in flow-matching + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. 
+ """ + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=sample.device, dtype=sample.dtype) + + if sample.device.type == "mps" and torch.is_floating_point(timestep): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32) + timestep = timestep.to(sample.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(sample.device) + timestep = timestep.to(sample.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timestep] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timestep.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timestep.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(sample.shape): + sigma = sigma.unsqueeze(-1) + + sample = sigma * noise + (1.0 - sigma) * sample + + return sample + + def _sigma_to_t(self, sigma): + return sigma * self.config.num_train_timesteps + + def time_shift(self, mu: float, sigma: float, t: torch.Tensor): + return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) + + def set_timesteps( + self, + num_inference_steps: int = None, + device: Union[str, torch.device] = None, + sigmas: Optional[List[float]] = None, + mu: Optional[float] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + + if self.config.use_dynamic_shifting and mu is None: + raise ValueError(" you have a pass a value for `mu` when `use_dynamic_shifting` is set to be `True`") + + if sigmas is None: + self.num_inference_steps = num_inference_steps + timesteps = np.linspace( + self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps + ) + + sigmas = timesteps / self.config.num_train_timesteps + + if self.config.use_dynamic_shifting: + sigmas = self.time_shift(mu, 1.0, sigmas) + else: + sigmas = self.config.shift * sigmas / (1 + (self.config.shift - 1) * sigmas) + + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + timesteps = sigmas * self.config.num_train_timesteps + + self.timesteps = timesteps.to(device=device) + self.sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + + self._step_index = None + self._begin_index = None + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[FlowMatchEulerDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + sigma = self.sigmas[self.step_index] + sigma_next = self.sigmas[self.step_index + 1] + + prev_sample = sample + (sigma_next - sigma) * model_output + + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return FlowMatchEulerDiscreteSchedulerOutput(prev_sample=prev_sample) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_flow_match_heun_discrete.py b/diffusers3/schedulers/scheduling_flow_match_heun_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a3ca2d4b0ad9062baafb9749cc3fe4a35401ea --- /dev/null +++ b/diffusers3/schedulers/scheduling_flow_match_heun_discrete.py @@ -0,0 +1,321 @@ +# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class FlowMatchHeunDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.FloatTensor + + +class FlowMatchHeunDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Heun scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + shift (`float`, defaults to 1.0): + The shift value for the timestep schedule. + """ + + _compatibles = [] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + shift: float = 1.0, + ): + timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy() + timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32) + + sigmas = timesteps / num_train_timesteps + sigmas = shift * sigmas / (1 + (shift - 1) * sigmas) + + self.timesteps = sigmas * num_train_timesteps + + self._step_index = None + self._begin_index = None + + self.sigmas = sigmas.to("cpu") # to avoid too much CPU/GPU communication + self.sigma_min = self.sigmas[-1].item() + self.sigma_max = self.sigmas[0].item() + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. 
+ """ + self._begin_index = begin_index + + def scale_noise( + self, + sample: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + noise: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + """ + Forward process in flow-matching + + Args: + sample (`torch.FloatTensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.FloatTensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sigma * noise + (1.0 - sigma) * sample + + return sample + + def _sigma_to_t(self, sigma): + return sigma * self.config.num_train_timesteps + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + timesteps = np.linspace( + self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps + ) + + sigmas = timesteps / self.config.num_train_timesteps + sigmas = self.config.shift * sigmas / (1 + (self.config.shift - 1) * sigmas) + sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device) + + timesteps = sigmas * self.config.num_train_timesteps + timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) + self.timesteps = timesteps.to(device=device) + + sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)]) + self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) + + # empty dt and derivative + self.prev_derivative = None + self.dt = None + + self._step_index = None + self._begin_index = None + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def state_in_first_order(self): + return self.dt is None + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + s_churn: float = 0.0, + s_tmin: float = 0.0, + s_tmax: float = float("inf"), + s_noise: float = 1.0, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[FlowMatchHeunDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). 
+ + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + s_churn (`float`): + s_tmin (`float`): + s_tmax (`float`): + s_noise (`float`, defaults to 1.0): + Scaling factor for noise added to the sample. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_Heun_discrete.HeunDiscreteSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_Heun_discrete.HeunDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_Heun_discrete.HeunDiscreteSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `HeunDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order / Heun's method + sigma = self.sigmas[self.step_index - 1] + sigma_next = self.sigmas[self.step_index] + + gamma = min(s_churn / (len(self.sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigma <= s_tmax else 0.0 + + noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, device=model_output.device, generator=generator + ) + + eps = noise * s_noise + sigma_hat = sigma * (gamma + 1) + + if gamma > 0: + sample = sample + eps * (sigma_hat**2 - sigma**2) ** 0.5 + + if self.state_in_first_order: + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + denoised = sample - model_output * sigma + # 2. convert to an ODE derivative for 1st order + derivative = (sample - denoised) / sigma_hat + # 3. Delta timestep + dt = sigma_next - sigma_hat + + # store for 2nd order step + self.prev_derivative = derivative + self.dt = dt + self.sample = sample + else: + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + denoised = sample - model_output * sigma_next + # 2. 2nd order / Heun's method + derivative = (sample - denoised) / sigma_next + derivative = 0.5 * (self.prev_derivative + derivative) + + # 3. 
take prev timestep & sample + dt = self.dt + sample = self.sample + + # free dt and derivative + # Note, this puts the scheduler in "first order mode" + self.prev_derivative = None + self.dt = None + self.sample = None + + prev_sample = sample + derivative * dt + # Cast sample back to model compatible dtype + prev_sample = prev_sample.to(model_output.dtype) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return FlowMatchHeunDiscreteSchedulerOutput(prev_sample=prev_sample) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_heun_discrete.py b/diffusers3/schedulers/scheduling_heun_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..8d0a4a830f425d4da57f4d85bd672badc9ddd077 --- /dev/null +++ b/diffusers3/schedulers/scheduling_heun_discrete.py @@ -0,0 +1,504 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Scheduler with Heun steps for discrete beta schedules. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. 
Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + prediction_type: str = "epsilon", + use_karras_sigmas: Optional[bool] = False, + clip_sample: Optional[bool] = False, + clip_sample_range: float = 1.0, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine") + elif beta_schedule == "exp": + self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp") + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + self.use_karras_sigmas = use_karras_sigmas + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.Tensor, + timestep: Union[float, torch.Tensor], + ) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + timesteps: Optional[List[int]] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
+ device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + num_train_timesteps (`int`, *optional*): + The number of diffusion steps used when training the model. If `None`, the default + `num_train_timesteps` attribute is used. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, timesteps will be + generated based on the `timestep_spacing` attribute. If `timesteps` is passed, `num_inference_steps` + must be `None`, and `timestep_spacing` attribute will be ignored. + """ + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `custom_timesteps`.") + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + if timesteps is not None and self.config.use_karras_sigmas: + raise ValueError("Cannot use `timesteps` with `config.use_karras_sigmas = True`") + + num_inference_steps = num_inference_steps or len(timesteps) + self.num_inference_steps = num_inference_steps + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + if timesteps is not None: + timesteps = np.array(timesteps, dtype=np.float32) + else: + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) + + timesteps = torch.from_numpy(timesteps) + timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) + + self.timesteps = timesteps.to(device=device) + + # empty dt and derivative + self.prev_derivative = None + self.dt = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + @property + def state_in_first_order(self): + return self.dt is None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: Union[torch.Tensor, np.ndarray], + timestep: Union[float, torch.Tensor], + sample: Union[torch.Tensor, np.ndarray], + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). 
+ + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order / Heun's method + sigma = self.sigmas[self.step_index - 1] + sigma_next = self.sigmas[self.step_index] + + # currently only gamma=0 is supported. This usually works best anyways. + # We can support gamma in the future but then need to scale the timestep before + # passing it to the model which requires a change in API + gamma = 0 + sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma_hat if self.state_in_first_order else sigma_next + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma_hat if self.state_in_first_order else sigma_next + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if self.config.clip_sample: + pred_original_sample = pred_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + if self.state_in_first_order: + # 2. Convert to an ODE derivative for 1st order + derivative = (sample - pred_original_sample) / sigma_hat + # 3. delta timestep + dt = sigma_next - sigma_hat + + # store for 2nd order step + self.prev_derivative = derivative + self.dt = dt + self.sample = sample + else: + # 2. 2nd order / Heun's method + derivative = (sample - pred_original_sample) / sigma_next + derivative = (self.prev_derivative + derivative) / 2 + + # 3. 
take prev timestep & sample + dt = self.dt + sample = self.sample + + # free dt and derivative + # Note, this puts the scheduler in "first order mode" + self.prev_derivative = None + self.dt = None + self.sample = None + + prev_sample = sample + derivative * dt + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_ipndm.py b/diffusers3/schedulers/scheduling_ipndm.py new file mode 100644 index 0000000000000000000000000000000000000000..28f349ae21147acbb670c4e383749174bcb92d53 --- /dev/null +++ b/diffusers3/schedulers/scheduling_ipndm.py @@ -0,0 +1,224 @@ +# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +class IPNDMScheduler(SchedulerMixin, ConfigMixin): + """ + A fourth-order Improved Pseudo Linear Multistep scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. 
+ + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + """ + + order = 1 + + @register_to_config + def __init__( + self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None + ): + # set `betas`, `alphas`, `timesteps` + self.set_timesteps(num_train_timesteps) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # For now we only support F-PNDM, i.e. the runge-kutta method + # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf + # mainly at formula (9), (12), (13) and the Algorithm 2. + self.pndm_order = 4 + + # running values + self.ets = [] + self._step_index = None + self._begin_index = None + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1] + steps = torch.cat([steps, torch.tensor([0.0])]) + + if self.config.trained_betas is not None: + self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32) + else: + self.betas = torch.sin(steps * math.pi / 2) ** 2 + + self.alphas = (1.0 - self.betas**2) ** 0.5 + + timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1] + self.timesteps = timesteps.to(device) + + self.ets = [] + self._step_index = None + self._begin_index = None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the linear multistep method. It performs one forward pass multiple times to approximate the solution. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + if self.step_index is None: + self._init_step_index(timestep) + + timestep_index = self.step_index + prev_timestep_index = self.step_index + 1 + + ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] + self.ets.append(ets) + + if len(self.ets) == 1: + ets = self.ets[-1] + elif len(self.ets) == 2: + ets = (3 * self.ets[-1] - self.ets[-2]) / 2 + elif len(self.ets) == 3: + ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 + else: + ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) + + prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
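# --- Editor's illustrative sketch (not part of the patch): the `step` method above keeps a
# --- history of `ets` values and blends the most recent ones with fixed Adams-Bashforth-style
# --- weights (1-, 2-, 3-, then 4-term) as the history fills up. `history` is just a list of
# --- floats here; note every weight set sums to 1, so a constant signal is preserved.
def blend_ets(history):
    if len(history) == 1:
        return history[-1]
    if len(history) == 2:
        return (3 * history[-1] - history[-2]) / 2
    if len(history) == 3:
        return (23 * history[-1] - 16 * history[-2] + 5 * history[-3]) / 12
    return (55 * history[-1] - 59 * history[-2] + 37 * history[-3] - 9 * history[-4]) / 24

history = []
for value in [1.0, 1.0, 1.0, 1.0, 1.0]:
    history.append(value)
    print(len(history), blend_ets(history))  # always 1.0, since the weights sum to 1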
+ """ + return sample + + def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets): + alpha = self.alphas[timestep_index] + sigma = self.betas[timestep_index] + + next_alpha = self.alphas[prev_timestep_index] + next_sigma = self.betas[prev_timestep_index] + + pred = (sample - sigma * ets) / max(alpha, 1e-8) + prev_sample = next_alpha * pred + ets * next_sigma + + return prev_sample + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_k_dpm_2_ancestral_discrete.py b/diffusers3/schedulers/scheduling_k_dpm_2_ancestral_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..338412d96bd55b976e9f485f559ad9a0e115830e --- /dev/null +++ b/diffusers3/schedulers/scheduling_k_dpm_2_ancestral_discrete.py @@ -0,0 +1,512 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + KDPM2DiscreteScheduler with ancestral sampling is inspired by the DPMSolver2 and Algorithm 2 from the [Elucidating + the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper. 
+ + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.00085): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.012): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + use_karras_sigmas: Optional[bool] = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. 
It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.Tensor, + timestep: Union[float, torch.Tensor], + ) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + else: + sigma = self.sigmas_interpol[self.step_index - 1] + + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + + self.log_sigmas = torch.from_numpy(log_sigmas).to(device) + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + + # compute up and down sigmas + sigmas_next = sigmas.roll(-1) + sigmas_next[-1] = 0.0 + sigmas_up = (sigmas_next**2 * (sigmas**2 - sigmas_next**2) / sigmas**2) ** 0.5 + sigmas_down = (sigmas_next**2 - sigmas_up**2) ** 0.5 + sigmas_down[-1] = 0.0 + + # compute interpolated sigmas + sigmas_interpol = sigmas.log().lerp(sigmas_down.log(), 0.5).exp() + sigmas_interpol[-2:] = 0.0 + + # set sigmas + self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) + self.sigmas_interpol = torch.cat( + [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]] + ) + self.sigmas_up = torch.cat([sigmas_up[:1], sigmas_up[1:].repeat_interleave(2), sigmas_up[-1:]]) + self.sigmas_down = torch.cat([sigmas_down[:1], sigmas_down[1:].repeat_interleave(2), sigmas_down[-1:]]) + + if str(device).startswith("mps"): + timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) + else: + timesteps = torch.from_numpy(timesteps).to(device) + + sigmas_interpol = sigmas_interpol.cpu() + log_sigmas = self.log_sigmas.cpu() + timesteps_interpol = np.array( + [self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol] + ) + + timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype) + interleaved_timesteps = torch.stack((timesteps_interpol[:-2, None], timesteps[1:, None]), dim=-1).flatten() + + self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps]) + + self.sample = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. 
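# --- Editor's illustrative check (not part of the patch): the ancestral split computed in
# --- `set_timesteps` above divides each transition into a deterministic part (`sigma_down`)
# --- and a stochastic part (`sigma_up`) such that sigma_down**2 + sigma_up**2 == sigma_next**2.
# --- The sigma values below are made up for demonstration.
import torch

sigmas = torch.tensor([14.6, 7.0, 3.0, 1.0])
sigmas_next = torch.tensor([7.0, 3.0, 1.0, 0.0])

sigmas_up = (sigmas_next**2 * (sigmas**2 - sigmas_next**2) / sigmas**2) ** 0.5
sigmas_down = (sigmas_next**2 - sigmas_up**2) ** 0.5

print(torch.allclose(sigmas_up**2 + sigmas_down**2, sigmas_next**2))  # True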
(2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + @property + def state_in_first_order(self): + return self.sample is None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: Union[torch.Tensor, np.ndarray], + timestep: Union[float, torch.Tensor], + sample: Union[torch.Tensor, np.ndarray], + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_ddim.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
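# --- Editor's illustrative sketch (not part of the patch): the Karras et al. (2022) schedule
# --- built by `_convert_to_karras` above interpolates between sigma_max and sigma_min in
# --- 1/rho-space with rho = 7, which clusters steps near the low-noise end. Values are
# --- illustrative, not taken from a real model.
import numpy as np

sigma_min, sigma_max, rho = 0.03, 14.6, 7.0
num_inference_steps = 5

ramp = np.linspace(0, 1, num_inference_steps)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(sigmas)  # decreasing from ~14.6 to ~0.03, with finer spacing at small sigma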
+ """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_interpol = self.sigmas_interpol[self.step_index] + sigma_up = self.sigmas_up[self.step_index] + sigma_down = self.sigmas_down[self.step_index - 1] + else: + # 2nd order / KPDM2's method + sigma = self.sigmas[self.step_index - 1] + sigma_interpol = self.sigmas_interpol[self.step_index - 1] + sigma_up = self.sigmas_up[self.step_index - 1] + sigma_down = self.sigmas_down[self.step_index - 1] + + # currently only gamma=0 is supported. This usually works best anyways. + # We can support gamma in the future but then need to scale the timestep before + # passing it to the model which requires a change in API + gamma = 0 + sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now + + device = model_output.device + noise = randn_tensor(model_output.shape, dtype=model_output.dtype, device=device, generator=generator) + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if self.state_in_first_order: + # 2. Convert to an ODE derivative for 1st order + derivative = (sample - pred_original_sample) / sigma_hat + # 3. delta timestep + dt = sigma_interpol - sigma_hat + + # store for 2nd order step + self.sample = sample + self.dt = dt + prev_sample = sample + derivative * dt + else: + # DPM-Solver-2 + # 2. Convert to an ODE derivative for 2nd order + derivative = (sample - pred_original_sample) / sigma_interpol + # 3. 
delta timestep + dt = sigma_down - sigma_hat + + sample = self.sample + self.sample = None + + prev_sample = sample + derivative * dt + prev_sample = prev_sample + noise * sigma_up + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_k_dpm_2_discrete.py b/diffusers3/schedulers/scheduling_k_dpm_2_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..de66a7b6eaa1a1934f00e7acfe82ff6e956f1c09 --- /dev/null +++ b/diffusers3/schedulers/scheduling_k_dpm_2_discrete.py @@ -0,0 +1,487 @@ +# Copyright 2024 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. 
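# --- Editor's hedged usage sketch (not part of the patch): this vendored scheduler mirrors
# --- the upstream diffusers KDPM2AncestralDiscreteScheduler, so a typical way to exercise it
# --- is to swap it into a pipeline via `from_config`. The model id, prompt, and step count
# --- are illustrative; running this needs the upstream `diffusers` package (or the local
# --- `diffusers3` copy), the model weights, and a GPU.
import torch
from diffusers import DiffusionPipeline, KDPM2AncestralDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)
image = pipe("a photo of an astronaut riding a horse", num_inference_steps=30).images[0]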
+ + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + KDPM2DiscreteScheduler is inspired by the DPMSolver2 and Algorithm 2 from the [Elucidating the Design Space of + Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364) paper. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.00085): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.012): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 2 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, # sensible defaults + beta_end: float = 0.012, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + use_karras_sigmas: Optional[bool] = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # set all values + self.set_timesteps(num_train_timesteps, None, num_train_timesteps) + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input( + self, + sample: torch.Tensor, + timestep: Union[float, torch.Tensor], + ) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + else: + sigma = self.sigmas_interpol[self.step_index] + + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + num_train_timesteps: Optional[int] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. 
+ device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + + self.log_sigmas = torch.from_numpy(log_sigmas).to(device=device) + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + sigmas = torch.from_numpy(sigmas).to(device=device) + + # interpolate sigmas + sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp() + + self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) + self.sigmas_interpol = torch.cat( + [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]] + ) + + timesteps = torch.from_numpy(timesteps).to(device) + + # interpolate timesteps + sigmas_interpol = sigmas_interpol.cpu() + log_sigmas = self.log_sigmas.cpu() + timesteps_interpol = np.array( + [self._sigma_to_t(sigma_interpol, log_sigmas) for sigma_interpol in sigmas_interpol] + ) + timesteps_interpol = torch.from_numpy(timesteps_interpol).to(device, dtype=timesteps.dtype) + interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten() + + self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps]) + + self.sample = None + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def state_in_first_order(self): + return self.sample is None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index 
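# --- Editor's worked example (not part of the patch): the three `timestep_spacing` modes
# --- handled in `set_timesteps` above, evaluated for num_train_timesteps=1000 and 4
# --- inference steps. Standalone numpy only.
import numpy as np

num_train_timesteps, num_inference_steps, steps_offset = 1000, 4, 0

linspace = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()

step_ratio = num_train_timesteps // num_inference_steps
leading = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
leading += steps_offset

step_ratio = num_train_timesteps / num_inference_steps
trailing = np.arange(num_train_timesteps, 0, -step_ratio).round().copy().astype(np.float32)
trailing -= 1

print(linspace)  # [999. 666. 333.   0.]
print(leading)   # [750. 500. 250.   0.]
print(trailing)  # [999. 749. 499. 249.]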
if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def step( + self, + model_output: Union[torch.Tensor, np.ndarray], + timestep: Union[float, torch.Tensor], + sample: Union[torch.Tensor, np.ndarray], + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
+ """ + if self.step_index is None: + self._init_step_index(timestep) + + if self.state_in_first_order: + sigma = self.sigmas[self.step_index] + sigma_interpol = self.sigmas_interpol[self.step_index + 1] + sigma_next = self.sigmas[self.step_index + 1] + else: + # 2nd order / KDPM2's method + sigma = self.sigmas[self.step_index - 1] + sigma_interpol = self.sigmas_interpol[self.step_index] + sigma_next = self.sigmas[self.step_index] + + # currently only gamma=0 is supported. This usually works best anyways. + # We can support gamma in the future but then need to scale the timestep before + # passing it to the model which requires a change in API + gamma = 0 + sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = sample - sigma_input * model_output + elif self.config.prediction_type == "v_prediction": + sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol + pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( + sample / (sigma_input**2 + 1) + ) + elif self.config.prediction_type == "sample": + raise NotImplementedError("prediction_type not implemented yet: sample") + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + if self.state_in_first_order: + # 2. Convert to an ODE derivative for 1st order + derivative = (sample - pred_original_sample) / sigma_hat + # 3. delta timestep + dt = sigma_interpol - sigma_hat + + # store for 2nd order step + self.sample = sample + else: + # DPM-Solver-2 + # 2. Convert to an ODE derivative for 2nd order + derivative = (sample - pred_original_sample) / sigma_interpol + + # 3. 
delta timestep + dt = sigma_next - sigma_hat + + sample = self.sample + self.sample = None + + # upon completion increase step index by one + self._step_index += 1 + + prev_sample = sample + derivative * dt + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_karras_ve_flax.py b/diffusers3/schedulers/scheduling_karras_ve_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..0d387b53ac3ea6ab13bd7dd3c91972d6b427e3cb --- /dev/null +++ b/diffusers3/schedulers/scheduling_karras_ve_flax.py @@ -0,0 +1,238 @@ +# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp +from jax import random + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils_flax import FlaxSchedulerMixin + + +@flax.struct.dataclass +class KarrasVeSchedulerState: + # setable values + num_inference_steps: Optional[int] = None + timesteps: Optional[jnp.ndarray] = None + schedule: Optional[jnp.ndarray] = None # sigma(t_i) + + @classmethod + def create(cls): + return cls() + + +@dataclass +class FlaxKarrasVeOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Derivative of predicted original image sample (x_0). + state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. + """ + + prev_sample: jnp.ndarray + derivative: jnp.ndarray + state: KarrasVeSchedulerState + + +class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and + the VE column of Table 1 from [1] for reference. + + [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." + https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic + differential equations." https://arxiv.org/abs/2011.13456 + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of + Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the + optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. + + Args: + sigma_min (`float`): minimum noise magnitude + sigma_max (`float`): maximum noise magnitude + s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. + A reasonable range is [1.000, 1.011]. + s_churn (`float`): the parameter controlling the overall amount of stochasticity. + A reasonable range is [0, 100]. + s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). + A reasonable range is [0, 10]. + s_max (`float`): the end value of the sigma range where we add noise. + A reasonable range is [0.2, 80]. 
+ """ + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + sigma_min: float = 0.02, + sigma_max: float = 100, + s_noise: float = 1.007, + s_churn: float = 80, + s_min: float = 0.05, + s_max: float = 50, + ): + pass + + def create_state(self): + return KarrasVeSchedulerState.create() + + def set_timesteps( + self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> KarrasVeSchedulerState: + """ + Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`KarrasVeSchedulerState`): + the `FlaxKarrasVeScheduler` state data class. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + + """ + timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() + schedule = [ + ( + self.config.sigma_max**2 + * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) + ) + for i in timesteps + ] + + return state.replace( + num_inference_steps=num_inference_steps, + schedule=jnp.array(schedule, dtype=jnp.float32), + timesteps=timesteps, + ) + + def add_noise_to_input( + self, + state: KarrasVeSchedulerState, + sample: jnp.ndarray, + sigma: float, + key: jax.Array, + ) -> Tuple[jnp.ndarray, float]: + """ + Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i โ‰ฅ 0 to reach a + higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. + + TODO Args: + """ + if self.config.s_min <= sigma <= self.config.s_max: + gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1) + else: + gamma = 0 + + # sample eps ~ N(0, S_noise^2 * I) + key = random.split(key, num=1) + eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) + sigma_hat = sigma + gamma * sigma + sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) + + return sample_hat, sigma_hat + + def step( + self, + state: KarrasVeSchedulerState, + model_output: jnp.ndarray, + sigma_hat: float, + sigma_prev: float, + sample_hat: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxKarrasVeOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. + model_output (`torch.Tensor` or `np.ndarray`): direct output from learned diffusion model. + sigma_hat (`float`): TODO + sigma_prev (`float`): TODO + sample_hat (`torch.Tensor` or `np.ndarray`): TODO + return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class + + Returns: + [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion + chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is + True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
+ """ + + pred_original_sample = sample_hat + sigma_hat * model_output + derivative = (sample_hat - pred_original_sample) / sigma_hat + sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative + + if not return_dict: + return (sample_prev, derivative, state) + + return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) + + def step_correct( + self, + state: KarrasVeSchedulerState, + model_output: jnp.ndarray, + sigma_hat: float, + sigma_prev: float, + sample_hat: jnp.ndarray, + sample_prev: jnp.ndarray, + derivative: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxKarrasVeOutput, Tuple]: + """ + Correct the predicted sample based on the output model_output of the network. TODO complete description + + Args: + state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. + model_output (`torch.Tensor` or `np.ndarray`): direct output from learned diffusion model. + sigma_hat (`float`): TODO + sigma_prev (`float`): TODO + sample_hat (`torch.Tensor` or `np.ndarray`): TODO + sample_prev (`torch.Tensor` or `np.ndarray`): TODO + derivative (`torch.Tensor` or `np.ndarray`): TODO + return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class + + Returns: + prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO + + """ + pred_original_sample = sample_prev + sigma_prev * model_output + derivative_corr = (sample_prev - pred_original_sample) / sigma_prev + sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) + + if not return_dict: + return (sample_prev, derivative, state) + + return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) + + def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): + raise NotImplementedError() diff --git a/diffusers3/schedulers/scheduling_lcm.py b/diffusers3/schedulers/scheduling_lcm.py new file mode 100644 index 0000000000000000000000000000000000000000..f1aa09ab1723c1f4d48cc4625323db0e10080d6d --- /dev/null +++ b/diffusers3/schedulers/scheduling_lcm.py @@ -0,0 +1,658 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class LCMSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. 
+ + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + denoised: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor: + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class LCMScheduler(SchedulerMixin, ConfigMixin): + """ + `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with + non-Markovian guidance. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. 
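# --- Editor's standalone check (not part of the patch) of `rescale_zero_terminal_snr` above:
# --- after rescaling, the last cumulative alpha is (numerically) zero, i.e. the terminal SNR
# --- is zero, while the first step is left unchanged. The betas use this file's
# --- scaled_linear defaults (beta_start=0.00085, beta_end=0.012).
import torch

def rescale_zero_terminal_snr(betas):
    alphas_bar_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
    alphas_bar_sqrt -= alphas_bar_sqrt_T                                    # shift last step to zero
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)  # keep first step fixed
    alphas_bar = alphas_bar_sqrt**2
    alphas = torch.cat([alphas_bar[0:1], alphas_bar[1:] / alphas_bar[:-1]])
    return 1 - alphas

betas = torch.linspace(0.00085**0.5, 0.012**0.5, 1000, dtype=torch.float32) ** 2
new_betas = rescale_zero_terminal_snr(betas)
alphas_bar = torch.cumprod(1.0 - new_betas, dim=0)
print(alphas_bar[-1].item())  # ~0.0 -> zero terminal SNR
print(alphas_bar[0].item())   # ~0.99915, i.e. 1 - beta_0, unchanged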
[`~ConfigMixin`] takes care of storing all config + attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be + accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving + functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + original_inference_steps (`int`, *optional*, defaults to 50): + The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we + will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + timestep_scaling (`float`, defaults to 10.0): + The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions + `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation + error at the default of `10.0` is already pretty small). + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. 
Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, + beta_end: float = 0.012, + beta_schedule: str = "scaled_linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + original_inference_steps: int = 50, + clip_sample: bool = False, + clip_sample_range: float = 1.0, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + timestep_scaling: float = 10.0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + self.custom_timesteps = False + + self._step_index = None + self._begin_index = None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def step_index(self): + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + original_inference_steps: Optional[int] = None, + timesteps: Optional[List[int]] = None, + strength: int = 1.0, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`, *optional*): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + original_inference_steps (`int`, *optional*): + The original number of inference steps, which will be used to generate a linearly-spaced timestep + schedule (which is different from the standard `diffusers` implementation). We will then take + `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as + our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps on the training/distillation timestep + schedule is used. If `timesteps` is passed, `num_inference_steps` must be `None`. + """ + # 0. Check inputs + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `custom_timesteps`.") + + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + # 1. Calculate the LCM original training/distillation timestep schedule. + original_steps = ( + original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps + ) + + if original_steps > self.config.num_train_timesteps: + raise ValueError( + f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + # LCM Timesteps Setting + # The skipping step parameter k from the paper. 
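+        # Example with the config defaults in this file: num_train_timesteps=1000 and original_inference_steps=50
+        # give k = 20, so for strength=1.0 the schedule below is lcm_origin_timesteps = [19, 39, 59, ..., 999].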
+ k = self.config.num_train_timesteps // original_steps + # LCM Training/Distillation Steps Schedule + # Currently, only a linearly-spaced schedule is supported (same as in the LCM distillation scripts). + lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 + + # 2. Calculate the LCM inference timestep schedule. + if timesteps is not None: + # 2.1 Handle custom timestep schedules. + train_timesteps = set(lcm_origin_timesteps) + non_train_timesteps = [] + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[i] not in train_timesteps: + non_train_timesteps.append(timesteps[i]) + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1 + if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: + logger.warning( + f"The first timestep on the custom timestep schedule is {timesteps[0]}, not" + f" `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. You may get" + f" unexpected results when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule contains timesteps not on original timestep schedule + if non_train_timesteps: + logger.warning( + f"The custom timestep schedule contains the following timesteps which are not on the original" + f" training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results" + f" when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule is longer than original_steps + if len(timesteps) > original_steps: + logger.warning( + f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" + f" the length of the timestep schedule used for training: {original_steps}. You may get some" + f" unexpected results when using this timestep schedule." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.num_inference_steps = len(timesteps) + self.custom_timesteps = True + + # Apply strength (e.g. for img2img pipelines) (see StableDiffusionImg2ImgPipeline.get_timesteps) + init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) + t_start = max(self.num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.order :] + # TODO: also reset self.num_inference_steps? + else: + # 2.2 Create the "standard" LCM inference timestep schedule. + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + skipping_step = len(lcm_origin_timesteps) // num_inference_steps + + if skipping_step < 1: + raise ValueError( + f"The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}." 
+ ) + + self.num_inference_steps = num_inference_steps + + if num_inference_steps > original_steps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:" + f" {original_steps} because the final timestep schedule will be a subset of the" + f" `original_inference_steps`-sized initial timestep schedule." + ) + + # LCM Inference Steps Schedule + lcm_origin_timesteps = lcm_origin_timesteps[::-1].copy() + # Select (approximately) evenly spaced indices from lcm_origin_timesteps. + inference_indices = np.linspace(0, len(lcm_origin_timesteps), num=num_inference_steps, endpoint=False) + inference_indices = np.floor(inference_indices).astype(np.int64) + timesteps = lcm_origin_timesteps[inference_indices] + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) + + self._step_index = None + self._begin_index = None + + def get_scalings_for_boundary_condition_discrete(self, timestep): + self.sigma_data = 0.5 # Default: 0.5 + scaled_timestep = timestep * self.config.timestep_scaling + + c_skip = self.sigma_data**2 / (scaled_timestep**2 + self.sigma_data**2) + c_out = scaled_timestep / (scaled_timestep**2 + self.sigma_data**2) ** 0.5 + return c_skip, c_out + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[LCMSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`. + Returns: + [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # 1. get previous step value + prev_step_index = self.step_index + 1 + if prev_step_index < len(self.timesteps): + prev_timestep = self.timesteps[prev_step_index] + else: + prev_timestep = timestep + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # 3. Get scalings for boundary conditions + c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep) + + # 4. 
Compute the predicted original sample x_0 based on the model parameterization + if self.config.prediction_type == "epsilon": # noise-prediction + predicted_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() + elif self.config.prediction_type == "sample": # x-prediction + predicted_original_sample = model_output + elif self.config.prediction_type == "v_prediction": # v-prediction + predicted_original_sample = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for `LCMScheduler`." + ) + + # 5. Clip or threshold "predicted x_0" + if self.config.thresholding: + predicted_original_sample = self._threshold_sample(predicted_original_sample) + elif self.config.clip_sample: + predicted_original_sample = predicted_original_sample.clamp( + -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 6. Denoise model output using boundary conditions + denoised = c_out * predicted_original_sample + c_skip * sample + + # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference + # Noise is not used on the final timestep of the timestep schedule. + # This also means that noise is not used for one-step sampling. + if self.step_index != self.num_inference_steps - 1: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=denoised.dtype + ) + prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise + else: + prev_sample = denoised + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample, denoised) + + return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = 
alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git a/diffusers3/schedulers/scheduling_lms_discrete.py b/diffusers3/schedulers/scheduling_lms_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..9595bb4c71ba897bc0e3ec3c7155095b90f36451 --- /dev/null +++ b/diffusers3/schedulers/scheduling_lms_discrete.py @@ -0,0 +1,477 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from scipy import integrate + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete +class LMSDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
+ """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + A linear multistep scheduler for discrete beta schedules. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + use_karras_sigmas: Optional[bool] = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) + self.sigmas = torch.from_numpy(sigmas) + + # setable values + self.num_inference_steps = None + self.use_karras_sigmas = use_karras_sigmas + self.set_timesteps(num_train_timesteps, None) + self.derivatives = [] + self.is_scale_input_called = False + + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def init_noise_sigma(self): + # standard deviation of the initial noise distribution + if self.config.timestep_spacing in ["linspace", "trailing"]: + return self.sigmas.max() + + return (self.sigmas.max() ** 2 + 1) ** 0.5 + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Union[float, torch.Tensor]) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`float` or `torch.Tensor`): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + self.is_scale_input_called = True + return sample + + def get_lms_coefficient(self, order, t, current_order): + """ + Compute the linear multistep coefficient. 
+ + Args: + order (): + t (): + current_order (): + """ + + def lms_derivative(tau): + prod = 1.0 + for k in range(order): + if current_order == k: + continue + prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k]) + return prod + + integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0] + + return integrated_coeff + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + self.num_inference_steps = num_inference_steps + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[ + ::-1 + ].copy() + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + log_sigmas = np.log(sigmas) + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + + if self.config.use_karras_sigmas: + sigmas = self._convert_to_karras(in_sigmas=sigmas) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) + + sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas).to(device=device) + self.timesteps = torch.from_numpy(timesteps).to(device=device) + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + self.derivatives = [] + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + sigma_min: float = in_sigmas[-1].item() + sigma_max: float = in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, self.num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def step( + self, + model_output: torch.Tensor, + timestep: Union[float, torch.Tensor], + sample: torch.Tensor, + order: int = 4, + return_dict: bool = True, + ) -> Union[LMSDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`float` or `torch.Tensor`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + order (`int`, defaults to 4): + The order of the linear multistep method. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if not self.is_scale_input_called: + warnings.warn( + "The `scale_model_input` function should be called before `step` to ensure correct denoising. " + "See `StableDiffusionPipeline` for a usage example." + ) + + if self.step_index is None: + self._init_step_index(timestep) + + sigma = self.sigmas[self.step_index] + + # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + self.derivatives.append(derivative) + if len(self.derivatives) > order: + self.derivatives.pop(0) + + # 3. Compute linear multistep coefficients + order = min(self.step_index + 1, order) + lms_coeffs = [self.get_lms_coefficient(order, self.step_index, curr_order) for curr_order in range(order)] + + # 4. Compute previous sample based on the derivatives path + prev_sample = sample + sum( + coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives)) + ) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + noisy_samples = original_samples + noise * sigma + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_lms_discrete_flax.py b/diffusers3/schedulers/scheduling_lms_discrete_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..f1169cc90a7bd53d2ede80584e09647e5f800ccf --- /dev/null +++ b/diffusers3/schedulers/scheduling_lms_discrete_flax.py @@ -0,0 +1,283 @@ +# Copyright 2024 Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp +from scipy import integrate + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + broadcast_to_shape_from_left, +) + + +@flax.struct.dataclass +class LMSDiscreteSchedulerState: + common: CommonSchedulerState + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + sigmas: jnp.ndarray + num_inference_steps: Optional[int] = None + + # running values + derivatives: Optional[jnp.ndarray] = None + + @classmethod + def create( + cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray + ): + return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas) + + +@dataclass +class FlaxLMSSchedulerOutput(FlaxSchedulerOutput): + state: LMSDiscreteSchedulerState + + +class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by + Katherine Crowson: + https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181 + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear` or `scaled_linear`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. 
+ """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> LMSDiscreteSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + init_noise_sigma = sigmas.max() + + return LMSDiscreteSchedulerState.create( + common=common, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + sigmas=sigmas, + ) + + def scale_model_input(self, state: LMSDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray: + """ + Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm. + + Args: + state (`LMSDiscreteSchedulerState`): + the `FlaxLMSDiscreteScheduler` state data class instance. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + timestep (`int`): + current discrete timestep in the diffusion chain. + + Returns: + `jnp.ndarray`: scaled input sample + """ + (step_index,) = jnp.where(state.timesteps == timestep, size=1) + step_index = step_index[0] + + sigma = state.sigmas[step_index] + sample = sample / ((sigma**2 + 1) ** 0.5) + return sample + + def get_lms_coefficient(self, state: LMSDiscreteSchedulerState, order, t, current_order): + """ + Compute a linear multistep coefficient. + + Args: + order (TODO): + t (TODO): + current_order (TODO): + """ + + def lms_derivative(tau): + prod = 1.0 + for k in range(order): + if current_order == k: + continue + prod *= (tau - state.sigmas[t - k]) / (state.sigmas[t - current_order] - state.sigmas[t - k]) + return prod + + integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[t + 1], epsrel=1e-4)[0] + + return integrated_coeff + + def set_timesteps( + self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = () + ) -> LMSDiscreteSchedulerState: + """ + Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`LMSDiscreteSchedulerState`): + the `FlaxLMSDiscreteScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
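+            shape (`Tuple`, *optional*, defaults to `()`):
+                the per-sample shape used to initialize the running `derivatives` buffer.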
+ """ + + timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype) + + low_idx = jnp.floor(timesteps).astype(jnp.int32) + high_idx = jnp.ceil(timesteps).astype(jnp.int32) + + frac = jnp.mod(timesteps, 1.0) + + sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5 + sigmas = (1 - frac) * sigmas[low_idx] + frac * sigmas[high_idx] + sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)]) + + timesteps = timesteps.astype(jnp.int32) + + # initial running values + derivatives = jnp.zeros((0,) + shape, dtype=self.dtype) + + return state.replace( + timesteps=timesteps, + sigmas=sigmas, + num_inference_steps=num_inference_steps, + derivatives=derivatives, + ) + + def step( + self, + state: LMSDiscreteSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + order: int = 4, + return_dict: bool = True, + ) -> Union[FlaxLMSSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + state (`LMSDiscreteSchedulerState`): the `FlaxLMSDiscreteScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + order: coefficient for multi-step inference. + return_dict (`bool`): option for returning tuple rather than FlaxLMSSchedulerOutput class + + Returns: + [`FlaxLMSSchedulerOutput`] or `tuple`: [`FlaxLMSSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. + + """ + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + sigma = state.sigmas[timestep] + + # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise + if self.config.prediction_type == "epsilon": + pred_original_sample = sample - sigma * model_output + elif self.config.prediction_type == "v_prediction": + # * c_out + input * c_skip + pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" + ) + + # 2. Convert to an ODE derivative + derivative = (sample - pred_original_sample) / sigma + state = state.replace(derivatives=jnp.append(state.derivatives, derivative)) + if len(state.derivatives) > order: + state = state.replace(derivatives=jnp.delete(state.derivatives, 0)) + + # 3. Compute linear multistep coefficients + order = min(timestep + 1, order) + lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)] + + # 4. 
Compute previous sample based on the derivatives path + prev_sample = sample + sum( + coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(state.derivatives)) + ) + + if not return_dict: + return (prev_sample, state) + + return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state) + + def add_noise( + self, + state: LMSDiscreteSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + sigma = state.sigmas[timesteps].flatten() + sigma = broadcast_to_shape_from_left(sigma, noise.shape) + + noisy_samples = original_samples + noise * sigma + + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_pndm.py b/diffusers3/schedulers/scheduling_pndm.py new file mode 100644 index 0000000000000000000000000000000000000000..a05e71c3c2255e984a275bd9a69ac4e9f456ae1e --- /dev/null +++ b/diffusers3/schedulers/scheduling_pndm.py @@ -0,0 +1,476 @@ +# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. 
+ Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class PNDMScheduler(SchedulerMixin, ConfigMixin): + """ + `PNDMScheduler` uses pseudo numerical methods for diffusion models such as the Runge-Kutta and linear multi-step + method. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + skip_prk_steps (`bool`, defaults to `False`): + Allows the scheduler to skip the Runge-Kutta steps defined in the original paper as being required before + PLMS steps. + set_alpha_to_one (`bool`, defaults to `False`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process) + or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) + paper). + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + skip_prk_steps: bool = False, + set_alpha_to_one: bool = False, + prediction_type: str = "epsilon", + timestep_spacing: str = "leading", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # For now we only support F-PNDM, i.e. the runge-kutta method + # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf + # mainly at formula (9), (12), (13) and the Algorithm 2. + self.pndm_order = 4 + + # running values + self.cur_model_output = 0 + self.counter = 0 + self.cur_sample = None + self.ets = [] + + # setable values + self.num_inference_steps = None + self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy() + self.prk_timesteps = None + self.plms_timesteps = None + self.timesteps = None + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + + self.num_inference_steps = num_inference_steps + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + self._timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round().astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round() + self._timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / self.num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + self._timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio))[::-1].astype( + np.int64 + ) + self._timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." + ) + + if self.config.skip_prk_steps: + # for some models like stable diffusion the prk steps can/should be skipped to + # produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation + # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 + self.prk_timesteps = np.array([]) + self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[ + ::-1 + ].copy() + else: + prk_timesteps = np.array(self._timesteps[-self.pndm_order :]).repeat(2) + np.tile( + np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order + ) + self.prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1].copy() + self.plms_timesteps = self._timesteps[:-3][ + ::-1 + ].copy() # we copy to avoid having negative strides which are not supported by torch.from_numpy + + timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64) + self.timesteps = torch.from_numpy(timesteps).to(device) + + self.ets = [] + self.counter = 0 + self.cur_model_output = 0 + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise), and calls [`~PNDMScheduler.step_prk`] + or [`~PNDMScheduler.step_plms`] depending on the internal variable `counter`. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
+ + """ + if self.counter < len(self.prk_timesteps) and not self.config.skip_prk_steps: + return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) + else: + return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) + + def step_prk( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the Runge-Kutta method. It performs four forward passes to approximate the solution to the differential + equation. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2 + prev_timestep = timestep - diff_to_prev + timestep = self.prk_timesteps[self.counter // 4 * 4] + + if self.counter % 4 == 0: + self.cur_model_output += 1 / 6 * model_output + self.ets.append(model_output) + self.cur_sample = sample + elif (self.counter - 1) % 4 == 0: + self.cur_model_output += 1 / 3 * model_output + elif (self.counter - 2) % 4 == 0: + self.cur_model_output += 1 / 3 * model_output + elif (self.counter - 3) % 4 == 0: + model_output = self.cur_model_output + 1 / 6 * model_output + self.cur_model_output = 0 + + # cur_sample should not be `None` + cur_sample = self.cur_sample if self.cur_sample is not None else sample + + prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output) + self.counter += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def step_plms( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the linear multistep method. It performs one forward pass multiple times to approximate the solution. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if not self.config.skip_prk_steps and len(self.ets) < 3: + raise ValueError( + f"{self.__class__} can only be run AFTER scheduler has been run " + "in 'prk' mode for at least 12 iterations " + "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py " + "for more information." + ) + + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + if self.counter != 1: + self.ets = self.ets[-3:] + self.ets.append(model_output) + else: + prev_timestep = timestep + timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps + + if len(self.ets) == 1 and self.counter == 0: + model_output = model_output + self.cur_sample = sample + elif len(self.ets) == 1 and self.counter == 1: + model_output = (model_output + self.ets[-1]) / 2 + sample = self.cur_sample + self.cur_sample = None + elif len(self.ets) == 2: + model_output = (3 * self.ets[-1] - self.ets[-2]) / 2 + elif len(self.ets) == 3: + model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 + else: + model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) + + prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output) + self.counter += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
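+
+        Example (illustrative; PNDM applies no input scaling, so the sample is returned unchanged):
+
+        >>> import torch
+        >>> scheduler = PNDMScheduler()
+        >>> x = torch.randn(1, 4, 64, 64)
+        >>> assert torch.equal(scheduler.scale_model_input(x), x)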
+ """ + return sample + + def _get_prev_sample(self, sample, timestep, prev_timestep, model_output): + # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf + # this function computes x_(tโˆ’ฮด) using the formula of (9) + # Note that x_t needs to be added to both sides of the equation + + # Notation ( -> + # alpha_prod_t -> ฮฑ_t + # alpha_prod_t_prev -> ฮฑ_(tโˆ’ฮด) + # beta_prod_t -> (1 - ฮฑ_t) + # beta_prod_t_prev -> (1 - ฮฑ_(tโˆ’ฮด)) + # sample -> x_t + # model_output -> e_ฮธ(x_t, t) + # prev_sample -> x_(tโˆ’ฮด) + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if self.config.prediction_type == "v_prediction": + model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + elif self.config.prediction_type != "epsilon": + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" + ) + + # corresponds to (ฮฑ_(tโˆ’ฮด) - ฮฑ_t) divided by + # denominator of x_t in formula (9) and plus 1 + # Note: (ฮฑ_(tโˆ’ฮด) - ฮฑ_t) / (sqrt(ฮฑ_t) * (sqrt(ฮฑ_(tโˆ’ฮด)) + sqr(ฮฑ_t))) = + # sqrt(ฮฑ_(tโˆ’ฮด)) / sqrt(ฮฑ_t)) + sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) + + # corresponds to denominator of e_ฮธ(x_t, t) in formula (9) + model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( + alpha_prod_t * beta_prod_t * alpha_prod_t_prev + ) ** (0.5) + + # full formula (9) + prev_sample = ( + sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff + ) + + return prev_sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_pndm_flax.py b/diffusers3/schedulers/scheduling_pndm_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac3ba5ca1ba1f4b4ffcdc7d0261c5877a4dddd0 --- /dev/null +++ b/diffusers3/schedulers/scheduling_pndm_flax.py @@ -0,0 +1,509 @@ +# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import ( + CommonSchedulerState, + FlaxKarrasDiffusionSchedulers, + FlaxSchedulerMixin, + FlaxSchedulerOutput, + add_noise_common, +) + + +@flax.struct.dataclass +class PNDMSchedulerState: + common: CommonSchedulerState + final_alpha_cumprod: jnp.ndarray + + # setable values + init_noise_sigma: jnp.ndarray + timesteps: jnp.ndarray + num_inference_steps: Optional[int] = None + prk_timesteps: Optional[jnp.ndarray] = None + plms_timesteps: Optional[jnp.ndarray] = None + + # running values + cur_model_output: Optional[jnp.ndarray] = None + counter: Optional[jnp.int32] = None + cur_sample: Optional[jnp.ndarray] = None + ets: Optional[jnp.ndarray] = None + + @classmethod + def create( + cls, + common: CommonSchedulerState, + final_alpha_cumprod: jnp.ndarray, + init_noise_sigma: jnp.ndarray, + timesteps: jnp.ndarray, + ): + return cls( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + +@dataclass +class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput): + state: PNDMSchedulerState + + +class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques, + namely Runge-Kutta method and a linear multi-step method. + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + For more details, see the original paper: https://arxiv.org/abs/2202.09778 + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + beta_start (`float`): the starting `beta` value of inference. + beta_end (`float`): the final `beta` value. + beta_schedule (`str`): + the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`jnp.ndarray`, optional): + option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. + skip_prk_steps (`bool`): + allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required + before plms steps; defaults to `False`. + set_alpha_to_one (`bool`, default `False`): + each diffusion step uses the value of alphas product at that step and at the previous one. For the final + step there is no previous alpha. 
When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the value of alpha at step 0. + steps_offset (`int`, default `0`): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion + process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 + https://imagen.research.google/video/paper.pdf) + dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): + the `dtype` used for params and computation. + """ + + _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] + + dtype: jnp.dtype + pndm_order: int + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[jnp.ndarray] = None, + skip_prk_steps: bool = False, + set_alpha_to_one: bool = False, + steps_offset: int = 0, + prediction_type: str = "epsilon", + dtype: jnp.dtype = jnp.float32, + ): + self.dtype = dtype + + # For now we only support F-PNDM, i.e. the runge-kutta method + # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf + # mainly at formula (9), (12), (13) and the Algorithm 2. + self.pndm_order = 4 + + def create_state(self, common: Optional[CommonSchedulerState] = None) -> PNDMSchedulerState: + if common is None: + common = CommonSchedulerState.create(self) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + final_alpha_cumprod = ( + jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] + ) + + # standard deviation of the initial noise distribution + init_noise_sigma = jnp.array(1.0, dtype=self.dtype) + + timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] + + return PNDMSchedulerState.create( + common=common, + final_alpha_cumprod=final_alpha_cumprod, + init_noise_sigma=init_noise_sigma, + timesteps=timesteps, + ) + + def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState: + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`PNDMSchedulerState`): + the `FlaxPNDMScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + shape (`Tuple`): + the shape of the samples to be generated. + """ + + step_ratio = self.config.num_train_timesteps // num_inference_steps + # creates integer timesteps by multiplying by ratio + # rounding to avoid issues when num_inference_step is power of 3 + _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset + + if self.config.skip_prk_steps: + # for some models like stable diffusion the prk steps can/should be skipped to + # produce better results. 
When using PNDM with `self.config.skip_prk_steps` the implementation + # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 + + prk_timesteps = jnp.array([], dtype=jnp.int32) + plms_timesteps = jnp.concatenate([_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]])[::-1] + + else: + prk_timesteps = _timesteps[-self.pndm_order :].repeat(2) + jnp.tile( + jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2], dtype=jnp.int32), + self.pndm_order, + ) + + prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1] + plms_timesteps = _timesteps[:-3][::-1] + + timesteps = jnp.concatenate([prk_timesteps, plms_timesteps]) + + # initial running values + + cur_model_output = jnp.zeros(shape, dtype=self.dtype) + counter = jnp.int32(0) + cur_sample = jnp.zeros(shape, dtype=self.dtype) + ets = jnp.zeros((4,) + shape, dtype=self.dtype) + + return state.replace( + timesteps=timesteps, + num_inference_steps=num_inference_steps, + prk_timesteps=prk_timesteps, + plms_timesteps=plms_timesteps, + cur_model_output=cur_model_output, + counter=counter, + cur_sample=cur_sample, + ets=ets, + ) + + def scale_model_input( + self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None + ) -> jnp.ndarray: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + sample (`jnp.ndarray`): input sample + timestep (`int`, optional): current timestep + + Returns: + `jnp.ndarray`: scaled input sample + """ + return sample + + def step( + self, + state: PNDMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + return_dict: bool = True, + ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class + + Returns: + [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. 
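+
+        Example (illustrative sketch; a zero tensor stands in for the output of a real
+        noise-prediction model such as a UNet):
+
+        >>> import jax.numpy as jnp
+        >>> scheduler = FlaxPNDMScheduler(skip_prk_steps=True)
+        >>> state = scheduler.create_state()
+        >>> sample = jnp.zeros((1, 3, 32, 32), dtype=jnp.float32)
+        >>> state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample.shape)
+        >>> for t in state.timesteps:
+        ...     model_output = jnp.zeros_like(sample)  # placeholder for a real noise prediction
+        ...     sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)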
+ + """ + + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.config.skip_prk_steps: + prev_sample, state = self.step_plms(state, model_output, timestep, sample) + else: + prk_prev_sample, prk_state = self.step_prk(state, model_output, timestep, sample) + plms_prev_sample, plms_state = self.step_plms(state, model_output, timestep, sample) + + cond = state.counter < len(state.prk_timesteps) + + prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample) + + state = state.replace( + cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output), + ets=jax.lax.select(cond, prk_state.ets, plms_state.ets), + cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample), + counter=jax.lax.select(cond, prk_state.counter, plms_state.counter), + ) + + if not return_dict: + return (prev_sample, state) + + return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state) + + def step_prk( + self, + state: PNDMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: + """ + Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the + solution to the differential equation. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class + + Returns: + [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. 
+ + """ + + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + diff_to_prev = jnp.where( + state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2 + ) + prev_timestep = timestep - diff_to_prev + timestep = state.prk_timesteps[state.counter // 4 * 4] + + model_output = jax.lax.select( + (state.counter % 4) != 3, + model_output, # remainder 0, 1, 2 + state.cur_model_output + 1 / 6 * model_output, # remainder 3 + ) + + state = state.replace( + cur_model_output=jax.lax.select_n( + state.counter % 4, + state.cur_model_output + 1 / 6 * model_output, # remainder 0 + state.cur_model_output + 1 / 3 * model_output, # remainder 1 + state.cur_model_output + 1 / 3 * model_output, # remainder 2 + jnp.zeros_like(state.cur_model_output), # remainder 3 + ), + ets=jax.lax.select( + (state.counter % 4) == 0, + state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # remainder 0 + state.ets, # remainder 1, 2, 3 + ), + cur_sample=jax.lax.select( + (state.counter % 4) == 0, + sample, # remainder 0 + state.cur_sample, # remainder 1, 2, 3 + ), + ) + + cur_sample = state.cur_sample + prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output) + state = state.replace(counter=state.counter + 1) + + return (prev_sample, state) + + def step_plms( + self, + state: PNDMSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: + """ + Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple + times to approximate the solution. + + Args: + state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class + + Returns: + [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is the sample tensor. 
+ + """ + + if state.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + # NOTE: There is no way to check in the jitted runtime if the prk mode was ran before + + prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps + prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0) + + # Reference: + # if state.counter != 1: + # state.ets.append(model_output) + # else: + # prev_timestep = timestep + # timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps + + prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep) + timestep = jnp.where( + state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep + ) + + # Reference: + # if len(state.ets) == 1 and state.counter == 0: + # model_output = model_output + # state.cur_sample = sample + # elif len(state.ets) == 1 and state.counter == 1: + # model_output = (model_output + state.ets[-1]) / 2 + # sample = state.cur_sample + # state.cur_sample = None + # elif len(state.ets) == 2: + # model_output = (3 * state.ets[-1] - state.ets[-2]) / 2 + # elif len(state.ets) == 3: + # model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12 + # else: + # model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]) + + state = state.replace( + ets=jax.lax.select( + state.counter != 1, + state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # counter != 1 + state.ets, # counter 1 + ), + cur_sample=jax.lax.select( + state.counter != 1, + sample, # counter != 1 + state.cur_sample, # counter 1 + ), + ) + + state = state.replace( + cur_model_output=jax.lax.select_n( + jnp.clip(state.counter, 0, 4), + model_output, # counter 0 + (model_output + state.ets[-1]) / 2, # counter 1 + (3 * state.ets[-1] - state.ets[-2]) / 2, # counter 2 + (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12, # counter 3 + (1 / 24) + * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]), # counter >= 4 + ), + ) + + sample = state.cur_sample + model_output = state.cur_model_output + prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output) + state = state.replace(counter=state.counter + 1) + + return (prev_sample, state) + + def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output): + # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf + # this function computes x_(tโˆ’ฮด) using the formula of (9) + # Note that x_t needs to be added to both sides of the equation + + # Notation ( -> + # alpha_prod_t -> ฮฑ_t + # alpha_prod_t_prev -> ฮฑ_(tโˆ’ฮด) + # beta_prod_t -> (1 - ฮฑ_t) + # beta_prod_t_prev -> (1 - ฮฑ_(tโˆ’ฮด)) + # sample -> x_t + # model_output -> e_ฮธ(x_t, t) + # prev_sample -> x_(tโˆ’ฮด) + alpha_prod_t = state.common.alphas_cumprod[timestep] + alpha_prod_t_prev = jnp.where( + prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod + ) + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if self.config.prediction_type == "v_prediction": + model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + elif self.config.prediction_type != "epsilon": + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" + ) + + # 
corresponds to (ฮฑ_(tโˆ’ฮด) - ฮฑ_t) divided by + # denominator of x_t in formula (9) and plus 1 + # Note: (ฮฑ_(tโˆ’ฮด) - ฮฑ_t) / (sqrt(ฮฑ_t) * (sqrt(ฮฑ_(tโˆ’ฮด)) + sqr(ฮฑ_t))) = + # sqrt(ฮฑ_(tโˆ’ฮด)) / sqrt(ฮฑ_t)) + sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) + + # corresponds to denominator of e_ฮธ(x_t, t) in formula (9) + model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( + alpha_prod_t * beta_prod_t * alpha_prod_t_prev + ) ** (0.5) + + # full formula (9) + prev_sample = ( + sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff + ) + + return prev_sample + + def add_noise( + self, + state: PNDMSchedulerState, + original_samples: jnp.ndarray, + noise: jnp.ndarray, + timesteps: jnp.ndarray, + ) -> jnp.ndarray: + return add_noise_common(state.common, original_samples, noise, timesteps) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_repaint.py b/diffusers3/schedulers/scheduling_repaint.py new file mode 100644 index 0000000000000000000000000000000000000000..97665bb5277b35bf19663ee12a59a1daa00a9edb --- /dev/null +++ b/diffusers3/schedulers/scheduling_repaint.py @@ -0,0 +1,361 @@ +# Copyright 2024 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +@dataclass +class RePaintSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample (x_{0}) based on the model output from + the current timestep. `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: torch.Tensor + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. 
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class RePaintScheduler(SchedulerMixin, ConfigMixin): + """ + `RePaintScheduler` is a scheduler for DDPM inpainting inside a given mask. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, `squaredcos_cap_v2`, or `sigmoid`. + eta (`float`): + The weight of noise for added noise in diffusion step. If its value is between 0.0 and 1.0 it corresponds + to the DDIM scheduler, and if its value is between -0.0 and 1.0 it corresponds to the DDPM scheduler. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample between -1 and 1 for numerical stability. + + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + eta: float = 0.0, + trained_betas: Optional[np.ndarray] = None, + clip_sample: bool = True, + ): + if trained_betas is not None: + self.betas = torch.from_numpy(trained_betas) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + elif beta_schedule == "sigmoid": + # GeoDiff sigmoid schedule + betas = torch.linspace(-6, 6, num_train_timesteps) + self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + self.final_alpha_cumprod = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.eta = eta + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps( + self, + num_inference_steps: int, + jump_length: int = 10, + jump_n_sample: int = 10, + device: Union[str, torch.device] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + jump_length (`int`, defaults to 10): + The number of steps taken forward in time before going backward in time for a single jump (โ€œjโ€ in + RePaint paper). Take a look at Figure 9 and 10 in the paper. + jump_n_sample (`int`, defaults to 10): + The number of times to make a forward time jump for a given chosen time sample. Take a look at Figure 9 + and 10 in the paper. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
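+
+        Example (illustrative sketch of how the jumpy schedule is typically consumed: when the
+        timestep decreases the sample is denoised with `step`, and when it jumps forward it is
+        re-noised with `undo_step`; random tensors stand in for a real model and data):
+
+        >>> import torch
+        >>> scheduler = RePaintScheduler()
+        >>> scheduler.set_timesteps(num_inference_steps=50, jump_length=10, jump_n_sample=10)
+        >>> original_image = torch.randn(1, 3, 32, 32)
+        >>> mask = torch.ones_like(original_image)  # 1.0 = keep known pixels, 0.0 = inpaint
+        >>> sample = torch.randn_like(original_image)
+        >>> t_last = scheduler.timesteps[0] + 1
+        >>> for t in scheduler.timesteps:
+        ...     if t < t_last:
+        ...         model_output = torch.randn_like(sample)  # placeholder for a real noise prediction
+        ...         sample = scheduler.step(model_output, t, sample, original_image, mask).prev_sample
+        ...     else:
+        ...         sample = scheduler.undo_step(sample, t_last)
+        ...     t_last = t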
+ + """ + num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) + self.num_inference_steps = num_inference_steps + + timesteps = [] + + jumps = {} + for j in range(0, num_inference_steps - jump_length, jump_length): + jumps[j] = jump_n_sample - 1 + + t = num_inference_steps + while t >= 1: + t = t - 1 + timesteps.append(t) + + if jumps.get(t, 0) > 0: + jumps[t] = jumps[t] - 1 + for _ in range(jump_length): + t = t + 1 + timesteps.append(t) + + timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps) + self.timesteps = torch.from_numpy(timesteps).to(device) + + def _get_variance(self, t): + prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + # For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from + # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get + # previous sample x_{t-1} ~ N(pred_prev_sample, variance) == add + # variance to pred_sample + # Is equivalent to formula (16) in https://arxiv.org/pdf/2010.02502.pdf + # without eta. + # variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + original_image: torch.Tensor, + mask: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[RePaintSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + original_image (`torch.Tensor`): + The original image to inpaint on. + mask (`torch.Tensor`): + The mask where a value of 0.0 indicates which part of the original image to inpaint. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_repaint.RePaintSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. + + """ + t = timestep + prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps + + # 1. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + + # 2. compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 + + # 3. 
Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = torch.clamp(pred_original_sample, -1, 1) + + # We choose to follow RePaint Algorithm 1 to get x_{t-1}, however we + # substitute formula (7) in the algorithm coming from DDPM paper + # (formula (4) Algorithm 2 - Sampling) with formula (12) from DDIM paper. + # DDIM schedule gives the same results as DDPM with eta = 1.0 + # Noise is being reused in 7. and 8., but no impact on quality has + # been observed. + + # 5. Add noise + device = model_output.device + noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype) + std_dev_t = self.eta * self._get_variance(timestep) ** 0.5 + + variance = 0 + if t > 0 and self.eta > 0: + variance = std_dev_t * noise + + # 6. compute "direction pointing to x_t" of formula (12) + # from https://arxiv.org/pdf/2010.02502.pdf + pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output + + # 7. compute x_{t-1} of formula (12) from https://arxiv.org/pdf/2010.02502.pdf + prev_unknown_part = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction + variance + + # 8. Algorithm 1 Line 5 https://arxiv.org/pdf/2201.09865.pdf + prev_known_part = (alpha_prod_t_prev**0.5) * original_image + ((1 - alpha_prod_t_prev) ** 0.5) * noise + + # 9. Algorithm 1 Line 8 https://arxiv.org/pdf/2201.09865.pdf + pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part + + if not return_dict: + return ( + pred_prev_sample, + pred_original_sample, + ) + + return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + def undo_step(self, sample, timestep, generator=None): + n = self.config.num_train_timesteps // self.num_inference_steps + + for i in range(n): + beta = self.betas[timestep + i] + if sample.device.type == "mps": + # randn does not work reproducibly on mps + noise = randn_tensor(sample.shape, dtype=sample.dtype, generator=generator) + noise = noise.to(sample.device) + else: + noise = randn_tensor(sample.shape, generator=generator, device=sample.device, dtype=sample.dtype) + + # 10. Algorithm 1 Line 10 https://arxiv.org/pdf/2201.09865.pdf + sample = (1 - beta) ** 0.5 * sample + beta**0.5 * noise + + return sample + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + raise NotImplementedError("Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.") + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_sasolver.py b/diffusers3/schedulers/scheduling_sasolver.py new file mode 100644 index 0000000000000000000000000000000000000000..50049a53080015e0be81e1512375fbe4b256cfd3 --- /dev/null +++ b/diffusers3/schedulers/scheduling_sasolver.py @@ -0,0 +1,1125 @@ +# Copyright 2024 Shuchen Xue, etc. in University of Chinese Academy of Sciences Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: check https://arxiv.org/abs/2309.05019 +# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py + +import math +from typing import Callable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class SASolverScheduler(SchedulerMixin, ConfigMixin): + """ + `SASolverScheduler` is a fast dedicated high-order solver for diffusion SDEs. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + predictor_order (`int`, defaults to 2): + The predictor order which can be `1` or `2` or `3` or '4'. It is recommended to use `predictor_order=2` for + guided sampling, and `predictor_order=3` for unconditional sampling. + corrector_order (`int`, defaults to 2): + The corrector order which can be `1` or `2` or `3` or '4'. It is recommended to use `corrector_order=2` for + guided sampling, and `corrector_order=3` for unconditional sampling. 
+ prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + tau_func (`Callable`, *optional*): + Stochasticity during the sampling. Default in init is `lambda t: 1 if t >= 200 and t <= 800 else 0`. + SA-Solver will sample from vanilla diffusion ODE if tau_func is set to `lambda t: 0`. SA-Solver will sample + from vanilla diffusion SDE if tau_func is set to `lambda t: 1`. For more details, please check + https://arxiv.org/abs/2309.05019 + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and + `algorithm_type="dpmsolver++"`. + algorithm_type (`str`, defaults to `data_prediction`): + Algorithm type for the solver; can be `data_prediction` or `noise_prediction`. It is recommended to use + `data_prediction` with `solver_order=2` for guided sampling like in Stable Diffusion. + lower_order_final (`bool`, defaults to `True`): + Whether to use lower-order solvers in the final steps. Default = True. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + lambda_min_clipped (`float`, defaults to `-inf`): + Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the + cosine (`squaredcos_cap_v2`) noise schedule. + variance_type (`str`, *optional*): + Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output + contains the predicted Gaussian variance. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. 
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + predictor_order: int = 2, + corrector_order: int = 2, + prediction_type: str = "epsilon", + tau_func: Optional[Callable] = None, + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + algorithm_type: str = "data_prediction", + lower_order_final: bool = True, + use_karras_sigmas: Optional[bool] = False, + lambda_min_clipped: float = -float("inf"), + variance_type: Optional[str] = None, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = ( + torch.linspace( + beta_start**0.5, + beta_end**0.5, + num_train_timesteps, + dtype=torch.float32, + ) + ** 2 + ) + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + if algorithm_type not in ["data_prediction", "noise_prediction"]: + raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") + + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.timestep_list = [None] * max(predictor_order, corrector_order - 1) + self.model_outputs = [None] * max(predictor_order, corrector_order - 1) + + if tau_func is None: + self.tau_func = lambda t: 1 if t >= 200 and t <= 800 else 0 + else: + self.tau_func = tau_func + self.predict_x0 = algorithm_type == "data_prediction" + self.lower_order_nums = 0 + self.last_sample = None + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. 
+ + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int = None, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # Clipping the minimum of all lambda(t) for numerical stability. + # This is critical for cosine (squaredcos_cap_v2) noise schedule. + clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) + last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() + + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].copy().astype(np.int64) + ) + + elif self.config.timestep_spacing == "leading": + step_ratio = last_timestep // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + self.model_outputs = [ + None, + ] * max(self.config.predictor_order, self.config.corrector_order - 1) + self.lower_order_nums = 0 + self.last_sample = None + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + """ + Convert the model output to the corresponding type the data_prediction/noise_prediction algorithm needs. + Noise_prediction is designed to discretize an integral of the noise prediction model, and data_prediction is + designed to discretize an integral of the data prediction model. + + + + The algorithm and model type are decoupled. 
You can use either data_prediction or noise_prediction for both + noise prediction and data prediction models. + + + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. + """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + # SA-Solver_data_prediction needs to solve an integral of the data prediction model. + if self.config.algorithm_type in ["data_prediction"]: + if self.config.prediction_type == "epsilon": + # SA-Solver only needs the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + model_output = model_output[:, :3] + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the SASolverScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + + # SA-Solver_noise_prediction needs to solve an integral of the noise prediction model. + elif self.config.algorithm_type in ["noise_prediction"]: + if self.config.prediction_type == "epsilon": + # SA-Solver only needs the "mean" output. + if self.config.variance_type in ["learned", "learned_range"]: + epsilon = model_output[:, :3] + else: + epsilon = model_output + elif self.config.prediction_type == "sample": + epsilon = (sample - alpha_t * model_output) / sigma_t + elif self.config.prediction_type == "v_prediction": + epsilon = alpha_t * model_output + sigma_t * sample + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the SASolverScheduler." 
+ ) + + if self.config.thresholding: + alpha_t, sigma_t = self.alpha_t[timestep], self.sigma_t[timestep] + x0_pred = (sample - sigma_t * epsilon) / alpha_t + x0_pred = self._threshold_sample(x0_pred) + epsilon = (sample - alpha_t * x0_pred) / sigma_t + + return epsilon + + def get_coefficients_exponential_negative(self, order, interval_start, interval_end): + """ + Calculate the integral of exp(-x) * x^order dx from interval_start to interval_end + """ + assert order in [0, 1, 2, 3], "order is only supported for 0, 1, 2 and 3" + + if order == 0: + return torch.exp(-interval_end) * (torch.exp(interval_end - interval_start) - 1) + elif order == 1: + return torch.exp(-interval_end) * ( + (interval_start + 1) * torch.exp(interval_end - interval_start) - (interval_end + 1) + ) + elif order == 2: + return torch.exp(-interval_end) * ( + (interval_start**2 + 2 * interval_start + 2) * torch.exp(interval_end - interval_start) + - (interval_end**2 + 2 * interval_end + 2) + ) + elif order == 3: + return torch.exp(-interval_end) * ( + (interval_start**3 + 3 * interval_start**2 + 6 * interval_start + 6) + * torch.exp(interval_end - interval_start) + - (interval_end**3 + 3 * interval_end**2 + 6 * interval_end + 6) + ) + + def get_coefficients_exponential_positive(self, order, interval_start, interval_end, tau): + """ + Calculate the integral of exp(x(1+tau^2)) * x^order dx from interval_start to interval_end + """ + assert order in [0, 1, 2, 3], "order is only supported for 0, 1, 2 and 3" + + # after change of variable(cov) + interval_end_cov = (1 + tau**2) * interval_end + interval_start_cov = (1 + tau**2) * interval_start + + if order == 0: + return ( + torch.exp(interval_end_cov) * (1 - torch.exp(-(interval_end_cov - interval_start_cov))) / (1 + tau**2) + ) + elif order == 1: + return ( + torch.exp(interval_end_cov) + * ( + (interval_end_cov - 1) + - (interval_start_cov - 1) * torch.exp(-(interval_end_cov - interval_start_cov)) + ) + / ((1 + tau**2) ** 2) + ) + elif order == 2: + return ( + torch.exp(interval_end_cov) + * ( + (interval_end_cov**2 - 2 * interval_end_cov + 2) + - (interval_start_cov**2 - 2 * interval_start_cov + 2) + * torch.exp(-(interval_end_cov - interval_start_cov)) + ) + / ((1 + tau**2) ** 3) + ) + elif order == 3: + return ( + torch.exp(interval_end_cov) + * ( + (interval_end_cov**3 - 3 * interval_end_cov**2 + 6 * interval_end_cov - 6) + - (interval_start_cov**3 - 3 * interval_start_cov**2 + 6 * interval_start_cov - 6) + * torch.exp(-(interval_end_cov - interval_start_cov)) + ) + / ((1 + tau**2) ** 4) + ) + + def lagrange_polynomial_coefficient(self, order, lambda_list): + """ + Calculate the coefficient of lagrange polynomial + """ + + assert order in [0, 1, 2, 3] + assert order == len(lambda_list) - 1 + if order == 0: + return [[1]] + elif order == 1: + return [ + [ + 1 / (lambda_list[0] - lambda_list[1]), + -lambda_list[1] / (lambda_list[0] - lambda_list[1]), + ], + [ + 1 / (lambda_list[1] - lambda_list[0]), + -lambda_list[0] / (lambda_list[1] - lambda_list[0]), + ], + ] + elif order == 2: + denominator1 = (lambda_list[0] - lambda_list[1]) * (lambda_list[0] - lambda_list[2]) + denominator2 = (lambda_list[1] - lambda_list[0]) * (lambda_list[1] - lambda_list[2]) + denominator3 = (lambda_list[2] - lambda_list[0]) * (lambda_list[2] - lambda_list[1]) + return [ + [ + 1 / denominator1, + (-lambda_list[1] - lambda_list[2]) / denominator1, + lambda_list[1] * lambda_list[2] / denominator1, + ], + [ + 1 / denominator2, + (-lambda_list[0] - lambda_list[2]) / denominator2, + lambda_list[0] 
* lambda_list[2] / denominator2, + ], + [ + 1 / denominator3, + (-lambda_list[0] - lambda_list[1]) / denominator3, + lambda_list[0] * lambda_list[1] / denominator3, + ], + ] + elif order == 3: + denominator1 = ( + (lambda_list[0] - lambda_list[1]) + * (lambda_list[0] - lambda_list[2]) + * (lambda_list[0] - lambda_list[3]) + ) + denominator2 = ( + (lambda_list[1] - lambda_list[0]) + * (lambda_list[1] - lambda_list[2]) + * (lambda_list[1] - lambda_list[3]) + ) + denominator3 = ( + (lambda_list[2] - lambda_list[0]) + * (lambda_list[2] - lambda_list[1]) + * (lambda_list[2] - lambda_list[3]) + ) + denominator4 = ( + (lambda_list[3] - lambda_list[0]) + * (lambda_list[3] - lambda_list[1]) + * (lambda_list[3] - lambda_list[2]) + ) + return [ + [ + 1 / denominator1, + (-lambda_list[1] - lambda_list[2] - lambda_list[3]) / denominator1, + ( + lambda_list[1] * lambda_list[2] + + lambda_list[1] * lambda_list[3] + + lambda_list[2] * lambda_list[3] + ) + / denominator1, + (-lambda_list[1] * lambda_list[2] * lambda_list[3]) / denominator1, + ], + [ + 1 / denominator2, + (-lambda_list[0] - lambda_list[2] - lambda_list[3]) / denominator2, + ( + lambda_list[0] * lambda_list[2] + + lambda_list[0] * lambda_list[3] + + lambda_list[2] * lambda_list[3] + ) + / denominator2, + (-lambda_list[0] * lambda_list[2] * lambda_list[3]) / denominator2, + ], + [ + 1 / denominator3, + (-lambda_list[0] - lambda_list[1] - lambda_list[3]) / denominator3, + ( + lambda_list[0] * lambda_list[1] + + lambda_list[0] * lambda_list[3] + + lambda_list[1] * lambda_list[3] + ) + / denominator3, + (-lambda_list[0] * lambda_list[1] * lambda_list[3]) / denominator3, + ], + [ + 1 / denominator4, + (-lambda_list[0] - lambda_list[1] - lambda_list[2]) / denominator4, + ( + lambda_list[0] * lambda_list[1] + + lambda_list[0] * lambda_list[2] + + lambda_list[1] * lambda_list[2] + ) + / denominator4, + (-lambda_list[0] * lambda_list[1] * lambda_list[2]) / denominator4, + ], + ] + + def get_coefficients_fn(self, order, interval_start, interval_end, lambda_list, tau): + assert order in [1, 2, 3, 4] + assert order == len(lambda_list), "the length of lambda list must be equal to the order" + coefficients = [] + lagrange_coefficient = self.lagrange_polynomial_coefficient(order - 1, lambda_list) + for i in range(order): + coefficient = 0 + for j in range(order): + if self.predict_x0: + coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_positive( + order - 1 - j, interval_start, interval_end, tau + ) + else: + coefficient += lagrange_coefficient[i][j] * self.get_coefficients_exponential_negative( + order - 1 - j, interval_start, interval_end + ) + coefficients.append(coefficient) + assert len(coefficients) == order, "the length of coefficients does not match the order" + return coefficients + + def stochastic_adams_bashforth_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor, + noise: torch.Tensor, + order: int, + tau: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + """ + One step for the SA-Predictor. + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model at the current timestep. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + order (`int`): + The order of SA-Predictor at this timestep. + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
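+
+ Notes:
+ For data prediction (`predict_x0=True`) the update below takes the form
+ `x_t = exp(-tau**2 * h) * (sigma_t / sigma_s0) * x_s0 + gradient_part + sigma_t * sqrt(1 - exp(-2 * tau**2 * h)) * noise`,
+ where `h = lambda_t - lambda_s0` and `gradient_part` is a weighted sum of the cached model outputs
+ with coefficients from `get_coefficients_fn`; the noise-prediction branch is analogous, using
+ `(alpha_t / alpha_s0)` scaling and `tau * sigma_t * sqrt(exp(2 * h) - 1) * noise`.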
+ """ + prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if noise is None: + if len(args) > 2: + noise = args[2] + else: + raise ValueError(" missing `noise` as a required keyward argument") + if order is None: + if len(args) > 3: + order = args[3] + else: + raise ValueError(" missing `order` as a required keyward argument") + if tau is None: + if len(args) > 4: + tau = args[4] + else: + raise ValueError(" missing `tau` as a required keyward argument") + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + model_output_list = self.model_outputs + sigma_t, sigma_s0 = ( + self.sigmas[self.step_index + 1], + self.sigmas[self.step_index], + ) + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + + gradient_part = torch.zeros_like(sample) + h = lambda_t - lambda_s0 + lambda_list = [] + + for i in range(order): + si = self.step_index - i + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + lambda_list.append(lambda_si) + + gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) + + x = sample + + if self.predict_x0: + if ( + order == 2 + ): ## if order = 2 we do a modification that does not influence the convergence order similar to unipc. Note: This is used only for few steps sampling. + # The added term is O(h^3). Empirically we find it will slightly improve the image quality. 
+ # ODE case + # gradient_coefficients[0] += 1.0 * torch.exp(lambda_t) * (h ** 2 / 2 - (h - 1 + torch.exp(-h))) / (ns.marginal_lambda(t_prev_list[-1]) - ns.marginal_lambda(t_prev_list[-2])) + # gradient_coefficients[1] -= 1.0 * torch.exp(lambda_t) * (h ** 2 / 2 - (h - 1 + torch.exp(-h))) / (ns.marginal_lambda(t_prev_list[-1]) - ns.marginal_lambda(t_prev_list[-2])) + temp_sigma = self.sigmas[self.step_index - 1] + temp_alpha_s, temp_sigma_s = self._sigma_to_alpha_sigma_t(temp_sigma) + temp_lambda_s = torch.log(temp_alpha_s) - torch.log(temp_sigma_s) + gradient_coefficients[0] += ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h**2 / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2)) + / (lambda_s0 - temp_lambda_s) + ) + gradient_coefficients[1] -= ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h**2 / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2)) + / (lambda_s0 - temp_lambda_s) + ) + + for i in range(order): + if self.predict_x0: + gradient_part += ( + (1 + tau**2) + * sigma_t + * torch.exp(-(tau**2) * lambda_t) + * gradient_coefficients[i] + * model_output_list[-(i + 1)] + ) + else: + gradient_part += -(1 + tau**2) * alpha_t * gradient_coefficients[i] * model_output_list[-(i + 1)] + + if self.predict_x0: + noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau**2 * h)) * noise + else: + noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * noise + + if self.predict_x0: + x_t = torch.exp(-(tau**2) * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part + else: + x_t = (alpha_t / alpha_s0) * x + gradient_part + noise_part + + x_t = x_t.to(x.dtype) + return x_t + + def stochastic_adams_moulton_update( + self, + this_model_output: torch.Tensor, + *args, + last_sample: torch.Tensor, + last_noise: torch.Tensor, + this_sample: torch.Tensor, + order: int, + tau: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + """ + One step for the SA-Corrector. + + Args: + this_model_output (`torch.Tensor`): + The model outputs at `x_t`. + this_timestep (`int`): + The current timestep `t`. + last_sample (`torch.Tensor`): + The generated sample before the last predictor `x_{t-1}`. + this_sample (`torch.Tensor`): + The generated sample after the last predictor `x_{t}`. + order (`int`): + The order of SA-Corrector at this step. + + Returns: + `torch.Tensor`: + The corrected sample tensor at the current timestep. 
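+
+ Notes:
+ Unlike the predictor, the corrector appends the current model output to the cached history
+ (`model_prev_list = self.model_outputs + [this_model_output]`) and re-integrates starting from
+ `last_sample`, which makes this the implicit (Adams-Moulton) counterpart of the predictor update.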
+ """ + + this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) + if last_sample is None: + if len(args) > 1: + last_sample = args[1] + else: + raise ValueError(" missing`last_sample` as a required keyward argument") + if last_noise is None: + if len(args) > 2: + last_noise = args[2] + else: + raise ValueError(" missing`last_noise` as a required keyward argument") + if this_sample is None: + if len(args) > 3: + this_sample = args[3] + else: + raise ValueError(" missing`this_sample` as a required keyward argument") + if order is None: + if len(args) > 4: + order = args[4] + else: + raise ValueError(" missing`order` as a required keyward argument") + if tau is None: + if len(args) > 5: + tau = args[5] + else: + raise ValueError(" missing`tau` as a required keyward argument") + if this_timestep is not None: + deprecate( + "this_timestep", + "1.0.0", + "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + model_output_list = self.model_outputs + sigma_t, sigma_s0 = ( + self.sigmas[self.step_index], + self.sigmas[self.step_index - 1], + ) + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + gradient_part = torch.zeros_like(this_sample) + h = lambda_t - lambda_s0 + lambda_list = [] + for i in range(order): + si = self.step_index - i + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + lambda_list.append(lambda_si) + + model_prev_list = model_output_list + [this_model_output] + + gradient_coefficients = self.get_coefficients_fn(order, lambda_s0, lambda_t, lambda_list, tau) + + x = last_sample + + if self.predict_x0: + if ( + order == 2 + ): ## if order = 2 we do a modification that does not influence the convergence order similar to UniPC. Note: This is used only for few steps sampling. + # The added term is O(h^3). Empirically we find it will slightly improve the image quality. 
+ # ODE case + # gradient_coefficients[0] += 1.0 * torch.exp(lambda_t) * (h / 2 - (h - 1 + torch.exp(-h)) / h) + # gradient_coefficients[1] -= 1.0 * torch.exp(lambda_t) * (h / 2 - (h - 1 + torch.exp(-h)) / h) + gradient_coefficients[0] += ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2 * h)) + ) + gradient_coefficients[1] -= ( + 1.0 + * torch.exp((1 + tau**2) * lambda_t) + * (h / 2 - (h * (1 + tau**2) - 1 + torch.exp((1 + tau**2) * (-h))) / ((1 + tau**2) ** 2 * h)) + ) + + for i in range(order): + if self.predict_x0: + gradient_part += ( + (1 + tau**2) + * sigma_t + * torch.exp(-(tau**2) * lambda_t) + * gradient_coefficients[i] + * model_prev_list[-(i + 1)] + ) + else: + gradient_part += -(1 + tau**2) * alpha_t * gradient_coefficients[i] * model_prev_list[-(i + 1)] + + if self.predict_x0: + noise_part = sigma_t * torch.sqrt(1 - torch.exp(-2 * tau**2 * h)) * last_noise + else: + noise_part = tau * sigma_t * torch.sqrt(torch.exp(2 * h) - 1) * last_noise + + if self.predict_x0: + x_t = torch.exp(-(tau**2) * h) * (sigma_t / sigma_s0) * x + gradient_part + noise_part + else: + x_t = (alpha_t / alpha_s0) * x + gradient_part + noise_part + + x_t = x_t.to(x.dtype) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator=None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the SA-Solver. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
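+
+ Notes:
+ Each call first applies the SA-Corrector to refine the previous sample (once `last_sample` is
+ available) and then runs the SA-Predictor to produce `prev_sample`; the effective predictor and
+ corrector orders are warmed up over the first steps via `lower_order_nums` and, when
+ `lower_order_final` is set, reduced again near the end of the schedule.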
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + use_corrector = self.step_index > 0 and self.last_sample is not None + + model_output_convert = self.convert_model_output(model_output, sample=sample) + + if use_corrector: + current_tau = self.tau_func(self.timestep_list[-1]) + sample = self.stochastic_adams_moulton_update( + this_model_output=model_output_convert, + last_sample=self.last_sample, + last_noise=self.last_noise, + this_sample=sample, + order=self.this_corrector_order, + tau=current_tau, + ) + + for i in range(max(self.config.predictor_order, self.config.corrector_order - 1) - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.timestep_list[i] = self.timestep_list[i + 1] + + self.model_outputs[-1] = model_output_convert + self.timestep_list[-1] = timestep + + noise = randn_tensor( + model_output.shape, + generator=generator, + device=model_output.device, + dtype=model_output.dtype, + ) + + if self.config.lower_order_final: + this_predictor_order = min(self.config.predictor_order, len(self.timesteps) - self.step_index) + this_corrector_order = min(self.config.corrector_order, len(self.timesteps) - self.step_index + 1) + else: + this_predictor_order = self.config.predictor_order + this_corrector_order = self.config.corrector_order + + self.this_predictor_order = min(this_predictor_order, self.lower_order_nums + 1) # warmup for multistep + self.this_corrector_order = min(this_corrector_order, self.lower_order_nums + 2) # warmup for multistep + assert self.this_predictor_order > 0 + assert self.this_corrector_order > 0 + + self.last_sample = sample + self.last_noise = noise + + current_tau = self.tau_func(self.timestep_list[-1]) + prev_sample = self.stochastic_adams_bashforth_update( + model_output=model_output_convert, + sample=sample, + noise=noise, + order=self.this_predictor_order, + tau=current_tau, + ) + + if self.lower_order_nums < max(self.config.predictor_order, self.config.corrector_order - 1): + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_sde_ve.py b/diffusers3/schedulers/scheduling_sde_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..cedfbf7d6ad5c48075eeb18b67aa3ce36edc54eb --- /dev/null +++ b/diffusers3/schedulers/scheduling_sde_ve.py @@ -0,0 +1,301 @@ +# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin, SchedulerOutput + + +@dataclass +class SdeVeOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + prev_sample_mean (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Mean averaged `prev_sample` over previous timesteps. + """ + + prev_sample: torch.Tensor + prev_sample_mean: torch.Tensor + + +class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): + """ + `ScoreSdeVeScheduler` is a variance exploding stochastic differential equation (SDE) scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. 
Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + snr (`float`, defaults to 0.15): + A coefficient weighting the step from the `model_output` sample (from the network) to the random noise. + sigma_min (`float`, defaults to 0.01): + The initial noise scale for the sigma sequence in the sampling procedure. The minimum sigma should mirror + the distribution of the data. + sigma_max (`float`, defaults to 1348.0): + The maximum value used for the range of continuous timesteps passed into the model. + sampling_eps (`float`, defaults to 1e-5): + The end value of sampling where timesteps decrease progressively from 1 to epsilon. + correct_steps (`int`, defaults to 1): + The number of correction steps performed on a produced sample. + """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 2000, + snr: float = 0.15, + sigma_min: float = 0.01, + sigma_max: float = 1348.0, + sampling_eps: float = 1e-5, + correct_steps: int = 1, + ): + # standard deviation of the initial noise distribution + self.init_noise_sigma = sigma_max + + # setable values + self.timesteps = None + + self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + def set_timesteps( + self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None + ): + """ + Sets the continuous timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + sampling_eps (`float`, *optional*): + The final timestep value (overrides value given during scheduler instantiation). + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + + """ + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + + self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device) + + def set_sigmas( + self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None + ): + """ + Sets the noise scales used for the diffusion chain (to be run before inference). The sigmas control the weight + of the `drift` and `diffusion` components of the sample update. + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + sigma_min (`float`, optional): + The initial noise scale value (overrides value given during scheduler instantiation). + sigma_max (`float`, optional): + The final noise scale value (overrides value given during scheduler instantiation). + sampling_eps (`float`, optional): + The final timestep value (overrides value given during scheduler instantiation). 
+ + """ + sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min + sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + if self.timesteps is None: + self.set_timesteps(num_inference_steps, sampling_eps) + + self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) + self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)) + self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) + + def get_adjacent_sigma(self, timesteps, t): + return torch.where( + timesteps == 0, + torch.zeros_like(t.to(timesteps.device)), + self.discrete_sigmas[timesteps - 1].to(timesteps.device), + ) + + def step_pred( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[SdeVeOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple + is returned where the first element is the sample tensor. + + """ + if self.timesteps is None: + raise ValueError( + "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + timestep = timestep * torch.ones( + sample.shape[0], device=sample.device + ) # torch.repeat_interleave(timestep, sample.shape[0]) + timesteps = (timestep * (len(self.timesteps) - 1)).long() + + # mps requires indices to be in the same device, so we use cpu as is the default with cuda + timesteps = timesteps.to(self.discrete_sigmas.device) + + sigma = self.discrete_sigmas[timesteps].to(sample.device) + adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) + drift = torch.zeros_like(sample) + diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 + + # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) + # also equation 47 shows the analog from SDE models to ancestral sampling methods + diffusion = diffusion.flatten() + while len(diffusion.shape) < len(sample.shape): + diffusion = diffusion.unsqueeze(-1) + drift = drift - diffusion**2 * model_output + + # equation 6: sample noise for the diffusion term of + noise = randn_tensor( + sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype + ) + prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep + # TODO is the variable diffusion the correct scaling term for the noise? 
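+ # Since drift = -diffusion**2 * model_output with model_output approximating grad_x log p_t(x), the mean above
+ # equals sample + (sigma**2 - adjacent_sigma**2) * score; the line below then adds the stochastic term
+ # sqrt(sigma**2 - adjacent_sigma**2) * noise to complete the reverse-diffusion (ancestral-style) step.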
+ prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g + + if not return_dict: + return (prev_sample, prev_sample_mean) + + return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) + + def step_correct( + self, + model_output: torch.Tensor, + sample: torch.Tensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Correct the predicted sample based on the `model_output` of the network. This is often run repeatedly after + making the prediction for the previous timestep. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple + is returned where the first element is the sample tensor. + + """ + if self.timesteps is None: + raise ValueError( + "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" + # sample noise for correction + noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device) + + # compute step size from the model_output, the noise, and the snr + grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean() + noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() + step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 + step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) + # self.repeat_scalar(step_size, sample.shape[0]) + + # compute corrected sample: model_output term and noise term + step_size = step_size.flatten() + while len(step_size.shape) < len(sample.shape): + step_size = step_size.unsqueeze(-1) + prev_sample_mean = sample + step_size * model_output + prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.Tensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + timesteps = timesteps.to(original_samples.device) + sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps] + noise = ( + noise * sigmas[:, None, None, None] + if noise is not None + else torch.randn_like(original_samples) * sigmas[:, None, None, None] + ) + noisy_samples = noise + original_samples + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_sde_ve_flax.py b/diffusers3/schedulers/scheduling_sde_ve_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..0a8d45d4acbcfb6dd922aa86058aee8bb30893b5 --- /dev/null +++ b/diffusers3/schedulers/scheduling_sde_ve_flax.py @@ -0,0 +1,280 @@ +# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import flax +import jax +import jax.numpy as jnp +from jax import random + +from ..configuration_utils import ConfigMixin, register_to_config +from .scheduling_utils_flax import FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left + + +@flax.struct.dataclass +class ScoreSdeVeSchedulerState: + # setable values + timesteps: Optional[jnp.ndarray] = None + discrete_sigmas: Optional[jnp.ndarray] = None + sigmas: Optional[jnp.ndarray] = None + + @classmethod + def create(cls): + return cls() + + +@dataclass +class FlaxSdeVeOutput(FlaxSchedulerOutput): + """ + Output class for the ScoreSdeVeScheduler's step function output. + + Args: + state (`ScoreSdeVeSchedulerState`): + prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + prev_sample_mean (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps. + """ + + state: ScoreSdeVeSchedulerState + prev_sample: jnp.ndarray + prev_sample_mean: Optional[jnp.ndarray] = None + + +class FlaxScoreSdeVeScheduler(FlaxSchedulerMixin, ConfigMixin): + """ + The variance exploding stochastic differential equation (SDE) scheduler. + + For more information, see the original paper: https://arxiv.org/abs/2011.13456 + + [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` + function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. + [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and + [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + snr (`float`): + coefficient weighting the step from the model_output sample (from the network) to the random noise. + sigma_min (`float`): + initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the + distribution of the data. + sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model. + sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to + epsilon. + correct_steps (`int`): number of correction steps performed on a produced sample. 
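+
+ Example (illustrative sketch; a full sampling loop would also call `step_pred` and `step_correct`
+ with a JAX PRNG key):
+
+ scheduler = FlaxScoreSdeVeScheduler()
+ state = scheduler.create_state()
+ state = scheduler.set_timesteps(state, num_inference_steps=100, shape=())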
+ """ + + @property + def has_state(self): + return True + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 2000, + snr: float = 0.15, + sigma_min: float = 0.01, + sigma_max: float = 1348.0, + sampling_eps: float = 1e-5, + correct_steps: int = 1, + ): + pass + + def create_state(self): + state = ScoreSdeVeSchedulerState.create() + return self.set_sigmas( + state, + self.config.num_train_timesteps, + self.config.sigma_min, + self.config.sigma_max, + self.config.sampling_eps, + ) + + def set_timesteps( + self, state: ScoreSdeVeSchedulerState, num_inference_steps: int, shape: Tuple = (), sampling_eps: float = None + ) -> ScoreSdeVeSchedulerState: + """ + Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + sampling_eps (`float`, optional): + final timestep value (overrides value given at Scheduler instantiation). + + """ + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + + timesteps = jnp.linspace(1, sampling_eps, num_inference_steps) + return state.replace(timesteps=timesteps) + + def set_sigmas( + self, + state: ScoreSdeVeSchedulerState, + num_inference_steps: int, + sigma_min: float = None, + sigma_max: float = None, + sampling_eps: float = None, + ) -> ScoreSdeVeSchedulerState: + """ + Sets the noise scales used for the diffusion chain. Supporting function to be run before inference. + + The sigmas control the weight of the `drift` and `diffusion` components of sample update. + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. + sigma_min (`float`, optional): + initial noise scale value (overrides value given at Scheduler instantiation). + sigma_max (`float`, optional): + final noise scale value (overrides value given at Scheduler instantiation). + sampling_eps (`float`, optional): + final timestep value (overrides value given at Scheduler instantiation). + """ + sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min + sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max + sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps + if state.timesteps is None: + state = self.set_timesteps(state, num_inference_steps, sampling_eps) + + discrete_sigmas = jnp.exp(jnp.linspace(jnp.log(sigma_min), jnp.log(sigma_max), num_inference_steps)) + sigmas = jnp.array([sigma_min * (sigma_max / sigma_min) ** t for t in state.timesteps]) + + return state.replace(discrete_sigmas=discrete_sigmas, sigmas=sigmas) + + def get_adjacent_sigma(self, state, timesteps, t): + return jnp.where(timesteps == 0, jnp.zeros_like(t), state.discrete_sigmas[timesteps - 1]) + + def step_pred( + self, + state: ScoreSdeVeSchedulerState, + model_output: jnp.ndarray, + timestep: int, + sample: jnp.ndarray, + key: jax.Array, + return_dict: bool = True, + ) -> Union[FlaxSdeVeOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). 
+ + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class + + Returns: + [`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + + """ + if state.timesteps is None: + raise ValueError( + "`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + timestep = timestep * jnp.ones( + sample.shape[0], + ) + timesteps = (timestep * (len(state.timesteps) - 1)).long() + + sigma = state.discrete_sigmas[timesteps] + adjacent_sigma = self.get_adjacent_sigma(state, timesteps, timestep) + drift = jnp.zeros_like(sample) + diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 + + # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) + # also equation 47 shows the analog from SDE models to ancestral sampling methods + diffusion = diffusion.flatten() + diffusion = broadcast_to_shape_from_left(diffusion, sample.shape) + drift = drift - diffusion**2 * model_output + + # equation 6: sample noise for the diffusion term of + key = random.split(key, num=1) + noise = random.normal(key=key, shape=sample.shape) + prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep + # TODO is the variable diffusion the correct scaling term for the noise? + prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g + + if not return_dict: + return (prev_sample, prev_sample_mean, state) + + return FlaxSdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean, state=state) + + def step_correct( + self, + state: ScoreSdeVeSchedulerState, + model_output: jnp.ndarray, + sample: jnp.ndarray, + key: jax.Array, + return_dict: bool = True, + ) -> Union[FlaxSdeVeOutput, Tuple]: + """ + Correct the predicted sample based on the output model_output of the network. This is often run repeatedly + after making the prediction for the previous timestep. + + Args: + state (`ScoreSdeVeSchedulerState`): the `FlaxScoreSdeVeScheduler` state data class instance. + model_output (`jnp.ndarray`): direct output from learned diffusion model. + sample (`jnp.ndarray`): + current instance of sample being created by diffusion process. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than FlaxSdeVeOutput class + + Returns: + [`FlaxSdeVeOutput`] or `tuple`: [`FlaxSdeVeOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + + """ + if state.timesteps is None: + raise ValueError( + "`state.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" + ) + + # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" + # sample noise for correction + key = random.split(key, num=1) + noise = random.normal(key=key, shape=sample.shape) + + # compute step size from the model_output, the noise, and the snr + grad_norm = jnp.linalg.norm(model_output) + noise_norm = jnp.linalg.norm(noise) + step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 + step_size = step_size * jnp.ones(sample.shape[0]) + + # compute corrected sample: model_output term and noise term + step_size = step_size.flatten() + step_size = broadcast_to_shape_from_left(step_size, sample.shape) + prev_sample_mean = sample + step_size * model_output + prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise + + if not return_dict: + return (prev_sample, state) + + return FlaxSdeVeOutput(prev_sample=prev_sample, state=state) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_tcd.py b/diffusers3/schedulers/scheduling_tcd.py new file mode 100644 index 0000000000000000000000000000000000000000..580224404c540abd31cb0300bb4848485b3a87c9 --- /dev/null +++ b/diffusers3/schedulers/scheduling_tcd.py @@ -0,0 +1,695 @@ +# Copyright 2024 Stanford University Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion +# and https://github.com/hojonathanho/diffusion + +import math +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..schedulers.scheduling_utils import SchedulerMixin +from ..utils import BaseOutput, logging +from ..utils.torch_utils import randn_tensor + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class TCDSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_noised_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted noised sample `(x_{s})` based on the model output from the current timestep. + """ + + prev_sample: torch.Tensor + pred_noised_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. 
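+ Concretely, for the default `cosine` transform, `betas[i] = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)`
+ with `alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2` and `T = num_diffusion_timesteps`.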
+ + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas: torch.Tensor) -> torch.Tensor: + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class TCDScheduler(SchedulerMixin, ConfigMixin): + """ + `TCDScheduler` incorporates the `Strategic Stochastic Sampling` introduced by the paper `Trajectory Consistency + Distillation`, extending the original Multistep Consistency Sampling to enable unrestricted trajectory traversal. + + This code is based on the official repo of TCD(https://github.com/jabir-zheng/TCD). + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. [`~ConfigMixin`] takes care of storing all config + attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be + accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving + functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. 
+ trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + original_inference_steps (`int`, *optional*, defaults to 50): + The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we + will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule. + clip_sample (`bool`, defaults to `True`): + Clip the predicted sample for numerical stability. + clip_sample_range (`float`, defaults to 1.0): + The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. + set_alpha_to_one (`bool`, defaults to `True`): + Each diffusion step uses the alphas product value at that step and at the previous one. For the final step + there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, + otherwise it uses the alpha value at step 0. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True`. + timestep_spacing (`str`, defaults to `"leading"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + timestep_scaling (`float`, defaults to 10.0): + The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions + `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation + error at the default of `10.0` is already pretty small). + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
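+
+ Example (illustrative sketch; the pipeline class and checkpoint names are assumptions, not part of
+ this file; TCD is normally paired with a TCD-distilled LoRA and very few inference steps):
+
+ from diffusers import StableDiffusionXLPipeline, TCDScheduler
+
+ pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
+ pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)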
+ """ + + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.00085, + beta_end: float = 0.012, + beta_schedule: str = "scaled_linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + original_inference_steps: int = 50, + clip_sample: bool = False, + clip_sample_range: float = 1.0, + set_alpha_to_one: bool = True, + steps_offset: int = 0, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + timestep_spacing: str = "leading", + timestep_scaling: float = 10.0, + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + # Rescale for zero SNR + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + # At every step in ddim, we are looking into the previous alphas_cumprod + # For the final step, there is no previous alphas_cumprod because we are already at 0 + # `set_alpha_to_one` decides whether we set this parameter simply to one or + # whether we use the final alpha of the "non-previous" one. + self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) + self.custom_timesteps = False + + self._step_index = None + self._begin_index = None + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + @property + def step_index(self): + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. 
+ """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + timestep (`int`, *optional*): + The current timestep in the diffusion chain. + + Returns: + `torch.Tensor`: + A scaled input sample. + """ + return sample + + # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler._get_variance + def _get_variance(self, timestep, prev_timestep): + alpha_prod_t = self.alphas_cumprod[timestep] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) + + return variance + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." + + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + device: Union[str, torch.device] = None, + original_inference_steps: Optional[int] = None, + timesteps: Optional[List[int]] = None, + strength: float = 1.0, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`, *optional*): + The number of diffusion steps used when generating samples with a pre-trained model. 
If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + original_inference_steps (`int`, *optional*): + The original number of inference steps, which will be used to generate a linearly-spaced timestep + schedule (which is different from the standard `diffusers` implementation). We will then take + `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as + our final timestep schedule. If not set, this will default to the `original_inference_steps` attribute. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of equal spacing between timesteps on the training/distillation timestep + schedule is used. If `timesteps` is passed, `num_inference_steps` must be `None`. + strength (`float`, *optional*, defaults to 1.0): + Used to determine the number of timesteps used for inference when using img2img, inpaint, etc. + """ + # 0. Check inputs + if num_inference_steps is None and timesteps is None: + raise ValueError("Must pass exactly one of `num_inference_steps` or `custom_timesteps`.") + + if num_inference_steps is not None and timesteps is not None: + raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") + + # 1. Calculate the TCD original training/distillation timestep schedule. + original_steps = ( + original_inference_steps if original_inference_steps is not None else self.config.original_inference_steps + ) + + if original_inference_steps is None: + # default option, timesteps align with discrete inference steps + if original_steps > self.config.num_train_timesteps: + raise ValueError( + f"`original_steps`: {original_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + # TCD Timesteps Setting + # The skipping step parameter k from the paper. + k = self.config.num_train_timesteps // original_steps + # TCD Training/Distillation Steps Schedule + tcd_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * k - 1 + else: + # customised option, sampled timesteps can be any arbitrary value + tcd_origin_timesteps = np.asarray(list(range(0, int(self.config.num_train_timesteps * strength)))) + + # 2. Calculate the TCD inference timestep schedule. + if timesteps is not None: + # 2.1 Handle custom timestep schedules. + train_timesteps = set(tcd_origin_timesteps) + non_train_timesteps = [] + for i in range(1, len(timesteps)): + if timesteps[i] >= timesteps[i - 1]: + raise ValueError("`custom_timesteps` must be in descending order.") + + if timesteps[i] not in train_timesteps: + non_train_timesteps.append(timesteps[i]) + + if timesteps[0] >= self.config.num_train_timesteps: + raise ValueError( + f"`timesteps` must start before `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps}." + ) + + # Raise warning if timestep schedule does not start with self.config.num_train_timesteps - 1 + if strength == 1.0 and timesteps[0] != self.config.num_train_timesteps - 1: + logger.warning( + f"The first timestep on the custom timestep schedule is {timesteps[0]}, not" + f" `self.config.num_train_timesteps - 1`: {self.config.num_train_timesteps - 1}. 
You may get" + f" unexpected results when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule contains timesteps not on original timestep schedule + if non_train_timesteps: + logger.warning( + f"The custom timestep schedule contains the following timesteps which are not on the original" + f" training/distillation timestep schedule: {non_train_timesteps}. You may get unexpected results" + f" when using this timestep schedule." + ) + + # Raise warning if custom timestep schedule is longer than original_steps + if original_steps is not None: + if len(timesteps) > original_steps: + logger.warning( + f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" + f" the length of the timestep schedule used for training: {original_steps}. You may get some" + f" unexpected results when using this timestep schedule." + ) + else: + if len(timesteps) > self.config.num_train_timesteps: + logger.warning( + f"The number of timesteps in the custom timestep schedule is {len(timesteps)}, which exceeds the" + f" the length of the timestep schedule used for training: {self.config.num_train_timesteps}. You may get some" + f" unexpected results when using this timestep schedule." + ) + + timesteps = np.array(timesteps, dtype=np.int64) + self.num_inference_steps = len(timesteps) + self.custom_timesteps = True + + # Apply strength (e.g. for img2img pipelines) (see StableDiffusionImg2ImgPipeline.get_timesteps) + init_timestep = min(int(self.num_inference_steps * strength), self.num_inference_steps) + t_start = max(self.num_inference_steps - init_timestep, 0) + timesteps = timesteps[t_start * self.order :] + # TODO: also reset self.num_inference_steps? + else: + # 2.2 Create the "standard" TCD inference timestep schedule. + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" + f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" + f" maximal {self.config.num_train_timesteps} timesteps." + ) + + if original_steps is not None: + skipping_step = len(tcd_origin_timesteps) // num_inference_steps + + if skipping_step < 1: + raise ValueError( + f"The combination of `original_steps x strength`: {original_steps} x {strength} is smaller than `num_inference_steps`: {num_inference_steps}. Make sure to either reduce `num_inference_steps` to a value smaller than {int(original_steps * strength)} or increase `strength` to a value higher than {float(num_inference_steps / original_steps)}." + ) + + self.num_inference_steps = num_inference_steps + + if original_steps is not None: + if num_inference_steps > original_steps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:" + f" {original_steps} because the final timestep schedule will be a subset of the" + f" `original_inference_steps`-sized initial timestep schedule." + ) + else: + if num_inference_steps > self.config.num_train_timesteps: + raise ValueError( + f"`num_inference_steps`: {num_inference_steps} cannot be larger than `num_train_timesteps`:" + f" {self.config.num_train_timesteps} because the final timestep schedule will be a subset of the" + f" `num_train_timesteps`-sized initial timestep schedule." 
+ ) + + # TCD Inference Steps Schedule + tcd_origin_timesteps = tcd_origin_timesteps[::-1].copy() + # Select (approximately) evenly spaced indices from tcd_origin_timesteps. + inference_indices = np.linspace(0, len(tcd_origin_timesteps), num=num_inference_steps, endpoint=False) + inference_indices = np.floor(inference_indices).astype(np.int64) + timesteps = tcd_origin_timesteps[inference_indices] + + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.long) + + self._step_index = None + self._begin_index = None + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + eta: float = 0.3, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[TCDSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + eta (`float`): + A stochastic parameter (referred to as `gamma` in the paper) used to control the stochasticity in every + step. When eta = 0, it represents deterministic sampling, whereas eta = 1 indicates full stochastic + sampling. + generator (`torch.Generator`, *optional*): + A random number generator. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_tcd.TCDSchedulerOutput`] or `tuple`. + Returns: + [`~schedulers.scheduling_utils.TCDSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_tcd.TCDSchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + assert 0 <= eta <= 1.0, "gamma must be less than or equal to 1.0" + + # 1. get previous step value + prev_step_index = self.step_index + 1 + if prev_step_index < len(self.timesteps): + prev_timestep = self.timesteps[prev_step_index] + else: + prev_timestep = torch.tensor(0) + + timestep_s = torch.floor((1 - eta) * prev_timestep).to(dtype=torch.long) + + # 2. compute alphas, betas + alpha_prod_t = self.alphas_cumprod[timestep] + beta_prod_t = 1 - alpha_prod_t + + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod + + alpha_prod_s = self.alphas_cumprod[timestep_s] + beta_prod_s = 1 - alpha_prod_s + + # 3. 
Compute the predicted noised sample x_s based on the model parameterization + if self.config.prediction_type == "epsilon": # noise-prediction + pred_original_sample = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() + pred_epsilon = model_output + pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon + elif self.config.prediction_type == "sample": # x-prediction + pred_original_sample = model_output + pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) + pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon + elif self.config.prediction_type == "v_prediction": # v-prediction + pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output + pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample + pred_noised_sample = alpha_prod_s.sqrt() * pred_original_sample + beta_prod_s.sqrt() * pred_epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" + " `v_prediction` for `TCDScheduler`." + ) + + # 4. Sample and inject noise z ~ N(0, I) for MultiStep Inference + # Noise is not used on the final timestep of the timestep schedule. + # This also means that noise is not used for one-step sampling. + # Eta (referred to as "gamma" in the paper) was introduced to control the stochasticity in every step. + # When eta = 0, it represents deterministic sampling, whereas eta = 1 indicates full stochastic sampling. + if eta > 0: + if self.step_index != self.num_inference_steps - 1: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=pred_noised_sample.dtype + ) + prev_sample = (alpha_prod_t_prev / alpha_prod_s).sqrt() * pred_noised_sample + ( + 1 - alpha_prod_t_prev / alpha_prod_s + ).sqrt() * noise + else: + prev_sample = pred_noised_sample + else: + prev_sample = pred_noised_sample + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample, pred_noised_sample) + + return TCDSchedulerOutput(prev_sample=prev_sample, pred_noised_sample=pred_noised_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + # Copied from 
diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity + def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as sample + self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) + timesteps = timesteps.to(sample.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(sample.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity + + def __len__(self): + return self.config.num_train_timesteps + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep + def previous_timestep(self, timestep): + if self.custom_timesteps: + index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] + if index == self.timesteps.shape[0] - 1: + prev_t = torch.tensor(-1) + else: + prev_t = self.timesteps[index + 1] + else: + num_inference_steps = ( + self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps + ) + prev_t = timestep - self.config.num_train_timesteps // num_inference_steps + + return prev_t diff --git a/diffusers3/schedulers/scheduling_unclip.py b/diffusers3/schedulers/scheduling_unclip.py new file mode 100644 index 0000000000000000000000000000000000000000..6e1580290f2274a36e89dd119bd4fd991cccf71f --- /dev/null +++ b/diffusers3/schedulers/scheduling_unclip.py @@ -0,0 +1,352 @@ +# Copyright 2024 Kakao Brain and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from ..utils.torch_utils import randn_tensor +from .scheduling_utils import SchedulerMixin + + +@dataclass +# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP +class UnCLIPSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. 
+ `pred_original_sample` can be used to preview progress or for guidance. + """ + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +class UnCLIPScheduler(SchedulerMixin, ConfigMixin): + """ + NOTE: do not use this scheduler. The DDPM scheduler has been updated to support the changes made here. This + scheduler will be removed and replaced with DDPM. + + This is a modified DDPM Scheduler specifically for the karlo unCLIP model. + + This scheduler has some minor variations in how it calculates the learned range variance and dynamically + re-calculates betas based off the timesteps it is skipping. + + The scheduler also uses a slightly different step ratio when computing timesteps to use for inference. + + See [`~DDPMScheduler`] for more information on DDPM scheduling + + Args: + num_train_timesteps (`int`): number of diffusion steps used to train the model. + variance_type (`str`): + options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small_log` + or `learned_range`. + clip_sample (`bool`, default `True`): + option to clip predicted sample between `-clip_sample_range` and `clip_sample_range` for numerical + stability. + clip_sample_range (`float`, default `1.0`): + The range to clip the sample between. See `clip_sample`. 
+ prediction_type (`str`, default `epsilon`, optional): + prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process) + or `sample` (directly predicting the noisy sample`) + """ + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + variance_type: str = "fixed_small_log", + clip_sample: bool = True, + clip_sample_range: Optional[float] = 1.0, + prediction_type: str = "epsilon", + beta_schedule: str = "squaredcos_cap_v2", + ): + if beta_schedule != "squaredcos_cap_v2": + raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'") + + self.betas = betas_for_alpha_bar(num_train_timesteps) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + self.one = torch.tensor(1.0) + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + self.variance_type = variance_type + + def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): input sample + timestep (`int`, optional): current timestep + + Returns: + `torch.Tensor`: scaled input sample + """ + return sample + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + + Note that this scheduler uses a slightly different step ratio than the other diffusers schedulers. The + different step ratio is to mimic the original karlo implementation and does not affect the quality or accuracy + of the results. + + Args: + num_inference_steps (`int`): + the number of diffusion steps used when generating samples with a pre-trained model. 
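+ 
+ A small sketch of the arithmetic with the default `num_train_timesteps=1000` (illustrative values only):
+ 
+ ```py
+ import numpy as np
+ 
+ num_train_timesteps, num_inference_steps = 1000, 5
+ step_ratio = (num_train_timesteps - 1) / (num_inference_steps - 1)  # 249.75
+ timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].astype(np.int64)
+ # array([999, 749, 500, 250,   0]) -- both endpoints of the training schedule are hit exactly
+ ```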
+ """ + self.num_inference_steps = num_inference_steps + step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) + timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) + self.timesteps = torch.from_numpy(timesteps).to(device) + + def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None): + if prev_timestep is None: + prev_timestep = t - 1 + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if prev_timestep == t - 1: + beta = self.betas[t] + else: + beta = 1 - alpha_prod_t / alpha_prod_t_prev + + # For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) + # and sample from it to get previous sample + # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample + variance = beta_prod_t_prev / beta_prod_t * beta + + if variance_type is None: + variance_type = self.config.variance_type + + # hacks - were probably added for training stability + if variance_type == "fixed_small_log": + variance = torch.log(torch.clamp(variance, min=1e-20)) + variance = torch.exp(0.5 * variance) + elif variance_type == "learned_range": + # NOTE difference with DDPM scheduler + min_log = variance.log() + max_log = beta.log() + + frac = (predicted_variance + 1) / 2 + variance = frac * max_log + (1 - frac) * min_log + + return variance + + def step( + self, + model_output: torch.Tensor, + timestep: int, + sample: torch.Tensor, + prev_timestep: Optional[int] = None, + generator=None, + return_dict: bool = True, + ) -> Union[UnCLIPSchedulerOutput, Tuple]: + """ + Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.Tensor`): direct output from learned diffusion model. + timestep (`int`): current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + current instance of sample being created by diffusion process. + prev_timestep (`int`, *optional*): The previous timestep to predict the previous sample at. + Used to dynamically compute beta. If not given, `t-1` is used and the pre-computed beta is used. + generator: random number generator. + return_dict (`bool`): option for returning tuple rather than UnCLIPSchedulerOutput class + + Returns: + [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] or `tuple`: + [`~schedulers.scheduling_utils.UnCLIPSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When + returning a tuple, the first element is the sample tensor. + + """ + t = timestep + + if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": + model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) + else: + predicted_variance = None + + # 1. compute alphas, betas + if prev_timestep is None: + prev_timestep = t - 1 + + alpha_prod_t = self.alphas_cumprod[t] + alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one + beta_prod_t = 1 - alpha_prod_t + beta_prod_t_prev = 1 - alpha_prod_t_prev + + if prev_timestep == t - 1: + beta = self.betas[t] + alpha = self.alphas[t] + else: + beta = 1 - alpha_prod_t / alpha_prod_t_prev + alpha = 1 - beta + + # 2. 
compute predicted original sample from predicted noise also called + # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf + if self.config.prediction_type == "epsilon": + pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) + elif self.config.prediction_type == "sample": + pred_original_sample = model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" + " for the UnCLIPScheduler." + ) + + # 3. Clip "predicted x_0" + if self.config.clip_sample: + pred_original_sample = torch.clamp( + pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range + ) + + # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * beta) / beta_prod_t + current_sample_coeff = alpha ** (0.5) * beta_prod_t_prev / beta_prod_t + + # 5. Compute predicted previous sample ยต_t + # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf + pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample + + # 6. Add noise + variance = 0 + if t > 0: + variance_noise = randn_tensor( + model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device + ) + + variance = self._get_variance( + t, + predicted_variance=predicted_variance, + prev_timestep=prev_timestep, + ) + + if self.variance_type == "fixed_small_log": + variance = variance + elif self.variance_type == "learned_range": + variance = (0.5 * variance).exp() + else: + raise ValueError( + f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" + " for the UnCLIPScheduler." 
+ ) + + variance = variance * variance_noise + + pred_prev_sample = pred_prev_sample + variance + + if not return_dict: + return (pred_prev_sample,) + + return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure alphas_cumprod and timestep have same device and dtype as original_samples + # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement + # for the subsequent add_noise calls + self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) + alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) + timesteps = timesteps.to(original_samples.device) + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + while len(sqrt_alpha_prod.shape) < len(original_samples.shape): + sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) + + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples diff --git a/diffusers3/schedulers/scheduling_unipc_multistep.py b/diffusers3/schedulers/scheduling_unipc_multistep.py new file mode 100644 index 0000000000000000000000000000000000000000..995f85c020eda5577ee40fdce498398cb3ad43d9 --- /dev/null +++ b/diffusers3/schedulers/scheduling_unipc_multistep.py @@ -0,0 +1,954 @@ +# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# DISCLAIMER: check https://arxiv.org/abs/2302.04867 and https://github.com/wl-zhao/UniPC for more info +# The codebase is modified based on https://github.com/huggingface/diffusers/blob/main/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py + +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import deprecate +from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput + + +# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar +def betas_for_alpha_bar( + num_diffusion_timesteps, + max_beta=0.999, + alpha_transform_type="cosine", +): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. 
+ + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. + Choose from `cosine` or `exp` + + Returns: + betas (`np.ndarray`): the betas used by the scheduler to step the model outputs + """ + if alpha_transform_type == "cosine": + + def alpha_bar_fn(t): + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + elif alpha_transform_type == "exp": + + def alpha_bar_fn(t): + return math.exp(t * -12.0) + + else: + raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) + return torch.tensor(betas, dtype=torch.float32) + + +# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr +def rescale_zero_terminal_snr(betas): + """ + Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) + + + Args: + betas (`torch.Tensor`): + the betas that the scheduler is being initialized with. + + Returns: + `torch.Tensor`: rescaled betas with zero terminal SNR + """ + # Convert betas to alphas_bar_sqrt + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= alphas_bar_sqrt_T + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod + alphas = torch.cat([alphas_bar[0:1], alphas]) + betas = 1 - alphas + + return betas + + +class UniPCMultistepScheduler(SchedulerMixin, ConfigMixin): + """ + `UniPCMultistepScheduler` is a training-free framework designed for the fast sampling of diffusion models. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + beta_start (`float`, defaults to 0.0001): + The starting `beta` value of inference. + beta_end (`float`, defaults to 0.02): + The final `beta` value. + beta_schedule (`str`, defaults to `"linear"`): + The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from + `linear`, `scaled_linear`, or `squaredcos_cap_v2`. + trained_betas (`np.ndarray`, *optional*): + Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. + solver_order (`int`, default `2`): + The UniPC order which can be any positive integer. The effective order of accuracy is `solver_order + 1` + due to the UniC. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for + unconditional sampling. 
+ prediction_type (`str`, defaults to `epsilon`, *optional*): + Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), + `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen + Video](https://imagen.research.google/video/paper.pdf) paper). + thresholding (`bool`, defaults to `False`): + Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such + as Stable Diffusion. + dynamic_thresholding_ratio (`float`, defaults to 0.995): + The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. + sample_max_value (`float`, defaults to 1.0): + The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `predict_x0=True`. + predict_x0 (`bool`, defaults to `True`): + Whether to use the updating algorithm on the predicted x0. + solver_type (`str`, default `bh2`): + Solver type for UniPC. It is recommended to use `bh1` for unconditional sampling when steps < 10, and `bh2` + otherwise. + lower_order_final (`bool`, default `True`): + Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can + stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. + disable_corrector (`list`, default `[]`): + Decides which step to disable the corrector to mitigate the misalignment between `epsilon_theta(x_t, c)` + and `epsilon_theta(x_t^c, c)` which can influence convergence for a large guidance scale. Corrector is + usually disabled during the first few steps. + solver_p (`SchedulerMixin`, default `None`): + Any other scheduler that if specified, the algorithm becomes `solver_p + UniC`. + use_karras_sigmas (`bool`, *optional*, defaults to `False`): + Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, + the sigmas are determined according to a sequence of noise levels {ฯƒi}. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + steps_offset (`int`, defaults to 0): + An offset added to the inference steps, as required by some model families. + final_sigmas_type (`str`, defaults to `"zero"`): + The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final + sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. + rescale_betas_zero_snr (`bool`, defaults to `False`): + Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and + dark samples instead of limiting it to samples with medium brightness. Loosely related to + [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
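+ 
+ Example (a minimal sketch of dropping this scheduler into an existing pipeline; the checkpoint id is illustrative and the imports assume the upstream `diffusers` package rather than this vendored copy):
+ 
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline, UniPCMultistepScheduler
+ 
+ pipe = DiffusionPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+ ).to("cuda")
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+ image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]
+ ```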
+ """ + + _compatibles = [e.name for e in KarrasDiffusionSchedulers] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + beta_start: float = 0.0001, + beta_end: float = 0.02, + beta_schedule: str = "linear", + trained_betas: Optional[Union[np.ndarray, List[float]]] = None, + solver_order: int = 2, + prediction_type: str = "epsilon", + thresholding: bool = False, + dynamic_thresholding_ratio: float = 0.995, + sample_max_value: float = 1.0, + predict_x0: bool = True, + solver_type: str = "bh2", + lower_order_final: bool = True, + disable_corrector: List[int] = [], + solver_p: SchedulerMixin = None, + use_karras_sigmas: Optional[bool] = False, + timestep_spacing: str = "linspace", + steps_offset: int = 0, + final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" + rescale_betas_zero_snr: bool = False, + ): + if trained_betas is not None: + self.betas = torch.tensor(trained_betas, dtype=torch.float32) + elif beta_schedule == "linear": + self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) + elif beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. + self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 + elif beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + self.betas = betas_for_alpha_bar(num_train_timesteps) + else: + raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") + + if rescale_betas_zero_snr: + self.betas = rescale_zero_terminal_snr(self.betas) + + self.alphas = 1.0 - self.betas + self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) + + if rescale_betas_zero_snr: + # Close to 0 without being 0 so first sigma is not inf + # FP16 smallest positive subnormal works well here + self.alphas_cumprod[-1] = 2**-24 + + # Currently we only support VP-type noise schedule + self.alpha_t = torch.sqrt(self.alphas_cumprod) + self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) + self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) + self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 + + # standard deviation of the initial noise distribution + self.init_noise_sigma = 1.0 + + if solver_type not in ["bh1", "bh2"]: + if solver_type in ["midpoint", "heun", "logrho"]: + self.register_to_config(solver_type="bh2") + else: + raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") + + self.predict_x0 = predict_x0 + # setable values + self.num_inference_steps = None + timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps) + self.model_outputs = [None] * solver_order + self.timestep_list = [None] * solver_order + self.lower_order_nums = 0 + self.disable_corrector = disable_corrector + self.solver_p = solver_p + self.last_sample = None + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. 
+ """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. + """ + self._begin_index = begin_index + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + """ + # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 + if self.config.timestep_spacing == "linspace": + timesteps = ( + np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps + 1) + .round()[::-1][:-1] + .copy() + .astype(np.int64) + ) + elif self.config.timestep_spacing == "leading": + step_ratio = self.config.num_train_timesteps // (num_inference_steps + 1) + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) + timesteps += self.config.steps_offset + elif self.config.timestep_spacing == "trailing": + step_ratio = self.config.num_train_timesteps / num_inference_steps + # creates integer timesteps by multiplying by ratio + # casting to int to avoid issues when num_inference_step is power of 3 + timesteps = np.arange(self.config.num_train_timesteps, 0, -step_ratio).round().copy().astype(np.int64) + timesteps -= 1 + else: + raise ValueError( + f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
+ ) + + sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) + if self.config.use_karras_sigmas: + log_sigmas = np.log(sigmas) + sigmas = np.flip(sigmas).copy() + sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) + timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() + if self.config.final_sigmas_type == "sigma_min": + sigma_last = sigmas[-1] + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + else: + sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) + if self.config.final_sigmas_type == "sigma_min": + sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 + elif self.config.final_sigmas_type == "zero": + sigma_last = 0 + else: + raise ValueError( + f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" + ) + sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) + + self.sigmas = torch.from_numpy(sigmas) + self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) + + self.num_inference_steps = len(timesteps) + + self.model_outputs = [ + None, + ] * self.config.solver_order + self.lower_order_nums = 0 + self.last_sample = None + if self.solver_p: + self.solver_p.set_timesteps(self.num_inference_steps, device=device) + + # add an index counter for schedulers that allow duplicated timesteps + self._step_index = None + self._begin_index = None + self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication + + # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample + def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: + """ + "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the + prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by + s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing + pixels from saturation at each step. We find that dynamic thresholding results in significantly better + photorealism as well as better image-text alignment, especially when using very large guidance weights." 
+ + https://arxiv.org/abs/2205.11487 + """ + dtype = sample.dtype + batch_size, channels, *remaining_dims = sample.shape + + if dtype not in (torch.float32, torch.float64): + sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half + + # Flatten sample for doing quantile calculation along each image + sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) + + abs_sample = sample.abs() # "a certain percentile absolute pixel value" + + s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) + s = torch.clamp( + s, min=1, max=self.config.sample_max_value + ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] + s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 + sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" + + sample = sample.reshape(batch_size, channels, *remaining_dims) + sample = sample.to(dtype) + + return sample + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t + def _sigma_to_t(self, sigma, log_sigmas): + # get log sigma + log_sigma = np.log(np.maximum(sigma, 1e-10)) + + # get distribution + dists = log_sigma - log_sigmas[:, np.newaxis] + + # get sigmas range + low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) + high_idx = low_idx + 1 + + low = log_sigmas[low_idx] + high = log_sigmas[high_idx] + + # interpolate sigmas + w = (low - log_sigma) / (low - high) + w = np.clip(w, 0, 1) + + # transform interpolation to time range + t = (1 - w) * low_idx + w * high_idx + t = t.reshape(sigma.shape) + return t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t + def _sigma_to_alpha_sigma_t(self, sigma): + alpha_t = 1 / ((sigma**2 + 1) ** 0.5) + sigma_t = sigma * alpha_t + + return alpha_t, sigma_t + + # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras + def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: + """Constructs the noise schedule of Karras et al. (2022).""" + + # Hack to make sure that other schedulers which copy this function don't break + # TODO: Add this logic to the other schedulers + if hasattr(self.config, "sigma_min"): + sigma_min = self.config.sigma_min + else: + sigma_min = None + + if hasattr(self.config, "sigma_max"): + sigma_max = self.config.sigma_max + else: + sigma_max = None + + sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() + sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() + + rho = 7.0 # 7.0 is the value used in the paper + ramp = np.linspace(0, 1, num_inference_steps) + min_inv_rho = sigma_min ** (1 / rho) + max_inv_rho = sigma_max ** (1 / rho) + sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho + return sigmas + + def convert_model_output( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + **kwargs, + ) -> torch.Tensor: + r""" + Convert the model output to the corresponding type the UniPC algorithm needs. + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + + Returns: + `torch.Tensor`: + The converted model output. 
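+ 
+ For `prediction_type="epsilon"` with `predict_x0=True` this is the usual reparameterization `x0 = (x_t - sigma_t * eps) / alpha_t`; a standalone sketch of that arithmetic (tensor values are arbitrary):
+ 
+ ```py
+ import torch
+ 
+ sigma = torch.tensor(0.5)                # sigma at the current step index
+ alpha_t = 1 / (sigma**2 + 1) ** 0.5      # same mapping as `_sigma_to_alpha_sigma_t`
+ sigma_t = sigma * alpha_t
+ 
+ sample = torch.randn(1, 4, 8, 8)
+ model_output = torch.randn_like(sample)  # an epsilon (noise) prediction
+ x0_pred = (sample - sigma_t * model_output) / alpha_t
+ ```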
+ """ + timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError("missing `sample` as a required keyward argument") + if timestep is not None: + deprecate( + "timesteps", + "1.0.0", + "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + sigma = self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + + if self.predict_x0: + if self.config.prediction_type == "epsilon": + x0_pred = (sample - sigma_t * model_output) / alpha_t + elif self.config.prediction_type == "sample": + x0_pred = model_output + elif self.config.prediction_type == "v_prediction": + x0_pred = alpha_t * sample - sigma_t * model_output + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the UniPCMultistepScheduler." + ) + + if self.config.thresholding: + x0_pred = self._threshold_sample(x0_pred) + + return x0_pred + else: + if self.config.prediction_type == "epsilon": + return model_output + elif self.config.prediction_type == "sample": + epsilon = (sample - alpha_t * model_output) / sigma_t + return epsilon + elif self.config.prediction_type == "v_prediction": + epsilon = alpha_t * model_output + sigma_t * sample + return epsilon + else: + raise ValueError( + f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" + " `v_prediction` for the UniPCMultistepScheduler." + ) + + def multistep_uni_p_bh_update( + self, + model_output: torch.Tensor, + *args, + sample: torch.Tensor = None, + order: int = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the UniP (B(h) version). Alternatively, `self.solver_p` is used if is specified. + + Args: + model_output (`torch.Tensor`): + The direct output from the learned diffusion model at the current timestep. + prev_timestep (`int`): + The previous discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + order (`int`): + The order of UniP at this timestep (corresponds to the *p* in UniPC-p). + + Returns: + `torch.Tensor`: + The sample tensor at the previous timestep. 
+ """ + prev_timestep = args[0] if len(args) > 0 else kwargs.pop("prev_timestep", None) + if sample is None: + if len(args) > 1: + sample = args[1] + else: + raise ValueError(" missing `sample` as a required keyward argument") + if order is None: + if len(args) > 2: + order = args[2] + else: + raise ValueError(" missing `order` as a required keyward argument") + if prev_timestep is not None: + deprecate( + "prev_timestep", + "1.0.0", + "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + model_output_list = self.model_outputs + + s0 = self.timestep_list[-1] + m0 = model_output_list[-1] + x = sample + + if self.solver_p: + x_t = self.solver_p.step(model_output, s0, x).prev_sample + return x_t + + sigma_t, sigma_s0 = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + + h = lambda_t - lambda_s0 + device = sample.device + + rks = [] + D1s = [] + for i in range(1, order): + si = self.step_index - i + mi = model_output_list[-(i + 1)] + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + rk = (lambda_si - lambda_s0) / h + rks.append(rk) + D1s.append((mi - m0) / rk) + + rks.append(1.0) + rks = torch.tensor(rks, device=device) + + R = [] + b = [] + + hh = -h if self.predict_x0 else h + h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 + h_phi_k = h_phi_1 / hh - 1 + + factorial_i = 1 + + if self.config.solver_type == "bh1": + B_h = hh + elif self.config.solver_type == "bh2": + B_h = torch.expm1(hh) + else: + raise NotImplementedError() + + for i in range(1, order + 1): + R.append(torch.pow(rks, i - 1)) + b.append(h_phi_k * factorial_i / B_h) + factorial_i *= i + 1 + h_phi_k = h_phi_k / hh - 1 / factorial_i + + R = torch.stack(R) + b = torch.tensor(b, device=device) + + if len(D1s) > 0: + D1s = torch.stack(D1s, dim=1) # (B, K) + # for order 2, we use a simplified version + if order == 2: + rhos_p = torch.tensor([0.5], dtype=x.dtype, device=device) + else: + rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]).to(device).to(x.dtype) + else: + D1s = None + + if self.predict_x0: + x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 + if D1s is not None: + pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) + else: + pred_res = 0 + x_t = x_t_ - alpha_t * B_h * pred_res + else: + x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 + if D1s is not None: + pred_res = torch.einsum("k,bkc...->bc...", rhos_p, D1s) + else: + pred_res = 0 + x_t = x_t_ - sigma_t * B_h * pred_res + + x_t = x_t.to(x.dtype) + return x_t + + def multistep_uni_c_bh_update( + self, + this_model_output: torch.Tensor, + *args, + last_sample: torch.Tensor = None, + this_sample: torch.Tensor = None, + order: int = None, + **kwargs, + ) -> torch.Tensor: + """ + One step for the UniC (B(h) version). + + Args: + this_model_output (`torch.Tensor`): + The model outputs at `x_t`. + this_timestep (`int`): + The current timestep `t`. + last_sample (`torch.Tensor`): + The generated sample before the last predictor `x_{t-1}`. + this_sample (`torch.Tensor`): + The generated sample after the last predictor `x_{t}`. + order (`int`): + The `p` of UniC-p at this step. The effective order of accuracy should be `order + 1`. 
+ + Returns: + `torch.Tensor`: + The corrected sample tensor at the current timestep. + """ + this_timestep = args[0] if len(args) > 0 else kwargs.pop("this_timestep", None) + if last_sample is None: + if len(args) > 1: + last_sample = args[1] + else: + raise ValueError(" missing`last_sample` as a required keyward argument") + if this_sample is None: + if len(args) > 2: + this_sample = args[2] + else: + raise ValueError(" missing`this_sample` as a required keyward argument") + if order is None: + if len(args) > 3: + order = args[3] + else: + raise ValueError(" missing`order` as a required keyward argument") + if this_timestep is not None: + deprecate( + "this_timestep", + "1.0.0", + "Passing `this_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", + ) + + model_output_list = self.model_outputs + + m0 = model_output_list[-1] + x = last_sample + x_t = this_sample + model_t = this_model_output + + sigma_t, sigma_s0 = self.sigmas[self.step_index], self.sigmas[self.step_index - 1] + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) + alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) + + lambda_t = torch.log(alpha_t) - torch.log(sigma_t) + lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) + + h = lambda_t - lambda_s0 + device = this_sample.device + + rks = [] + D1s = [] + for i in range(1, order): + si = self.step_index - (i + 1) + mi = model_output_list[-(i + 1)] + alpha_si, sigma_si = self._sigma_to_alpha_sigma_t(self.sigmas[si]) + lambda_si = torch.log(alpha_si) - torch.log(sigma_si) + rk = (lambda_si - lambda_s0) / h + rks.append(rk) + D1s.append((mi - m0) / rk) + + rks.append(1.0) + rks = torch.tensor(rks, device=device) + + R = [] + b = [] + + hh = -h if self.predict_x0 else h + h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1 + h_phi_k = h_phi_1 / hh - 1 + + factorial_i = 1 + + if self.config.solver_type == "bh1": + B_h = hh + elif self.config.solver_type == "bh2": + B_h = torch.expm1(hh) + else: + raise NotImplementedError() + + for i in range(1, order + 1): + R.append(torch.pow(rks, i - 1)) + b.append(h_phi_k * factorial_i / B_h) + factorial_i *= i + 1 + h_phi_k = h_phi_k / hh - 1 / factorial_i + + R = torch.stack(R) + b = torch.tensor(b, device=device) + + if len(D1s) > 0: + D1s = torch.stack(D1s, dim=1) + else: + D1s = None + + # for order 1, we use a simplified version + if order == 1: + rhos_c = torch.tensor([0.5], dtype=x.dtype, device=device) + else: + rhos_c = torch.linalg.solve(R, b).to(device).to(x.dtype) + + if self.predict_x0: + x_t_ = sigma_t / sigma_s0 * x - alpha_t * h_phi_1 * m0 + if D1s is not None: + corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) + else: + corr_res = 0 + D1_t = model_t - m0 + x_t = x_t_ - alpha_t * B_h * (corr_res + rhos_c[-1] * D1_t) + else: + x_t_ = alpha_t / alpha_s0 * x - sigma_t * h_phi_1 * m0 + if D1s is not None: + corr_res = torch.einsum("k,bkc...->bc...", rhos_c[:-1], D1s) + else: + corr_res = 0 + D1_t = model_t - m0 + x_t = x_t_ - sigma_t * B_h * (corr_res + rhos_c[-1] * D1_t) + x_t = x_t.to(x.dtype) + return x_t + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.index_for_timestep + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + index_candidates = (schedule_timesteps == timestep).nonzero() + + if len(index_candidates) == 0: + step_index = len(self.timesteps) - 1 + # The sigma index that is taken 
for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + elif len(index_candidates) > 1: + step_index = index_candidates[1].item() + else: + step_index = index_candidates[0].item() + + return step_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._init_step_index + def _init_step_index(self, timestep): + """ + Initialize the step_index counter for the scheduler. + """ + + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def step( + self, + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + return_dict: bool = True, + ) -> Union[SchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with + the multistep UniPC. + + Args: + model_output (`torch.Tensor`): + The direct output from learned diffusion model. + timestep (`int`): + The current discrete timestep in the diffusion chain. + sample (`torch.Tensor`): + A current instance of a sample created by the diffusion process. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. + + Returns: + [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a + tuple is returned where the first element is the sample tensor. 
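+
+        Example (a minimal sketch of the usual denoising loop; `scheduler` is an instance of this class, and
+        `unet`, `latents`, `prompt_embeds`, and `num_inference_steps` are assumed to be defined elsewhere):
+
+        ```py
+        scheduler.set_timesteps(num_inference_steps, device=latents.device)
+        for t in scheduler.timesteps:
+            noise_pred = unet(latents, t, encoder_hidden_states=prompt_embeds).sample
+            latents = scheduler.step(noise_pred, t, latents).prev_sample
+        ```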
+ + """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + + if self.step_index is None: + self._init_step_index(timestep) + + use_corrector = ( + self.step_index > 0 and self.step_index - 1 not in self.disable_corrector and self.last_sample is not None + ) + + model_output_convert = self.convert_model_output(model_output, sample=sample) + if use_corrector: + sample = self.multistep_uni_c_bh_update( + this_model_output=model_output_convert, + last_sample=self.last_sample, + this_sample=sample, + order=self.this_order, + ) + + for i in range(self.config.solver_order - 1): + self.model_outputs[i] = self.model_outputs[i + 1] + self.timestep_list[i] = self.timestep_list[i + 1] + + self.model_outputs[-1] = model_output_convert + self.timestep_list[-1] = timestep + + if self.config.lower_order_final: + this_order = min(self.config.solver_order, len(self.timesteps) - self.step_index) + else: + this_order = self.config.solver_order + + self.this_order = min(this_order, self.lower_order_nums + 1) # warmup for multistep + assert self.this_order > 0 + + self.last_sample = sample + prev_sample = self.multistep_uni_p_bh_update( + model_output=model_output, # pass the original non-converted model output, in case solver-p is used + sample=sample, + order=self.this_order, + ) + + if self.lower_order_nums < self.config.solver_order: + self.lower_order_nums += 1 + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return SchedulerOutput(prev_sample=prev_sample) + + def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.Tensor`): + The input sample. + + Returns: + `torch.Tensor`: + A scaled input sample. 
+ """ + return sample + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.add_noise + def add_noise( + self, + original_samples: torch.Tensor, + noise: torch.Tensor, + timesteps: torch.IntTensor, + ) -> torch.Tensor: + # Make sure sigmas and timesteps have the same device and dtype as original_samples + sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) + if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): + # mps does not support float64 + schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) + timesteps = timesteps.to(original_samples.device, dtype=torch.float32) + else: + schedule_timesteps = self.timesteps.to(original_samples.device) + timesteps = timesteps.to(original_samples.device) + + # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index + if self.begin_index is None: + step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] + elif self.step_index is not None: + # add_noise is called after first denoising step (for inpainting) + step_indices = [self.step_index] * timesteps.shape[0] + else: + # add noise is called before first denoising step to create initial latent(img2img) + step_indices = [self.begin_index] * timesteps.shape[0] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < len(original_samples.shape): + sigma = sigma.unsqueeze(-1) + + alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) + noisy_samples = alpha_t * original_samples + sigma_t * noise + return noisy_samples + + def __len__(self): + return self.config.num_train_timesteps diff --git a/diffusers3/schedulers/scheduling_utils.py b/diffusers3/schedulers/scheduling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f20224b19009eed6c7e34cd308f4978210f5c466 --- /dev/null +++ b/diffusers3/schedulers/scheduling_utils.py @@ -0,0 +1,193 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional, Union + +import torch +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import BaseOutput, PushToHubMixin + + +SCHEDULER_CONFIG_NAME = "scheduler_config.json" + + +# NOTE: We make this type an enum because it simplifies usage in docs and prevents +# circular imports when used for `_compatibles` within the schedulers module. +# When it's used as a type in pipelines, it really is a Union because the actual +# scheduler instance is passed in. 
+class KarrasDiffusionSchedulers(Enum): + DDIMScheduler = 1 + DDPMScheduler = 2 + PNDMScheduler = 3 + LMSDiscreteScheduler = 4 + EulerDiscreteScheduler = 5 + HeunDiscreteScheduler = 6 + EulerAncestralDiscreteScheduler = 7 + DPMSolverMultistepScheduler = 8 + DPMSolverSinglestepScheduler = 9 + KDPM2DiscreteScheduler = 10 + KDPM2AncestralDiscreteScheduler = 11 + DEISMultistepScheduler = 12 + UniPCMultistepScheduler = 13 + DPMSolverSDEScheduler = 14 + EDMEulerScheduler = 15 + + +AysSchedules = { + "StableDiffusionTimesteps": [999, 850, 736, 645, 545, 455, 343, 233, 124, 24], + "StableDiffusionSigmas": [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.0], + "StableDiffusionXLTimesteps": [999, 845, 730, 587, 443, 310, 193, 116, 53, 13], + "StableDiffusionXLSigmas": [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.0], + "StableDiffusionVideoSigmas": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.0], +} + + +@dataclass +class SchedulerOutput(BaseOutput): + """ + Base class for the output of a scheduler's `step` function. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.Tensor + + +class SchedulerMixin(PushToHubMixin): + """ + Base class for all schedulers. + + [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving + functionalities. + + [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to + the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`. + + Class attributes: + - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler + class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden + by parent class). + """ + + config_name = SCHEDULER_CONFIG_NAME + _compatibles = [] + has_compatibles = True + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + subfolder: Optional[str] = None, + return_unused_kwargs=False, + **kwargs, + ): + r""" + Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on + the Hub. + - A path to a *directory* (for example `./my_model_directory`) containing the scheduler + configuration saved with [`~SchedulerMixin.save_pretrained`]. + subfolder (`str`, *optional*): + The subfolder location of a model file within a larger model repository on the Hub or locally. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory where a downloaded pretrained model configuration is cached if the standard cache + is not used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. 
+ + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether to only load local model weights and configuration files or not. If set to `True`, the model + won't be downloaded from the Hub. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from + `diffusers-cli login` (stored in `~/.huggingface`) is used. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier + allowed by Git. + + + + To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with + `huggingface-cli login`. You can also activate the special + ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a + firewalled environment. + + + + """ + config, kwargs, commit_hash = cls.load_config( + pretrained_model_name_or_path=pretrained_model_name_or_path, + subfolder=subfolder, + return_unused_kwargs=True, + return_commit_hash=True, + **kwargs, + ) + return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs) + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a scheduler configuration object to a directory so that it can be reloaded using the + [`~SchedulerMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) + + @property + def compatibles(self): + """ + Returns all schedulers that are compatible with this scheduler + + Returns: + `List[SchedulerMixin]`: List of compatible schedulers + """ + return self._get_compatibles() + + @classmethod + def _get_compatibles(cls): + compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) + diffusers_library = importlib.import_module(__name__.split(".")[0]) + compatible_classes = [ + getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) + ] + return compatible_classes diff --git a/diffusers3/schedulers/scheduling_utils_flax.py b/diffusers3/schedulers/scheduling_utils_flax.py new file mode 100644 index 0000000000000000000000000000000000000000..ae11baf9ea1b4d3d0e67cb77ea7dc01eca1511bf --- /dev/null +++ b/diffusers3/schedulers/scheduling_utils_flax.py @@ -0,0 +1,291 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import math +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional, Tuple, Union + +import flax +import jax.numpy as jnp +from huggingface_hub.utils import validate_hf_hub_args + +from ..utils import BaseOutput, PushToHubMixin + + +SCHEDULER_CONFIG_NAME = "scheduler_config.json" + + +# NOTE: We make this type an enum because it simplifies usage in docs and prevents +# circular imports when used for `_compatibles` within the schedulers module. +# When it's used as a type in pipelines, it really is a Union because the actual +# scheduler instance is passed in. +class FlaxKarrasDiffusionSchedulers(Enum): + FlaxDDIMScheduler = 1 + FlaxDDPMScheduler = 2 + FlaxPNDMScheduler = 3 + FlaxLMSDiscreteScheduler = 4 + FlaxDPMSolverMultistepScheduler = 5 + FlaxEulerDiscreteScheduler = 6 + + +@dataclass +class FlaxSchedulerOutput(BaseOutput): + """ + Base class for the scheduler's step function output. + + Args: + prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: jnp.ndarray + + +class FlaxSchedulerMixin(PushToHubMixin): + """ + Mixin containing common functions for the schedulers. + + Class attributes: + - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that + `from_config` can be used from a class different than the one used to save the config (should be overridden + by parent class). + """ + + config_name = SCHEDULER_CONFIG_NAME + ignore_for_config = ["dtype"] + _compatibles = [] + has_compatibles = True + + @classmethod + @validate_hf_hub_args + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, + subfolder: Optional[str] = None, + return_unused_kwargs=False, + **kwargs, + ): + r""" + Instantiate a Scheduler class from a pre-defined JSON-file. + + Parameters: + pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): + Can be either: + + - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an + organization name, like `google/ddpm-celebahq-256`. + - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`], + e.g., `./my_model_directory/`. + subfolder (`str`, *optional*): + In case the relevant files are located inside a subfolder of the model repo (either remote in + huggingface.co or downloaded locally), you can specify the folder name here. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + Whether kwargs that are not consumed by the Python class should be returned or not. + + cache_dir (`Union[str, os.PathLike]`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. 
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files, overriding the + cached versions if they exist. + + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + output_loading_info(`bool`, *optional*, defaults to `False`): + Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. + local_files_only(`bool`, *optional*, defaults to `False`): + Whether or not to only look at local files (i.e., do not try to download the model). + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated + models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + + + Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to + use this method in a firewalled environment. + + + + """ + config, kwargs = cls.load_config( + pretrained_model_name_or_path=pretrained_model_name_or_path, + subfolder=subfolder, + return_unused_kwargs=True, + **kwargs, + ) + scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs) + + if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False): + state = scheduler.create_state() + + if return_unused_kwargs: + return scheduler, state, unused_kwargs + + return scheduler, state + + def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): + """ + Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the + [`~FlaxSchedulerMixin.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
+ """ + self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) + + @property + def compatibles(self): + """ + Returns all schedulers that are compatible with this scheduler + + Returns: + `List[SchedulerMixin]`: List of compatible schedulers + """ + return self._get_compatibles() + + @classmethod + def _get_compatibles(cls): + compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) + diffusers_library = importlib.import_module(__name__.split(".")[0]) + compatible_classes = [ + getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) + ] + return compatible_classes + + +def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray: + assert len(shape) >= x.ndim + return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape) + + +def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray: + """ + Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of + (1-beta) over time from t = [0,1]. + + Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up + to that part of the diffusion process. + + + Args: + num_diffusion_timesteps (`int`): the number of betas to produce. + max_beta (`float`): the maximum beta to use; use values lower than 1 to + prevent singularities. + + Returns: + betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs + """ + + def alpha_bar(time_step): + return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 + + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return jnp.array(betas, dtype=dtype) + + +@flax.struct.dataclass +class CommonSchedulerState: + alphas: jnp.ndarray + betas: jnp.ndarray + alphas_cumprod: jnp.ndarray + + @classmethod + def create(cls, scheduler): + config = scheduler.config + + if config.trained_betas is not None: + betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype) + elif config.beta_schedule == "linear": + betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype) + elif config.beta_schedule == "scaled_linear": + # this schedule is very specific to the latent diffusion model. 
+ betas = ( + jnp.linspace( + config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype + ) + ** 2 + ) + elif config.beta_schedule == "squaredcos_cap_v2": + # Glide cosine schedule + betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype) + else: + raise NotImplementedError( + f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" + ) + + alphas = 1.0 - betas + + alphas_cumprod = jnp.cumprod(alphas, axis=0) + + return cls( + alphas=alphas, + betas=betas, + alphas_cumprod=alphas_cumprod, + ) + + +def get_sqrt_alpha_prod( + state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray +): + alphas_cumprod = state.alphas_cumprod + + sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 + sqrt_alpha_prod = sqrt_alpha_prod.flatten() + sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape) + + sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 + sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() + sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape) + + return sqrt_alpha_prod, sqrt_one_minus_alpha_prod + + +def add_noise_common( + state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray +): + sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps) + noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise + return noisy_samples + + +def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): + sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps) + velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample + return velocity diff --git a/diffusers3/schedulers/scheduling_vq_diffusion.py b/diffusers3/schedulers/scheduling_vq_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..bd8d255fa9016621a4ffa03f39e019983c643445 --- /dev/null +++ b/diffusers3/schedulers/scheduling_vq_diffusion.py @@ -0,0 +1,467 @@ +# Copyright 2024 Microsoft and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F + +from ..configuration_utils import ConfigMixin, register_to_config +from ..utils import BaseOutput +from .scheduling_utils import SchedulerMixin + + +@dataclass +class VQDiffusionSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + Computed sample x_{t-1} of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. 
+ """ + + prev_sample: torch.LongTensor + + +def index_to_log_onehot(x: torch.LongTensor, num_classes: int) -> torch.Tensor: + """ + Convert batch of vector of class indices into batch of log onehot vectors + + Args: + x (`torch.LongTensor` of shape `(batch size, vector length)`): + Batch of class indices + + num_classes (`int`): + number of classes to be used for the onehot vectors + + Returns: + `torch.Tensor` of shape `(batch size, num classes, vector length)`: + Log onehot vectors + """ + x_onehot = F.one_hot(x, num_classes) + x_onehot = x_onehot.permute(0, 2, 1) + log_x = torch.log(x_onehot.float().clamp(min=1e-30)) + return log_x + + +def gumbel_noised(logits: torch.Tensor, generator: Optional[torch.Generator]) -> torch.Tensor: + """ + Apply gumbel noise to `logits` + """ + uniform = torch.rand(logits.shape, device=logits.device, generator=generator) + gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) + noised = gumbel_noise + logits + return noised + + +def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=0.000009): + """ + Cumulative and non-cumulative alpha schedules. + + See section 4.1. + """ + att = ( + np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start) + + alpha_cum_start + ) + att = np.concatenate(([1], att)) + at = att[1:] / att[:-1] + att = np.concatenate((att[1:], [1])) + return at, att + + +def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=0.000009, gamma_cum_end=0.99999): + """ + Cumulative and non-cumulative gamma schedules. + + See section 4.1. + """ + ctt = ( + np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start) + + gamma_cum_start + ) + ctt = np.concatenate(([0], ctt)) + one_minus_ctt = 1 - ctt + one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1] + ct = 1 - one_minus_ct + ctt = np.concatenate((ctt[1:], [0])) + return ct, ctt + + +class VQDiffusionScheduler(SchedulerMixin, ConfigMixin): + """ + A scheduler for vector quantized diffusion. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_vec_classes (`int`): + The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked + latent pixel. + num_train_timesteps (`int`, defaults to 100): + The number of diffusion steps to train the model. + alpha_cum_start (`float`, defaults to 0.99999): + The starting cumulative alpha value. + alpha_cum_end (`float`, defaults to 0.00009): + The ending cumulative alpha value. + gamma_cum_start (`float`, defaults to 0.00009): + The starting cumulative gamma value. + gamma_cum_end (`float`, defaults to 0.99999): + The ending cumulative gamma value. 
+ """ + + order = 1 + + @register_to_config + def __init__( + self, + num_vec_classes: int, + num_train_timesteps: int = 100, + alpha_cum_start: float = 0.99999, + alpha_cum_end: float = 0.000009, + gamma_cum_start: float = 0.000009, + gamma_cum_end: float = 0.99999, + ): + self.num_embed = num_vec_classes + + # By convention, the index for the mask class is the last class index + self.mask_class = self.num_embed - 1 + + at, att = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end) + ct, ctt = gamma_schedules(num_train_timesteps, gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end) + + num_non_mask_classes = self.num_embed - 1 + bt = (1 - at - ct) / num_non_mask_classes + btt = (1 - att - ctt) / num_non_mask_classes + + at = torch.tensor(at.astype("float64")) + bt = torch.tensor(bt.astype("float64")) + ct = torch.tensor(ct.astype("float64")) + log_at = torch.log(at) + log_bt = torch.log(bt) + log_ct = torch.log(ct) + + att = torch.tensor(att.astype("float64")) + btt = torch.tensor(btt.astype("float64")) + ctt = torch.tensor(ctt.astype("float64")) + log_cumprod_at = torch.log(att) + log_cumprod_bt = torch.log(btt) + log_cumprod_ct = torch.log(ctt) + + self.log_at = log_at.float() + self.log_bt = log_bt.float() + self.log_ct = log_ct.float() + self.log_cumprod_at = log_cumprod_at.float() + self.log_cumprod_bt = log_cumprod_bt.float() + self.log_cumprod_ct = log_cumprod_ct.float() + + # setable values + self.num_inference_steps = None + self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) + + def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps and diffusion process parameters (alpha, beta, gamma) should be moved + to. + """ + self.num_inference_steps = num_inference_steps + timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() + self.timesteps = torch.from_numpy(timesteps).to(device) + + self.log_at = self.log_at.to(device) + self.log_bt = self.log_bt.to(device) + self.log_ct = self.log_ct.to(device) + self.log_cumprod_at = self.log_cumprod_at.to(device) + self.log_cumprod_bt = self.log_cumprod_bt.to(device) + self.log_cumprod_ct = self.log_cumprod_ct.to(device) + + def step( + self, + model_output: torch.Tensor, + timestep: torch.long, + sample: torch.LongTensor, + generator: Optional[torch.Generator] = None, + return_dict: bool = True, + ) -> Union[VQDiffusionSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by the reverse transition distribution. See + [`~VQDiffusionScheduler.q_posterior`] for more details about how the distribution is computer. + + Args: + log_p_x_0: (`torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`): + The log probabilities for the predicted classes of the initial latent pixels. Does not include a + prediction for the masked class as the initial unnoised image cannot be masked. + t (`torch.long`): + The timestep that determines which transition matrices are used. + x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + The classes of each latent pixel at time `t`. 
+ generator (`torch.Generator`, or `None`): + A random number generator for the noise applied to `p(x_{t-1} | x_t)` before it is sampled from. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or + `tuple`. + + Returns: + [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. + """ + if timestep == 0: + log_p_x_t_min_1 = model_output + else: + log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep) + + log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator) + + x_t_min_1 = log_p_x_t_min_1.argmax(dim=1) + + if not return_dict: + return (x_t_min_1,) + + return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1) + + def q_posterior(self, log_p_x_0, x_t, t): + """ + Calculates the log probabilities for the predicted classes of the image at timestep `t-1`: + + ``` + p(x_{t-1} | x_t) = sum( q(x_t | x_{t-1}) * q(x_{t-1} | x_0) * p(x_0) / q(x_t | x_0) ) + ``` + + Args: + log_p_x_0 (`torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`): + The log probabilities for the predicted classes of the initial latent pixels. Does not include a + prediction for the masked class as the initial unnoised image cannot be masked. + x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + The classes of each latent pixel at time `t`. + t (`torch.Long`): + The timestep that determines which transition matrix is used. + + Returns: + `torch.Tensor` of shape `(batch size, num classes, num latent pixels)`: + The log probabilities for the predicted classes of the image at timestep `t-1`. + """ + log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed) + + log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class( + t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True + ) + + log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class( + t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False + ) + + # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + # . . . + # . . . + # . . . + # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) + q = log_p_x_0 - log_q_x_t_given_x_0 + + # sum_0 = p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}), ... , + # sum_n = p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) + q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True) + + # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0 ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n + # . . . + # . . . + # . . . + # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0 ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n + q = q - q_log_sum_exp + + # (p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1} + # . . . + # . . . + # . . . + # (p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1} + # c_cumulative_{t-1} ... 
c_cumulative_{t-1} + q = self.apply_cumulative_transitions(q, t - 1) + + # ((p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_0 ... ((p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_n + # . . . + # . . . + # . . . + # ((p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_0 ... ((p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_n + # c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 ... c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 + log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp + + # For each column, there are two possible cases. + # + # Where: + # - sum(p_n(x_0))) is summing over all classes for x_0 + # - C_i is the class transitioning from (not to be confused with c_t and c_cumulative_t being used for gamma's) + # - C_j is the class transitioning to + # + # 1. x_t is masked i.e. x_t = c_k + # + # Simplifying the expression, the column vector is: + # . + # . + # . + # (c_t / c_cumulative_t) * (a_cumulative_{t-1} * p_n(x_0 = C_i | x_t) + b_cumulative_{t-1} * sum(p_n(x_0))) + # . + # . + # . + # (c_cumulative_{t-1} / c_cumulative_t) * sum(p_n(x_0)) + # + # From equation (11) stated in terms of forward probabilities, the last row is trivially verified. + # + # For the other rows, we can state the equation as ... + # + # (c_t / c_cumulative_t) * [b_cumulative_{t-1} * p(x_0=c_0) + ... + (a_cumulative_{t-1} + b_cumulative_{t-1}) * p(x_0=C_i) + ... + b_cumulative_{k-1} * p(x_0=c_{k-1})] + # + # This verifies the other rows. + # + # 2. x_t is not masked + # + # Simplifying the expression, there are two cases for the rows of the column vector, where C_j = C_i and where C_j != C_i: + # . + # . + # . + # C_j != C_i: b_t * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / b_cumulative_t) * p_n(x_0 = C_i) + ... + (b_cumulative_{t-1} / (a_cumulative_t + b_cumulative_t)) * p_n(c_0=C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) + # . + # . + # . + # C_j = C_i: (a_t + b_t) * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / (a_cumulative_t + b_cumulative_t)) * p_n(x_0 = C_i = C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) + # . + # . + # . + # 0 + # + # The last row is trivially verified. The other rows can be verified by directly expanding equation (11) stated in terms of forward probabilities. + return log_p_x_t_min_1 + + def log_Q_t_transitioning_to_known_class( + self, *, t: torch.int, x_t: torch.LongTensor, log_onehot_x_t: torch.Tensor, cumulative: bool + ): + """ + Calculates the log probabilities of the rows from the (cumulative or non-cumulative) transition matrix for each + latent pixel in `x_t`. + + Args: + t (`torch.Long`): + The timestep that determines which transition matrix is used. + x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): + The classes of each latent pixel at time `t`. + log_onehot_x_t (`torch.Tensor` of shape `(batch size, num classes, num latent pixels)`): + The log one-hot vectors of `x_t`. + cumulative (`bool`): + If cumulative is `False`, the single step transition matrix `t-1`->`t` is used. If cumulative is + `True`, the cumulative transition matrix `0`->`t` is used. 
+ + Returns: + `torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`: + Each _column_ of the returned matrix is a _row_ of log probabilities of the complete probability + transition matrix. + + When non cumulative, returns `self.num_classes - 1` rows because the initial latent pixel cannot be + masked. + + Where: + - `q_n` is the probability distribution for the forward process of the `n`th latent pixel. + - C_0 is a class of a latent pixel embedding + - C_k is the class of the masked latent pixel + + non-cumulative result (omitting logarithms): + ``` + q_0(x_t | x_{t-1} = C_0) ... q_n(x_t | x_{t-1} = C_0) + . . . + . . . + . . . + q_0(x_t | x_{t-1} = C_k) ... q_n(x_t | x_{t-1} = C_k) + ``` + + cumulative result (omitting logarithms): + ``` + q_0_cumulative(x_t | x_0 = C_0) ... q_n_cumulative(x_t | x_0 = C_0) + . . . + . . . + . . . + q_0_cumulative(x_t | x_0 = C_{k-1}) ... q_n_cumulative(x_t | x_0 = C_{k-1}) + ``` + """ + if cumulative: + a = self.log_cumprod_at[t] + b = self.log_cumprod_bt[t] + c = self.log_cumprod_ct[t] + else: + a = self.log_at[t] + b = self.log_bt[t] + c = self.log_ct[t] + + if not cumulative: + # The values in the onehot vector can also be used as the logprobs for transitioning + # from masked latent pixels. If we are not calculating the cumulative transitions, + # we need to save these vectors to be re-appended to the final matrix so the values + # aren't overwritten. + # + # `P(x_t!=mask|x_{t-1=mask}) = 0` and 0 will be the value of the last row of the onehot vector + # if x_t is not masked + # + # `P(x_t=mask|x_{t-1=mask}) = 1` and 1 will be the value of the last row of the onehot vector + # if x_t is masked + log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1) + + # `index_to_log_onehot` will add onehot vectors for masked pixels, + # so the default one hot matrix has one too many rows. See the doc string + # for an explanation of the dimensionality of the returned matrix. + log_onehot_x_t = log_onehot_x_t[:, :-1, :] + + # this is a cheeky trick to produce the transition probabilities using log one-hot vectors. + # + # Don't worry about what values this sets in the columns that mark transitions + # to masked latent pixels. They are overwrote later with the `mask_class_mask`. + # + # Looking at the below logspace formula in non-logspace, each value will evaluate to either + # `1 * a + b = a + b` where `log_Q_t` has the one hot value in the column + # or + # `0 * a + b = b` where `log_Q_t` has the 0 values in the column. + # + # See equation 7 for more details. 
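+        # Concretely: `log_onehot_x_t` is 0 for the hot class and ~log(1e-30) (effectively -inf) elsewhere,
+        # so exp(log_onehot_x_t + a) is exp(a) for the hot class and ~0 otherwise. After logaddexp with b,
+        # the hot entry becomes log(exp(a) + exp(b)) and every other entry collapses to b, matching the
+        # `1 * a + b` / `0 * a + b` cases described above (with a and b read in probability space).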
+ log_Q_t = (log_onehot_x_t + a).logaddexp(b) + + # The whole column of each masked pixel is `c` + mask_class_mask = x_t == self.mask_class + mask_class_mask = mask_class_mask.unsqueeze(1).expand(-1, self.num_embed - 1, -1) + log_Q_t[mask_class_mask] = c + + if not cumulative: + log_Q_t = torch.cat((log_Q_t, log_onehot_x_t_transitioning_from_masked), dim=1) + + return log_Q_t + + def apply_cumulative_transitions(self, q, t): + bsz = q.shape[0] + a = self.log_cumprod_at[t] + b = self.log_cumprod_bt[t] + c = self.log_cumprod_ct[t] + + num_latent_pixels = q.shape[2] + c = c.expand(bsz, 1, num_latent_pixels) + + q = (q + a).logaddexp(b) + q = torch.cat((q, c), dim=1) + + return q diff --git a/diffusers3/training_utils.py b/diffusers3/training_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..26d4a2a504c6e9dac13e5ab048fa16dd5efc633b --- /dev/null +++ b/diffusers3/training_utils.py @@ -0,0 +1,610 @@ +import contextlib +import copy +import gc +import math +import random +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from .models import UNet2DConditionModel +from .schedulers import SchedulerMixin +from .utils import ( + convert_state_dict_to_diffusers, + convert_state_dict_to_peft, + deprecate, + is_peft_available, + is_torch_npu_available, + is_torchvision_available, + is_transformers_available, +) + + +if is_transformers_available(): + import transformers + +if is_peft_available(): + from peft import set_peft_model_state_dict + +if is_torchvision_available(): + from torchvision import transforms + +if is_torch_npu_available(): + import torch_npu # noqa: F401 + + +def set_seed(seed: int): + """ + Args: + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. + seed (`int`): The seed to set. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if is_torch_npu_available(): + torch.npu.manual_seed_all(seed) + else: + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + + +def compute_snr(noise_scheduler, timesteps): + """ + Computes SNR as per + https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849 + """ + alphas_cumprod = noise_scheduler.alphas_cumprod + sqrt_alphas_cumprod = alphas_cumprod**0.5 + sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 + + # Expand the tensors. + # Adapted from https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L1026 + sqrt_alphas_cumprod = sqrt_alphas_cumprod.to(device=timesteps.device)[timesteps].float() + while len(sqrt_alphas_cumprod.shape) < len(timesteps.shape): + sqrt_alphas_cumprod = sqrt_alphas_cumprod[..., None] + alpha = sqrt_alphas_cumprod.expand(timesteps.shape) + + sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod.to(device=timesteps.device)[timesteps].float() + while len(sqrt_one_minus_alphas_cumprod.shape) < len(timesteps.shape): + sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[..., None] + sigma = sqrt_one_minus_alphas_cumprod.expand(timesteps.shape) + + # Compute SNR. + snr = (alpha / sigma) ** 2 + return snr + + +def resolve_interpolation_mode(interpolation_type: str): + """ + Maps a string describing an interpolation function to the corresponding torchvision `InterpolationMode` enum. 
The + full list of supported enums is documented at + https://pytorch.org/vision/0.9/transforms.html#torchvision.transforms.functional.InterpolationMode. + + Args: + interpolation_type (`str`): + A string describing an interpolation method. Currently, `bilinear`, `bicubic`, `box`, `nearest`, + `nearest_exact`, `hamming`, and `lanczos` are supported, corresponding to the supported interpolation modes + in torchvision. + + Returns: + `torchvision.transforms.InterpolationMode`: an `InterpolationMode` enum used by torchvision's `resize` + transform. + """ + if not is_torchvision_available(): + raise ImportError( + "Please make sure to install `torchvision` to be able to use the `resolve_interpolation_mode()` function." + ) + + if interpolation_type == "bilinear": + interpolation_mode = transforms.InterpolationMode.BILINEAR + elif interpolation_type == "bicubic": + interpolation_mode = transforms.InterpolationMode.BICUBIC + elif interpolation_type == "box": + interpolation_mode = transforms.InterpolationMode.BOX + elif interpolation_type == "nearest": + interpolation_mode = transforms.InterpolationMode.NEAREST + elif interpolation_type == "nearest_exact": + interpolation_mode = transforms.InterpolationMode.NEAREST_EXACT + elif interpolation_type == "hamming": + interpolation_mode = transforms.InterpolationMode.HAMMING + elif interpolation_type == "lanczos": + interpolation_mode = transforms.InterpolationMode.LANCZOS + else: + raise ValueError( + f"The given interpolation mode {interpolation_type} is not supported. Currently supported interpolation" + f" modes are `bilinear`, `bicubic`, `box`, `nearest`, `nearest_exact`, `hamming`, and `lanczos`." + ) + + return interpolation_mode + + +def compute_dream_and_update_latents( + unet: UNet2DConditionModel, + noise_scheduler: SchedulerMixin, + timesteps: torch.Tensor, + noise: torch.Tensor, + noisy_latents: torch.Tensor, + target: torch.Tensor, + encoder_hidden_states: torch.Tensor, + dream_detail_preservation: float = 1.0, +) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Implements "DREAM (Diffusion Rectification and Estimation-Adaptive Models)" from http://arxiv.org/abs/2312.00210. + DREAM helps align training with sampling to help training be more efficient and accurate at the cost of an extra + forward step without gradients. + + Args: + `unet`: The state unet to use to make a prediction. + `noise_scheduler`: The noise scheduler used to add noise for the given timestep. + `timesteps`: The timesteps for the noise_scheduler to user. + `noise`: A tensor of noise in the shape of noisy_latents. + `noisy_latents`: Previously noise latents from the training loop. + `target`: The ground-truth tensor to predict after eps is removed. + `encoder_hidden_states`: Text embeddings from the text model. + `dream_detail_preservation`: A float value that indicates detail preservation level. + See reference. + + Returns: + `tuple[torch.Tensor, torch.Tensor]`: Adjusted noisy_latents and target. + """ + alphas_cumprod = noise_scheduler.alphas_cumprod.to(timesteps.device)[timesteps, None, None, None] + sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5 + + # The paper uses lambda = sqrt(1 - alpha) ** p, with p = 1 in their experiments. 
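+    # Here `dream_detail_preservation` plays the role of p, so dream_lambda = (1 - alpha_cumprod) ** (p / 2);
+    # with the default p = 1.0 this is simply sqrt(1 - alpha_cumprod).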
+ dream_lambda = sqrt_one_minus_alphas_cumprod**dream_detail_preservation + + pred = None + with torch.no_grad(): + pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample + + _noisy_latents, _target = (None, None) + if noise_scheduler.config.prediction_type == "epsilon": + predicted_noise = pred + delta_noise = (noise - predicted_noise).detach() + delta_noise.mul_(dream_lambda) + _noisy_latents = noisy_latents.add(sqrt_one_minus_alphas_cumprod * delta_noise) + _target = target.add(delta_noise) + elif noise_scheduler.config.prediction_type == "v_prediction": + raise NotImplementedError("DREAM has not been implemented for v-prediction") + else: + raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") + + return _noisy_latents, _target + + +def unet_lora_state_dict(unet: UNet2DConditionModel) -> Dict[str, torch.Tensor]: + r""" + Returns: + A state dict containing just the LoRA parameters. + """ + lora_state_dict = {} + + for name, module in unet.named_modules(): + if hasattr(module, "set_lora_layer"): + lora_layer = getattr(module, "lora_layer") + if lora_layer is not None: + current_lora_layer_sd = lora_layer.state_dict() + for lora_layer_matrix_name, lora_param in current_lora_layer_sd.items(): + # The matrix name can either be "down" or "up". + lora_state_dict[f"{name}.lora.{lora_layer_matrix_name}"] = lora_param + + return lora_state_dict + + +def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32): + if not isinstance(model, list): + model = [model] + for m in model: + for param in m.parameters(): + # only upcast trainable parameters into fp32 + if param.requires_grad: + param.data = param.to(dtype) + + +def _set_state_dict_into_text_encoder( + lora_state_dict: Dict[str, torch.Tensor], prefix: str, text_encoder: torch.nn.Module +): + """ + Sets the `lora_state_dict` into `text_encoder` coming from `transformers`. + + Args: + lora_state_dict: The state dictionary to be set. + prefix: String identifier to retrieve the portion of the state dict that belongs to `text_encoder`. + text_encoder: Where the `lora_state_dict` is to be set. + """ + + text_encoder_state_dict = { + f'{k.replace(prefix, "")}': v for k, v in lora_state_dict.items() if k.startswith(prefix) + } + text_encoder_state_dict = convert_state_dict_to_peft(convert_state_dict_to_diffusers(text_encoder_state_dict)) + set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default") + + +def compute_density_for_timestep_sampling( + weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None +): + """Compute the density for sampling the timesteps when doing SD3 training. + + Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. + + SD3 paper reference: https://arxiv.org/abs/2403.03206v1. + """ + if weighting_scheme == "logit_normal": + # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$). + u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device="cpu") + u = torch.nn.functional.sigmoid(u) + elif weighting_scheme == "mode": + u = torch.rand(size=(batch_size,), device="cpu") + u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u) + else: + u = torch.rand(size=(batch_size,), device="cpu") + return u + + +def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None): + """Computes loss weighting scheme for SD3 training. 
+ + Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. + + SD3 paper reference: https://arxiv.org/abs/2403.03206v1. + """ + if weighting_scheme == "sigma_sqrt": + weighting = (sigmas**-2.0).float() + elif weighting_scheme == "cosmap": + bot = 1 - 2 * sigmas + 2 * sigmas**2 + weighting = 2 / (math.pi * bot) + else: + weighting = torch.ones_like(sigmas) + return weighting + + +def clear_objs_and_retain_memory(objs: List[Any]): + """Deletes `objs` and runs garbage collection. Then clears the cache of the available accelerator.""" + if len(objs) >= 1: + for obj in objs: + del obj + + gc.collect() + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + elif torch.backends.mps.is_available(): + torch.mps.empty_cache() + elif is_torch_npu_available(): + torch_npu.empty_cache() + + +# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14 +class EMAModel: + """ + Exponential Moving Average of models weights + """ + + def __init__( + self, + parameters: Iterable[torch.nn.Parameter], + decay: float = 0.9999, + min_decay: float = 0.0, + update_after_step: int = 0, + use_ema_warmup: bool = False, + inv_gamma: Union[float, int] = 1.0, + power: Union[float, int] = 2 / 3, + foreach: bool = False, + model_cls: Optional[Any] = None, + model_config: Dict[str, Any] = None, + **kwargs, + ): + """ + Args: + parameters (Iterable[torch.nn.Parameter]): The parameters to track. + decay (float): The decay factor for the exponential moving average. + min_decay (float): The minimum decay factor for the exponential moving average. + update_after_step (int): The number of steps to wait before starting to update the EMA weights. + use_ema_warmup (bool): Whether to use EMA warmup. + inv_gamma (float): + Inverse multiplicative factor of EMA warmup. Default: 1. Only used if `use_ema_warmup` is True. + power (float): Exponential factor of EMA warmup. Default: 2/3. Only used if `use_ema_warmup` is True. + foreach (bool): Use torch._foreach functions for updating shadow parameters. Should be faster. + device (Optional[Union[str, torch.device]]): The device to store the EMA weights on. If None, the EMA + weights will be stored on CPU. + + @crowsonkb's notes on EMA Warmup: + If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are good values for models you plan + to train for a million or more steps (reaches decay factor 0.999 at 31.6K steps, 0.9999 at 1M steps), + gamma=1, power=3/4 for models you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 + at 215.4k steps). + """ + + if isinstance(parameters, torch.nn.Module): + deprecation_message = ( + "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " + "Please pass the parameters of the module instead." + ) + deprecate( + "passing a `torch.nn.Module` to `ExponentialMovingAverage`", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + parameters = parameters.parameters() + + # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility + use_ema_warmup = True + + if kwargs.get("max_value", None) is not None: + deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead." + deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False) + decay = kwargs["max_value"] + + if kwargs.get("min_value", None) is not None: + deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead." 
+ deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False) + min_decay = kwargs["min_value"] + + parameters = list(parameters) + self.shadow_params = [p.clone().detach() for p in parameters] + + if kwargs.get("device", None) is not None: + deprecation_message = "The `device` argument is deprecated. Please use `to` instead." + deprecate("device", "1.0.0", deprecation_message, standard_warn=False) + self.to(device=kwargs["device"]) + + self.temp_stored_params = None + + self.decay = decay + self.min_decay = min_decay + self.update_after_step = update_after_step + self.use_ema_warmup = use_ema_warmup + self.inv_gamma = inv_gamma + self.power = power + self.optimization_step = 0 + self.cur_decay_value = None # set in `step()` + self.foreach = foreach + + self.model_cls = model_cls + self.model_config = model_config + + @classmethod + def from_pretrained(cls, path, model_cls, foreach=False) -> "EMAModel": + _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True) + model = model_cls.from_pretrained(path) + + ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config, foreach=foreach) + + ema_model.load_state_dict(ema_kwargs) + return ema_model + + def save_pretrained(self, path): + if self.model_cls is None: + raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.") + + if self.model_config is None: + raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.") + + model = self.model_cls.from_config(self.model_config) + state_dict = self.state_dict() + state_dict.pop("shadow_params", None) + + model.register_to_config(**state_dict) + self.copy_to(model.parameters()) + model.save_pretrained(path) + + def get_decay(self, optimization_step: int) -> float: + """ + Compute the decay factor for the exponential moving average. + """ + step = max(0, optimization_step - self.update_after_step - 1) + + if step <= 0: + return 0.0 + + if self.use_ema_warmup: + cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power + else: + cur_decay_value = (1 + step) / (10 + step) + + cur_decay_value = min(cur_decay_value, self.decay) + # make sure decay is not smaller than min_decay + cur_decay_value = max(cur_decay_value, self.min_decay) + return cur_decay_value + + @torch.no_grad() + def step(self, parameters: Iterable[torch.nn.Parameter]): + if isinstance(parameters, torch.nn.Module): + deprecation_message = ( + "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " + "Please pass the parameters of the module instead." + ) + deprecate( + "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", + "1.0.0", + deprecation_message, + standard_warn=False, + ) + parameters = parameters.parameters() + + parameters = list(parameters) + + self.optimization_step += 1 + + # Compute the decay factor for the exponential moving average. 
+ decay = self.get_decay(self.optimization_step) + self.cur_decay_value = decay + one_minus_decay = 1 - decay + + context_manager = contextlib.nullcontext + if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): + import deepspeed + + if self.foreach: + if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): + context_manager = deepspeed.zero.GatheredParameters(parameters, modifier_rank=None) + + with context_manager(): + params_grad = [param for param in parameters if param.requires_grad] + s_params_grad = [ + s_param for s_param, param in zip(self.shadow_params, parameters) if param.requires_grad + ] + + if len(params_grad) < len(parameters): + torch._foreach_copy_( + [s_param for s_param, param in zip(self.shadow_params, parameters) if not param.requires_grad], + [param for param in parameters if not param.requires_grad], + non_blocking=True, + ) + + torch._foreach_sub_( + s_params_grad, torch._foreach_sub(s_params_grad, params_grad), alpha=one_minus_decay + ) + + else: + for s_param, param in zip(self.shadow_params, parameters): + if is_transformers_available() and transformers.integrations.deepspeed.is_deepspeed_zero3_enabled(): + context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None) + + with context_manager(): + if param.requires_grad: + s_param.sub_(one_minus_decay * (s_param - param)) + else: + s_param.copy_(param) + + def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None: + """ + Copy current averaged parameters into given collection of parameters. + + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored moving averages. If `None`, the parameters with which this + `ExponentialMovingAverage` was initialized will be used. + """ + parameters = list(parameters) + if self.foreach: + torch._foreach_copy_( + [param.data for param in parameters], + [s_param.to(param.device).data for s_param, param in zip(self.shadow_params, parameters)], + ) + else: + for s_param, param in zip(self.shadow_params, parameters): + param.data.copy_(s_param.to(param.device).data) + + def pin_memory(self) -> None: + r""" + Move internal buffers of the ExponentialMovingAverage to pinned memory. Useful for non-blocking transfers for + offloading EMA params to the host. + """ + + self.shadow_params = [p.pin_memory() for p in self.shadow_params] + + def to(self, device=None, dtype=None, non_blocking=False) -> None: + r"""Move internal buffers of the ExponentialMovingAverage to `device`. + + Args: + device: like `device` argument to `torch.Tensor.to` + """ + # .to() on the tensors handles None correctly + self.shadow_params = [ + p.to(device=device, dtype=dtype, non_blocking=non_blocking) + if p.is_floating_point() + else p.to(device=device, non_blocking=non_blocking) + for p in self.shadow_params + ] + + def state_dict(self) -> dict: + r""" + Returns the state of the ExponentialMovingAverage as a dict. This method is used by accelerate during + checkpointing to save the ema state dict. + """ + # Following PyTorch conventions, references to tensors are returned: + # "returns a reference to the state and not its copy!" 
+        # https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict
+        return {
+            "decay": self.decay,
+            "min_decay": self.min_decay,
+            "optimization_step": self.optimization_step,
+            "update_after_step": self.update_after_step,
+            "use_ema_warmup": self.use_ema_warmup,
+            "inv_gamma": self.inv_gamma,
+            "power": self.power,
+            "shadow_params": self.shadow_params,
+        }
+
+    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
+        r"""
+        Save the current parameters for restoring later.
+
+        Args:
+            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
+                temporarily stored.
+        """
+        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
+
+    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
+        r"""
+        Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters
+        without affecting the original optimization process. Store the parameters before the `copy_to()` method.
+        After validation (or model saving), use this to restore the former parameters.
+
+        Args:
+            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
+                updated with the stored parameters. If `None`, the parameters with which this
+                `ExponentialMovingAverage` was initialized will be used.
+        """
+        if self.temp_stored_params is None:
+            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
+        if self.foreach:
+            torch._foreach_copy_(
+                [param.data for param in parameters], [c_param.data for c_param in self.temp_stored_params]
+            )
+        else:
+            for c_param, param in zip(self.temp_stored_params, parameters):
+                param.data.copy_(c_param.data)
+
+        # Better memory-wise.
+        self.temp_stored_params = None
+
+    def load_state_dict(self, state_dict: dict) -> None:
+        r"""
+        Loads the ExponentialMovingAverage state. This method is used by accelerate during checkpointing to load
+        the ema state dict.
+
+        Args:
+            state_dict (dict): EMA state. Should be an object returned
+                from a call to :meth:`state_dict`.
+ """ + # deepcopy, to be consistent with module API + state_dict = copy.deepcopy(state_dict) + + self.decay = state_dict.get("decay", self.decay) + if self.decay < 0.0 or self.decay > 1.0: + raise ValueError("Decay must be between 0 and 1") + + self.min_decay = state_dict.get("min_decay", self.min_decay) + if not isinstance(self.min_decay, float): + raise ValueError("Invalid min_decay") + + self.optimization_step = state_dict.get("optimization_step", self.optimization_step) + if not isinstance(self.optimization_step, int): + raise ValueError("Invalid optimization_step") + + self.update_after_step = state_dict.get("update_after_step", self.update_after_step) + if not isinstance(self.update_after_step, int): + raise ValueError("Invalid update_after_step") + + self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup) + if not isinstance(self.use_ema_warmup, bool): + raise ValueError("Invalid use_ema_warmup") + + self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma) + if not isinstance(self.inv_gamma, (float, int)): + raise ValueError("Invalid inv_gamma") + + self.power = state_dict.get("power", self.power) + if not isinstance(self.power, (float, int)): + raise ValueError("Invalid power") + + shadow_params = state_dict.get("shadow_params", None) + if shadow_params is not None: + self.shadow_params = shadow_params + if not isinstance(self.shadow_params, list): + raise ValueError("shadow_params must be a list") + if not all(isinstance(p, torch.Tensor) for p in self.shadow_params): + raise ValueError("shadow_params must all be Tensors") diff --git a/diffusers3/utils/.ipynb_checkpoints/__init__-checkpoint.py b/diffusers3/utils/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..c7ea2bcc5b7ff86679965dd73d62f3618c2ed096 --- /dev/null +++ b/diffusers3/utils/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1,134 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +from packaging import version + +from .. 
diff --git a/diffusers3/utils/__init__.py b/diffusers3/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7ea2bcc5b7ff86679965dd73d62f3618c2ed096
--- /dev/null
+++ b/diffusers3/utils/__init__.py
@@ -0,0 +1,134 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +from packaging import version + +from .. import __version__ +from .constants import ( + CONFIG_NAME, + DEPRECATED_REVISION_ARGS, + DIFFUSERS_DYNAMIC_MODULE_NAME, + FLAX_WEIGHTS_NAME, + HF_MODULES_CACHE, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + MIN_PEFT_VERSION, + ONNX_EXTERNAL_WEIGHTS_NAME, + ONNX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFETENSORS_FILE_EXTENSION, + SAFETENSORS_WEIGHTS_NAME, + USE_PEFT_BACKEND, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, +) +from .deprecation_utils import deprecate +from .doc_utils import replace_example_docstring +from .dynamic_modules_utils import get_class_from_dynamic_module +from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video +from .hub_utils import ( + PushToHubMixin, + _add_variant, + _get_checkpoint_shard_files, + _get_model_file, + extract_commit_hash, + http_user_agent, +) +from .import_utils import ( + BACKENDS_MAPPING, + DIFFUSERS_SLOW_IMPORT, + ENV_VARS_TRUE_AND_AUTO_VALUES, + ENV_VARS_TRUE_VALUES, + USE_JAX, + USE_TF, + USE_TORCH, + DummyObject, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_accelerate_available, + is_accelerate_version, + is_bitsandbytes_available, + is_bs4_available, + is_flax_available, + is_ftfy_available, + is_google_colab, + is_inflect_available, + is_invisible_watermark_available, + is_k_diffusion_available, + is_k_diffusion_version, + is_librosa_available, + is_matplotlib_available, + is_note_seq_available, + is_onnx_available, + is_peft_available, + is_peft_version, + is_safetensors_available, + is_scipy_available, + is_sentencepiece_available, + is_tensorboard_available, + is_timm_available, + is_torch_available, + is_torch_npu_available, + is_torch_version, + is_torch_xla_available, + is_torchsde_available, + is_torchvision_available, + is_transformers_available, + is_transformers_version, + is_unidecode_available, + is_wandb_available, + is_xformers_available, + requires_backends, +) +from .loading_utils import load_image, load_video +from .logging import get_logger +from .outputs import BaseOutput +from .peft_utils import ( + check_peft_version, + delete_adapter_layers, + get_adapter_name, + get_peft_kwargs, + recurse_remove_peft_layers, + scale_lora_layers, + set_adapter_layers, + set_weights_and_activate_adapters, + unscale_lora_layers, +) +from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil +from .state_dict_utils import ( + convert_all_state_dict_to_peft, + convert_state_dict_to_diffusers, + convert_state_dict_to_kohya, + convert_state_dict_to_peft, + convert_unet_state_dict_to_peft, +) + + +logger = get_logger(__name__) + + +def check_min_version(min_version): + if version.parse(__version__) < version.parse(min_version): + if "dev" in min_version: + error_message = ( + "This example requires a source install from HuggingFace diffusers (see " + "`https://huggingface.co/docs/diffusers/installation#install-from-source`)," + ) + else: + error_message = f"This example requires a minimum version of {min_version}," + error_message += f" but the version found is {__version__}.\n" + raise 
ImportError(error_message)
diff --git a/diffusers3/utils/accelerate_utils.py b/diffusers3/utils/accelerate_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..99a8b3a47c256cc80dbd5de7d14101dcc7a9ff9a
--- /dev/null
+++ b/diffusers3/utils/accelerate_utils.py
@@ -0,0 +1,48 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" +Accelerate utilities: Utilities related to accelerate +""" + +from packaging import version + +from .import_utils import is_accelerate_available + + +if is_accelerate_available(): + import accelerate + + +def apply_forward_hook(method): + """ + Decorator that applies a registered CpuOffload hook to an arbitrary function rather than `forward`. This is useful + for cases where a PyTorch module provides functions other than `forward` that should trigger a move to the + appropriate acceleration device. This is the case for `encode` and `decode` in [`AutoencoderKL`]. + + This decorator looks inside the internal `_hf_hook` property to find a registered offload hook. + + :param method: The method to decorate. This method should be a method of a PyTorch module. + """ + if not is_accelerate_available(): + return method + accelerate_version = version.parse(accelerate.__version__).base_version + if version.parse(accelerate_version) < version.parse("0.17.0"): + return method + + def wrapper(self, *args, **kwargs): + if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"): + self._hf_hook.pre_forward(self) + return method(self, *args, **kwargs) + + return wrapper diff --git a/diffusers3/utils/constants.py b/diffusers3/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..553ac5d1bb2794cc480e08790ad3577664c66ce3 --- /dev/null +++ b/diffusers3/utils/constants.py @@ -0,0 +1,57 @@ +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import importlib +import os + +from huggingface_hub.constants import HF_HOME +from packaging import version + +from ..dependency_versions_check import dep_version_check +from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available + + +MIN_PEFT_VERSION = "0.6.0" +MIN_TRANSFORMERS_VERSION = "4.34.0" +_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES + + +CONFIG_NAME = "config.json" +WEIGHTS_NAME = "diffusion_pytorch_model.bin" +WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json" +FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack" +ONNX_WEIGHTS_NAME = "model.onnx" +SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors" +SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json" +SAFETENSORS_FILE_EXTENSION = "safetensors" +ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb" +HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co") +DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules" +HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules")) +DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"] + +# Below should be `True` if the current version of `peft` and `transformers` are compatible with +# PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are +# available. +# For PEFT it is has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0. 
+_required_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version +) >= version.parse(MIN_PEFT_VERSION) +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) >= version.parse(MIN_TRANSFORMERS_VERSION) + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version + +if USE_PEFT_BACKEND and _CHECK_PEFT: + dep_version_check("peft") diff --git a/diffusers3/utils/deprecation_utils.py b/diffusers3/utils/deprecation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f482deddd2f46b8d2e29d5229faa0e9a21f2fd98 --- /dev/null +++ b/diffusers3/utils/deprecation_utils.py @@ -0,0 +1,49 @@ +import inspect +import warnings +from typing import Any, Dict, Optional, Union + +from packaging import version + + +def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2): + from .. import __version__ + + deprecated_kwargs = take_from + values = () + if not isinstance(args[0], tuple): + args = (args,) + + for attribute, version_name, message in args: + if version.parse(version.parse(__version__).base_version) >= version.parse(version_name): + raise ValueError( + f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" + f" version {__version__} is >= {version_name}" + ) + + warning = None + if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs: + values += (deprecated_kwargs.pop(attribute),) + warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." + elif hasattr(deprecated_kwargs, attribute): + values += (getattr(deprecated_kwargs, attribute),) + warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." + elif deprecated_kwargs is None: + warning = f"`{attribute}` is deprecated and will be removed in version {version_name}." + + if warning is not None: + warning = warning + " " if standard_warn else "" + warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel) + + if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0: + call_frame = inspect.getouterframes(inspect.currentframe())[1] + filename = call_frame.filename + line_number = call_frame.lineno + function = call_frame.function + key, value = next(iter(deprecated_kwargs.items())) + raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`") + + if len(values) == 0: + return + elif len(values) == 1: + return values[0] + return values diff --git a/diffusers3/utils/doc_utils.py b/diffusers3/utils/doc_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fe633e6836422f22618548b3f6d946d6b073650f --- /dev/null +++ b/diffusers3/utils/doc_utils.py @@ -0,0 +1,39 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
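The `deprecate` helper in deprecation_utils.py above is what library code calls when an argument is being phased out: it pops the legacy keyword from `take_from`, emits a `FutureWarning`, and returns the old value so the caller can keep honouring it. A rough sketch of a caller, with an invented `resolution` -> `size` rename used purely for illustration:

# Hypothetical use of deprecate(); the `resolution` -> `size` rename is made up for this example.
def resize_image(image, size=None, **kwargs):
    resolution = deprecate("resolution", "1.0.0", "Please use `size` instead.", take_from=kwargs)
    if resolution is not None:
        size = resolution  # honour the old argument while the warning tells callers to migrate
    return image.resize(size)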
+""" +Doc utilities: Utilities related to documentation +""" + +import re + + +def replace_example_docstring(example_docstring): + def docstring_decorator(fn): + func_doc = fn.__doc__ + lines = func_doc.split("\n") + i = 0 + while i < len(lines) and re.search(r"^\s*Examples?:\s*$", lines[i]) is None: + i += 1 + if i < len(lines): + lines[i] = example_docstring + func_doc = "\n".join(lines) + else: + raise ValueError( + f"The function {fn} should have an empty 'Examples:' in its docstring as placeholder, " + f"current docstring is:\n{func_doc}" + ) + fn.__doc__ = func_doc + return fn + + return docstring_decorator diff --git a/diffusers3/utils/dummy_flax_and_transformers_objects.py b/diffusers3/utils/dummy_flax_and_transformers_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..5e65e5349bb0a6a0bac62cddf0ce0fad64237c68 --- /dev/null +++ b/diffusers3/utils/dummy_flax_and_transformers_objects.py @@ -0,0 +1,77 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + +class FlaxStableDiffusionXLPipeline(metaclass=DummyObject): + _backends = ["flax", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax", "transformers"]) diff --git a/diffusers3/utils/dummy_flax_objects.py b/diffusers3/utils/dummy_flax_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..5fa8dbc819316e96f7483addba43f90b9d8f397b --- /dev/null +++ b/diffusers3/utils/dummy_flax_objects.py @@ -0,0 +1,212 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class FlaxControlNetModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxModelMixin(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxUNet2DConditionModel(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxAutoencoderKL(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDiffusionPipeline(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDDIMScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDDPMScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxEulerDiscreteScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxKarrasVeScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class 
FlaxLMSDiscreteScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxPNDMScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxSchedulerMixin(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + +class FlaxScoreSdeVeScheduler(metaclass=DummyObject): + _backends = ["flax"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["flax"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["flax"]) diff --git a/diffusers3/utils/dummy_note_seq_objects.py b/diffusers3/utils/dummy_note_seq_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..c02d0b015aedc37c01fb3b843bc79547aae5da68 --- /dev/null +++ b/diffusers3/utils/dummy_note_seq_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class MidiProcessor(metaclass=DummyObject): + _backends = ["note_seq"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["note_seq"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["note_seq"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["note_seq"]) diff --git a/diffusers3/utils/dummy_onnx_objects.py b/diffusers3/utils/dummy_onnx_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..bde5f6ad0793e2d81bc638600b46ff81748d09ee --- /dev/null +++ b/diffusers3/utils/dummy_onnx_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class OnnxRuntimeModel(metaclass=DummyObject): + _backends = ["onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["onnx"]) diff --git a/diffusers3/utils/dummy_pt_objects.py b/diffusers3/utils/dummy_pt_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..1ab946ce725737a087eaf936f6b3d8bce55d8a56 --- /dev/null +++ b/diffusers3/utils/dummy_pt_objects.py @@ -0,0 +1,1545 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class AsymmetricAutoencoderKL(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AuraFlowTransformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderKL(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderKLCogVideoX(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderKLTemporalDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderOobleck(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoencoderTiny(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CogVideoXTransformer3DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ConsistencyDecoderVAE(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, 
["torch"]) + + +class ControlNetXSAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DiTTransformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class FluxControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class FluxMultiControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class FluxTransformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class HunyuanDiT2DControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class HunyuanDiT2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class HunyuanDiT2DMultiControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class I2VGenXLUNet(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class Kandinsky3UNet(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class 
LatteTransformer3DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LuminaNextDiT2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ModelMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class MotionAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class MultiAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PixArtTransformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PriorTransformer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SD3ControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SD3MultiControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SD3Transformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SparseControlNetModel(metaclass=DummyObject): + _backends = ["torch"] + + 
def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class StableAudioDiTModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class T2IAdapter(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class T5FilmDecoder(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class Transformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet1DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet2DConditionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNet3DConditionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNetControlNetXSModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNetMotionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def 
from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UNetSpatioTemporalConditionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UVit2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class VQModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +def get_constant_schedule(*args, **kwargs): + requires_backends(get_constant_schedule, ["torch"]) + + +def get_constant_schedule_with_warmup(*args, **kwargs): + requires_backends(get_constant_schedule_with_warmup, ["torch"]) + + +def get_cosine_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_schedule_with_warmup, ["torch"]) + + +def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): + requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) + + +def get_linear_schedule_with_warmup(*args, **kwargs): + requires_backends(get_linear_schedule_with_warmup, ["torch"]) + + +def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): + requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) + + +def get_scheduler(*args, **kwargs): + requires_backends(get_scheduler, ["torch"]) + + +class AudioPipelineOutput(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoPipelineForImage2Image(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoPipelineForInpainting(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AutoPipelineForText2Image(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class 
BlipDiffusionControlNetPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class BlipDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CLIPImageProjection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ConsistencyModelPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DanceDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DiffusionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DiTPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ImagePipelineOutput(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KarrasVePipeline(metaclass=DummyObject): + _backends = ["torch"] + 
+ def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LDMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LDMSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PNDMPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class RePaintPipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ScoreSdeVePipeline(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class StableDiffusionMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class AmusedScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CMStochasticIterativeScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CogVideoXDDIMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class CogVideoXDPMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, 
["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMInverseScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMParallelScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDIMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMParallelScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DDPMWuerstchenScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DEISMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DPMSolverMultistepInverseScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class DPMSolverSinglestepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + 
def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EDMDPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EDMEulerScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EulerAncestralDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EulerDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class FlowMatchEulerDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class FlowMatchHeunDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class HeunDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class IPNDMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KarrasVeScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KDPM2AncestralDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def 
from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class KDPM2DiscreteScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class LCMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PNDMScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class RePaintScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SASolverScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class SchedulerMixin(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class ScoreSdeVeScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class TCDScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UnCLIPScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class UniPCMultistepScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + 
def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class VQDiffusionScheduler(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class EMAModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) diff --git a/diffusers3/utils/dummy_torch_and_librosa_objects.py b/diffusers3/utils/dummy_torch_and_librosa_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..2088bc4a744198284f22fe54e6f1055cf3568566 --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_librosa_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class AudioDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "librosa"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "librosa"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) + + +class Mel(metaclass=DummyObject): + _backends = ["torch", "librosa"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "librosa"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "librosa"]) diff --git a/diffusers3/utils/dummy_torch_and_scipy_objects.py b/diffusers3/utils/dummy_torch_and_scipy_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ff25863822b04971d2c6dfdc17f5b28774cf05 --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_scipy_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class LMSDiscreteScheduler(metaclass=DummyObject): + _backends = ["torch", "scipy"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "scipy"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "scipy"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "scipy"]) diff --git a/diffusers3/utils/dummy_torch_and_torchsde_objects.py b/diffusers3/utils/dummy_torch_and_torchsde_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..6ff14231b9cc8c88effc4d11f7b439fa16bb7f20 --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_torchsde_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class CosineDPMSolverMultistepScheduler(metaclass=DummyObject): + _backends = ["torch", "torchsde"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "torchsde"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "torchsde"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "torchsde"]) + + +class DPMSolverSDEScheduler(metaclass=DummyObject): + _backends = ["torch", "torchsde"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "torchsde"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "torchsde"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "torchsde"]) diff --git a/diffusers3/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py b/diffusers3/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..2ab00c54ce2d7ca78f674085f0fe294d1f05c1e0 --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py @@ -0,0 +1,32 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "k_diffusion"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) + + +class StableDiffusionXLKDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "k_diffusion"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "k_diffusion"]) diff --git a/diffusers3/utils/dummy_torch_and_transformers_and_onnx_objects.py b/diffusers3/utils/dummy_torch_and_transformers_and_onnx_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..b7afad8226b87292100270e3e7daad6885be0e7f --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_transformers_and_onnx_objects.py @@ -0,0 +1,92 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + +class StableDiffusionOnnxPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "onnx"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "onnx"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "onnx"]) diff --git a/diffusers3/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py b/diffusers3/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..349db10d1fa5b8505173e1578d3c83ea17e95fbc --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_transformers_and_sentencepiece_objects.py @@ -0,0 +1,47 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class KolorsImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + +class KolorsPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + +class KolorsPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers", "sentencepiece"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers", "sentencepiece"]) diff --git a/diffusers3/utils/dummy_torch_and_transformers_objects.py b/diffusers3/utils/dummy_torch_and_transformers_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..732488721598d53130d1c1c3b35bed1d1544b27c --- /dev/null +++ b/diffusers3/utils/dummy_torch_and_transformers_objects.py @@ -0,0 +1,2162 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. 
+from ..utils import DummyObject, requires_backends + + +class AltDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AltDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AmusedImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AmusedInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AmusedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffSDXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", 
"transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffSparseControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffVideoToVideoControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AnimateDiffVideoToVideoPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDM2Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDM2ProjectionModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDM2UNet2DConditionModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AudioLDMPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class AuraFlowPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + 
requires_backends(cls, ["torch", "transformers"]) + + +class CLIPImageProjection(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class CogVideoXPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class CogVideoXVideoToVideoPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class CycleDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class FluxControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class FluxImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class FluxInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class FluxPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class HunyuanDiTControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", 
"transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class HunyuanDiTPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class HunyuanDiTPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class I2VGenXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFImg2ImgSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFInpaintingPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFInpaintingSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class IFPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", 
"transformers"]) + + +class IFSuperResolutionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class ImageTextPipelineOutput(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Kandinsky3Img2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class Kandinsky3Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyImg2ImgCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyInpaintCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", 
"transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyPriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22CombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22ControlnetImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22ControlnetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22Img2ImgCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22Img2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22InpaintCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, 
*args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22InpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22PriorEmb2EmbPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class KandinskyV22PriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LatentConsistencyModelPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LattePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LDMTextToImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LEditsPPPipelineStableDiffusion(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def 
__init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LEditsPPPipelineStableDiffusionXL(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class LuminaText2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class MarigoldDepthPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class MarigoldNormalsPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class MusicLDMPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PaintByExamplePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PIAPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PixArtAlphaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def 
from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PixArtSigmaPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class PixArtSigmaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class SemanticStableDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class ShapEImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class ShapEPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableAudioPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableAudioProjectionModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableCascadeCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableCascadeDecoderPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, 
*args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableCascadePriorPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusion3ControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusion3Img2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusion3InpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusion3PAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusion3Pipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionAdapterPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionAttendAndExcitePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, 
["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionControlNetXSPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionDepth2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionDiffEditPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionGLIGENPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", 
"transformers"]) + + +class StableDiffusionGLIGENTextImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionInstructPix2PixPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionLatentUpscalePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionLDM3DPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionModelEditingPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + 
+ def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPanoramaPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionParadigmsPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPipelineSafe(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionPix2PixZeroPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionSAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionUpscalePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + 
requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLAdapterPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetPAGImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLControlNetXSPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, 
*args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLInstructPix2PixPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLPAGImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLPAGInpaintPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLPAGPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableDiffusionXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableUnCLIPImg2ImgPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableUnCLIPPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class StableVideoDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", 
"transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class TextToVideoSDPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class TextToVideoZeroPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class TextToVideoZeroSDXLPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UnCLIPImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UnCLIPPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UniDiffuserModel(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UniDiffuserPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class UniDiffuserTextDecoder(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + 
@classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VideoToVideoSDPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class VQDiffusionPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class WuerstchenCombinedPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class WuerstchenDecoderPipeline(metaclass=DummyObject): + _backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + +class WuerstchenPriorPipeline(metaclass=DummyObject): + 
_backends = ["torch", "transformers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch", "transformers"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch", "transformers"]) diff --git a/diffusers3/utils/dummy_transformers_and_torch_and_note_seq_objects.py b/diffusers3/utils/dummy_transformers_and_torch_and_note_seq_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..fbde04e33f0abd86d12f3dee048a4f0585c9f19d --- /dev/null +++ b/diffusers3/utils/dummy_transformers_and_torch_and_note_seq_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class SpectrogramDiffusionPipeline(metaclass=DummyObject): + _backends = ["transformers", "torch", "note_seq"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["transformers", "torch", "note_seq"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["transformers", "torch", "note_seq"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["transformers", "torch", "note_seq"]) diff --git a/diffusers3/utils/dynamic_modules_utils.py b/diffusers3/utils/dynamic_modules_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f0cf953924ad8e6d93f96a318e7ccca038a36999 --- /dev/null +++ b/diffusers3/utils/dynamic_modules_utils.py @@ -0,0 +1,457 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities to dynamically load objects from the Hub.""" + +import importlib +import inspect +import json +import os +import re +import shutil +import sys +from pathlib import Path +from typing import Dict, Optional, Union +from urllib import request + +from huggingface_hub import hf_hub_download, model_info +from huggingface_hub.utils import RevisionNotFoundError, validate_hf_hub_args +from packaging import version + +from .. import __version__ +from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# See https://huggingface.co/datasets/diffusers/community-pipelines-mirror +COMMUNITY_PIPELINES_MIRROR_ID = "diffusers/community-pipelines-mirror" + + +def get_diffusers_versions(): + url = "https://pypi.org/pypi/diffusers/json" + releases = json.loads(request.urlopen(url).read())["releases"].keys() + return sorted(releases, key=lambda x: version.Version(x)) + + +def init_hf_modules(): + """ + Creates the cache directory for modules with an init, and adds it to the Python path. + """ + # This function has already been executed if HF_MODULES_CACHE already is in the Python path. 
+ if HF_MODULES_CACHE in sys.path: + return + + sys.path.append(HF_MODULES_CACHE) + os.makedirs(HF_MODULES_CACHE, exist_ok=True) + init_path = Path(HF_MODULES_CACHE) / "__init__.py" + if not init_path.exists(): + init_path.touch() + + +def create_dynamic_module(name: Union[str, os.PathLike]): + """ + Creates a dynamic module in the cache directory for modules. + """ + init_hf_modules() + dynamic_module_path = Path(HF_MODULES_CACHE) / name + # If the parent module does not exist yet, recursively create it. + if not dynamic_module_path.parent.exists(): + create_dynamic_module(dynamic_module_path.parent) + os.makedirs(dynamic_module_path, exist_ok=True) + init_path = dynamic_module_path / "__init__.py" + if not init_path.exists(): + init_path.touch() + + +def get_relative_imports(module_file): + """ + Get the list of modules that are relatively imported in a module file. + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + """ + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import .xxx` + relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from .xxx import yyy` + relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) + # Unique-ify + return list(set(relative_imports)) + + +def get_relative_import_files(module_file): + """ + Get the list of all files that are needed for a given module. Note that this function recurses through the relative + imports (if a imports b and b imports c, it will return module files for b and c). + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + """ + no_change = False + files_to_check = [module_file] + all_relative_imports = [] + + # Let's recurse through all relative imports + while not no_change: + new_imports = [] + for f in files_to_check: + new_imports.extend(get_relative_imports(f)) + + module_path = Path(module_file).parent + new_import_files = [str(module_path / m) for m in new_imports] + new_import_files = [f for f in new_import_files if f not in all_relative_imports] + files_to_check = [f"{f}.py" for f in new_import_files] + + no_change = len(new_import_files) == 0 + all_relative_imports.extend(files_to_check) + + return all_relative_imports + + +def check_imports(filename): + """ + Check if the current Python environment contains all the libraries that are imported in a file. + """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + + # Unique-ify and test we got them all + imports = list(set(imports)) + missing_packages = [] + for imp in imports: + try: + importlib.import_module(imp) + except ImportError: + missing_packages.append(imp) + + if len(missing_packages) > 0: + raise ImportError( + "This modeling file requires the following packages that were not found in your environment: " + f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" + ) + + return get_relative_imports(filename) + + +def get_class_in_module(class_name, module_path): + """ + Import a module on the cache directory for modules and extract a class from it. 
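`get_relative_imports` above looks only for the textual patterns `import .xxx` and `from .xxx import yyy`, `get_relative_import_files` repeats that scan until no new files turn up, and `check_imports` simply tries to import every top-level absolute package. A quick illustration of what the two scans pick out of an invented module body (the text below is only pattern-matched, never imported or executed):

```python
# Illustration of the regex scans above on an invented module body.
import re

content = """
import torch
from transformers import CLIPTextModel
from .my_helpers import postprocess
"""

# Relative imports -> files that must be copied alongside the module
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
print(sorted(set(relative)))  # ['my_helpers']

# Absolute imports -> packages that must already be installed (check_imports)
absolute = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
absolute += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
top_level = sorted({imp.split(".")[0] for imp in absolute if not imp.startswith(".")})
print(top_level)  # ['torch', 'transformers']
```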
+ """ + module_path = module_path.replace(os.path.sep, ".") + module = importlib.import_module(module_path) + + if class_name is None: + return find_pipeline_class(module) + return getattr(module, class_name) + + +def find_pipeline_class(loaded_module): + """ + Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class + inheriting from `DiffusionPipeline`. + """ + from ..pipelines import DiffusionPipeline + + cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass)) + + pipeline_class = None + for cls_name, cls in cls_members.items(): + if ( + cls_name != DiffusionPipeline.__name__ + and issubclass(cls, DiffusionPipeline) + and cls.__module__.split(".")[0] != "diffusers" + ): + if pipeline_class is not None: + raise ValueError( + f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:" + f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in" + f" {loaded_module}." + ) + pipeline_class = cls + + return pipeline_class + + +@validate_hf_hub_args +def get_cached_module_file( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, +): + """ + Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached + Transformers module. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. 
+ + + + You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private or + [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + Returns: + `str`: The path to the module inside the cache. + """ + # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + + module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file) + + if os.path.isfile(module_file_or_url): + resolved_module_file = module_file_or_url + submodule = "local" + elif pretrained_model_name_or_path.count("/") == 0: + available_versions = get_diffusers_versions() + # cut ".dev0" + latest_version = "v" + ".".join(__version__.split(".")[:3]) + + # retrieve github version that matches + if revision is None: + revision = latest_version if latest_version[1:] in available_versions else "main" + logger.info(f"Defaulting to latest_version: {revision}.") + elif revision in available_versions: + revision = f"v{revision}" + elif revision == "main": + revision = revision + else: + raise ValueError( + f"`custom_revision`: {revision} does not exist. Please make sure to choose one of" + f" {', '.join(available_versions + ['main'])}." + ) + + try: + resolved_module_file = hf_hub_download( + repo_id=COMMUNITY_PIPELINES_MIRROR_ID, + repo_type="dataset", + filename=f"{revision}/{pretrained_model_name_or_path}.py", + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + ) + submodule = "git" + module_file = pretrained_model_name_or_path + ".py" + except RevisionNotFoundError as e: + raise EnvironmentError( + f"Revision '{revision}' not found in the community pipelines mirror. Check available revisions on" + " https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main." + " If you don't find the revision you are looking for, please open an issue on https://github.com/huggingface/diffusers/issues." + ) from e + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + else: + try: + # Load from URL or cache if already cached + resolved_module_file = hf_hub_download( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + ) + submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/"))) + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + + # Check we have all the requirements in our environment + modules_needed = check_imports(resolved_module_file) + + # Now we move the module inside our cached dynamic modules. + full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule + create_dynamic_module(full_submodule) + submodule_path = Path(HF_MODULES_CACHE) / full_submodule + if submodule == "local" or submodule == "git": + # We always copy local files (we could hash the file to see if there was a change, and give them the name of + # that hash, to only copy when there is a modification but it seems overkill for now). + # The only reason we do the copy is to avoid putting too many folders in sys.path. 
+ shutil.copy(resolved_module_file, submodule_path / module_file) + for module_needed in modules_needed: + if len(module_needed.split(".")) == 2: + module_needed = "/".join(module_needed.split(".")) + module_folder = module_needed.split("/")[0] + if not os.path.exists(submodule_path / module_folder): + os.makedirs(submodule_path / module_folder) + module_needed = f"{module_needed}.py" + shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed) + else: + # Get the commit hash + # TODO: we will get this info in the etag soon, so retrieve it from there and not here. + commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha + + # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the + # benefit of versioning. + submodule_path = submodule_path / commit_hash + full_submodule = full_submodule + os.path.sep + commit_hash + create_dynamic_module(full_submodule) + + if not (submodule_path / module_file).exists(): + if len(module_file.split("/")) == 2: + module_folder = module_file.split("/")[0] + if not os.path.exists(submodule_path / module_folder): + os.makedirs(submodule_path / module_folder) + shutil.copy(resolved_module_file, submodule_path / module_file) + + # Make sure we also have every file with relative + for module_needed in modules_needed: + if len(module_needed.split(".")) == 2: + module_needed = "/".join(module_needed.split(".")) + if not (submodule_path / module_needed).exists(): + get_cached_module_file( + pretrained_model_name_or_path, + f"{module_needed}.py", + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + return os.path.join(full_submodule, module_file) + + +@validate_hf_hub_args +def get_class_from_dynamic_module( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + class_name: Optional[str] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + **kwargs, +): + """ + Extracts a class from a module file, present in the local folder or repository of a model. + + + + Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should + therefore only be called on trusted repos. + + + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + class_name (`str`): + The name of the class to import in the module. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. 
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + + + + You may pass a token in `token` if you are not logged in (`huggingface-cli login`) and want to use private or + [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + Returns: + `type`: The class, dynamically imported from the module. + + Examples: + + ```python + # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel") + ```""" + # And lastly we get the class inside our newly created module + final_module = get_cached_module_file( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + return get_class_in_module(class_name, final_module.replace(".py", "")) diff --git a/diffusers3/utils/export_utils.py b/diffusers3/utils/export_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..00805433cebaa4a3898877da549f947a323b3ac7 --- /dev/null +++ b/diffusers3/utils/export_utils.py @@ -0,0 +1,184 @@ +import io +import random +import struct +import tempfile +from contextlib import contextmanager +from typing import List, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps + +from .import_utils import BACKENDS_MAPPING, is_imageio_available, is_opencv_available +from .logging import get_logger + + +global_rng = random.Random() + +logger = get_logger(__name__) + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None, fps: int = 10) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=1000 // fps, + loop=0, + ) + return output_gif_path + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
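+
+ `mesh` is expected to expose `verts` and `faces` tensors plus per-vertex colors in
+ `mesh.vertex_channels["R"/"G"/"B"]` (see the body below); positions are written as
+ little-endian float32 and colors as uint8 in binary PLY format.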
+ """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + format = struct.Struct(" str: + # TODO: Dhruv. Remove by Diffusers release 0.33.0 + # Added to prevent breaking existing code + if not is_imageio_available(): + logger.warning( + ( + "It is recommended to use `export_to_video` with `imageio` and `imageio-ffmpeg` as a backend. \n" + "These libraries are not present in your environment. Attempting to use legacy OpenCV backend to export video. \n" + "Support for the OpenCV backend will be deprecated in a future Diffusers version" + ) + ) + return _legacy_export_to_video(video_frames, output_video_path, fps) + + if is_imageio_available(): + import imageio + else: + raise ImportError(BACKENDS_MAPPING["imageio"][1].format("export_to_video")) + + try: + imageio.plugins.ffmpeg.get_exe() + except AttributeError: + raise AttributeError( + ( + "Found an existing imageio backend in your environment. Attempting to export video with imageio. \n" + "Unable to find a compatible ffmpeg installation in your environment to use with imageio. Please install via `pip install imageio-ffmpeg" + ) + ) + + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + if isinstance(video_frames[0], np.ndarray): + video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames] + + elif isinstance(video_frames[0], PIL.Image.Image): + video_frames = [np.array(frame) for frame in video_frames] + + with imageio.get_writer(output_video_path, fps=fps) as writer: + for frame in video_frames: + writer.append_data(frame) + + return output_video_path diff --git a/diffusers3/utils/hub_utils.py b/diffusers3/utils/hub_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1cdc02e873285f6cc5d86232ec640b2217c16fbf --- /dev/null +++ b/diffusers3/utils/hub_utils.py @@ -0,0 +1,603 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import os +import re +import sys +import tempfile +import traceback +import warnings +from pathlib import Path +from typing import Dict, List, Optional, Union +from uuid import uuid4 + +from huggingface_hub import ( + ModelCard, + ModelCardData, + create_repo, + hf_hub_download, + model_info, + snapshot_download, + upload_folder, +) +from huggingface_hub.constants import HF_HUB_CACHE, HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE +from huggingface_hub.file_download import REGEX_COMMIT_HASH +from huggingface_hub.utils import ( + EntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + is_jinja_available, + validate_hf_hub_args, +) +from packaging import version +from requests import HTTPError + +from .. import __version__ +from .constants import ( + DEPRECATED_REVISION_ARGS, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, +) +from .import_utils import ( + ENV_VARS_TRUE_VALUES, + _flax_version, + _jax_version, + _onnxruntime_version, + _torch_version, + is_flax_available, + is_onnx_available, + is_torch_available, +) +from .logging import get_logger + + +logger = get_logger(__name__) + +MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md" +SESSION_ID = uuid4().hex + + +def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: + """ + Formats a user-agent string with basic info about a request. + """ + ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" + if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE: + return ua + "; telemetry/off" + if is_torch_available(): + ua += f"; torch/{_torch_version}" + if is_flax_available(): + ua += f"; jax/{_jax_version}" + ua += f"; flax/{_flax_version}" + if is_onnx_available(): + ua += f"; onnxruntime/{_onnxruntime_version}" + # CI will set this value to True + if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: + ua += "; is_ci/true" + if isinstance(user_agent, dict): + ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def load_or_create_model_card( + repo_id_or_path: str = None, + token: Optional[str] = None, + is_pipeline: bool = False, + from_training: bool = False, + model_description: Optional[str] = None, + base_model: str = None, + prompt: Optional[str] = None, + license: Optional[str] = None, + widget: Optional[List[dict]] = None, + inference: Optional[bool] = None, +) -> ModelCard: + """ + Loads or creates a model card. + + Args: + repo_id_or_path (`str`): + The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card. + token (`str`, *optional*): + Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more + details. + is_pipeline (`bool`): + Boolean to indicate if we're adding tag to a [`DiffusionPipeline`]. + from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script. + model_description (`str`, *optional*): Model description to add to the model card. Helpful when using + `load_or_create_model_card` from a training script. + base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful + for DreamBooth-like training. + prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training. 
+ license: (`str`, *optional*): License of the output artifact. Helpful when using + `load_or_create_model_card` from a training script. + widget (`List[dict]`, *optional*): Widget to accompany a gallery template. + inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using + `load_or_create_model_card` from a training script. + """ + if not is_jinja_available(): + raise ValueError( + "Modelcard rendering is based on Jinja templates." + " Please make sure to have `jinja` installed before using `load_or_create_model_card`." + " To install it, please run `pip install Jinja2`." + ) + + try: + # Check if the model card is present on the remote repo + model_card = ModelCard.load(repo_id_or_path, token=token) + except (EntryNotFoundError, RepositoryNotFoundError): + # Otherwise create a model card from template + if from_training: + model_card = ModelCard.from_template( + card_data=ModelCardData( # Card metadata object that will be converted to YAML block + license=license, + library_name="diffusers", + inference=inference, + base_model=base_model, + instance_prompt=prompt, + widget=widget, + ), + template_path=MODEL_CARD_TEMPLATE_PATH, + model_description=model_description, + ) + else: + card_data = ModelCardData() + component = "pipeline" if is_pipeline else "model" + if model_description is None: + model_description = f"This is the model card of a ๐Ÿงจ diffusers {component} that has been pushed on the Hub. This model card has been automatically generated." + model_card = ModelCard.from_template(card_data, model_description=model_description) + + return model_card + + +def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard: + """Populates the `model_card` with library name and optional tags.""" + if model_card.data.library_name is None: + model_card.data.library_name = "diffusers" + + if tags is not None: + if isinstance(tags, str): + tags = [tags] + if model_card.data.tags is None: + model_card.data.tags = [] + for tag in tags: + model_card.data.tags.append(tag) + + return model_card + + +def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): + """ + Extracts the commit hash from a resolved filename toward a cache file. + """ + if resolved_file is None or commit_hash is not None: + return commit_hash + resolved_file = str(Path(resolved_file).as_posix()) + search = re.search(r"snapshots/([^/]+)/", resolved_file) + if search is None: + return None + commit_hash = search.groups()[0] + return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None + + +# Old default cache path, potentially to be migrated. +# This logic was more or less taken from `transformers`, with the following differences: +# - Diffusers doesn't use custom environment variables to specify the cache path. +# - There is no need to migrate the cache format, just move the files to the new location. 
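+# With no environment overrides, `hf_cache_home` below resolves to ~/.cache/huggingface, so the
+# legacy cache considered for migration is ~/.cache/huggingface/diffusers.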
+hf_cache_home = os.path.expanduser( + os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) +) +old_diffusers_cache = os.path.join(hf_cache_home, "diffusers") + + +def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None: + if new_cache_dir is None: + new_cache_dir = HF_HUB_CACHE + if old_cache_dir is None: + old_cache_dir = old_diffusers_cache + + old_cache_dir = Path(old_cache_dir).expanduser() + new_cache_dir = Path(new_cache_dir).expanduser() + for old_blob_path in old_cache_dir.glob("**/blobs/*"): + if old_blob_path.is_file() and not old_blob_path.is_symlink(): + new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir) + new_blob_path.parent.mkdir(parents=True, exist_ok=True) + os.replace(old_blob_path, new_blob_path) + try: + os.symlink(new_blob_path, old_blob_path) + except OSError: + logger.warning( + "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." + ) + # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). + + +cache_version_file = os.path.join(HF_HUB_CACHE, "version_diffusers_cache.txt") +if not os.path.isfile(cache_version_file): + cache_version = 0 +else: + with open(cache_version_file) as f: + try: + cache_version = int(f.read()) + except ValueError: + cache_version = 0 + +if cache_version < 1: + old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 + if old_cache_is_not_empty: + logger.warning( + "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " + "existing cached models. This is a one-time operation, you can interrupt it or run it " + "later by calling `diffusers.utils.hub_utils.move_cache()`." + ) + try: + move_cache() + except Exception as e: + trace = "\n".join(traceback.format_tb(e.__traceback__)) + logger.error( + f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " + "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " + "message and we will do our best to help." + ) + +if cache_version < 1: + try: + os.makedirs(HF_HUB_CACHE, exist_ok=True) + with open(cache_version_file, "w") as f: + f.write("1") + except Exception: + logger.warning( + f"There was a problem when trying to write in your cache folder ({HF_HUB_CACHE}). Please, ensure " + "the directory exists and can be written to." 
+ ) + + +def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: + if variant is not None: + splits = weights_name.split(".") + split_index = -2 if weights_name.endswith(".index.json") else -1 + splits = splits[:-split_index] + [variant] + splits[-split_index:] + weights_name = ".".join(splits) + + return weights_name + + +@validate_hf_hub_args +def _get_model_file( + pretrained_model_name_or_path: Union[str, Path], + *, + weights_name: str, + subfolder: Optional[str] = None, + cache_dir: Optional[str] = None, + force_download: bool = False, + proxies: Optional[Dict] = None, + local_files_only: bool = False, + token: Optional[str] = None, + user_agent: Optional[Union[Dict, str]] = None, + revision: Optional[str] = None, + commit_hash: Optional[str] = None, +): + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + if os.path.isfile(pretrained_model_name_or_path): + return pretrained_model_name_or_path + elif os.path.isdir(pretrained_model_name_or_path): + if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): + # Load from a PyTorch checkpoint + model_file = os.path.join(pretrained_model_name_or_path, weights_name) + return model_file + elif subfolder is not None and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, weights_name) + ): + model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) + return model_file + else: + raise EnvironmentError( + f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." + ) + else: + # 1. First check if deprecated way of loading from branches is used + if ( + revision in DEPRECATED_REVISION_ARGS + and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) + and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0") + ): + try: + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=_add_variant(weights_name, revision), + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision or commit_hash, + ) + warnings.warn( + f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", + FutureWarning, + ) + return model_file + except: # noqa: E722 + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", + FutureWarning, + ) + try: + # 2. 
Load model file as usual + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=weights_name, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision or commit_hash, + ) + return model_file + + except RepositoryNotFoundError as e: + raise EnvironmentError( + f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " + "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " + "token having permission to this repo with `token` or log in with `huggingface-cli " + "login`." + ) from e + except RevisionNotFoundError as e: + raise EnvironmentError( + f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " + "this model name. Check the model page at " + f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." + ) from e + except EntryNotFoundError as e: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." + ) from e + except HTTPError as e: + raise EnvironmentError( + f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{e}" + ) from e + except ValueError as e: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" + f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" + f" directory containing a file named {weights_name} or" + " \nCheckout your internet connection or see how to run the library in" + " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." + ) from e + except EnvironmentError as e: + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing a file named {weights_name}" + ) from e + + +# Adapted from +# https://github.com/huggingface/transformers/blob/1360801a69c0b169e3efdbb0cd05d9a0e72bfb70/src/transformers/utils/hub.py#L976 +# Differences are in parallelization of shard downloads and checking if shards are present. + + +def _check_if_shards_exist_locally(local_dir, subfolder, original_shard_filenames): + shards_path = os.path.join(local_dir, subfolder) + shard_filenames = [os.path.join(shards_path, f) for f in original_shard_filenames] + for shard_file in shard_filenames: + if not os.path.exists(shard_file): + raise ValueError( + f"{shards_path} does not appear to have a file named {shard_file} which is " + "required according to the checkpoint index." + ) + + +def _get_checkpoint_shard_files( + pretrained_model_name_or_path, + index_filename, + cache_dir=None, + proxies=None, + local_files_only=False, + token=None, + user_agent=None, + revision=None, + subfolder="", +): + """ + For a given model: + + - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the + Hub + - returns the list of paths to all the shards, as well as some metadata. + + For the description of each arg, see [`PreTrainedModel.from_pretrained`]. 
`index_filename` is the full path to the + index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). + """ + if not os.path.isfile(index_filename): + raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") + + with open(index_filename, "r") as f: + index = json.loads(f.read()) + + original_shard_filenames = sorted(set(index["weight_map"].values())) + sharded_metadata = index["metadata"] + sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) + sharded_metadata["weight_map"] = index["weight_map"].copy() + shards_path = os.path.join(pretrained_model_name_or_path, subfolder) + + # First, let's deal with local folder. + if os.path.isdir(pretrained_model_name_or_path): + _check_if_shards_exist_locally( + pretrained_model_name_or_path, subfolder=subfolder, original_shard_filenames=original_shard_filenames + ) + return shards_path, sharded_metadata + + # At this stage pretrained_model_name_or_path is a model identifier on the Hub + allow_patterns = original_shard_filenames + if subfolder is not None: + allow_patterns = [os.path.join(subfolder, p) for p in allow_patterns] + + ignore_patterns = ["*.json", "*.md"] + if not local_files_only: + # `model_info` call must guarded with the above condition. + model_files_info = model_info(pretrained_model_name_or_path, revision=revision) + for shard_file in original_shard_filenames: + shard_file_present = any(shard_file in k.rfilename for k in model_files_info.siblings) + if not shard_file_present: + raise EnvironmentError( + f"{shards_path} does not appear to have a file named {shard_file} which is " + "required according to the checkpoint index." + ) + + try: + # Load from URL + cached_folder = snapshot_download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, + ) + if subfolder is not None: + cached_folder = os.path.join(cached_folder, subfolder) + + # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so + # we don't have to catch them here. We have also dealt with EntryNotFoundError. + except HTTPError as e: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {pretrained_model_name_or_path}. You should try" + " again after checking your internet connection." + ) from e + + # If `local_files_only=True`, `cached_folder` may not contain all the shard files. + elif local_files_only: + _check_if_shards_exist_locally( + local_dir=cache_dir, subfolder=subfolder, original_shard_filenames=original_shard_filenames + ) + if subfolder is not None: + cached_folder = os.path.join(cached_folder, subfolder) + + return cached_folder, sharded_metadata + + +class PushToHubMixin: + """ + A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub. + """ + + def _upload_folder( + self, + working_dir: Union[str, os.PathLike], + repo_id: str, + token: Optional[str] = None, + commit_message: Optional[str] = None, + create_pr: bool = False, + ): + """ + Uploads all files in `working_dir` to `repo_id`. 
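+
+ If `commit_message` is not given, one is derived from the class name ("Upload model",
+ "Upload scheduler", or "Upload <ClassName>"), as implemented below.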
+ """ + if commit_message is None: + if "Model" in self.__class__.__name__: + commit_message = "Upload model" + elif "Scheduler" in self.__class__.__name__: + commit_message = "Upload scheduler" + else: + commit_message = f"Upload {self.__class__.__name__}" + + logger.info(f"Uploading the files of {working_dir} to {repo_id}.") + return upload_folder( + repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr + ) + + def push_to_hub( + self, + repo_id: str, + commit_message: Optional[str] = None, + private: Optional[bool] = None, + token: Optional[str] = None, + create_pr: bool = False, + safe_serialization: bool = True, + variant: Optional[str] = None, + ) -> str: + """ + Upload model, scheduler, or pipeline files to the ๐Ÿค— Hugging Face Hub. + + Parameters: + repo_id (`str`): + The name of the repository you want to push your model, scheduler, or pipeline files to. It should + contain your organization name when pushing to an organization. `repo_id` can also be a path to a local + directory. + commit_message (`str`, *optional*): + Message to commit while pushing. Default to `"Upload {object}"`. + private (`bool`, *optional*): + Whether or not the repository created should be private. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. The token generated when running + `huggingface-cli login` (stored in `~/.huggingface`). + create_pr (`bool`, *optional*, defaults to `False`): + Whether or not to create a PR with the uploaded files or directly commit. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether or not to convert the model weights to the `safetensors` format. + variant (`str`, *optional*): + If specified, weights are saved in the format `pytorch_model..bin`. + + Examples: + + ```python + from diffusers import UNet2DConditionModel + + unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet") + + # Push the `unet` to your namespace with the name "my-finetuned-unet". + unet.push_to_hub("my-finetuned-unet") + + # Push the `unet` to an organization with the name "my-finetuned-unet". + unet.push_to_hub("your-org/my-finetuned-unet") + ``` + """ + repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id + + # Create a new empty model card and eventually tag it + model_card = load_or_create_model_card(repo_id, token=token) + model_card = populate_model_card(model_card) + + # Save all files. + save_kwargs = {"safe_serialization": safe_serialization} + if "Scheduler" not in self.__class__.__name__: + save_kwargs.update({"variant": variant}) + + with tempfile.TemporaryDirectory() as tmpdir: + self.save_pretrained(tmpdir, **save_kwargs) + + # Update model card if needed: + model_card.save(os.path.join(tmpdir, "README.md")) + + return self._upload_folder( + tmpdir, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + ) diff --git a/diffusers3/utils/import_utils.py b/diffusers3/utils/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..34cc5fcc8605ffe5d5e33995f0952e263552ebbb --- /dev/null +++ b/diffusers3/utils/import_utils.py @@ -0,0 +1,838 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Import utilities: Utilities related to imports and our lazy inits. +""" + +import importlib.util +import operator as op +import os +import sys +from collections import OrderedDict +from itertools import chain +from types import ModuleType +from typing import Any, Union + +from huggingface_hub.utils import is_jinja_available # noqa: F401 +from packaging import version +from packaging.version import Version, parse + +from . import logging + + +# The package importlib_metadata is in a different place, depending on the python version. +if sys.version_info < (3, 8): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() +USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper() +DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper() +DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES + +STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} + +_torch_version = "N/A" +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + _torch_available = importlib.util.find_spec("torch") is not None + if _torch_available: + try: + _torch_version = importlib_metadata.version("torch") + logger.info(f"PyTorch version {_torch_version} available.") + except importlib_metadata.PackageNotFoundError: + _torch_available = False +else: + logger.info("Disabling PyTorch because USE_TORCH is set") + _torch_available = False + +_torch_xla_available = importlib.util.find_spec("torch_xla") is not None +if _torch_xla_available: + try: + _torch_xla_version = importlib_metadata.version("torch_xla") + logger.info(f"PyTorch XLA version {_torch_xla_version} available.") + except ImportError: + _torch_xla_available = False + +# check whether torch_npu is available +_torch_npu_available = importlib.util.find_spec("torch_npu") is not None +if _torch_npu_available: + try: + _torch_npu_version = importlib_metadata.version("torch_npu") + logger.info(f"torch_npu version {_torch_npu_version} available.") + except ImportError: + _torch_npu_available = False + +_jax_version = "N/A" +_flax_version = "N/A" +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None + if _flax_available: + try: + _jax_version = importlib_metadata.version("jax") + _flax_version = importlib_metadata.version("flax") + logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") + except importlib_metadata.PackageNotFoundError: + _flax_available = False +else: + _flax_available = False + +if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES: + _safetensors_available = importlib.util.find_spec("safetensors") is not 
None + if _safetensors_available: + try: + _safetensors_version = importlib_metadata.version("safetensors") + logger.info(f"Safetensors version {_safetensors_version} available.") + except importlib_metadata.PackageNotFoundError: + _safetensors_available = False +else: + logger.info("Disabling Safetensors because USE_TF is set") + _safetensors_available = False + +_transformers_available = importlib.util.find_spec("transformers") is not None +try: + _transformers_version = importlib_metadata.version("transformers") + logger.debug(f"Successfully imported transformers version {_transformers_version}") +except importlib_metadata.PackageNotFoundError: + _transformers_available = False + + +_inflect_available = importlib.util.find_spec("inflect") is not None +try: + _inflect_version = importlib_metadata.version("inflect") + logger.debug(f"Successfully imported inflect version {_inflect_version}") +except importlib_metadata.PackageNotFoundError: + _inflect_available = False + + +_unidecode_available = importlib.util.find_spec("unidecode") is not None +try: + _unidecode_version = importlib_metadata.version("unidecode") + logger.debug(f"Successfully imported unidecode version {_unidecode_version}") +except importlib_metadata.PackageNotFoundError: + _unidecode_available = False + +_onnxruntime_version = "N/A" +_onnx_available = importlib.util.find_spec("onnxruntime") is not None +if _onnx_available: + candidates = ( + "onnxruntime", + "onnxruntime-gpu", + "ort_nightly_gpu", + "onnxruntime-directml", + "onnxruntime-openvino", + "ort_nightly_directml", + "onnxruntime-rocm", + "onnxruntime-training", + ) + _onnxruntime_version = None + # For the metadata, we have to look for both onnxruntime and onnxruntime-gpu + for pkg in candidates: + try: + _onnxruntime_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _onnx_available = _onnxruntime_version is not None + if _onnx_available: + logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}") + +# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed. 
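+# The importable module is `cv2`, so `find_spec` on the distribution name cannot see it; the block
+# below instead asks importlib metadata for each known OpenCV distribution name.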
+# _opencv_available = importlib.util.find_spec("opencv-python") is not None +try: + candidates = ( + "opencv-python", + "opencv-contrib-python", + "opencv-python-headless", + "opencv-contrib-python-headless", + ) + _opencv_version = None + for pkg in candidates: + try: + _opencv_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _opencv_available = _opencv_version is not None + if _opencv_available: + logger.debug(f"Successfully imported cv2 version {_opencv_version}") +except importlib_metadata.PackageNotFoundError: + _opencv_available = False + +_scipy_available = importlib.util.find_spec("scipy") is not None +try: + _scipy_version = importlib_metadata.version("scipy") + logger.debug(f"Successfully imported scipy version {_scipy_version}") +except importlib_metadata.PackageNotFoundError: + _scipy_available = False + +_librosa_available = importlib.util.find_spec("librosa") is not None +try: + _librosa_version = importlib_metadata.version("librosa") + logger.debug(f"Successfully imported librosa version {_librosa_version}") +except importlib_metadata.PackageNotFoundError: + _librosa_available = False + +_accelerate_available = importlib.util.find_spec("accelerate") is not None +try: + _accelerate_version = importlib_metadata.version("accelerate") + logger.debug(f"Successfully imported accelerate version {_accelerate_version}") +except importlib_metadata.PackageNotFoundError: + _accelerate_available = False + +_xformers_available = importlib.util.find_spec("xformers") is not None +try: + _xformers_version = importlib_metadata.version("xformers") + if _torch_available: + _torch_version = importlib_metadata.version("torch") + if version.Version(_torch_version) < version.Version("1.12"): + raise ValueError("xformers is installed in your environment and requires PyTorch >= 1.12") + + logger.debug(f"Successfully imported xformers version {_xformers_version}") +except importlib_metadata.PackageNotFoundError: + _xformers_available = False + +_k_diffusion_available = importlib.util.find_spec("k_diffusion") is not None +try: + _k_diffusion_version = importlib_metadata.version("k_diffusion") + logger.debug(f"Successfully imported k-diffusion version {_k_diffusion_version}") +except importlib_metadata.PackageNotFoundError: + _k_diffusion_available = False + +_note_seq_available = importlib.util.find_spec("note_seq") is not None +try: + _note_seq_version = importlib_metadata.version("note_seq") + logger.debug(f"Successfully imported note-seq version {_note_seq_version}") +except importlib_metadata.PackageNotFoundError: + _note_seq_available = False + +_wandb_available = importlib.util.find_spec("wandb") is not None +try: + _wandb_version = importlib_metadata.version("wandb") + logger.debug(f"Successfully imported wandb version {_wandb_version }") +except importlib_metadata.PackageNotFoundError: + _wandb_available = False + + +_tensorboard_available = importlib.util.find_spec("tensorboard") +try: + _tensorboard_version = importlib_metadata.version("tensorboard") + logger.debug(f"Successfully imported tensorboard version {_tensorboard_version}") +except importlib_metadata.PackageNotFoundError: + _tensorboard_available = False + + +_compel_available = importlib.util.find_spec("compel") +try: + _compel_version = importlib_metadata.version("compel") + logger.debug(f"Successfully imported compel version {_compel_version}") +except importlib_metadata.PackageNotFoundError: + _compel_available = False + + +_ftfy_available = 
importlib.util.find_spec("ftfy") is not None +try: + _ftfy_version = importlib_metadata.version("ftfy") + logger.debug(f"Successfully imported ftfy version {_ftfy_version}") +except importlib_metadata.PackageNotFoundError: + _ftfy_available = False + + +_bs4_available = importlib.util.find_spec("bs4") is not None +try: + # importlib metadata under different name + _bs4_version = importlib_metadata.version("beautifulsoup4") + logger.debug(f"Successfully imported ftfy version {_bs4_version}") +except importlib_metadata.PackageNotFoundError: + _bs4_available = False + +_torchsde_available = importlib.util.find_spec("torchsde") is not None +try: + _torchsde_version = importlib_metadata.version("torchsde") + logger.debug(f"Successfully imported torchsde version {_torchsde_version}") +except importlib_metadata.PackageNotFoundError: + _torchsde_available = False + +_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None +try: + _invisible_watermark_version = importlib_metadata.version("invisible-watermark") + logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}") +except importlib_metadata.PackageNotFoundError: + _invisible_watermark_available = False + + +_peft_available = importlib.util.find_spec("peft") is not None +try: + _peft_version = importlib_metadata.version("peft") + logger.debug(f"Successfully imported peft version {_peft_version}") +except importlib_metadata.PackageNotFoundError: + _peft_available = False + +_torchvision_available = importlib.util.find_spec("torchvision") is not None +try: + _torchvision_version = importlib_metadata.version("torchvision") + logger.debug(f"Successfully imported torchvision version {_torchvision_version}") +except importlib_metadata.PackageNotFoundError: + _torchvision_available = False + +_sentencepiece_available = importlib.util.find_spec("sentencepiece") is not None +try: + _sentencepiece_version = importlib_metadata.version("sentencepiece") + logger.info(f"Successfully imported sentencepiece version {_sentencepiece_version}") +except importlib_metadata.PackageNotFoundError: + _sentencepiece_available = False + +_matplotlib_available = importlib.util.find_spec("matplotlib") is not None +try: + _matplotlib_version = importlib_metadata.version("matplotlib") + logger.debug(f"Successfully imported matplotlib version {_matplotlib_version}") +except importlib_metadata.PackageNotFoundError: + _matplotlib_available = False + +_timm_available = importlib.util.find_spec("timm") is not None +if _timm_available: + try: + _timm_version = importlib_metadata.version("timm") + logger.info(f"Timm version {_timm_version} available.") + except importlib_metadata.PackageNotFoundError: + _timm_available = False + + +def is_timm_available(): + return _timm_available + + +_bitsandbytes_available = importlib.util.find_spec("bitsandbytes") is not None +try: + _bitsandbytes_version = importlib_metadata.version("bitsandbytes") + logger.debug(f"Successfully imported bitsandbytes version {_bitsandbytes_version}") +except importlib_metadata.PackageNotFoundError: + _bitsandbytes_available = False + +_is_google_colab = "google.colab" in sys.modules or any(k.startswith("COLAB_") for k in os.environ) + +_imageio_available = importlib.util.find_spec("imageio") is not None +if _imageio_available: + try: + _imageio_version = importlib_metadata.version("imageio") + logger.debug(f"Successfully imported imageio version {_imageio_version}") + + except importlib_metadata.PackageNotFoundError: + _imageio_available = 
False + + +def is_torch_available(): + return _torch_available + + +def is_torch_xla_available(): + return _torch_xla_available + + +def is_torch_npu_available(): + return _torch_npu_available + + +def is_flax_available(): + return _flax_available + + +def is_transformers_available(): + return _transformers_available + + +def is_inflect_available(): + return _inflect_available + + +def is_unidecode_available(): + return _unidecode_available + + +def is_onnx_available(): + return _onnx_available + + +def is_opencv_available(): + return _opencv_available + + +def is_scipy_available(): + return _scipy_available + + +def is_librosa_available(): + return _librosa_available + + +def is_xformers_available(): + return _xformers_available + + +def is_accelerate_available(): + return _accelerate_available + + +def is_k_diffusion_available(): + return _k_diffusion_available + + +def is_note_seq_available(): + return _note_seq_available + + +def is_wandb_available(): + return _wandb_available + + +def is_tensorboard_available(): + return _tensorboard_available + + +def is_compel_available(): + return _compel_available + + +def is_ftfy_available(): + return _ftfy_available + + +def is_bs4_available(): + return _bs4_available + + +def is_torchsde_available(): + return _torchsde_available + + +def is_invisible_watermark_available(): + return _invisible_watermark_available + + +def is_peft_available(): + return _peft_available + + +def is_torchvision_available(): + return _torchvision_available + + +def is_matplotlib_available(): + return _matplotlib_available + + +def is_safetensors_available(): + return _safetensors_available + + +def is_bitsandbytes_available(): + return _bitsandbytes_available + + +def is_google_colab(): + return _is_google_colab + + +def is_sentencepiece_available(): + return _sentencepiece_available + + +def is_imageio_available(): + return _imageio_available + + +# docstyle-ignore +FLAX_IMPORT_ERROR = """ +{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the +installation page: https://github.com/google/flax and follow the ones that match your environment. +""" + +# docstyle-ignore +INFLECT_IMPORT_ERROR = """ +{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install +inflect` +""" + +# docstyle-ignore +PYTORCH_IMPORT_ERROR = """ +{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the +installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. +""" + +# docstyle-ignore +ONNX_IMPORT_ERROR = """ +{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip +install onnxruntime` +""" + +# docstyle-ignore +OPENCV_IMPORT_ERROR = """ +{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip +install opencv-python` +""" + +# docstyle-ignore +SCIPY_IMPORT_ERROR = """ +{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install +scipy` +""" + +# docstyle-ignore +LIBROSA_IMPORT_ERROR = """ +{0} requires the librosa library but it was not found in your environment. Checkout the instructions on the +installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment. 
+""" + +# docstyle-ignore +TRANSFORMERS_IMPORT_ERROR = """ +{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip +install transformers` +""" + +# docstyle-ignore +UNIDECODE_IMPORT_ERROR = """ +{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install +Unidecode` +""" + +# docstyle-ignore +K_DIFFUSION_IMPORT_ERROR = """ +{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip +install k-diffusion` +""" + +# docstyle-ignore +NOTE_SEQ_IMPORT_ERROR = """ +{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip +install note-seq` +""" + +# docstyle-ignore +WANDB_IMPORT_ERROR = """ +{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip +install wandb` +""" + +# docstyle-ignore +TENSORBOARD_IMPORT_ERROR = """ +{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip +install tensorboard` +""" + + +# docstyle-ignore +COMPEL_IMPORT_ERROR = """ +{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel` +""" + +# docstyle-ignore +BS4_IMPORT_ERROR = """ +{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: +`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +FTFY_IMPORT_ERROR = """ +{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the +installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +TORCHSDE_IMPORT_ERROR = """ +{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde` +""" + +# docstyle-ignore +INVISIBLE_WATERMARK_IMPORT_ERROR = """ +{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0` +""" + +# docstyle-ignore +PEFT_IMPORT_ERROR = """ +{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft` +""" + +# docstyle-ignore +SAFETENSORS_IMPORT_ERROR = """ +{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors` +""" + +# docstyle-ignore +SENTENCEPIECE_IMPORT_ERROR = """ +{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece` +""" + + +# docstyle-ignore +BITSANDBYTES_IMPORT_ERROR = """ +{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes` +""" + +# docstyle-ignore +IMAGEIO_IMPORT_ERROR = """ +{0} requires the imageio library and ffmpeg but it was not found in your environment. 
You can install it with pip: `pip install imageio imageio-ffmpeg` +""" + +BACKENDS_MAPPING = OrderedDict( + [ + ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), + ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), + ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), + ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), + ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)), + ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), + ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), + ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), + ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), + ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), + ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)), + ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)), + ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)), + ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)), + ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)), + ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), + ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)), + ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)), + ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), + ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)), + ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), + ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)), + ] +) + + +def requires_backends(obj, backends): + if not isinstance(backends, (list, tuple)): + backends = [backends] + + name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ + checks = (BACKENDS_MAPPING[backend] for backend in backends) + failed = [msg.format(name) for available, msg in checks if not available()] + if failed: + raise ImportError("".join(failed)) + + if name in [ + "VersatileDiffusionTextToImagePipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionDualGuidedPipeline", + "StableDiffusionImageVariationPipeline", + "UnCLIPPipeline", + ] and is_transformers_version("<", "4.25.0"): + raise ImportError( + f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install" + " --upgrade transformers \n```" + ) + + if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version( + "<", "4.26.0" + ): + raise ImportError( + f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install" + " --upgrade transformers \n```" + ) + + +class DummyObject(type): + """ + Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by + `requires_backend` each time a user tries to access any method of that class. + """ + + def __getattr__(cls, key): + if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]: + return super().__getattr__(cls, key) + requires_backends(cls, cls._backends) + + +# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319 +def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): + """ + Args: + Compares a library version to some requirement using a given operation. + library_or_version (`str` or `packaging.version.Version`): + A library name or a version to check. 
+ operation (`str`): + A string representation of an operator, such as `">"` or `"<="`. + requirement_version (`str`): + The version to compare the library version against + """ + if operation not in STR_OPERATION_TO_FUNC.keys(): + raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") + operation = STR_OPERATION_TO_FUNC[operation] + if isinstance(library_or_version, str): + library_or_version = parse(importlib_metadata.version(library_or_version)) + return operation(library_or_version, parse(requirement_version)) + + +# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338 +def is_torch_version(operation: str, version: str): + """ + Args: + Compares the current PyTorch version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(parse(_torch_version), operation, version) + + +def is_transformers_version(operation: str, version: str): + """ + Args: + Compares the current Transformers version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _transformers_available: + return False + return compare_versions(parse(_transformers_version), operation, version) + + +def is_accelerate_version(operation: str, version: str): + """ + Args: + Compares the current Accelerate version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _accelerate_available: + return False + return compare_versions(parse(_accelerate_version), operation, version) + + +def is_peft_version(operation: str, version: str): + """ + Args: + Compares the current PEFT version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _peft_version: + return False + return compare_versions(parse(_peft_version), operation, version) + + +def is_k_diffusion_version(operation: str, version: str): + """ + Args: + Compares the current k-diffusion version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _k_diffusion_available: + return False + return compare_versions(parse(_k_diffusion_version), operation, version) + + +def get_objects_from_module(module): + """ + Args: + Returns a dict of object names and values in a module, while skipping private/internal objects + module (ModuleType): + Module to extract the objects from. + + Returns: + dict: Dictionary of object names and corresponding values + """ + + objects = {} + for name in dir(module): + if name.startswith("_"): + continue + objects[name] = getattr(module, name) + + return objects + + +class OptionalDependencyNotAvailable(BaseException): + """An error indicating that an optional dependency of Diffusers was not found in the environment.""" + + +class _LazyModule(ModuleType): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested. 
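+
+ `import_structure` maps each submodule name to the public names it defines; the first attribute
+ access triggers the real import in `__getattr__`, and the resolved value is cached on the module
+ via `setattr`, so subsequent lookups are plain attribute reads.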
+ """ + + # Very heavily inspired by optuna.integration._IntegrationModule + # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py + def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): + super().__init__(name) + self._modules = set(import_structure.keys()) + self._class_to_module = {} + for key, values in import_structure.items(): + for value in values: + self._class_to_module[value] = key + # Needed for autocompletion in an IDE + self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = import_structure + + # Needed for autocompletion in an IDE + def __dir__(self): + result = super().__dir__() + # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether + # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. + for attr in self.__all__: + if attr not in result: + result.append(attr) + return result + + def __getattr__(self, name: str) -> Any: + if name in self._objects: + return self._objects[name] + if name in self._modules: + value = self._get_module(name) + elif name in self._class_to_module.keys(): + module = self._get_module(self._class_to_module[name]) + value = getattr(module, name) + else: + raise AttributeError(f"module {self.__name__} has no attribute {name}") + + setattr(self, name, value) + return value + + def _get_module(self, module_name: str): + try: + return importlib.import_module("." + module_name, self.__name__) + except Exception as e: + raise RuntimeError( + f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" + f" traceback):\n{e}" + ) from e + + def __reduce__(self): + return (self.__class__, (self._name, self.__file__, self._import_structure)) diff --git a/diffusers3/utils/loading_utils.py b/diffusers3/utils/loading_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b36664cb81ff771c284bc3919e054bd684dc342b --- /dev/null +++ b/diffusers3/utils/loading_utils.py @@ -0,0 +1,137 @@ +import os +import tempfile +from typing import Callable, List, Optional, Union +from urllib.parse import unquote, urlparse + +import PIL.Image +import PIL.ImageOps +import requests + +from .import_utils import BACKENDS_MAPPING, is_imageio_available + + +def load_image( + image: Union[str, PIL.Image.Image], convert_method: Optional[Callable[[PIL.Image.Image], PIL.Image.Image]] = None +) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + convert_method (Callable[[PIL.Image.Image], PIL.Image.Image], *optional*): + A conversion method to apply to the image after loading it. When set to `None` the image will be converted + "RGB". + + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {image} is not a valid path." 
+ ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for the image. Should be a URL linking to an image, a local path, or a PIL image." + ) + + image = PIL.ImageOps.exif_transpose(image) + + if convert_method is not None: + image = convert_method(image) + else: + image = image.convert("RGB") + + return image + + +def load_video( + video: str, + convert_method: Optional[Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]]] = None, +) -> List[PIL.Image.Image]: + """ + Loads `video` to a list of PIL Image. + + Args: + video (`str`): + A URL or Path to a video to convert to a list of PIL Image format. + convert_method (Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]], *optional*): + A conversion method to apply to the video after loading it. When set to `None` the images will be converted + to "RGB". + + Returns: + `List[PIL.Image.Image]`: + The video as a list of PIL images. + """ + is_url = video.startswith("http://") or video.startswith("https://") + is_file = os.path.isfile(video) + was_tempfile_created = False + + if not (is_url or is_file): + raise ValueError( + f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {video} is not a valid path." + ) + + if is_url: + response = requests.get(video, stream=True) + if response.status_code != 200: + raise ValueError(f"Failed to download video. Status code: {response.status_code}") + + parsed_url = urlparse(video) + file_name = os.path.basename(unquote(parsed_url.path)) + + suffix = os.path.splitext(file_name)[1] or ".mp4" + video_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name + + was_tempfile_created = True + + video_data = response.iter_content(chunk_size=8192) + with open(video_path, "wb") as f: + for chunk in video_data: + f.write(chunk) + + video = video_path + + pil_images = [] + if video.endswith(".gif"): + gif = PIL.Image.open(video) + try: + while True: + pil_images.append(gif.copy()) + gif.seek(gif.tell() + 1) + except EOFError: + pass + + else: + if is_imageio_available(): + import imageio + else: + raise ImportError(BACKENDS_MAPPING["imageio"][1].format("load_video")) + + try: + imageio.plugins.ffmpeg.get_exe() + except AttributeError: + raise AttributeError( + "`Unable to find an ffmpeg installation on your machine. Please install via `pip install imageio-ffmpeg" + ) + + with imageio.get_reader(video) as reader: + # Read all frames + for frame in reader: + pil_images.append(PIL.Image.fromarray(frame)) + + if was_tempfile_created: + os.remove(video_path) + + if convert_method is not None: + pil_images = convert_method(pil_images) + + return pil_images diff --git a/diffusers3/utils/logging.py b/diffusers3/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..6f93450c410c9325aa9d0cf10262b67e4c355fda --- /dev/null +++ b/diffusers3/utils/logging.py @@ -0,0 +1,341 @@ +# coding=utf-8 +# Copyright 2024 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
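For quick reference, a hedged usage sketch of the `load_image` / `load_video` helpers added above; the URL and file names are placeholders, and `diffusers3` is assumed to be importable from the repo root (this is not part of the vendored files themselves).

```python
# Illustrative only: exercise the loaders from diffusers3/utils/loading_utils.py.
from diffusers3.utils.loading_utils import load_image, load_video

img = load_image("https://example.com/person.png")                 # RGB PIL.Image (URL is a placeholder)
depth = load_image("inputs/depth.png", convert_method=lambda im: im.convert("L"))  # custom conversion
frames = load_video("inputs/clip.mp4")                              # list of PIL.Image frames
print(img.size, depth.mode, len(frames))
```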
+"""Logging utilities.""" + +import logging +import os +import sys +import threading +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Dict, Optional + +from tqdm import auto as tqdm_lib + + +_lock = threading.Lock() +_default_handler: Optional[logging.Handler] = None + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + +_tqdm_active = True + + +def _get_default_logging_level() -> int: + """ + If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is + not - fall back to `_default_log_level` + """ + env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, " + f"has to be one of: { ', '.join(log_levels.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + # This library has already configured the library root logger. + return + _default_handler = logging.StreamHandler() # Set sys.stderr as stream. + + if sys.stderr: # only if sys.stderr exists, e.g. when not using pythonw in windows + _default_handler.flush = sys.stderr.flush + + # Apply our default configuration to the library root logger. + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(_default_handler) + library_root_logger.setLevel(_get_default_logging_level()) + library_root_logger.propagate = False + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + + library_root_logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_log_levels_dict() -> Dict[str, int]: + return log_levels + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Return a logger with the specified name. + + This function is not supposed to be directly accessed unless you are writing a custom diffusers module. + """ + + if name is None: + name = _get_library_name() + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """ + Return the current level for the ๐Ÿค— Diffusers' root logger as an `int`. + + Returns: + `int`: + Logging level integers which can be one of: + + - `50`: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` + - `40`: `diffusers.logging.ERROR` + - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN` + - `20`: `diffusers.logging.INFO` + - `10`: `diffusers.logging.DEBUG` + + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Set the verbosity level for the ๐Ÿค— Diffusers' root logger. 
+ + Args: + verbosity (`int`): + Logging level which can be one of: + + - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` + - `diffusers.logging.ERROR` + - `diffusers.logging.WARNING` or `diffusers.logging.WARN` + - `diffusers.logging.INFO` + - `diffusers.logging.DEBUG` + """ + + _configure_library_root_logger() + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info() -> None: + """Set the verbosity to the `INFO` level.""" + return set_verbosity(INFO) + + +def set_verbosity_warning() -> None: + """Set the verbosity to the `WARNING` level.""" + return set_verbosity(WARNING) + + +def set_verbosity_debug() -> None: + """Set the verbosity to the `DEBUG` level.""" + return set_verbosity(DEBUG) + + +def set_verbosity_error() -> None: + """Set the verbosity to the `ERROR` level.""" + return set_verbosity(ERROR) + + +def disable_default_handler() -> None: + """Disable the default handler of the ๐Ÿค— Diffusers' root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().removeHandler(_default_handler) + + +def enable_default_handler() -> None: + """Enable the default handler of the ๐Ÿค— Diffusers' root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().addHandler(_default_handler) + + +def add_handler(handler: logging.Handler) -> None: + """adds a handler to the HuggingFace Diffusers' root logger.""" + + _configure_library_root_logger() + + assert handler is not None + _get_library_root_logger().addHandler(handler) + + +def remove_handler(handler: logging.Handler) -> None: + """removes given handler from the HuggingFace Diffusers' root logger.""" + + _configure_library_root_logger() + + assert handler is not None and handler in _get_library_root_logger().handlers + _get_library_root_logger().removeHandler(handler) + + +def disable_propagation() -> None: + """ + Disable propagation of the library log outputs. Note that log propagation is disabled by default. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """ + Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent + double logging if the root logger has been configured. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = True + + +def enable_explicit_format() -> None: + """ + Enable explicit formatting for every ๐Ÿค— Diffusers' logger. The explicit formatter is as follows: + ``` + [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE + ``` + All handlers currently bound to the root logger are affected by this method. + """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") + handler.setFormatter(formatter) + + +def reset_format() -> None: + """ + Resets the formatting for ๐Ÿค— Diffusers' loggers. + + All handlers currently bound to the root logger are affected by this method. 
+ """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + handler.setFormatter(None) + + +def warning_advice(self, *args, **kwargs) -> None: + """ + This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this + warning will not be printed + """ + no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) + if no_advisory_warnings: + return + self.warning(*args, **kwargs) + + +logging.Logger.warning_advice = warning_advice + + +class EmptyTqdm: + """Dummy tqdm which doesn't do anything.""" + + def __init__(self, *args, **kwargs): # pylint: disable=unused-argument + self._iterator = args[0] if args else None + + def __iter__(self): + return iter(self._iterator) + + def __getattr__(self, _): + """Return empty function.""" + + def empty_fn(*args, **kwargs): # pylint: disable=unused-argument + return + + return empty_fn + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + return + + +class _tqdm_cls: + def __call__(self, *args, **kwargs): + if _tqdm_active: + return tqdm_lib.tqdm(*args, **kwargs) + else: + return EmptyTqdm(*args, **kwargs) + + def set_lock(self, *args, **kwargs): + self._lock = None + if _tqdm_active: + return tqdm_lib.tqdm.set_lock(*args, **kwargs) + + def get_lock(self): + if _tqdm_active: + return tqdm_lib.tqdm.get_lock() + + +tqdm = _tqdm_cls() + + +def is_progress_bar_enabled() -> bool: + """Return a boolean indicating whether tqdm progress bars are enabled.""" + global _tqdm_active + return bool(_tqdm_active) + + +def enable_progress_bar() -> None: + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = True + + +def disable_progress_bar() -> None: + """Disable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = False diff --git a/diffusers3/utils/model_card_template.md b/diffusers3/utils/model_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..f41b71e24e2081425d3049ae51b50036d5d28b6a --- /dev/null +++ b/diffusers3/utils/model_card_template.md @@ -0,0 +1,24 @@ +--- +{{ card_data }} +--- + + + +{{ model_description }} + +## Intended uses & limitations + +#### How to use + +```python +# TODO: add an example code snippet for running this diffusion pipeline +``` + +#### Limitations and bias + +[TODO: provide examples of latent issues and potential remediations] + +## Training details + +[TODO: describe the data used to train the model] diff --git a/diffusers3/utils/outputs.py b/diffusers3/utils/outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..6080a86b871aec2218471591bccac408a45d82b2 --- /dev/null +++ b/diffusers3/utils/outputs.py @@ -0,0 +1,137 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Generic utilities +""" + +from collections import OrderedDict +from dataclasses import fields, is_dataclass +from typing import Any, Tuple + +import numpy as np + +from .import_utils import is_torch_available, is_torch_version + + +def is_tensor(x) -> bool: + """ + Tests if `x` is a `torch.Tensor` or `np.ndarray`. + """ + if is_torch_available(): + import torch + + if isinstance(x, torch.Tensor): + return True + + return isinstance(x, np.ndarray) + + +class BaseOutput(OrderedDict): + """ + Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a + tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular + Python dictionary. + + + + You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple + first. + + + """ + + def __init_subclass__(cls) -> None: + """Register subclasses as pytree nodes. + + This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with + `static_graph=True` with modules that output `ModelOutput` subclasses. + """ + if is_torch_available(): + import torch.utils._pytree + + if is_torch_version("<", "2.2"): + torch.utils._pytree._register_pytree_node( + cls, + torch.utils._pytree._dict_flatten, + lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), + ) + else: + torch.utils._pytree.register_pytree_node( + cls, + torch.utils._pytree._dict_flatten, + lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), + ) + + def __post_init__(self) -> None: + class_fields = fields(self) + + # Safety and consistency checks + if not len(class_fields): + raise ValueError(f"{self.__class__.__name__} has no fields.") + + first_field = getattr(self, class_fields[0].name) + other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) + + if other_fields_are_none and isinstance(first_field, dict): + for key, value in first_field.items(): + self[key] = value + else: + for field in class_fields: + v = getattr(self, field.name) + if v is not None: + self[field.name] = v + + def __delitem__(self, *args, **kwargs): + raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") + + def setdefault(self, *args, **kwargs): + raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") + + def pop(self, *args, **kwargs): + raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") + + def update(self, *args, **kwargs): + raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") + + def __getitem__(self, k: Any) -> Any: + if isinstance(k, str): + inner_dict = dict(self.items()) + return inner_dict[k] + else: + return self.to_tuple()[k] + + def __setattr__(self, name: Any, value: Any) -> None: + if name in self.keys() and value is not None: + # Don't call self.__setitem__ to avoid recursion errors + super().__setitem__(name, value) + super().__setattr__(name, value) + + def __setitem__(self, key, value): + # Will raise a KeyException if needed + super().__setitem__(key, value) + # Don't call self.__setattr__ to avoid recursion errors + super().__setattr__(key, value) + + def __reduce__(self): + if not is_dataclass(self): + return super().__reduce__() + callable, _args, *remaining = super().__reduce__() + args = tuple(getattr(self, field.name) for field in fields(self)) + return callable, 
args, *remaining + + def to_tuple(self) -> Tuple[Any, ...]: + """ + Convert self to a tuple containing all the attributes/keys that are not `None`. + """ + return tuple(self[k] for k in self.keys()) diff --git a/diffusers3/utils/peft_utils.py b/diffusers3/utils/peft_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ca55192ff7ae83139b239ec7ff46d0fa79555a19 --- /dev/null +++ b/diffusers3/utils/peft_utils.py @@ -0,0 +1,295 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PEFT utilities: Utilities related to peft library +""" + +import collections +import importlib +from typing import Optional + +from packaging import version + +from .import_utils import is_peft_available, is_torch_available + + +if is_torch_available(): + import torch + + +def recurse_remove_peft_layers(model): + r""" + Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + has_base_layer_pattern = False + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + has_base_layer_pattern = hasattr(module, "base_layer") + break + + if has_base_layer_pattern: + from peft.utils import _get_submodules + + key_list = [key for key, _ in model.named_modules() if "lora" not in key] + for key in key_list: + try: + parent, target, target_name = _get_submodules(model, key) + except AttributeError: + continue + if hasattr(target, "base_layer"): + setattr(parent, target_name, target.get_base_layer()) + else: + # This is for backwards compatibility with PEFT <= 0.6.2. + # TODO can be removed once that PEFT version is no longer supported. + from peft.tuners.lora import LoraLayer + + for name, module in model.named_children(): + if len(list(module.children())) > 0: + ## compound module, go inside it + recurse_remove_peft_layers(module) + + module_replaced = False + + if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear): + new_module = torch.nn.Linear( + module.in_features, + module.out_features, + bias=module.bias is not None, + ).to(module.weight.device) + new_module.weight = module.weight + if module.bias is not None: + new_module.bias = module.bias + + module_replaced = True + elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d): + new_module = torch.nn.Conv2d( + module.in_channels, + module.out_channels, + module.kernel_size, + module.stride, + module.padding, + module.dilation, + module.groups, + ).to(module.weight.device) + + new_module.weight = module.weight + if module.bias is not None: + new_module.bias = module.bias + + module_replaced = True + + if module_replaced: + setattr(model, name, new_module) + del module + + if torch.cuda.is_available(): + torch.cuda.empty_cache() + return model + + +def scale_lora_layers(model, weight): + """ + Adjust the weightage given to the LoRA layers of the model. + + Args: + model (`torch.nn.Module`): + The model to scale. 
+ weight (`float`): + The weight to be given to the LoRA layers. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + if weight == 1.0: + return + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + module.scale_layer(weight) + + +def unscale_lora_layers(model, weight: Optional[float] = None): + """ + Removes the previously passed weight given to the LoRA layers of the model. + + Args: + model (`torch.nn.Module`): + The model to scale. + weight (`float`, *optional*): + The weight to be given to the LoRA layers. If no scale is passed the scale of the lora layer will be + re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct + value. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + if weight == 1.0: + return + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + if weight is not None and weight != 0: + module.unscale_layer(weight) + elif weight is not None and weight == 0: + for adapter_name in module.active_adapters: + # if weight == 0 unscale should re-set the scale to the original value. + module.set_scale(adapter_name, 1.0) + + +def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True): + rank_pattern = {} + alpha_pattern = {} + r = lora_alpha = list(rank_dict.values())[0] + + if len(set(rank_dict.values())) > 1: + # get the rank occuring the most number of times + r = collections.Counter(rank_dict.values()).most_common()[0][0] + + # for modules with rank different from the most occuring rank, add it to the `rank_pattern` + rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items())) + rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()} + + if network_alpha_dict is not None and len(network_alpha_dict) > 0: + if len(set(network_alpha_dict.values())) > 1: + # get the alpha occuring the most number of times + lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0] + + # for modules with alpha different from the most occuring alpha, add it to the `alpha_pattern` + alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items())) + if is_unet: + alpha_pattern = { + ".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v + for k, v in alpha_pattern.items() + } + else: + alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()} + else: + lora_alpha = set(network_alpha_dict.values()).pop() + + # layer names without the Diffusers specific + target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()}) + use_dora = any("lora_magnitude_vector" in k for k in peft_state_dict) + + lora_config_kwargs = { + "r": r, + "lora_alpha": lora_alpha, + "rank_pattern": rank_pattern, + "alpha_pattern": alpha_pattern, + "target_modules": target_modules, + "use_dora": use_dora, + } + return lora_config_kwargs + + +def get_adapter_name(model): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return f"default_{len(module.r)}" + return "default_0" + + +def set_adapter_layers(model, enabled=True): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + # The recent version of PEFT needs to call `enable_adapters` instead + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=enabled) + else: + module.disable_adapters = not enabled + + +def 
delete_adapter_layers(model, adapter_name): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "delete_adapter"): + module.delete_adapter(adapter_name) + else: + raise ValueError( + "The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1" + ) + + # For transformers integration - we need to pop the adapter from the config + if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"): + model.peft_config.pop(adapter_name, None) + # In case all adapters are deleted, we need to delete the config + # and make sure to set the flag to False + if len(model.peft_config) == 0: + del model.peft_config + model._hf_peft_config_loaded = None + + +def set_weights_and_activate_adapters(model, adapter_names, weights): + from peft.tuners.tuners_utils import BaseTunerLayer + + def get_module_weight(weight_for_adapter, module_name): + if not isinstance(weight_for_adapter, dict): + # If weight_for_adapter is a single number, always return it. + return weight_for_adapter + + for layer_name, weight_ in weight_for_adapter.items(): + if layer_name in module_name: + return weight_ + + parts = module_name.split(".") + # e.g. key = "down_blocks.1.attentions.0" + key = f"{parts[0]}.{parts[1]}.attentions.{parts[3]}" + block_weight = weight_for_adapter.get(key, 1.0) + + return block_weight + + # iterate over each adapter, make it active and set the corresponding scaling weight + for adapter_name, weight in zip(adapter_names, weights): + for module_name, module in model.named_modules(): + if isinstance(module, BaseTunerLayer): + # For backward compatbility with previous PEFT versions + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_name) + else: + module.active_adapter = adapter_name + module.set_scale(adapter_name, get_module_weight(weight, module_name)) + + # set multiple active adapters + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + # For backward compatbility with previous PEFT versions + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_names) + else: + module.active_adapter = adapter_names + + +def check_peft_version(min_version: str) -> None: + r""" + Checks if the version of PEFT is compatible. + + Args: + version (`str`): + The version of PEFT to check against. + """ + if not is_peft_available(): + raise ValueError("PEFT is not installed. 
Please install it with `pip install peft`") + + is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version) + + if not is_peft_version_compatible: + raise ValueError( + f"The version of PEFT you are using is not compatible, please use a version that is greater" + f" than {min_version}" + ) diff --git a/diffusers3/utils/pil_utils.py b/diffusers3/utils/pil_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76678070b697c7d87fc3691d9bc5bb3bea83c5b1 --- /dev/null +++ b/diffusers3/utils/pil_utils.py @@ -0,0 +1,67 @@ +from typing import List + +import PIL.Image +import PIL.ImageOps +from packaging import version +from PIL import Image + + +if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): + PIL_INTERPOLATION = { + "linear": PIL.Image.Resampling.BILINEAR, + "bilinear": PIL.Image.Resampling.BILINEAR, + "bicubic": PIL.Image.Resampling.BICUBIC, + "lanczos": PIL.Image.Resampling.LANCZOS, + "nearest": PIL.Image.Resampling.NEAREST, + } +else: + PIL_INTERPOLATION = { + "linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + "nearest": PIL.Image.NEAREST, + } + + +def pt_to_pil(images): + """ + Convert a torch image to a PIL image. + """ + images = (images / 2 + 0.5).clamp(0, 1) + images = images.cpu().permute(0, 2, 3, 1).float().numpy() + images = numpy_to_pil(images) + return images + + +def numpy_to_pil(images): + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + +def make_image_grid(images: List[PIL.Image.Image], rows: int, cols: int, resize: int = None) -> PIL.Image.Image: + """ + Prepares a single grid of images. Useful for visualization purposes. + """ + assert len(images) == rows * cols + + if resize is not None: + images = [img.resize((resize, resize)) for img in images] + + w, h = images[0].size + grid = Image.new("RGB", size=(cols * w, rows * h)) + + for i, img in enumerate(images): + grid.paste(img, box=(i % cols * w, i // cols * h)) + return grid diff --git a/diffusers3/utils/state_dict_utils.py b/diffusers3/utils/state_dict_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..62b114ba67e35c696ba859909a83deea39f5ef22 --- /dev/null +++ b/diffusers3/utils/state_dict_utils.py @@ -0,0 +1,335 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
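A hedged sketch of the PIL helpers defined just above; the file names are placeholders and the snippet is not part of the vendored module.

```python
# Illustrative only: assemble a 2x2 grid of saved outputs with make_image_grid.
from PIL import Image
from diffusers3.utils.pil_utils import make_image_grid

images = [Image.open(f"outputs/sample_{i}.png") for i in range(4)]  # hypothetical output files
grid = make_image_grid(images, rows=2, cols=2, resize=512)          # resize each tile to 512x512
grid.save("outputs/grid.png")
```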
+""" +State dict utilities: utility methods for converting state dicts easily +""" + +import enum + +from .logging import get_logger + + +logger = get_logger(__name__) + + +class StateDictType(enum.Enum): + """ + The mode to use when converting state dicts. + """ + + DIFFUSERS_OLD = "diffusers_old" + KOHYA_SS = "kohya_ss" + PEFT = "peft" + DIFFUSERS = "diffusers" + + +# We need to define a proper mapping for Unet since it uses different output keys than text encoder +# e.g. to_q_lora -> q_proj / to_q +UNET_TO_DIFFUSERS = { + ".to_out_lora.up": ".to_out.0.lora_B", + ".to_out_lora.down": ".to_out.0.lora_A", + ".to_q_lora.down": ".to_q.lora_A", + ".to_q_lora.up": ".to_q.lora_B", + ".to_k_lora.down": ".to_k.lora_A", + ".to_k_lora.up": ".to_k.lora_B", + ".to_v_lora.down": ".to_v.lora_A", + ".to_v_lora.up": ".to_v.lora_B", + ".lora.up": ".lora_B", + ".lora.down": ".lora_A", + ".to_out.lora_magnitude_vector": ".to_out.0.lora_magnitude_vector", +} + + +DIFFUSERS_TO_PEFT = { + ".q_proj.lora_linear_layer.up": ".q_proj.lora_B", + ".q_proj.lora_linear_layer.down": ".q_proj.lora_A", + ".k_proj.lora_linear_layer.up": ".k_proj.lora_B", + ".k_proj.lora_linear_layer.down": ".k_proj.lora_A", + ".v_proj.lora_linear_layer.up": ".v_proj.lora_B", + ".v_proj.lora_linear_layer.down": ".v_proj.lora_A", + ".out_proj.lora_linear_layer.up": ".out_proj.lora_B", + ".out_proj.lora_linear_layer.down": ".out_proj.lora_A", + ".lora_linear_layer.up": ".lora_B", + ".lora_linear_layer.down": ".lora_A", + "text_projection.lora.down.weight": "text_projection.lora_A.weight", + "text_projection.lora.up.weight": "text_projection.lora_B.weight", +} + +DIFFUSERS_OLD_TO_PEFT = { + ".to_q_lora.up": ".q_proj.lora_B", + ".to_q_lora.down": ".q_proj.lora_A", + ".to_k_lora.up": ".k_proj.lora_B", + ".to_k_lora.down": ".k_proj.lora_A", + ".to_v_lora.up": ".v_proj.lora_B", + ".to_v_lora.down": ".v_proj.lora_A", + ".to_out_lora.up": ".out_proj.lora_B", + ".to_out_lora.down": ".out_proj.lora_A", + ".lora_linear_layer.up": ".lora_B", + ".lora_linear_layer.down": ".lora_A", +} + +PEFT_TO_DIFFUSERS = { + ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", + ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", + ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", + ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", + ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", + ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", + ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", + ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", + "to_k.lora_A": "to_k.lora.down", + "to_k.lora_B": "to_k.lora.up", + "to_q.lora_A": "to_q.lora.down", + "to_q.lora_B": "to_q.lora.up", + "to_v.lora_A": "to_v.lora.down", + "to_v.lora_B": "to_v.lora.up", + "to_out.0.lora_A": "to_out.0.lora.down", + "to_out.0.lora_B": "to_out.0.lora.up", +} + +DIFFUSERS_OLD_TO_DIFFUSERS = { + ".to_q_lora.up": ".q_proj.lora_linear_layer.up", + ".to_q_lora.down": ".q_proj.lora_linear_layer.down", + ".to_k_lora.up": ".k_proj.lora_linear_layer.up", + ".to_k_lora.down": ".k_proj.lora_linear_layer.down", + ".to_v_lora.up": ".v_proj.lora_linear_layer.up", + ".to_v_lora.down": ".v_proj.lora_linear_layer.down", + ".to_out_lora.up": ".out_proj.lora_linear_layer.up", + ".to_out_lora.down": ".out_proj.lora_linear_layer.down", + ".to_k.lora_magnitude_vector": ".k_proj.lora_magnitude_vector", + ".to_v.lora_magnitude_vector": ".v_proj.lora_magnitude_vector", + ".to_q.lora_magnitude_vector": ".q_proj.lora_magnitude_vector", + ".to_out.lora_magnitude_vector": ".out_proj.lora_magnitude_vector", +} + 
+PEFT_TO_KOHYA_SS = { + "lora_A": "lora_down", + "lora_B": "lora_up", + # This is not a comprehensive dict as kohya format requires replacing `.` with `_` in keys, + # adding prefixes and adding alpha values + # Check `convert_state_dict_to_kohya` for more +} + +PEFT_STATE_DICT_MAPPINGS = { + StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_PEFT, + StateDictType.DIFFUSERS: DIFFUSERS_TO_PEFT, +} + +DIFFUSERS_STATE_DICT_MAPPINGS = { + StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, + StateDictType.PEFT: PEFT_TO_DIFFUSERS, +} + +KOHYA_STATE_DICT_MAPPINGS = {StateDictType.PEFT: PEFT_TO_KOHYA_SS} + +KEYS_TO_ALWAYS_REPLACE = { + ".processor.": ".", +} + + +def convert_state_dict(state_dict, mapping): + r""" + Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + mapping (`dict[str, str]`): + The mapping to use for conversion, the mapping should be a dictionary with the following structure: + - key: the pattern to replace + - value: the pattern to replace with + + Returns: + converted_state_dict (`dict`) + The converted state dict. + """ + converted_state_dict = {} + for k, v in state_dict.items(): + # First, filter out the keys that we always want to replace + for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): + if pattern in k: + new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] + k = k.replace(pattern, new_pattern) + + for pattern in mapping.keys(): + if pattern in k: + new_pattern = mapping[pattern] + k = k.replace(pattern, new_pattern) + break + converted_state_dict[k] = v + return converted_state_dict + + +def convert_state_dict_to_peft(state_dict, original_type=None, **kwargs): + r""" + Converts a state dict to the PEFT format The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or + new diffusers format (`DIFFUSERS`). The method only supports the conversion from diffusers old/new to PEFT for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + """ + if original_type is None: + # Old diffusers to PEFT + if any("to_out_lora" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS_OLD + elif any("lora_linear_layer" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS + else: + raise ValueError("Could not automatically infer state dict type") + + if original_type not in PEFT_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + mapping = PEFT_STATE_DICT_MAPPINGS[original_type] + return convert_state_dict(state_dict, mapping) + + +def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): + r""" + Converts a state dict to new diffusers format. The state dict can be from previous diffusers format + (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will + return the state dict as is. + + The method only supports the conversion from diffusers old, PEFT to diffusers new for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + kwargs (`dict`, *args*): + Additional arguments to pass to the method. 
+ + - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended + with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in + `get_peft_model_state_dict` method: + https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 + but we add it here in case we don't want to rely on that method. + """ + peft_adapter_name = kwargs.pop("adapter_name", None) + if peft_adapter_name is not None: + peft_adapter_name = "." + peft_adapter_name + else: + peft_adapter_name = "" + + if original_type is None: + # Old diffusers to PEFT + if any("to_out_lora" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS_OLD + elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): + original_type = StateDictType.PEFT + elif any("lora_linear_layer" in k for k in state_dict.keys()): + # nothing to do + return state_dict + else: + raise ValueError("Could not automatically infer state dict type") + + if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] + return convert_state_dict(state_dict, mapping) + + +def convert_unet_state_dict_to_peft(state_dict): + r""" + Converts a state dict from UNet format to diffusers format - i.e. by removing some keys + """ + mapping = UNET_TO_DIFFUSERS + return convert_state_dict(state_dict, mapping) + + +def convert_all_state_dict_to_peft(state_dict): + r""" + Attempts to first `convert_state_dict_to_peft`, and if it doesn't detect `lora_linear_layer` for a valid + `DIFFUSERS` LoRA for example, attempts to exclusively convert the Unet `convert_unet_state_dict_to_peft` + """ + try: + peft_dict = convert_state_dict_to_peft(state_dict) + except Exception as e: + if str(e) == "Could not automatically infer state dict type": + peft_dict = convert_unet_state_dict_to_peft(state_dict) + else: + raise + + if not any("lora_A" in key or "lora_B" in key for key in peft_dict.keys()): + raise ValueError("Your LoRA was not converted to PEFT") + + return peft_dict + + +def convert_state_dict_to_kohya(state_dict, original_type=None, **kwargs): + r""" + Converts a `PEFT` state dict to `Kohya` format that can be used in AUTOMATIC1111, ComfyUI, SD.Next, InvokeAI, etc. + The method only supports the conversion from PEFT to Kohya for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + kwargs (`dict`, *args*): + Additional arguments to pass to the method. + + - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended + with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in + `get_peft_model_state_dict` method: + https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 + but we add it here in case we don't want to rely on that method. + """ + try: + import torch + except ImportError: + logger.error("Converting PEFT state dicts to Kohya requires torch to be installed.") + raise + + peft_adapter_name = kwargs.pop("adapter_name", None) + if peft_adapter_name is not None: + peft_adapter_name = "." 
+ peft_adapter_name + else: + peft_adapter_name = "" + + if original_type is None: + if any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): + original_type = StateDictType.PEFT + + if original_type not in KOHYA_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + # Use the convert_state_dict function with the appropriate mapping + kohya_ss_partial_state_dict = convert_state_dict(state_dict, KOHYA_STATE_DICT_MAPPINGS[StateDictType.PEFT]) + kohya_ss_state_dict = {} + + # Additional logic for replacing header, alpha parameters `.` with `_` in all keys + for kohya_key, weight in kohya_ss_partial_state_dict.items(): + if "text_encoder_2." in kohya_key: + kohya_key = kohya_key.replace("text_encoder_2.", "lora_te2.") + elif "text_encoder." in kohya_key: + kohya_key = kohya_key.replace("text_encoder.", "lora_te1.") + elif "unet" in kohya_key: + kohya_key = kohya_key.replace("unet", "lora_unet") + elif "lora_magnitude_vector" in kohya_key: + kohya_key = kohya_key.replace("lora_magnitude_vector", "dora_scale") + + kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) + kohya_key = kohya_key.replace(peft_adapter_name, "") # Kohya doesn't take names + kohya_ss_state_dict[kohya_key] = weight + if "lora_down" in kohya_key: + alpha_key = f'{kohya_key.split(".")[0]}.alpha' + kohya_ss_state_dict[alpha_key] = torch.tensor(len(weight)) + + return kohya_ss_state_dict diff --git a/diffusers3/utils/testing_utils.py b/diffusers3/utils/testing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..be3e9983c80ffa7c16a4666de5f624a7ffb9a591 --- /dev/null +++ b/diffusers3/utils/testing_utils.py @@ -0,0 +1,1035 @@ +import functools +import importlib +import inspect +import io +import logging +import multiprocessing +import os +import random +import re +import struct +import sys +import tempfile +import time +import unittest +import urllib.parse +from contextlib import contextmanager +from io import BytesIO, StringIO +from pathlib import Path +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps +import requests +from numpy.linalg import norm +from packaging import version + +from .import_utils import ( + BACKENDS_MAPPING, + is_compel_available, + is_flax_available, + is_note_seq_available, + is_onnx_available, + is_opencv_available, + is_peft_available, + is_timm_available, + is_torch_available, + is_torch_version, + is_torchsde_available, + is_transformers_available, +) +from .logging import get_logger + + +global_rng = random.Random() + +logger = get_logger(__name__) + +_required_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version +) > version.parse("0.5") +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) > version.parse("4.33") + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version + +if is_torch_available(): + import torch + + # Set a backend environment variable for any extra module import required for a custom accelerator + if "DIFFUSERS_TEST_BACKEND" in os.environ: + backend = os.environ["DIFFUSERS_TEST_BACKEND"] + try: + _ = importlib.import_module(backend) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! 
This should be the name of an installed module \ + to enable a specified backend.):\n{e}" + ) from e + + if "DIFFUSERS_TEST_DEVICE" in os.environ: + torch_device = os.environ["DIFFUSERS_TEST_DEVICE"] + try: + # try creating device to see if provided device is valid + _ = torch.device(torch_device) + except RuntimeError as e: + raise RuntimeError( + f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}" + ) from e + logger.info(f"torch_device overrode to {torch_device}") + else: + torch_device = "cuda" if torch.cuda.is_available() else "cpu" + is_torch_higher_equal_than_1_12 = version.parse( + version.parse(torch.__version__).base_version + ) >= version.parse("1.12") + + if is_torch_higher_equal_than_1_12: + # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details + mps_backend_registered = hasattr(torch.backends, "mps") + torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device + + +def torch_all_close(a, b, *args, **kwargs): + if not is_torch_available(): + raise ValueError("PyTorch needs to be installed to use this function.") + if not torch.allclose(a, b, *args, **kwargs): + assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}." + return True + + +def numpy_cosine_similarity_distance(a, b): + similarity = np.dot(a, b) / (norm(a) * norm(b)) + distance = 1.0 - similarity.mean() + + return distance + + +def print_tensor_test( + tensor, + limit_to_slices=None, + max_torch_print=None, + filename="test_corrections.txt", + expected_tensor_name="expected_slice", +): + if max_torch_print: + torch.set_printoptions(threshold=10_000) + + test_name = os.environ.get("PYTEST_CURRENT_TEST") + if not torch.is_tensor(tensor): + tensor = torch.from_numpy(tensor) + if limit_to_slices: + tensor = tensor[0, -3:, -3:, -1] + + tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") + # format is usually: + # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) + output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") + test_file, test_class, test_fn = test_name.split("::") + test_fn = test_fn.split()[0] + with open(filename, "a") as f: + print("::".join([test_file, test_class, test_fn, output_str]), file=f) + + +def get_tests_dir(append_path=None): + """ + Args: + append_path: optional path to append to the tests dir path + Return: + The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is + joined after the `tests` dir the former is provided. + """ + # this function caller's __file__ + caller__file__ = inspect.stack()[1][1] + tests_dir = os.path.abspath(os.path.dirname(caller__file__)) + + while not tests_dir.endswith("tests"): + tests_dir = os.path.dirname(tests_dir) + + if append_path: + return Path(tests_dir, append_path).as_posix() + else: + return tests_dir + + +# Taken from the following PR: +# https://github.com/huggingface/accelerate/pull/1964 +def str_to_bool(value) -> int: + """ + Converts a string representation of truth to `True` (1) or `False` (0). 
True values are `y`, `yes`, `t`, `true`, + `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`; + """ + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError(f"invalid truth value {value}") + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) +_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False) + + +def floats_tensor(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.random() * scale) + + return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def nightly(test_case): + """ + Decorator marking a test that runs nightly in the diffusers CI. + + Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) + + +def is_torch_compile(test_case): + """ + Decorator marking a test that runs compile tests in the diffusers CI. + + Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case) + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. + """ + return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) + + +def require_torch_2(test_case): + """ + Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. + """ + return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( + test_case + ) + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( + test_case + ) + + +# These decorators are for accelerator-specific behaviours that are not GPU-specific +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accelerator backend and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")( + test_case + ) + + +def require_torch_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without + multiple GPUs. 
To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests + -k "multi_gpu" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_torch_accelerator_with_fp16(test_case): + """Decorator marking a test that requires an accelerator with support for the FP16 data type.""" + return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")( + test_case + ) + + +def require_torch_accelerator_with_fp64(test_case): + """Decorator marking a test that requires an accelerator with support for the FP64 data type.""" + return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")( + test_case + ) + + +def require_torch_accelerator_with_training(test_case): + """Decorator marking a test that requires an accelerator with support for training.""" + return unittest.skipUnless( + is_torch_available() and backend_supports_training(torch_device), + "test requires accelerator with training support", + )(test_case) + + +def skip_mps(test_case): + """Decorator marking a test to skip if torch_device is 'mps'""" + return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_compel(test_case): + """ + Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when + the library is not installed. + """ + return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case) + + +def require_onnxruntime(test_case): + """ + Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed. + """ + return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case) + + +def require_note_seq(test_case): + """ + Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed. + """ + return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) + + +def require_torchsde(test_case): + """ + Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed. + """ + return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case) + + +def require_peft_backend(test_case): + """ + Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and + transformers. + """ + return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires timm. These tests are skipped when timm isn't installed. + """ + return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case) + + +def require_peft_version_greater(peft_version): + """ + Decorator marking a test that requires PEFT backend with a specific version, this would require some specific + versions of PEFT and transformers. 
+ """ + + def decorator(test_case): + correct_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version + ) > version.parse(peft_version) + return unittest.skipUnless( + correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}" + )(test_case) + + return decorator + + +def require_accelerate_version_greater(accelerate_version): + def decorator(test_case): + correct_accelerate_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("accelerate")).base_version + ) > version.parse(accelerate_version) + return unittest.skipUnless( + correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}." + )(test_case) + + return decorator + + +def deprecate_after_peft_backend(test_case): + """ + Decorator marking a test that will be skipped after PEFT backend + """ + return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case) + + +def get_python_version(): + sys_info = sys.version_info + major, minor = sys_info.major, sys_info.minor + return major, minor + + +def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray: + if isinstance(arry, str): + if local_path is not None: + # local_path can be passed to correct images of tests + return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix() + elif arry.startswith("http://") or arry.startswith("https://"): + response = requests.get(arry) + response.raise_for_status() + arry = np.load(BytesIO(response.content)) + elif os.path.isfile(arry): + arry = np.load(arry) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path" + ) + elif isinstance(arry, np.ndarray): + pass + else: + raise ValueError( + "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a" + " ndarray." + ) + + return arry + + +def load_pt(url: str): + response = requests.get(url) + response.raise_for_status() + arry = torch.load(BytesIO(response.content)) + return arry + + +def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image." 
+ ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def preprocess_image(image: PIL.Image, batch_size: int): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=100, + loop=0, + ) + return output_gif_path + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. + """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + format = struct.Struct(" str: + if is_opencv_available(): + import cv2 + else: + raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video")) + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + h, w, c = video_frames[0].shape + video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h)) + for i in range(len(video_frames)): + img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) + video_writer.write(img) + return output_video_path + + +def load_hf_numpy(path) -> np.ndarray: + base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main" + + if not path.startswith("http://") and not path.startswith("https://"): + path = os.path.join(base_url, urllib.parse.quote(path)) + + return load_numpy(path) + + +# --- pytest conf functions --- # + +# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once +pytest_opt_registered = {} + + +def pytest_addoption_shared(parser): + """ + This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. 
+ + It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` + option. + + """ + option = "--make-reports" + if option not in pytest_opt_registered: + parser.addoption( + option, + action="store", + default=False, + help="generate report files. The value of this option is used as a prefix to report names", + ) + pytest_opt_registered[option] = 1 + + +def pytest_terminal_summary_main(tr, id): + """ + Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current + directory. The report files are prefixed with the test suite name. + + This function emulates --duration and -rA pytest arguments. + + This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined + there. + + Args: + - tr: `terminalreporter` passed from `conftest.py` + - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is + needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. + + NB: this functions taps into a private _pytest API and while unlikely, it could break should + pytest do internal changes - also it calls default internal methods of terminalreporter which + can be hijacked by various `pytest-` plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = "reports" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{id}_{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g. 
+ # pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + with open(report_files["passes"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# Copied from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905 +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests. They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(test_func_ref): + @functools.wraps(test_func_ref) + def wrapper(*args, **kwargs): + retry_count = 1 + + while retry_count < max_attempts: + try: + return test_func_ref(*args, **kwargs) + + except Exception as err: + print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return test_func_ref(*args, **kwargs) + + return wrapper + + return decorator + + +# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787 +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`. + target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. 
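+
+    Example (an illustrative sketch; `_test_fn` and its queue handling are assumptions
+    about what the caller provides, mirroring how `target_func` is invoked below):
+
+    ```python
+    def _test_fn(in_queue, out_queue, timeout):
+        error = None
+        try:
+            inputs = in_queue.get(timeout=timeout)
+            # ... run the actual checks on `inputs` here ...
+        except Exception as e:
+            error = f"{e}"
+        # the parent process reads this dict and fails the test if "error" is set
+        out_queue.put({"error": error}, timeout=timeout)
+        out_queue.join()
+
+    run_test_in_subprocess(test_case=self, target_func=_test_fn, inputs={"seed": 0})
+    ```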
+ """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_methohd = "spawn" + ctx = multiprocessing.get_context(start_methohd) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test to exit properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f'{results["error"]}') + + +class CaptureLogger: + """ + Args: + Context manager to capture `logging` streams + logger: 'logging` logger object + Returns: + The captured output is available via `self.out` + Example: + ```python + >>> from diffusers import logging + >>> from diffusers.testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py") + >>> with CaptureLogger(logger) as cl: + ... logger.info(msg) + >>> assert cl.out, msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode. 
This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +# Utils for custom and alternative accelerator devices +def _is_torch_fp16_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float16).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +def _is_torch_fp64_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float64).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch +if is_torch_available(): + # Behaviour flags + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "cpu": True, "mps": False, "default": True} + + # Function definitions + BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "mps": None, "default": None} + BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "mps": lambda: 0, "default": 0} + BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed} + + +# This dispatches a defined function according to the accelerator from the function definitions. +def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if fn is None: + return None + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. 
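+# Illustrative usage (a sketch, not part of the original utilities): tests can stay
+# device-agnostic by calling e.g. `backend_manual_seed(torch_device, 0)` instead of
+# `torch.cuda.manual_seed(0)`, and `backend_empty_cache(torch_device)` instead of
+# `torch.cuda.empty_cache()`; devices missing from a dispatch table fall back to its
+# "default" entry. The same idea applies to the boolean training flag below.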
+def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + +# Guard for when Torch is not available +if is_torch_available(): + # Update device function dict mapping + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." + ) from e + + if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}") + + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") + + if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + # Add one entry here for each `BACKEND_*` dictionary. + update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") + update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING") diff --git a/diffusers3/utils/torch_utils.py b/diffusers3/utils/torch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf75b4fad4e17e416e5a3f72a25b1f2b3702fe1 --- /dev/null +++ b/diffusers3/utils/torch_utils.py @@ -0,0 +1,148 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +PyTorch utilities: Utilities related to PyTorch +""" + +from typing import List, Optional, Tuple, Union + +from . 
import logging +from .import_utils import is_torch_available, is_torch_version + + +if is_torch_available(): + import torch + from torch.fft import fftn, fftshift, ifftn, ifftshift + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +try: + from torch._dynamo import allow_in_graph as maybe_allow_in_graph +except (ImportError, ModuleNotFoundError): + + def maybe_allow_in_graph(cls): + return cls + + +def randn_tensor( + shape: Union[Tuple, List], + generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, + device: Optional["torch.device"] = None, + dtype: Optional["torch.dtype"] = None, + layout: Optional["torch.layout"] = None, +): + """A helper function to create random tensors on the desired `device` with the desired `dtype`. When + passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor + is always created on the CPU. + """ + # device on which tensor is created defaults to device + rand_device = device + batch_size = shape[0] + + layout = layout or torch.strided + device = device or torch.device("cpu") + + if generator is not None: + gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type + if gen_device_type != device.type and gen_device_type == "cpu": + rand_device = "cpu" + if device != "mps": + logger.info( + f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." + f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" + f" slighly speed up this function by passing a generator that was created on the {device} device." + ) + elif gen_device_type != device.type and gen_device_type == "cuda": + raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") + + # make sure generator list of length 1 is treated like a non-list + if isinstance(generator, list) and len(generator) == 1: + generator = generator[0] + + if isinstance(generator, list): + shape = (1,) + shape[1:] + latents = [ + torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) + for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0).to(device) + else: + latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) + + return latents + + +def is_compiled_module(module) -> bool: + """Check whether the module was compiled with torch.compile()""" + if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): + return False + return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) + + +def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor": + """Fourier filter as introduced in FreeU (https://arxiv.org/abs/2309.11497). 
+ + This version of the method comes from here: + https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706 + """ + x = x_in + B, C, H, W = x.shape + + # Non-power of 2 images must be float32 + if (W & (W - 1)) != 0 or (H & (H - 1)) != 0: + x = x.to(dtype=torch.float32) + + # FFT + x_freq = fftn(x, dim=(-2, -1)) + x_freq = fftshift(x_freq, dim=(-2, -1)) + + B, C, H, W = x_freq.shape + mask = torch.ones((B, C, H, W), device=x.device) + + crow, ccol = H // 2, W // 2 + mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale + x_freq = x_freq * mask + + # IFFT + x_freq = ifftshift(x_freq, dim=(-2, -1)) + x_filtered = ifftn(x_freq, dim=(-2, -1)).real + + return x_filtered.to(dtype=x_in.dtype) + + +def apply_freeu( + resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs +) -> Tuple["torch.Tensor", "torch.Tensor"]: + """Applies the FreeU mechanism as introduced in https: + //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU. + + Args: + resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied. + hidden_states (`torch.Tensor`): Inputs to the underlying block. + res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block. + s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. + s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. + """ + if resolution_idx == 0: + num_half_channels = hidden_states.shape[1] // 2 + hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"] + res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"]) + if resolution_idx == 1: + num_half_channels = hidden_states.shape[1] // 2 + hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"] + res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"]) + + return hidden_states, res_hidden_states diff --git a/diffusers3/utils/versions.py b/diffusers3/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..945a3977ce62a9a55307862193e4be6f12c3c17f --- /dev/null +++ b/diffusers3/utils/versions.py @@ -0,0 +1,117 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Utilities for working with package versions +""" + +import importlib.metadata +import operator +import re +import sys +from typing import Optional + +from packaging import version + + +ops = { + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): + if got_ver is None or want_ver is None: + raise ValueError( + f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" + f" reinstalling {pkg}." + ) + if not ops[op](version.parse(got_ver), version.parse(want_ver)): + raise ImportError( + f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" + ) + + +def require_version(requirement: str, hint: Optional[str] = None) -> None: + """ + Perform a runtime check of the dependency versions, using the exact same syntax used by pip. + + The installed module version comes from the *site-packages* dir via *importlib.metadata*. + + Args: + requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" + hint (`str`, *optional*): what suggestion to print in case of requirements not being met + + Example: + + ```python + require_version("pandas>1.1.2") + require_version("numpy>1.18.5", "this is important to have for whatever reason") + ```""" + + hint = f"\n{hint}" if hint is not None else "" + + # non-versioned check + if re.match(r"^[\w_\-\d]+$", requirement): + pkg, op, want_ver = requirement, None, None + else: + match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" + f" got {requirement}" + ) + pkg, want_full = match[0] + want_range = want_full.split(",") # there could be multiple requirements + wanted = {} + for w in want_range: + match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," + f" but got {requirement}" + ) + op, want_ver = match[0] + wanted[op] = want_ver + if op not in ops: + raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") + + # special case + if pkg == "python": + got_ver = ".".join([str(x) for x in sys.version_info[:3]]) + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + return + + # check if any version is installed + try: + got_ver = importlib.metadata.version(pkg) + except importlib.metadata.PackageNotFoundError: + raise importlib.metadata.PackageNotFoundError( + f"The '{requirement}' distribution was not found and is required by this application. 
{hint}" + ) + + # check that the right version is installed if version number or a range was provided + if want_ver is not None: + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + + +def require_version_core(requirement): + """require_version wrapper which emits a core-specific hint on failure""" + hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" + return require_version(requirement, hint) diff --git a/diffusers3/video_processor.py b/diffusers3/video_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..9e2727b85377cd47f5f44224c5122118518ab8d9 --- /dev/null +++ b/diffusers3/video_processor.py @@ -0,0 +1,113 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import List, Optional, Union + +import numpy as np +import PIL +import torch + +from .image_processor import VaeImageProcessor, is_valid_image, is_valid_image_imagelist + + +class VideoProcessor(VaeImageProcessor): + r"""Simple video processor.""" + + def preprocess_video(self, video, height: Optional[int] = None, width: Optional[int] = None) -> torch.Tensor: + r""" + Preprocesses input video(s). + + Args: + video (`List[PIL.Image]`, `List[List[PIL.Image]]`, `torch.Tensor`, `np.array`, `List[torch.Tensor]`, `List[np.array]`): + The input video. It can be one of the following: + * List of the PIL images. + * List of list of PIL images. + * 4D Torch tensors (expected shape for each tensor `(num_frames, num_channels, height, width)`). + * 4D NumPy arrays (expected shape for each array `(num_frames, height, width, num_channels)`). + * List of 4D Torch tensors (expected shape for each tensor `(num_frames, num_channels, height, + width)`). + * List of 4D NumPy arrays (expected shape for each array `(num_frames, height, width, num_channels)`). + * 5D NumPy arrays: expected shape for each array `(batch_size, num_frames, height, width, + num_channels)`. + * 5D Torch tensors: expected shape for each array `(batch_size, num_frames, num_channels, height, + width)`. + height (`int`, *optional*, defaults to `None`): + The height in preprocessed frames of the video. If `None`, will use the `get_default_height_width()` to + get default height. + width (`int`, *optional*`, defaults to `None`): + The width in preprocessed frames of the video. If `None`, will use get_default_height_width()` to get + the default width. + """ + if isinstance(video, list) and isinstance(video[0], np.ndarray) and video[0].ndim == 5: + warnings.warn( + "Passing `video` as a list of 5d np.ndarray is deprecated." + "Please concatenate the list along the batch dimension and pass it as a single 5d np.ndarray", + FutureWarning, + ) + video = np.concatenate(video, axis=0) + if isinstance(video, list) and isinstance(video[0], torch.Tensor) and video[0].ndim == 5: + warnings.warn( + "Passing `video` as a list of 5d torch.Tensor is deprecated." 
+ "Please concatenate the list along the batch dimension and pass it as a single 5d torch.Tensor", + FutureWarning, + ) + video = torch.cat(video, axis=0) + + # ensure the input is a list of videos: + # - if it is a batch of videos (5d torch.Tensor or np.ndarray), it is converted to a list of videos (a list of 4d torch.Tensor or np.ndarray) + # - if it is is a single video, it is convereted to a list of one video. + if isinstance(video, (np.ndarray, torch.Tensor)) and video.ndim == 5: + video = list(video) + elif isinstance(video, list) and is_valid_image(video[0]) or is_valid_image_imagelist(video): + video = [video] + elif isinstance(video, list) and is_valid_image_imagelist(video[0]): + video = video + else: + raise ValueError( + "Input is in incorrect format. Currently, we only support numpy.ndarray, torch.Tensor, PIL.Image.Image" + ) + + video = torch.stack([self.preprocess(img, height=height, width=width) for img in video], dim=0) + + # move the number of channels before the number of frames. + video = video.permute(0, 2, 1, 3, 4) + + return video + + def postprocess_video( + self, video: torch.Tensor, output_type: str = "np" + ) -> Union[np.ndarray, torch.Tensor, List[PIL.Image.Image]]: + r""" + Converts a video tensor to a list of frames for export. + + Args: + video (`torch.Tensor`): The video as a tensor. + output_type (`str`, defaults to `"np"`): Output type of the postprocessed `video` tensor. + """ + batch_size = video.shape[0] + outputs = [] + for batch_idx in range(batch_size): + batch_vid = video[batch_idx].permute(1, 0, 2, 3) + batch_output = self.postprocess(batch_vid, output_type) + outputs.append(batch_output) + + if output_type == "np": + outputs = np.stack(outputs) + elif output_type == "pt": + outputs = torch.stack(outputs) + elif not output_type == "pil": + raise ValueError(f"{output_type} does not exist. 
Please choose one of ['np', 'pt', 'pil']") + + return outputs diff --git a/feat_file.py b/feat_file.py new file mode 100644 index 0000000000000000000000000000000000000000..3c8d0ecd2d437961bc766a862d55d460d2ead3ae --- /dev/null +++ b/feat_file.py @@ -0,0 +1,462 @@ +import os +import argparse +from dataclasses import dataclass +from typing import Optional + +import torch +from diffusers import UniPCMultistepScheduler +from diffusers3.models.controlnet import ControlNetModel +from diffusers3.pipelines.controlnet.pipeline_controlnet_sd_xl_img2img_img import ( + StableDiffusionXLControlNetImg2ImgPipeline, +) +from ip_adapter import IPAdapterXL + +import cv2 +import numpy as np +import imageio +from PIL import Image, ImageOps +from transformers import pipeline +from preprocess.simple_extractor import run as run_simple_extractor + + +base_model_path = "stabilityai/stable-diffusion-xl-base-1.0" +image_encoder_path = "models/image_encoder" +ip_ckpt = "sdxl_models/ip-adapter_sdxl_vit-h.bin" +controlnet_path = "diffusers/controlnet-depth-sdxl-1.0" + +device = "cuda" if torch.cuda.is_available() else "cpu" +dtype = torch.float32 + +DEBUG_SAVE = False +DEFAULT_STEPS = 40 + +# ========================= +# Global resize params (์š”๊ตฌ์‚ฌํ•ญ ๋ฐ˜์˜) +# - person ์›๋ณธ์„ height=1024๋กœ ๋งž์ถ˜ ๋’ค์˜ (H,W)๋ฅผ ์ „์—ญ์œผ๋กœ ์‚ฌ์šฉ +# ========================= +H: Optional[int] = None # ํ•ญ์ƒ 1024 +W: Optional[int] = None # aspect ์œ ์ง€๋กœ ๊ณ„์‚ฐ๋œ width + + +def compute_hw_from_person(person_path: str): + """ + person ์›๋ณธ ์ด๋ฏธ์ง€ ๊ธฐ์ค€: + - height๊ฐ€ ์ •ํ™•ํžˆ 1024๊ฐ€ ๋˜๋„๋ก ์Šค์ผ€์ผ + - aspect ratio ์œ ์ง€ + => H=1024, W=round(orig_w * (1024/orig_h)) + """ + img = cv2.imread(person_path) + if img is None: + raise FileNotFoundError(f"cv2.imread failed: {person_path} (exists={os.path.exists(person_path)})") + + orig_h, orig_w = img.shape[:2] + scale = 1024.0 / float(orig_h) + new_h = 1024 + new_w = int(round(orig_w * scale)) + return new_h, new_w + + +controlnet = ControlNetModel.from_pretrained( + controlnet_path, + variant="fp16", + use_safetensors=True, + torch_dtype=torch.float32, +).to(device) + +pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained( + base_model_path, + controlnet=controlnet, + use_safetensors=True, + torch_dtype=torch.float32, + add_watermarker=False, +).to(device) + +pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) +pipe.enable_attention_slicing() + +try: + pipe.enable_xformers_memory_efficient_attention() +except Exception: + pass + +depth_estimator = pipeline("depth-estimation") + + +@dataclass +class Paths: + person_path: str + depth_path: str + style_path: str + output_path: str + + +def _ensure_exists(path: str, name: str): + if not os.path.exists(path): + raise FileNotFoundError(f"{name} not found: {path}") + + +def apply_parsing_white_mask_to_person_cv2( + person_pil: Image.Image, + parsing_img: Image.Image +) -> np.ndarray: + person_rgb = np.array(person_pil.convert("RGB"), dtype=np.uint8) + mask = np.array(parsing_img.convert("L"), dtype=np.uint8) + white_mask = mask == 255 + result_rgb = np.full_like(person_rgb, 255, dtype=np.uint8) + result_rgb[white_mask] = person_rgb[white_mask] + result_bgr = cv2.cvtColor(result_rgb, cv2.COLOR_RGB2BGR) + return result_bgr + + +def _imread_or_raise(path: str, flags=None): + img = cv2.imread(path, flags) if flags is not None else cv2.imread(path) + if img is None: + raise FileNotFoundError(f"cv2.imread failed: {path} (exists={os.path.exists(path)})") + return img + + +def 
invert_sketch_area(sketch_area: Image.Image) -> Image.Image: + gray = sketch_area.convert("L") + arr = np.array(gray, dtype=np.uint8) + inverted = 255 - arr + return Image.fromarray(inverted, mode="L") + + +def merge_white_regions_or( + parsing_img: Image.Image, + sketch_area: Image.Image +) -> Image.Image: + p_img = parsing_img.convert("L") + s_img = sketch_area.convert("L") + + p = np.array(p_img, dtype=np.uint8) + s = np.array(s_img, dtype=np.uint8) + + merged = np.where( + (p == 255) | (s == 255), + 255, + 0 + ).astype(np.uint8) + + return merged + + +def preprocess_mask(mask: np.ndarray) -> Image.Image: + # padding ๋ชฉํ‘œ width๋Š” ์š”๊ตฌ์‚ฌํ•ญ๋Œ€๋กœ "ํ•ญ์ƒ 1024" ๊ณ ์ • (์›๋ณธ ๊ทธ๋Œ€๋กœ) + height, width = mask.shape + total_padding = 1024 - width + left_padding = total_padding // 2 + right_padding = total_padding - left_padding + + padded_mask = cv2.copyMakeBorder( + mask, 0, 0, left_padding, right_padding, + borderType=cv2.BORDER_CONSTANT, + value=0, + ) + + kernel = np.ones((17, 17), np.uint8) + dilated_mask = cv2.dilate(padded_mask, kernel, iterations=1) + + if DEBUG_SAVE: + cv2.imwrite("padded_mask.png", padded_mask) + cv2.imwrite("padded_mask_dilated.png", dilated_mask) + + return Image.fromarray(dilated_mask) + + +def make_depth(depth_path: str) -> Image.Image: + global H, W + if H is None or W is None: + raise RuntimeError("Global H/W not set. Call run_one() first.") + + depth_img = _imread_or_raise(depth_path, 0) + + inverted_depth = cv2.bitwise_not(depth_img) + contours, _ = cv2.findContours(inverted_depth, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + filled_depth = inverted_depth.copy() + cv2.drawContours(filled_depth, contours, -1, (255), thickness=cv2.FILLED) + + # โœ… resize๋Š” ์ „์—ญ (W,H) + filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA) + + height, width = filled_depth.shape + total_padding = 1024 - width + left_padding = total_padding // 2 + right_padding = total_padding - left_padding + + padded_depth = cv2.copyMakeBorder( + filled_depth, 0, 0, left_padding, right_padding, + borderType=cv2.BORDER_CONSTANT, + value=0, + ) + + inverted_image = ImageOps.invert(Image.fromarray(padded_depth)) + + with torch.inference_mode(): + image_depth = depth_estimator(inverted_image)["depth"] + + if DEBUG_SAVE: + image_depth.save("depth.png") + + return image_depth + + +def fill_sketch_from_image_path_to_pil( + image_path: str, + threshold: int = 127, +) -> Image.Image: + global H, W + if H is None or W is None: + raise RuntimeError("Global H/W not set. 
Call run_one() first.") + + img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) + if img is None: + raise ValueError(f"์ด๋ฏธ์ง€๋ฅผ ๋ถˆ๋Ÿฌ์˜ฌ ์ˆ˜ ์—†์Šต๋‹ˆ๋‹ค: {image_path}") + + # โœ… resize๋Š” ์ „์—ญ (W,H) + img = cv2.resize(img, (W, H), interpolation=cv2.INTER_NEAREST) + + _, binary = cv2.threshold( + img, + threshold, + 255, + cv2.THRESH_BINARY_INV + ) + + contours, _ = cv2.findContours( + binary, + cv2.RETR_EXTERNAL, + cv2.CHAIN_APPROX_SIMPLE + ) + + result = np.full_like(img, 255, dtype=np.uint8) + + cv2.drawContours( + result, + contours, + contourIdx=-1, + color=0, + thickness=-1 + ) + + pil_image = Image.fromarray(result, mode="L") + return pil_image + + +def center_crop_lr_to_768x1024(arr: np.ndarray) -> np.ndarray: + # ์›๋ณธ ์œ ์ง€ + h, w = arr.shape[:2] + target_w, target_h = 700, 1024 + if h != target_h: + arr = cv2.resize(arr, (w, target_h), interpolation=cv2.INTER_AREA) + h, w = arr.shape[:2] + if w < target_w: + pad = (target_w - w) // 2 + arr = cv2.copyMakeBorder(arr, 0, 0, pad, target_w - w - pad, cv2.BORDER_REFLECT_101) + h, w = arr.shape[:2] + left = (w - target_w) // 2 + right = left + target_w + return arr[:, left:right] + + +def save_cropped(imgs, out_path: str): + np_imgs = [np.asarray(im) for im in imgs] + cropped = [center_crop_lr_to_768x1024(x) for x in np_imgs] + out = np.concatenate(cropped, axis=1) + os.makedirs(os.path.dirname(out_path), exist_ok=True) + imageio.imsave(out_path, out) + + +def run_one(paths: Paths, prompt: str, steps: int = DEFAULT_STEPS): + global H, W + + category = 'Upper-clothes' + PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) + + person_path_abs = os.path.abspath( + os.path.join(PROJECT_ROOT, paths.person_path) + ) + + # โœ… ์ „์—ญ H/W ์„ธํŒ…: person ์›๋ณธ์„ height=1024๋กœ ๋งž์ท„์„ ๋•Œ์˜ (H,W) + H, W = compute_hw_from_person(paths.person_path) + + print('person_path_abs: ', person_path_abs) + print(f'[global] H={H}, W={W} (from person scaled to height=1024)') + + res = run_simple_extractor( + category=category, + input_path=person_path_abs, + model_restore="./preprocess/ckpts/exp-schp-201908301523-atr.pth" + ) + + parsing_img = res["images"][0] if res["images"] else None + if parsing_img is None: + raise RuntimeError("run_simple_extractor returned no parsing images.") + + sketch_area = fill_sketch_from_image_path_to_pil(paths.depth_path) + sketch_area_inv = invert_sketch_area(sketch_area) + merged_img = merge_white_regions_or(parsing_img, sketch_area_inv) + + mask_pil = preprocess_mask(merged_img) + + _ensure_exists(paths.person_path, "person_path") + _ensure_exists(paths.depth_path, "depth_path") + _ensure_exists(paths.style_path, "style_path") + + # ========================= + # person: resize๋Š” (W,H) + # padding ๋ชฉํ‘œ width=1024๋Š” ์›๋ณธ ๊ทธ๋Œ€๋กœ + # ========================= + person_bgr = _imread_or_raise(paths.person_path) + person_bgr = cv2.resize(person_bgr, (W, H), interpolation=cv2.INTER_AREA) + if DEBUG_SAVE: + cv2.imwrite("person.png", person_bgr) + + target_width = 1024 # โœ… ๊ณ ์ • + padding = (target_width - person_bgr.shape[1]) // 2 + padded_person = cv2.copyMakeBorder( + person_bgr, + top=0, bottom=0, + left=padding, right=padding, + borderType=cv2.BORDER_CONSTANT, + value=[255, 255, 255], + ) + person_rgb = cv2.cvtColor(padded_person, cv2.COLOR_BGR2RGB) + person_pil = Image.fromarray(person_rgb) + + depth_map = make_depth(paths.depth_path) + + # ========================= + # garment: ์›๋ณธ ๋กœ์ง ์œ ์ง€ (๋‹ค๋งŒ parsing/mask ํฌ๊ธฐ ๋งž์ถ”๋ ค๊ณ  ์•„๋ž˜์—์„œ resize (W,H) ์ ์šฉ) + # 
========================= + personn = Image.open(paths.person_path) + + garment_ = apply_parsing_white_mask_to_person_cv2( + personn, + parsing_img + ) + + garment_rgb = cv2.cvtColor(garment_, cv2.COLOR_BGR2RGB) + + # โœ… (์ค‘์š”) garment_๋Š” ์›๋ณธ person ํฌ๊ธฐ์ผ ์ˆ˜ ์žˆ์œผ๋‹ˆ ์ „์—ญ (W,H)๋กœ ๋งž์ถ˜ ๋’ค padding + garment_rgb = cv2.resize(garment_rgb, (W, H), interpolation=cv2.INTER_AREA) + + garment_rgb = cv2.copyMakeBorder( + garment_rgb, + top=0, bottom=0, + left=padding, right=padding, + borderType=cv2.BORDER_CONSTANT, + value=[255, 255, 255], + ) + garment_pil = Image.fromarray(garment_rgb) + + if DEBUG_SAVE: + garment_pil.save('./garment_pil.png') + + # ========================= + # garment mask: resize๋Š” (W,H), padding ๋ชฉํ‘œ width=1024 ๊ณ ์ • + # ========================= + garment_mask_bgr = np.array(parsing_img.convert("L"), dtype=np.uint8) + garment_mask_bgr = cv2.resize(garment_mask_bgr, (W, H), interpolation=cv2.INTER_AREA) + + # ์›๋ณธ ์ฝ”๋“œ์˜ ์‹ค์ˆ˜์˜€๋˜ BGR2RGB๋ฅผ ๊ทธ๋Œ€๋กœ ๋‘๋ฉด ์—๋Ÿฌ ๊ฐ€๋Šฅ์„ฑ์ด ์žˆ์–ด์„œ, + # ์—ฌ๊ธฐ๋งŒ "GRAY2RGB"๋กœ ์•ˆ์ „ํ•˜๊ฒŒ ๋ฐ”๊ฟ”์คŒ (์ž…๋ ฅ shape์ด 2D๋ผ BGR2RGB๋Š” ์˜ˆ์™ธ ๋ฐœ์ƒ ๊ฐ€๋Šฅ) + garment_mask_rgb = cv2.cvtColor(garment_mask_bgr, cv2.COLOR_GRAY2RGB) + + garment_mask_rgb = cv2.copyMakeBorder( + garment_mask_rgb, + top=0, bottom=0, + left=padding, right=padding, + borderType=cv2.BORDER_CONSTANT, + value=[0, 0, 0], + ) + garment_mask_pil = Image.fromarray(garment_mask_rgb) + + if DEBUG_SAVE: + garment_mask_pil.save("garment_mask.png") + + # ========================= + # IPAdapterXL ์ƒ์„ฑ/ํ˜ธ์ถœ: ์›๋ณธ ๊ทธ๋Œ€๋กœ ์œ ์ง€ (์—ฌ๊ธฐ์„œ ์—๋Ÿฌ ๋‚˜๋ฉด ์•ˆ ๋จ) + # ========================= + ip_model = IPAdapterXL( + pipe, + image_encoder_path, + ip_ckpt, + device, + mask_pil, + person_pil, + content_scale=0.3, + style_scale=0.5, + garment_images=garment_pil, + garment_mask=garment_mask_pil, + ) + + style_img = Image.open(paths.style_path) + + person_pil.save('./person_pil.png') + mask_pil.save('./mask_pil.png') + garment_pil.save('./garment_pil.png') + garment_mask_pil.save('./garment_mask_pil.png') + + with torch.inference_mode(): + images = ip_model.generate( + pil_image=style_img, + image=person_pil, + control_image=depth_map, + strength=1.0, + num_samples=1, + num_inference_steps=int(steps), + shape_prompt="", + prompt=prompt or "", + num=0, + scale=None, # โœ… ์›๋ณธ ๊ทธ๋Œ€๋กœ (set_scale ๊ด€๋ จ ์—๋Ÿฌ ๋ฐฉ์ง€ ํ•ต์‹ฌ) + controlnet_conditioning_scale=0.7, + guidance_scale=7.5, + ) + + save_cropped(images, paths.output_path) + print(f"Saved: {paths.output_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="FEAT pipeline (single sample, file paths)") + + parser.add_argument( + "--person-path", + type=str, + default="./DATA_input/Garment/person/1_048392_0.jpg", + ) + parser.add_argument( + "--depth-path", + type=str, + default="./DATA_input/Garment/sketch/1_048392_0.png", + ) + parser.add_argument( + "--style-path", + type=str, + default="./DATA_input/Garment/style/1_00.jpg", + ) + parser.add_argument( + "--output-path", + type=str, + default="./00.png", + ) + + parser.add_argument("--prompt", type=str, default="upper garment", help="single prompt string (optional)") + parser.add_argument("--steps", type=int, default=DEFAULT_STEPS) + parser.add_argument("--debug-save", action="store_true", help="save debug intermediate images (slow)") + args = parser.parse_args() + + DEBUG_SAVE = bool(args.debug_save) + + paths = Paths( + person_path=args.person_path, + depth_path=args.depth_path, + 
style_path=args.style_path, + output_path=args.output_path, + ) + + run_one(paths, prompt=args.prompt, steps=args.steps) + + diff --git a/ip_adapter/.ipynb_checkpoints/attention_processor-checkpoint.py b/ip_adapter/.ipynb_checkpoints/attention_processor-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..49a315b9a675e61554f2ec9c80a7db2602ad28de --- /dev/null +++ b/ip_adapter/.ipynb_checkpoints/attention_processor-checkpoint.py @@ -0,0 +1,597 @@ +# -*- coding: utf-8 -*- +# modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision.transforms as transforms +import PIL.Image +import numpy as np +from typing import Optional + +class AttnProcessor(nn.Module): + r""" + Default processor for performing attention-related computations. + """ + + def __init__( + self, + hidden_size=None, + cross_attention_dim=None, + ): + super().__init__() + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ): + + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class IPAttnProcessor(nn.Module): + r""" + Attention processor for IP-Adapater. + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + scale (`float`, defaults to 1.0): + the weight scale of image prompt. + num_tokens (`int`, defaults to 4 when do ip_adapter_plus it should be 16): + The context length of the image features. 
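+        skip (`bool`, defaults to `False`):
+            Whether to bypass the image-prompt branch entirely. When `False`, the image
+            features are mixed into the text-attention output as
+            `hidden_states + scale * ip_hidden_states` (see `__call__` below).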
+ """ + + def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, skip=False): + super().__init__() + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.scale = scale + self.num_tokens = num_tokens + self.skip = skip + + + self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ): + residual = hidden_states + + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + else: + # get encoder_hidden_states, ip_hidden_states + + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states, ip_hidden_states = ( + encoder_hidden_states[:, :end_pos, :], + encoder_hidden_states[:, end_pos:, :], + ) + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + if not self.skip: + # for ip-adapter + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + + ip_key = attn.head_to_batch_dim(ip_key) + ip_value = attn.head_to_batch_dim(ip_value) + + ip_attention_probs = attn.get_attention_scores(query, ip_key, None) + self.attn_map = ip_attention_probs + ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) + ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states) + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + + + +class AttnProcessor2_0(nn.Module): + + def __init__(self, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0 or later.") + + def forward(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, 
temb) + + input_ndim = hidden_states.ndim + if input_ndim == 4: + b, c, h, w = hidden_states.shape + hidden_states = hidden_states.view(b, c, h * w).transpose(1, 2) + + # group norm + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + # q, k, v + query = attn.to_q(hidden_states) + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # reshape heads + bsz = hidden_states.shape[0] + head_dim = key.shape[-1] // attn.heads + query = query.view(bsz, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(bsz, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(bsz, -1, attn.heads, head_dim).transpose(1, 2) + + + if attention_mask is not None: + pass + out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) + + # merge heads + out = out.transpose(1, 2).reshape(bsz, -1, attn.heads * head_dim).to(query.dtype) + + # out proj + dropout + out = attn.to_out[1](attn.to_out[0](out)) + + if input_ndim == 4: + out = out.transpose(-1, -2).reshape(bsz, c, h, w) + + if attn.residual_connection: + out = out + residual + + out = out / attn.rescale_output_factor + return out + + +def prepare_mask(mask: PIL.Image.Image) -> torch.Tensor: + """ + mask: PIL.Image | np.ndarray | torch.Tensor + ๋ฐ˜ํ™˜: (B,1,H,W) float32 in {0,1} + """ + if isinstance(mask, torch.Tensor): + m = mask.clone() + if m.ndim == 2: # (H,W) -> (1,1,H,W) + m = m.unsqueeze(0).unsqueeze(0) + elif m.ndim == 3: # (1,H,W) or (B,H,W) -> (B,1,H,W) + if m.shape[0] == 1: + m = m.unsqueeze(0) + else: + m = m.unsqueeze(1) + if m.min() < 0 or m.max() > 1: + raise ValueError("Mask tensor must be in [0,1].") + m = (m >= 0.5).float() + return m + + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + arr = np.concatenate([np.array(m.convert("L"))[None, None, ...] for m in mask], axis=0).astype(np.float16) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + arr = np.concatenate([m[None, None, ...] 
for m in mask], axis=0).astype(np.float16) + if arr.max() > 1.0: + arr = arr / 255.0 + else: + raise TypeError("Unsupported mask type.") + + arr = (arr >= 0.5).astype(np.float16) + return torch.from_numpy(arr) + + +class IPAttnProcessor2_0(nn.Module): + def __init__(self, hidden_size: int, cross_attention_dim: int, scale: float = 1.0, num_tokens: int = 4, skip: bool = False): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("IPAttnProcessor2_0 requires PyTorch 2.0 or later.") + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.scale = float(scale) + self.num_tokens = int(num_tokens) + self.skip = bool(skip) + + proj_in = cross_attention_dim if cross_attention_dim is not None else hidden_size + self.to_k_ip = nn.Linear(proj_in, hidden_size, bias=False) + self.to_v_ip = nn.Linear(proj_in, hidden_size, bias=False) + + self.last_scale = None + self.last_skip = None + self.last_out_l2 = None + self.last_layer_name = None + self.last_group = None + self.last_ip_source = None + self.last_ip_mu = None + + def forward(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + if input_ndim == 4: + b, c, h, w = hidden_states.shape + hidden_states = hidden_states.view(b, c, h * w).transpose(1, 2) + else: + b = hidden_states.shape[0] + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + + if encoder_hidden_states is None: + base_enc = hidden_states + tail_ip_tokens = None + else: + if encoder_hidden_states.shape[1] >= self.num_tokens and self.num_tokens > 0: + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + base_enc = encoder_hidden_states[:, :end_pos, :] # ํ…์ŠคํŠธ(+๊ธฐํƒ€)๋งŒ + tail_ip_tokens = encoder_hidden_states[:, end_pos:, :] # ์ „์—ญ concat๋œ ์ด๋ฏธ์ง€ ํ† ํฐ + else: + base_enc = encoder_hidden_states + tail_ip_tokens = None + + if attn.norm_cross: + base_enc = attn.norm_encoder_hidden_states(base_enc) + + + group = getattr(self, "group", "off") # "content" / "style" / "off" + override = getattr(self, "ip_tokens_override", None) + override_uncond = getattr(self, "ip_tokens_override_uncond", None) + + ip_tokens = None + ip_source = "none" + + if group == "content": + ip_tokens = tail_ip_tokens + ip_source = "tail" if tail_ip_tokens is not None else "none" + + elif group == "style": + if override is not None: + N, T, D = override.shape + if override_uncond is None: + override_uncond = torch.zeros_like(override) + + if b == N: + ip_tokens = override + elif b == 2 * N: + ip_tokens = torch.cat([override_uncond, override], dim=0) + elif b % N == 0: + reps = b // N + ip_tokens = override.repeat(reps, 1, 1) + else: + ip_tokens = override.expand(b, -1, -1) + ip_source = "override" + else: + ip_tokens = None + ip_source = "none" + + else: + ip_tokens = None + ip_source = "none" + + + key = attn.to_k(base_enc) + value = attn.to_v(base_enc) + + head_dim = key.shape[-1] // attn.heads + query = query.view(b, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(b, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(b, -1, attn.heads, head_dim).transpose(1, 2) + + out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) + + with torch.no_grad(): + 
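+             # Routing recap for the block above: "content" layers consume the trailing IP
+             # tokens that the pipeline concatenated onto the prompt embeddings, "style"
+             # layers consume the per-call ip_tokens_override tensors set by the adapter,
+             # and every other layer leaves ip_tokens as None so nothing is injected below.
+             # The assignments that follow only record diagnostics for later inspection.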
self.last_group = group + self.last_ip_source = ip_source + if ip_tokens is None: + self.last_ip_mu = None + else: + mu = ip_tokens.detach().float().mean(dim=(0, 1)) # [D] + self.last_ip_mu = mu.cpu() + + do_inject = (not self.skip) and (ip_tokens is not None) and (ip_tokens.shape[1] == self.num_tokens) + if do_inject: + ip_k = self.to_k_ip(ip_tokens).view(b, -1, attn.heads, head_dim).transpose(1, 2) + ip_v = self.to_v_ip(ip_tokens).view(b, -1, attn.heads, head_dim).transpose(1, 2) + ip_out = F.scaled_dot_product_attention(query, ip_k, ip_v, attn_mask=None, dropout_p=0.0, is_causal=False) + out = out + float(self.scale) * ip_out + + with torch.no_grad(): + self.last_ip_out_l2 = ip_out.float().pow(2).sum(dim=tuple(range(1, ip_out.ndim))).sqrt().mean().item() + + out = out.transpose(1, 2).reshape(b, -1, attn.heads * head_dim).to(query.dtype) + + out = attn.to_out[1](attn.to_out[0](out)) + + if input_ndim == 4: + out = out.transpose(-1, -2).reshape(b, c, h, w) + + if attn.residual_connection: + out = out + residual + + out = out / attn.rescale_output_factor + + with torch.no_grad(): + self.last_scale = float(self.scale) + self.last_skip = bool(self.skip) + if isinstance(out, torch.Tensor): + if out.ndim >= 2: + self.last_out_l2 = out.float().pow(2).sum(dim=tuple(range(1, out.ndim))).sqrt().mean().item() + else: + self.last_out_l2 = out.float().pow(2).sum().sqrt().item() + else: + self.last_out_l2 = None + + return out + + + + + +## for controlnet +class CNAttnProcessor: + r""" + Default processor for performing attention-related computations. + """ + + def __init__(self, num_tokens=4): + self.num_tokens = num_tokens + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + else: + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states = encoder_hidden_states[:, :end_pos] # only use text + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + 
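+         # The ControlNet branch receives the same prompt embeddings as the UNet (text
+         # tokens followed by the IP image tokens); the slicing above drops the image
+         # tokens so ControlNet is conditioned on the text prompt only.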
+ return hidden_states + + +class CNAttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). + """ + + def __init__(self, num_tokens=4): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + self.num_tokens = num_tokens + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + else: + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states = encoder_hidden_states[:, :end_pos] # only use text + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states diff --git a/ip_adapter/.ipynb_checkpoints/ip_adapter-checkpoint.py b/ip_adapter/.ipynb_checkpoints/ip_adapter-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..cb827bcf9b4bb4bea1f3b01060a2fab67e3f16a5 --- /dev/null +++ b/ip_adapter/.ipynb_checkpoints/ip_adapter-checkpoint.py @@ -0,0 +1,704 @@ +# -*- coding: utf-8 -*- +import os +from typing import List +import torch +import torch.nn as nn +import torch.nn.functional as F +from diffusers import StableDiffusionPipeline +from diffusers.pipelines.controlnet import 
MultiControlNetModel +from PIL import Image, ImageOps +from safetensors import safe_open +from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, CLIPTokenizer, CLIPTextModelWithProjection +from .utils import is_torch2_available, get_generator + +if is_torch2_available(): + from .attention_processor import ( + AttnProcessor2_0 as AttnProcessor, + ) + from .attention_processor import ( + CNAttnProcessor2_0 as CNAttnProcessor, + ) + from .attention_processor import ( + IPAttnProcessor2_0 as IPAttnProcessor, + ) +else: + from .attention_processor import AttnProcessor, CNAttnProcessor, IPAttnProcessor +from .resampler import Resampler + +import numpy as np, random + + +import math +import torch + +import torch +import torch.nn.functional as F +import numpy as np +import cv2 +from PIL import Image + + + +def _cosine(a: torch.Tensor, b: torch.Tensor, eps: float = 1e-12) -> float: + a = a.float(); b = b.float() + na = a.norm(); nb = b.norm() + if na.item() < eps or nb.item() < eps: + return float("nan") + return float((a @ b) / (na * nb)) + +def verify_style_content_embeddings(adapter, sim_threshold: float = 0.999): + + content_fps, style_fps = [], [] + wrong_source = [] + + for name, proc in adapter.attn_procs.items(): + group = getattr(proc, "group", "off") + mu = getattr(proc, "last_ip_mu", None) + src = getattr(proc, "last_ip_source", None) + + if group not in ("content", "style"): + continue + if mu is None: + continue + + if group == "content" and src != "tail": + wrong_source.append((name, group, src)) + if group == "style" and src != "override": + wrong_source.append((name, group, src)) + + if group == "content": + content_fps.append((name, mu)) + else: + style_fps.append((name, mu)) + + print("\n[Verify] token source check") + if wrong_source: + for name, grp, src in wrong_source: + print(f" - !! 
{name}: group={grp} but last_ip_source={src}") + else: + print(" - OK: content uses 'tail', style uses 'override'") + + if not content_fps or not style_fps: + return False + + content_mu = torch.stack([mu for _, mu in content_fps], dim=0).mean(dim=0) + style_mu = torch.stack([mu for _, mu in style_fps], dim=0).mean(dim=0) + + cos = _cosine(content_mu, style_mu) + print(f"\n[Verify] group-wise cosine(content, style) = {cos:.6f}") + + print("\n[Verify] layer-wise cosine to content-mean (lower is more different)") + for name, mu in style_fps: + cs = _cosine(content_mu, mu) + print(f" - {name:<60} cos={cs:.6f}") + + ok = (not wrong_source) and (not math.isnan(cos)) and (cos < sim_threshold) + + +def _split_bounds(size, parts): + bounds = np.linspace(0, size, parts + 1) + return [int(round(b)) for b in bounds] + + +class ImageProjModel(torch.nn.Module): + """Projection Model""" + + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4): + super().__init__() + + self.generator = None + self.cross_attention_dim = cross_attention_dim + self.clip_extra_context_tokens = clip_extra_context_tokens + self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim) + self.norm = torch.nn.LayerNorm(cross_attention_dim) + + def forward(self, image_embeds): + embeds = image_embeds + clip_extra_context_tokens = self.proj(embeds).reshape( + -1, self.clip_extra_context_tokens, self.cross_attention_dim + ) + clip_extra_context_tokens = self.norm(clip_extra_context_tokens) + return clip_extra_context_tokens + + +class MLPProjModel(torch.nn.Module): + """SD model with image prompt""" + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024): + super().__init__() + + self.proj = torch.nn.Sequential( + torch.nn.Linear(clip_embeddings_dim, clip_embeddings_dim), + torch.nn.GELU(), + torch.nn.Linear(clip_embeddings_dim, cross_attention_dim), + torch.nn.LayerNorm(cross_attention_dim) + ) + + def forward(self, image_embeds): + clip_extra_context_tokens = self.proj(image_embeds) + return clip_extra_context_tokens + +class IPAdapter: + def __init__( + self, + sd_pipe, + image_encoder_path, + ip_ckpt, + device, + mask=None, + sketch=None, + num_tokens=4, + target_blocks=None, + # NEW: block groups & scales + content_blocks=None, + style_blocks=None, + content_scale: float = 0.5, + style_scale: float = 0.5, + garment_images = None, + garment_mask = None, + ): + self.device = device + self.image_encoder_path = image_encoder_path + self.ip_ckpt = ip_ckpt + self.num_tokens = num_tokens + self.target_blocks = target_blocks or [] + + self.pipe = sd_pipe.to(self.device) + self.mask = mask + self.sketch = sketch + + self.garment_images = garment_images + self.garment_mask = garment_mask + + self.content_blocks = [ + "down_blocks.2.attentions.1", + ] + self.style_blocks = [ + "up_blocks.0.attentions.1", + ] + self.content_scale = float(content_scale) + self.style_scale = float(style_scale) + + self.attn_procs = {} + + self.set_ip_adapter() + + self.clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") + self.text_encoder = CLIPTextModelWithProjection.from_pretrained( + "openai/clip-vit-large-patch14" + ).to(self.device) + + self.image_encoder = CLIPVisionModelWithProjection.from_pretrained( + self.image_encoder_path + ).to(self.device, dtype=torch.float32) + self.clip_image_processor = CLIPImageProcessor() + + self.image_proj_model = self.init_proj() + self.load_ip_adapter() + + # --- utils --- + def 
_parse_block_id(self, name: str, prefix: str) -> int: + # "up_blocks.0.attentions.1.processor" -> 0 + return int(name[len(prefix):].split(".")[0]) + + def init_proj(self): + image_proj_model = ImageProjModel( + cross_attention_dim=self.pipe.unet.config.cross_attention_dim, + clip_embeddings_dim=self.image_encoder.config.projection_dim, + clip_extra_context_tokens=self.num_tokens, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + + def _apply_group_scales(self): + for name, proc in self.attn_procs.items(): + if not isinstance(proc, IPAttnProcessor): + continue + if any(b in name for b in self.content_blocks): + proc.skip = False + proc.scale = float(self.content_scale) + elif any(b in name for b in self.style_blocks): + proc.skip = False + proc.scale = float(self.style_scale) + else: + proc.skip = True + + + def _which_group(self, name: str) -> str: + if any(b in name for b in self.content_blocks): + return "content" + if any(b in name for b in self.style_blocks): + return "style" + return "off" + + def _get_proc_tokens(self, proc): + for key in ("image_prompt_embeds", "ip_tokens", "image_prompts"): + t = getattr(proc, key, None) + if t is not None: + return t + return None + + def print_block_scales(self, verbose: bool = True): + rows = [] + for name, proc in self.attn_procs.items(): + scale = getattr(proc, "scale", None) + skip = getattr(proc, "skip", None) + group = getattr(proc, "group", "self" if name.endswith("attn1.processor") else "off") + rows.append((name, group, scale, skip)) + + def _key(t): + n = t[0] + if n.startswith("down_blocks"): p = 0 + elif n.startswith("mid_block"): p = 1 + elif n.startswith("up_blocks"): p = 2 + else: p = 3 + # ์ˆซ์ž ์ถ”์ถœ + import re + m = re.findall(r"\d+", n) + idx = tuple(int(x) for x in m) if m else (999,) + return (p, idx, n) + + rows.sort(key=_key) + + if verbose: + print("\n[IPAdapter] Block-scale report") + for name, group, scale, skip in rows: + tag = "ATTN2" if name.endswith("attn2.processor") else "ATTN1" + print(f" - {name:<60} [{tag}] group={group:<7} scale={scale} skip={skip}") + + return rows + + def set_ip_adapter(self): + unet = self.pipe.unet + attn_procs = {} + self.attn_procs = {} + + for name in unet.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim + + if name.startswith("mid_block"): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = self._parse_block_id(name, "up_blocks.") + hidden_size = list(reversed(unet.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = self._parse_block_id(name, "down_blocks.") + hidden_size = unet.config.block_out_channels[block_id] + else: + hidden_size = unet.config.block_out_channels[0] + + if cross_attention_dim is None: + proc = AttnProcessor() + setattr(proc, "layer_name", name) + else: + is_content = any(b in name for b in self.content_blocks) + is_style = any(b in name for b in self.style_blocks) + selected = is_content or is_style or any(b in name for b in self.target_blocks) + + init_skip = not selected + init_scale = 1.0 + if is_content: + init_scale = float(self.content_scale) + elif is_style: + init_scale = float(self.style_scale) + + proc = IPAttnProcessor( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + scale=init_scale, + num_tokens=self.num_tokens, + skip=init_skip, + ).to(self.device, dtype=torch.float32) + + setattr(proc, "layer_name", name) + setattr(proc, "group", "content" if 
is_content else ("style" if is_style else "off")) + + attn_procs[name] = proc + self.attn_procs[name] = proc + + unet.set_attn_processor(attn_procs) + + if hasattr(self.pipe, "controlnet"): + if isinstance(self.pipe.controlnet, MultiControlNetModel): + for controlnet in self.pipe.controlnet.nets: + controlnet.set_attn_processor(CNAttnProcessor(num_tokens=self.num_tokens)) + else: + self.pipe.controlnet.set_attn_processor(CNAttnProcessor(num_tokens=self.num_tokens)) + + def load_ip_adapter(self): + if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors": + state_dict = {"image_proj": {}, "ip_adapter": {}} + with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f: + for key in f.keys(): + if key.startswith("image_proj."): + state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key) + elif key.startswith("ip_adapter."): + state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key) + else: + state_dict = torch.load(self.ip_ckpt, map_location="cpu") + self.image_proj_model.load_state_dict(state_dict["image_proj"]) + ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values()) + ip_layers.load_state_dict(state_dict["ip_adapter"], strict=False) + + + @torch.inference_mode() + def get_image_embeds(self, pil_image=None, clip_image_embeds=None, content_prompt_embeds=None): + if pil_image is not None: + if isinstance(pil_image, Image.Image): + pil_image = [pil_image] + clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values + clip_image_embeds = self.image_encoder(clip_image.to(self.device, dtype=torch.float32)).image_embeds + else: + clip_image_embeds = clip_image_embeds.to(self.device, dtype=torch.float32) + + image_prompt_embeds = self.image_proj_model(clip_image_embeds) # [B, Ni, D] = [1,4,2048] + + uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(clip_image_embeds)) + return image_prompt_embeds, uncond_image_prompt_embeds + + + def generate( + self, + pil_image=None, + clip_image_embeds=None, + prompt=None, + negative_prompt=None, + scale=1.0, + num_samples=4, + seed=None, + guidance_scale=7.5, + num_inference_steps=30, + neg_content_emb=None, + **kwargs, + ): + if scale is not None: + self.set_scale(scale) + + if pil_image is not None: + num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image) + else: + num_prompts = clip_image_embeds.size(0) + + if prompt is None: + prompt = "best quality, high quality" + if negative_prompt is None: + negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" + + if not isinstance(prompt, List): + prompt = [prompt] * num_prompts + if not isinstance(negative_prompt, List): + negative_prompt = [negative_prompt] * num_prompts + + image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds( + pil_image=pil_image, clip_image_embeds=clip_image_embeds, content_prompt_embeds=neg_content_emb + ) + bs_embed, seq_len, _ = image_prompt_embeds.shape + image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1) + image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + + with torch.inference_mode(): + prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt( + prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + 
negative_prompt=negative_prompt, + ) + prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1) + negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1) + + generator = get_generator(seed, self.device) + + images = self.pipe( + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + generator=generator, + **kwargs, + ).images + + return images + + +class IPAdapterXL(IPAdapter): + """SDXL""" + + def generate( + self, + pil_image, + prompt=None, + shape_prompt=None, + negative_prompt=None, + scale=1.0, + num_samples=4, + seed=None, + num_inference_steps=30, + neg_content_emb=None, + neg_content_prompt=None, + neg_content_scale=1.0, + **kwargs, + ): + if scale is not None: + self.set_scale(scale) + + + num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image) + + if prompt is None: + prompt = "best quality, high quality" + if negative_prompt is None: + negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" + if not isinstance(prompt, List): + prompt = [prompt] * num_prompts + if not isinstance(negative_prompt, List): + negative_prompt = [negative_prompt] * num_prompts + + + if neg_content_emb is None: + if neg_content_prompt is not None: + with torch.inference_mode(): + ( + prompt_embeds_, # [B, 77, 2048] + negative_prompt_embeds_, + pooled_prompt_embeds_, # [B, 1280] + negative_pooled_prompt_embeds_, + ) = self.pipe.encode_prompt( + neg_content_prompt, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + pooled_prompt_embeds_ *= neg_content_scale + else: + pooled_prompt_embeds_ = neg_content_emb + else: + pooled_prompt_embeds_ = None + + content_ip_tokens, uncond_content_ip_tokens = self.get_image_embeds( + pil_image=pil_image, + content_prompt_embeds=pooled_prompt_embeds_ + ) + bs_embed, seq_len, _ = content_ip_tokens.shape + content_ip_tokens = content_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + uncond_content_ip_tokens = uncond_content_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + + style_ip_tokens, uncond_style_ip_tokens = self.get_image_embeds( + pil_image=pil_image, + content_prompt_embeds=pooled_prompt_embeds_ + ) + + bs_embed, seq_len, _ = style_ip_tokens.shape + style_ip_tokens = style_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + style_ip_tokens_uncond = uncond_style_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + + with torch.inference_mode(): + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + # โ˜… ์—ฌ๊ธฐ์„œ "์ฝ˜ํ…์ธ " IP ํ† ํฐ๋งŒ ๋ถ™์ธ๋‹ค + prompt_embeds = torch.cat([prompt_embeds, content_ip_tokens], dim=1) + negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_content_ip_tokens], dim=1) + + with torch.inference_mode(): + ( + shape_prompt_embeds, + shape_negative_prompt_embeds, + shape_pooled_prompt_embeds, + shape_negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + shape_prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + shape_prompt_embeds = 
torch.cat([shape_prompt_embeds, content_ip_tokens], dim=1) + shape_negative_prompt_embeds = torch.cat([shape_negative_prompt_embeds, uncond_content_ip_tokens], dim=1) + + + for name, proc in self.attn_procs.items(): + if getattr(proc, "group", "off") == "style": + proc.ip_tokens_override = style_ip_tokens.to(self.device, dtype=torch.float32) + proc.ip_tokens_override_uncond = style_ip_tokens_uncond.to(self.device, dtype=torch.float32) + else: + if hasattr(proc, "ip_tokens_override"): + proc.ip_tokens_override = None + if hasattr(proc, "ip_tokens_override_uncond"): + proc.ip_tokens_override_uncond = None + + + self.generator = get_generator(seed, self.device) + + images = self.pipe( + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + shape_prompt_embeds=shape_prompt_embeds, + shape_negative_prompt_embeds=shape_negative_prompt_embeds, + shape_pooled_prompt_embeds=shape_pooled_prompt_embeds, + shape_negative_pooled_prompt_embeds=shape_negative_pooled_prompt_embeds, + num_inference_steps=num_inference_steps, + generator=self.generator, + mask_image=self.mask, + sketch_image=self.sketch, + garment_images=self.garment_images, + garment_mask=self.garment_mask, + **kwargs, + ).images + + for name, proc in self.attn_procs.items(): + if hasattr(proc, "ip_tokens_override"): + proc.ip_tokens_override = None + if hasattr(proc, "ip_tokens_override_uncond"): + proc.ip_tokens_override_uncond = None + + return images + + + + +class IPAdapterPlus(IPAdapter): + """IP-Adapter with fine-grained features""" + + def init_proj(self): + image_proj_model = Resampler( + dim=self.pipe.unet.config.cross_attention_dim, + depth=4, + dim_head=64, + heads=12, + num_queries=self.num_tokens, + embedding_dim=self.image_encoder.config.hidden_size, + output_dim=self.pipe.unet.config.cross_attention_dim, + ff_mult=4, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + @torch.inference_mode() + def get_image_embeds(self, pil_image=None, clip_image_embeds=None): + if isinstance(pil_image, Image.Image): + pil_image = [pil_image] + clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values + clip_image = clip_image.to(self.device, dtype=torch.float32) + clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2] + image_prompt_embeds = self.image_proj_model(clip_image_embeds) + uncond_clip_image_embeds = self.image_encoder( + torch.zeros_like(clip_image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds) + return image_prompt_embeds, uncond_image_prompt_embeds + + +class IPAdapterFull(IPAdapterPlus): + """IP-Adapter with full features""" + + def init_proj(self): + image_proj_model = MLPProjModel( + cross_attention_dim=self.pipe.unet.config.cross_attention_dim, + clip_embeddings_dim=self.image_encoder.config.hidden_size, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + +class IPAdapterPlusXL(IPAdapter): + """SDXL""" + + def init_proj(self): + image_proj_model = Resampler( + dim=1280, + depth=4, + dim_head=64, + heads=20, + num_queries=self.num_tokens, + embedding_dim=self.image_encoder.config.hidden_size, + output_dim=self.pipe.unet.config.cross_attention_dim, + ff_mult=4, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + @torch.inference_mode() + def get_image_embeds(self, pil_image): + if 
isinstance(pil_image, Image.Image): + pil_image = [pil_image] + clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values + clip_image = clip_image.to(self.device, dtype=torch.float32) + clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2] + image_prompt_embeds = self.image_proj_model(clip_image_embeds) + uncond_clip_image_embeds = self.image_encoder( + torch.zeros_like(clip_image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds) + return image_prompt_embeds, uncond_image_prompt_embeds + + def generate( + self, + pil_image, + prompt=None, + negative_prompt=None, + scale=1.0, + num_samples=4, + seed=None, + num_inference_steps=30, + **kwargs, + ): + self.set_scale(scale) + + num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image) + + if prompt is None: + prompt = "best quality, high quality" + if negative_prompt is None: + negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" + + if not isinstance(prompt, List): + prompt = [prompt] * num_prompts + if not isinstance(negative_prompt, List): + negative_prompt = [negative_prompt] * num_prompts + + image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(pil_image) + bs_embed, seq_len, _ = image_prompt_embeds.shape + image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1) + image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + + with torch.inference_mode(): + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1) + negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1) + + generator = get_generator(seed, self.device) + + images = self.pipe( + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + num_inference_steps=num_inference_steps, + generator=generator, + **kwargs, + ).images + + return images diff --git a/ip_adapter/__init__.py b/ip_adapter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3b1f1ff4e54e93ada7e85abc0f6687c5ecd3a338 --- /dev/null +++ b/ip_adapter/__init__.py @@ -0,0 +1,9 @@ +from .ip_adapter import IPAdapter, IPAdapterPlus, IPAdapterPlusXL, IPAdapterXL, IPAdapterFull + +__all__ = [ + "IPAdapter", + "IPAdapterPlus", + "IPAdapterPlusXL", + "IPAdapterXL", + "IPAdapterFull", +] diff --git a/ip_adapter/__pycache__/__init__.cpython-310.pyc b/ip_adapter/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54857ea87629cda68ae2af64a5f4bea8fdbbaec5 Binary files /dev/null and b/ip_adapter/__pycache__/__init__.cpython-310.pyc differ diff --git a/ip_adapter/__pycache__/__init__.cpython-38.pyc b/ip_adapter/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..578468704e2480d988bae725d9e09207bb8f9ecd Binary files 
/dev/null and b/ip_adapter/__pycache__/__init__.cpython-38.pyc differ diff --git a/ip_adapter/__pycache__/attention_processor.cpython-310.pyc b/ip_adapter/__pycache__/attention_processor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..114adb81c259def9bb616c442c83223163a64426 Binary files /dev/null and b/ip_adapter/__pycache__/attention_processor.cpython-310.pyc differ diff --git a/ip_adapter/__pycache__/attention_processor.cpython-38.pyc b/ip_adapter/__pycache__/attention_processor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..444e935a19d704f1104feaf39fc5a8977b6b7cf6 Binary files /dev/null and b/ip_adapter/__pycache__/attention_processor.cpython-38.pyc differ diff --git a/ip_adapter/__pycache__/ip_adapter.cpython-310.pyc b/ip_adapter/__pycache__/ip_adapter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41a10bbff05b65b27b82b6f61f98d52eb0e1f797 Binary files /dev/null and b/ip_adapter/__pycache__/ip_adapter.cpython-310.pyc differ diff --git a/ip_adapter/__pycache__/ip_adapter.cpython-38.pyc b/ip_adapter/__pycache__/ip_adapter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3143c59824f3a87eff05795af7868de7b890dc4b Binary files /dev/null and b/ip_adapter/__pycache__/ip_adapter.cpython-38.pyc differ diff --git a/ip_adapter/__pycache__/resampler.cpython-310.pyc b/ip_adapter/__pycache__/resampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..265f530adef1685bde77e70a1df88c55bbe27ad2 Binary files /dev/null and b/ip_adapter/__pycache__/resampler.cpython-310.pyc differ diff --git a/ip_adapter/__pycache__/resampler.cpython-38.pyc b/ip_adapter/__pycache__/resampler.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d0c0664045e9ae0d062da5ecd9701de32e0d7e1 Binary files /dev/null and b/ip_adapter/__pycache__/resampler.cpython-38.pyc differ diff --git a/ip_adapter/__pycache__/utils.cpython-310.pyc b/ip_adapter/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b2edbd2537032aa2aa4d552b0050fc4dd91f1c9 Binary files /dev/null and b/ip_adapter/__pycache__/utils.cpython-310.pyc differ diff --git a/ip_adapter/__pycache__/utils.cpython-38.pyc b/ip_adapter/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ea1da1a84dc98b8d67bce85948e29bf8a3289ec Binary files /dev/null and b/ip_adapter/__pycache__/utils.cpython-38.pyc differ diff --git a/ip_adapter/attention_processor.py b/ip_adapter/attention_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..49a315b9a675e61554f2ec9c80a7db2602ad28de --- /dev/null +++ b/ip_adapter/attention_processor.py @@ -0,0 +1,597 @@ +# -*- coding: utf-8 -*- +# modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision.transforms as transforms +import PIL.Image +import numpy as np +from typing import Optional + +class AttnProcessor(nn.Module): + r""" + Default processor for performing attention-related computations. 
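+ 
+     Computes standard softmax attention, ``softmax(Q K^T / sqrt(d)) V``, over the
+     self- or cross-attention context and applies the output projection; no
+     image-prompt tokens are involved. ``set_ip_adapter()`` installs this processor
+     (or its PyTorch 2.0 counterpart, ``AttnProcessor2_0``) on the UNet
+     self-attention layers, i.e. wherever ``cross_attention_dim`` is ``None``.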
+ """ + + def __init__( + self, + hidden_size=None, + cross_attention_dim=None, + ): + super().__init__() + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ): + + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class IPAttnProcessor(nn.Module): + r""" + Attention processor for IP-Adapater. + Args: + hidden_size (`int`): + The hidden size of the attention layer. + cross_attention_dim (`int`): + The number of channels in the `encoder_hidden_states`. + scale (`float`, defaults to 1.0): + the weight scale of image prompt. + num_tokens (`int`, defaults to 4 when do ip_adapter_plus it should be 16): + The context length of the image features. 
+ """ + + def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, skip=False): + super().__init__() + + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.scale = scale + self.num_tokens = num_tokens + self.skip = skip + + + self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ): + residual = hidden_states + + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + else: + # get encoder_hidden_states, ip_hidden_states + + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states, ip_hidden_states = ( + encoder_hidden_states[:, :end_pos, :], + encoder_hidden_states[:, end_pos:, :], + ) + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + if not self.skip: + # for ip-adapter + ip_key = self.to_k_ip(ip_hidden_states) + ip_value = self.to_v_ip(ip_hidden_states) + + + ip_key = attn.head_to_batch_dim(ip_key) + ip_value = attn.head_to_batch_dim(ip_value) + + ip_attention_probs = attn.get_attention_scores(query, ip_key, None) + self.attn_map = ip_attention_probs + ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) + ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states) + + hidden_states = hidden_states + self.scale * ip_hidden_states + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + + + +class AttnProcessor2_0(nn.Module): + + def __init__(self, hidden_size: Optional[int] = None, cross_attention_dim: Optional[int] = None): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0 or later.") + + def forward(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, 
temb) + + input_ndim = hidden_states.ndim + if input_ndim == 4: + b, c, h, w = hidden_states.shape + hidden_states = hidden_states.view(b, c, h * w).transpose(1, 2) + + # group norm + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + # q, k, v + query = attn.to_q(hidden_states) + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + # reshape heads + bsz = hidden_states.shape[0] + head_dim = key.shape[-1] // attn.heads + query = query.view(bsz, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(bsz, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(bsz, -1, attn.heads, head_dim).transpose(1, 2) + + + if attention_mask is not None: + pass + out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) + + # merge heads + out = out.transpose(1, 2).reshape(bsz, -1, attn.heads * head_dim).to(query.dtype) + + # out proj + dropout + out = attn.to_out[1](attn.to_out[0](out)) + + if input_ndim == 4: + out = out.transpose(-1, -2).reshape(bsz, c, h, w) + + if attn.residual_connection: + out = out + residual + + out = out / attn.rescale_output_factor + return out + + +def prepare_mask(mask: PIL.Image.Image) -> torch.Tensor: + """ + mask: PIL.Image | np.ndarray | torch.Tensor + ๋ฐ˜ํ™˜: (B,1,H,W) float32 in {0,1} + """ + if isinstance(mask, torch.Tensor): + m = mask.clone() + if m.ndim == 2: # (H,W) -> (1,1,H,W) + m = m.unsqueeze(0).unsqueeze(0) + elif m.ndim == 3: # (1,H,W) or (B,H,W) -> (B,1,H,W) + if m.shape[0] == 1: + m = m.unsqueeze(0) + else: + m = m.unsqueeze(1) + if m.min() < 0 or m.max() > 1: + raise ValueError("Mask tensor must be in [0,1].") + m = (m >= 0.5).float() + return m + + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + arr = np.concatenate([np.array(m.convert("L"))[None, None, ...] for m in mask], axis=0).astype(np.float16) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + arr = np.concatenate([m[None, None, ...] 
for m in mask], axis=0).astype(np.float16) + if arr.max() > 1.0: + arr = arr / 255.0 + else: + raise TypeError("Unsupported mask type.") + + arr = (arr >= 0.5).astype(np.float16) + return torch.from_numpy(arr) + + +class IPAttnProcessor2_0(nn.Module): + def __init__(self, hidden_size: int, cross_attention_dim: int, scale: float = 1.0, num_tokens: int = 4, skip: bool = False): + super().__init__() + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("IPAttnProcessor2_0 requires PyTorch 2.0 or later.") + self.hidden_size = hidden_size + self.cross_attention_dim = cross_attention_dim + self.scale = float(scale) + self.num_tokens = int(num_tokens) + self.skip = bool(skip) + + proj_in = cross_attention_dim if cross_attention_dim is not None else hidden_size + self.to_k_ip = nn.Linear(proj_in, hidden_size, bias=False) + self.to_v_ip = nn.Linear(proj_in, hidden_size, bias=False) + + self.last_scale = None + self.last_skip = None + self.last_out_l2 = None + self.last_layer_name = None + self.last_group = None + self.last_ip_source = None + self.last_ip_mu = None + + def forward(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + if input_ndim == 4: + b, c, h, w = hidden_states.shape + hidden_states = hidden_states.view(b, c, h * w).transpose(1, 2) + else: + b = hidden_states.shape[0] + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + + if encoder_hidden_states is None: + base_enc = hidden_states + tail_ip_tokens = None + else: + if encoder_hidden_states.shape[1] >= self.num_tokens and self.num_tokens > 0: + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + base_enc = encoder_hidden_states[:, :end_pos, :] # ํ…์ŠคํŠธ(+๊ธฐํƒ€)๋งŒ + tail_ip_tokens = encoder_hidden_states[:, end_pos:, :] # ์ „์—ญ concat๋œ ์ด๋ฏธ์ง€ ํ† ํฐ + else: + base_enc = encoder_hidden_states + tail_ip_tokens = None + + if attn.norm_cross: + base_enc = attn.norm_encoder_hidden_states(base_enc) + + + group = getattr(self, "group", "off") # "content" / "style" / "off" + override = getattr(self, "ip_tokens_override", None) + override_uncond = getattr(self, "ip_tokens_override_uncond", None) + + ip_tokens = None + ip_source = "none" + + if group == "content": + ip_tokens = tail_ip_tokens + ip_source = "tail" if tail_ip_tokens is not None else "none" + + elif group == "style": + if override is not None: + N, T, D = override.shape + if override_uncond is None: + override_uncond = torch.zeros_like(override) + + if b == N: + ip_tokens = override + elif b == 2 * N: + ip_tokens = torch.cat([override_uncond, override], dim=0) + elif b % N == 0: + reps = b // N + ip_tokens = override.repeat(reps, 1, 1) + else: + ip_tokens = override.expand(b, -1, -1) + ip_source = "override" + else: + ip_tokens = None + ip_source = "none" + + else: + ip_tokens = None + ip_source = "none" + + + key = attn.to_k(base_enc) + value = attn.to_v(base_enc) + + head_dim = key.shape[-1] // attn.heads + query = query.view(b, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(b, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(b, -1, attn.heads, head_dim).transpose(1, 2) + + out = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) + + with torch.no_grad(): + 
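+             # The attributes recorded below (last_group, last_ip_source, last_ip_mu) are
+             # diagnostics only; verify_style_content_embeddings() in ip_adapter.py reads
+             # them after a pass to confirm that content layers used the tail tokens and
+             # style layers used the override tokens.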
self.last_group = group + self.last_ip_source = ip_source + if ip_tokens is None: + self.last_ip_mu = None + else: + mu = ip_tokens.detach().float().mean(dim=(0, 1)) # [D] + self.last_ip_mu = mu.cpu() + + do_inject = (not self.skip) and (ip_tokens is not None) and (ip_tokens.shape[1] == self.num_tokens) + if do_inject: + ip_k = self.to_k_ip(ip_tokens).view(b, -1, attn.heads, head_dim).transpose(1, 2) + ip_v = self.to_v_ip(ip_tokens).view(b, -1, attn.heads, head_dim).transpose(1, 2) + ip_out = F.scaled_dot_product_attention(query, ip_k, ip_v, attn_mask=None, dropout_p=0.0, is_causal=False) + out = out + float(self.scale) * ip_out + + with torch.no_grad(): + self.last_ip_out_l2 = ip_out.float().pow(2).sum(dim=tuple(range(1, ip_out.ndim))).sqrt().mean().item() + + out = out.transpose(1, 2).reshape(b, -1, attn.heads * head_dim).to(query.dtype) + + out = attn.to_out[1](attn.to_out[0](out)) + + if input_ndim == 4: + out = out.transpose(-1, -2).reshape(b, c, h, w) + + if attn.residual_connection: + out = out + residual + + out = out / attn.rescale_output_factor + + with torch.no_grad(): + self.last_scale = float(self.scale) + self.last_skip = bool(self.skip) + if isinstance(out, torch.Tensor): + if out.ndim >= 2: + self.last_out_l2 = out.float().pow(2).sum(dim=tuple(range(1, out.ndim))).sqrt().mean().item() + else: + self.last_out_l2 = out.float().pow(2).sum().sqrt().item() + else: + self.last_out_l2 = None + + return out + + + + + +## for controlnet +class CNAttnProcessor: + r""" + Default processor for performing attention-related computations. + """ + + def __init__(self, num_tokens=4): + self.num_tokens = num_tokens + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + else: + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states = encoder_hidden_states[:, :end_pos] # only use text + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + 
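+         # num_tokens must match the adapter's image-token count (4 for the base
+         # ImageProjModel, 16 is typical for the "plus" Resampler variants); otherwise
+         # the wrong slice is removed from encoder_hidden_states before attention.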
+ return hidden_states + + +class CNAttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). + """ + + def __init__(self, num_tokens=4): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + self.num_tokens = num_tokens + + def __call__( + self, + attn, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + temb=None, + ): + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + else: + end_pos = encoder_hidden_states.shape[1] - self.num_tokens + encoder_hidden_states = encoder_hidden_states[:, :end_pos] # only use text + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + # TODO: add support for attn.scale when we move to Torch 2.1 + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states diff --git a/ip_adapter/ip_adapter.py b/ip_adapter/ip_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..cb827bcf9b4bb4bea1f3b01060a2fab67e3f16a5 --- /dev/null +++ b/ip_adapter/ip_adapter.py @@ -0,0 +1,704 @@ +# -*- coding: utf-8 -*- +import os +from typing import List +import torch +import torch.nn as nn +import torch.nn.functional as F +from diffusers import StableDiffusionPipeline +from diffusers.pipelines.controlnet import MultiControlNetModel +from PIL import Image, ImageOps +from safetensors import safe_open +from 
transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, CLIPTokenizer, CLIPTextModelWithProjection +from .utils import is_torch2_available, get_generator + +if is_torch2_available(): + from .attention_processor import ( + AttnProcessor2_0 as AttnProcessor, + ) + from .attention_processor import ( + CNAttnProcessor2_0 as CNAttnProcessor, + ) + from .attention_processor import ( + IPAttnProcessor2_0 as IPAttnProcessor, + ) +else: + from .attention_processor import AttnProcessor, CNAttnProcessor, IPAttnProcessor +from .resampler import Resampler + +import numpy as np, random + + +import math +import torch + +import torch +import torch.nn.functional as F +import numpy as np +import cv2 +from PIL import Image + + + +def _cosine(a: torch.Tensor, b: torch.Tensor, eps: float = 1e-12) -> float: + a = a.float(); b = b.float() + na = a.norm(); nb = b.norm() + if na.item() < eps or nb.item() < eps: + return float("nan") + return float((a @ b) / (na * nb)) + +def verify_style_content_embeddings(adapter, sim_threshold: float = 0.999): + + content_fps, style_fps = [], [] + wrong_source = [] + + for name, proc in adapter.attn_procs.items(): + group = getattr(proc, "group", "off") + mu = getattr(proc, "last_ip_mu", None) + src = getattr(proc, "last_ip_source", None) + + if group not in ("content", "style"): + continue + if mu is None: + continue + + if group == "content" and src != "tail": + wrong_source.append((name, group, src)) + if group == "style" and src != "override": + wrong_source.append((name, group, src)) + + if group == "content": + content_fps.append((name, mu)) + else: + style_fps.append((name, mu)) + + print("\n[Verify] token source check") + if wrong_source: + for name, grp, src in wrong_source: + print(f" - !! {name}: group={grp} but last_ip_source={src}") + else: + print(" - OK: content uses 'tail', style uses 'override'") + + if not content_fps or not style_fps: + return False + + content_mu = torch.stack([mu for _, mu in content_fps], dim=0).mean(dim=0) + style_mu = torch.stack([mu for _, mu in style_fps], dim=0).mean(dim=0) + + cos = _cosine(content_mu, style_mu) + print(f"\n[Verify] group-wise cosine(content, style) = {cos:.6f}") + + print("\n[Verify] layer-wise cosine to content-mean (lower is more different)") + for name, mu in style_fps: + cs = _cosine(content_mu, mu) + print(f" - {name:<60} cos={cs:.6f}") + + ok = (not wrong_source) and (not math.isnan(cos)) and (cos < sim_threshold) + + +def _split_bounds(size, parts): + bounds = np.linspace(0, size, parts + 1) + return [int(round(b)) for b in bounds] + + +class ImageProjModel(torch.nn.Module): + """Projection Model""" + + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4): + super().__init__() + + self.generator = None + self.cross_attention_dim = cross_attention_dim + self.clip_extra_context_tokens = clip_extra_context_tokens + self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim) + self.norm = torch.nn.LayerNorm(cross_attention_dim) + + def forward(self, image_embeds): + embeds = image_embeds + clip_extra_context_tokens = self.proj(embeds).reshape( + -1, self.clip_extra_context_tokens, self.cross_attention_dim + ) + clip_extra_context_tokens = self.norm(clip_extra_context_tokens) + return clip_extra_context_tokens + + +class MLPProjModel(torch.nn.Module): + """SD model with image prompt""" + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024): + super().__init__() + + self.proj = 
torch.nn.Sequential( + torch.nn.Linear(clip_embeddings_dim, clip_embeddings_dim), + torch.nn.GELU(), + torch.nn.Linear(clip_embeddings_dim, cross_attention_dim), + torch.nn.LayerNorm(cross_attention_dim) + ) + + def forward(self, image_embeds): + clip_extra_context_tokens = self.proj(image_embeds) + return clip_extra_context_tokens + +class IPAdapter: + def __init__( + self, + sd_pipe, + image_encoder_path, + ip_ckpt, + device, + mask=None, + sketch=None, + num_tokens=4, + target_blocks=None, + # NEW: block groups & scales + content_blocks=None, + style_blocks=None, + content_scale: float = 0.5, + style_scale: float = 0.5, + garment_images = None, + garment_mask = None, + ): + self.device = device + self.image_encoder_path = image_encoder_path + self.ip_ckpt = ip_ckpt + self.num_tokens = num_tokens + self.target_blocks = target_blocks or [] + + self.pipe = sd_pipe.to(self.device) + self.mask = mask + self.sketch = sketch + + self.garment_images = garment_images + self.garment_mask = garment_mask + + self.content_blocks = [ + "down_blocks.2.attentions.1", + ] + self.style_blocks = [ + "up_blocks.0.attentions.1", + ] + self.content_scale = float(content_scale) + self.style_scale = float(style_scale) + + self.attn_procs = {} + + self.set_ip_adapter() + + self.clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") + self.text_encoder = CLIPTextModelWithProjection.from_pretrained( + "openai/clip-vit-large-patch14" + ).to(self.device) + + self.image_encoder = CLIPVisionModelWithProjection.from_pretrained( + self.image_encoder_path + ).to(self.device, dtype=torch.float32) + self.clip_image_processor = CLIPImageProcessor() + + self.image_proj_model = self.init_proj() + self.load_ip_adapter() + + # --- utils --- + def _parse_block_id(self, name: str, prefix: str) -> int: + # "up_blocks.0.attentions.1.processor" -> 0 + return int(name[len(prefix):].split(".")[0]) + + def init_proj(self): + image_proj_model = ImageProjModel( + cross_attention_dim=self.pipe.unet.config.cross_attention_dim, + clip_embeddings_dim=self.image_encoder.config.projection_dim, + clip_extra_context_tokens=self.num_tokens, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + + def _apply_group_scales(self): + for name, proc in self.attn_procs.items(): + if not isinstance(proc, IPAttnProcessor): + continue + if any(b in name for b in self.content_blocks): + proc.skip = False + proc.scale = float(self.content_scale) + elif any(b in name for b in self.style_blocks): + proc.skip = False + proc.scale = float(self.style_scale) + else: + proc.skip = True + + + def _which_group(self, name: str) -> str: + if any(b in name for b in self.content_blocks): + return "content" + if any(b in name for b in self.style_blocks): + return "style" + return "off" + + def _get_proc_tokens(self, proc): + for key in ("image_prompt_embeds", "ip_tokens", "image_prompts"): + t = getattr(proc, key, None) + if t is not None: + return t + return None + + def print_block_scales(self, verbose: bool = True): + rows = [] + for name, proc in self.attn_procs.items(): + scale = getattr(proc, "scale", None) + skip = getattr(proc, "skip", None) + group = getattr(proc, "group", "self" if name.endswith("attn1.processor") else "off") + rows.append((name, group, scale, skip)) + + def _key(t): + n = t[0] + if n.startswith("down_blocks"): p = 0 + elif n.startswith("mid_block"): p = 1 + elif n.startswith("up_blocks"): p = 2 + else: p = 3 + # ์ˆซ์ž ์ถ”์ถœ + import re + m = re.findall(r"\d+", n) + idx = tuple(int(x) for x in m) if m 
else (999,) + return (p, idx, n) + + rows.sort(key=_key) + + if verbose: + print("\n[IPAdapter] Block-scale report") + for name, group, scale, skip in rows: + tag = "ATTN2" if name.endswith("attn2.processor") else "ATTN1" + print(f" - {name:<60} [{tag}] group={group:<7} scale={scale} skip={skip}") + + return rows + + def set_ip_adapter(self): + unet = self.pipe.unet + attn_procs = {} + self.attn_procs = {} + + for name in unet.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim + + if name.startswith("mid_block"): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = self._parse_block_id(name, "up_blocks.") + hidden_size = list(reversed(unet.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = self._parse_block_id(name, "down_blocks.") + hidden_size = unet.config.block_out_channels[block_id] + else: + hidden_size = unet.config.block_out_channels[0] + + if cross_attention_dim is None: + proc = AttnProcessor() + setattr(proc, "layer_name", name) + else: + is_content = any(b in name for b in self.content_blocks) + is_style = any(b in name for b in self.style_blocks) + selected = is_content or is_style or any(b in name for b in self.target_blocks) + + init_skip = not selected + init_scale = 1.0 + if is_content: + init_scale = float(self.content_scale) + elif is_style: + init_scale = float(self.style_scale) + + proc = IPAttnProcessor( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + scale=init_scale, + num_tokens=self.num_tokens, + skip=init_skip, + ).to(self.device, dtype=torch.float32) + + setattr(proc, "layer_name", name) + setattr(proc, "group", "content" if is_content else ("style" if is_style else "off")) + + attn_procs[name] = proc + self.attn_procs[name] = proc + + unet.set_attn_processor(attn_procs) + + if hasattr(self.pipe, "controlnet"): + if isinstance(self.pipe.controlnet, MultiControlNetModel): + for controlnet in self.pipe.controlnet.nets: + controlnet.set_attn_processor(CNAttnProcessor(num_tokens=self.num_tokens)) + else: + self.pipe.controlnet.set_attn_processor(CNAttnProcessor(num_tokens=self.num_tokens)) + + def load_ip_adapter(self): + if os.path.splitext(self.ip_ckpt)[-1] == ".safetensors": + state_dict = {"image_proj": {}, "ip_adapter": {}} + with safe_open(self.ip_ckpt, framework="pt", device="cpu") as f: + for key in f.keys(): + if key.startswith("image_proj."): + state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key) + elif key.startswith("ip_adapter."): + state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key) + else: + state_dict = torch.load(self.ip_ckpt, map_location="cpu") + self.image_proj_model.load_state_dict(state_dict["image_proj"]) + ip_layers = torch.nn.ModuleList(self.pipe.unet.attn_processors.values()) + ip_layers.load_state_dict(state_dict["ip_adapter"], strict=False) + + + @torch.inference_mode() + def get_image_embeds(self, pil_image=None, clip_image_embeds=None, content_prompt_embeds=None): + if pil_image is not None: + if isinstance(pil_image, Image.Image): + pil_image = [pil_image] + clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values + clip_image_embeds = self.image_encoder(clip_image.to(self.device, dtype=torch.float32)).image_embeds + else: + clip_image_embeds = clip_image_embeds.to(self.device, dtype=torch.float32) + + image_prompt_embeds = self.image_proj_model(clip_image_embeds) # [B, 
Ni, D] = [1,4,2048] + + uncond_image_prompt_embeds = self.image_proj_model(torch.zeros_like(clip_image_embeds)) + return image_prompt_embeds, uncond_image_prompt_embeds + + + def generate( + self, + pil_image=None, + clip_image_embeds=None, + prompt=None, + negative_prompt=None, + scale=1.0, + num_samples=4, + seed=None, + guidance_scale=7.5, + num_inference_steps=30, + neg_content_emb=None, + **kwargs, + ): + if scale is not None: + self.set_scale(scale) + + if pil_image is not None: + num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image) + else: + num_prompts = clip_image_embeds.size(0) + + if prompt is None: + prompt = "best quality, high quality" + if negative_prompt is None: + negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" + + if not isinstance(prompt, List): + prompt = [prompt] * num_prompts + if not isinstance(negative_prompt, List): + negative_prompt = [negative_prompt] * num_prompts + + image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds( + pil_image=pil_image, clip_image_embeds=clip_image_embeds, content_prompt_embeds=neg_content_emb + ) + bs_embed, seq_len, _ = image_prompt_embeds.shape + image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1) + image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + + with torch.inference_mode(): + prompt_embeds_, negative_prompt_embeds_ = self.pipe.encode_prompt( + prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + prompt_embeds = torch.cat([prompt_embeds_, image_prompt_embeds], dim=1) + negative_prompt_embeds = torch.cat([negative_prompt_embeds_, uncond_image_prompt_embeds], dim=1) + + generator = get_generator(seed, self.device) + + images = self.pipe( + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + generator=generator, + **kwargs, + ).images + + return images + + +class IPAdapterXL(IPAdapter): + """SDXL""" + + def generate( + self, + pil_image, + prompt=None, + shape_prompt=None, + negative_prompt=None, + scale=1.0, + num_samples=4, + seed=None, + num_inference_steps=30, + neg_content_emb=None, + neg_content_prompt=None, + neg_content_scale=1.0, + **kwargs, + ): + if scale is not None: + self.set_scale(scale) + + + num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image) + + if prompt is None: + prompt = "best quality, high quality" + if negative_prompt is None: + negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" + if not isinstance(prompt, List): + prompt = [prompt] * num_prompts + if not isinstance(negative_prompt, List): + negative_prompt = [negative_prompt] * num_prompts + + + if neg_content_emb is None: + if neg_content_prompt is not None: + with torch.inference_mode(): + ( + prompt_embeds_, # [B, 77, 2048] + negative_prompt_embeds_, + pooled_prompt_embeds_, # [B, 1280] + negative_pooled_prompt_embeds_, + ) = self.pipe.encode_prompt( + neg_content_prompt, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + pooled_prompt_embeds_ *= neg_content_scale + else: + pooled_prompt_embeds_ = neg_content_emb + else: + pooled_prompt_embeds_ = None 
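+        # NOTE: the block below derives two IP token sets from the same reference image.
+        # The "content" tokens are concatenated onto the text and shape prompt embeddings
+        # (tail position), while the "style" tokens are handed to the style-group attention
+        # processors via ip_tokens_override / ip_tokens_override_uncond and cleared again
+        # after the pipeline call, so the two groups can be weighted independently through
+        # content_scale / style_scale (see set_ip_adapter and _apply_group_scales).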
+ + content_ip_tokens, uncond_content_ip_tokens = self.get_image_embeds( + pil_image=pil_image, + content_prompt_embeds=pooled_prompt_embeds_ + ) + bs_embed, seq_len, _ = content_ip_tokens.shape + content_ip_tokens = content_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + uncond_content_ip_tokens = uncond_content_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + + style_ip_tokens, uncond_style_ip_tokens = self.get_image_embeds( + pil_image=pil_image, + content_prompt_embeds=pooled_prompt_embeds_ + ) + + bs_embed, seq_len, _ = style_ip_tokens.shape + style_ip_tokens = style_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + style_ip_tokens_uncond = uncond_style_ip_tokens.repeat(1, num_samples, 1).view(bs_embed * num_samples, seq_len, -1) + + with torch.inference_mode(): + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + # โ˜… ์—ฌ๊ธฐ์„œ "์ฝ˜ํ…์ธ " IP ํ† ํฐ๋งŒ ๋ถ™์ธ๋‹ค + prompt_embeds = torch.cat([prompt_embeds, content_ip_tokens], dim=1) + negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_content_ip_tokens], dim=1) + + with torch.inference_mode(): + ( + shape_prompt_embeds, + shape_negative_prompt_embeds, + shape_pooled_prompt_embeds, + shape_negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + shape_prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + shape_prompt_embeds = torch.cat([shape_prompt_embeds, content_ip_tokens], dim=1) + shape_negative_prompt_embeds = torch.cat([shape_negative_prompt_embeds, uncond_content_ip_tokens], dim=1) + + + for name, proc in self.attn_procs.items(): + if getattr(proc, "group", "off") == "style": + proc.ip_tokens_override = style_ip_tokens.to(self.device, dtype=torch.float32) + proc.ip_tokens_override_uncond = style_ip_tokens_uncond.to(self.device, dtype=torch.float32) + else: + if hasattr(proc, "ip_tokens_override"): + proc.ip_tokens_override = None + if hasattr(proc, "ip_tokens_override_uncond"): + proc.ip_tokens_override_uncond = None + + + self.generator = get_generator(seed, self.device) + + images = self.pipe( + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + shape_prompt_embeds=shape_prompt_embeds, + shape_negative_prompt_embeds=shape_negative_prompt_embeds, + shape_pooled_prompt_embeds=shape_pooled_prompt_embeds, + shape_negative_pooled_prompt_embeds=shape_negative_pooled_prompt_embeds, + num_inference_steps=num_inference_steps, + generator=self.generator, + mask_image=self.mask, + sketch_image=self.sketch, + garment_images=self.garment_images, + garment_mask=self.garment_mask, + **kwargs, + ).images + + for name, proc in self.attn_procs.items(): + if hasattr(proc, "ip_tokens_override"): + proc.ip_tokens_override = None + if hasattr(proc, "ip_tokens_override_uncond"): + proc.ip_tokens_override_uncond = None + + return images + + + + +class IPAdapterPlus(IPAdapter): + """IP-Adapter with fine-grained features""" + + def init_proj(self): + image_proj_model = Resampler( + dim=self.pipe.unet.config.cross_attention_dim, + depth=4, + dim_head=64, + heads=12, + 
num_queries=self.num_tokens, + embedding_dim=self.image_encoder.config.hidden_size, + output_dim=self.pipe.unet.config.cross_attention_dim, + ff_mult=4, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + @torch.inference_mode() + def get_image_embeds(self, pil_image=None, clip_image_embeds=None): + if isinstance(pil_image, Image.Image): + pil_image = [pil_image] + clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values + clip_image = clip_image.to(self.device, dtype=torch.float32) + clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2] + image_prompt_embeds = self.image_proj_model(clip_image_embeds) + uncond_clip_image_embeds = self.image_encoder( + torch.zeros_like(clip_image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds) + return image_prompt_embeds, uncond_image_prompt_embeds + + +class IPAdapterFull(IPAdapterPlus): + """IP-Adapter with full features""" + + def init_proj(self): + image_proj_model = MLPProjModel( + cross_attention_dim=self.pipe.unet.config.cross_attention_dim, + clip_embeddings_dim=self.image_encoder.config.hidden_size, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + +class IPAdapterPlusXL(IPAdapter): + """SDXL""" + + def init_proj(self): + image_proj_model = Resampler( + dim=1280, + depth=4, + dim_head=64, + heads=20, + num_queries=self.num_tokens, + embedding_dim=self.image_encoder.config.hidden_size, + output_dim=self.pipe.unet.config.cross_attention_dim, + ff_mult=4, + ).to(self.device, dtype=torch.float32) + return image_proj_model + + @torch.inference_mode() + def get_image_embeds(self, pil_image): + if isinstance(pil_image, Image.Image): + pil_image = [pil_image] + clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values + clip_image = clip_image.to(self.device, dtype=torch.float32) + clip_image_embeds = self.image_encoder(clip_image, output_hidden_states=True).hidden_states[-2] + image_prompt_embeds = self.image_proj_model(clip_image_embeds) + uncond_clip_image_embeds = self.image_encoder( + torch.zeros_like(clip_image), output_hidden_states=True + ).hidden_states[-2] + uncond_image_prompt_embeds = self.image_proj_model(uncond_clip_image_embeds) + return image_prompt_embeds, uncond_image_prompt_embeds + + def generate( + self, + pil_image, + prompt=None, + negative_prompt=None, + scale=1.0, + num_samples=4, + seed=None, + num_inference_steps=30, + **kwargs, + ): + self.set_scale(scale) + + num_prompts = 1 if isinstance(pil_image, Image.Image) else len(pil_image) + + if prompt is None: + prompt = "best quality, high quality" + if negative_prompt is None: + negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" + + if not isinstance(prompt, List): + prompt = [prompt] * num_prompts + if not isinstance(negative_prompt, List): + negative_prompt = [negative_prompt] * num_prompts + + image_prompt_embeds, uncond_image_prompt_embeds = self.get_image_embeds(pil_image) + bs_embed, seq_len, _ = image_prompt_embeds.shape + image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1) + image_prompt_embeds = image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.view(bs_embed * num_samples, seq_len, -1) + + with torch.inference_mode(): + ( + prompt_embeds, + 
negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.pipe.encode_prompt( + prompt, + device=self.device, + num_images_per_prompt=num_samples, + do_classifier_free_guidance=True, + negative_prompt=negative_prompt, + ) + prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1) + negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1) + + generator = get_generator(seed, self.device) + + images = self.pipe( + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + num_inference_steps=num_inference_steps, + generator=generator, + **kwargs, + ).images + + return images diff --git a/ip_adapter/resampler.py b/ip_adapter/resampler.py new file mode 100644 index 0000000000000000000000000000000000000000..24266671d02092438ae6576336a59659fef9c054 --- /dev/null +++ b/ip_adapter/resampler.py @@ -0,0 +1,158 @@ +# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py +# and https://github.com/lucidrains/imagen-pytorch/blob/main/imagen_pytorch/imagen_pytorch.py + +import math + +import torch +import torch.nn as nn +from einops import rearrange +from einops.layers.torch import Rearrange + + +# FFN +def FeedForward(dim, mult=4): + inner_dim = int(dim * mult) + return nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, inner_dim, bias=False), + nn.GELU(), + nn.Linear(inner_dim, dim, bias=False), + ) + + +def reshape_tensor(x, heads): + bs, length, width = x.shape + # (bs, length, width) --> (bs, length, n_heads, dim_per_head) + x = x.view(bs, length, heads, -1) + # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) + x = x.transpose(1, 2) + # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) + x = x.reshape(bs, heads, length, -1) + return x + + +class PerceiverAttention(nn.Module): + def __init__(self, *, dim, dim_head=64, heads=8): + super().__init__() + self.scale = dim_head**-0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x, latents): + """ + Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, l, _ = latents.shape + + q = self.to_q(latents) + kv_input = torch.cat((x, latents), dim=-2) + k, v = self.to_kv(kv_input).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, l, -1) + + return self.to_out(out) + + +class Resampler(nn.Module): + def __init__( + self, + dim=1024, + depth=8, + dim_head=64, + heads=16, + num_queries=8, + embedding_dim=768, + output_dim=1024, + ff_mult=4, + max_seq_len: int = 257, # CLIP tokens + CLS token + apply_pos_emb: bool = False, + num_latents_mean_pooled: int = 0, # number of 
latents derived from mean pooled representation of the sequence + ): + super().__init__() + self.pos_emb = nn.Embedding(max_seq_len, embedding_dim) if apply_pos_emb else None + + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) + + self.proj_in = nn.Linear(embedding_dim, dim) + + self.proj_out = nn.Linear(dim, output_dim) + self.norm_out = nn.LayerNorm(output_dim) + + self.to_latents_from_mean_pooled_seq = ( + nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, dim * num_latents_mean_pooled), + Rearrange("b (n d) -> b n d", n=num_latents_mean_pooled), + ) + if num_latents_mean_pooled > 0 + else None + ) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), + FeedForward(dim=dim, mult=ff_mult), + ] + ) + ) + + def forward(self, x): + if self.pos_emb is not None: + n, device = x.shape[1], x.device + pos_emb = self.pos_emb(torch.arange(n, device=device)) + x = x + pos_emb + + latents = self.latents.repeat(x.size(0), 1, 1) + + x = self.proj_in(x) + + if self.to_latents_from_mean_pooled_seq: + meanpooled_seq = masked_mean(x, dim=1, mask=torch.ones(x.shape[:2], device=x.device, dtype=torch.bool)) + meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq) + latents = torch.cat((meanpooled_latents, latents), dim=-2) + + for attn, ff in self.layers: + latents = attn(x, latents) + latents + latents = ff(latents) + latents + + latents = self.proj_out(latents) + return self.norm_out(latents) + + +def masked_mean(t, *, dim, mask=None): + if mask is None: + return t.mean(dim=dim) + + denom = mask.sum(dim=dim, keepdim=True) + mask = rearrange(mask, "b n -> b n 1") + masked_t = t.masked_fill(~mask, 0.0) + + return masked_t.sum(dim=dim) / denom.clamp(min=1e-5) diff --git a/ip_adapter/utils.py b/ip_adapter/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6a273358585962fdf383d0bb7a0e1c654b4999b8 --- /dev/null +++ b/ip_adapter/utils.py @@ -0,0 +1,93 @@ +import torch +import torch.nn.functional as F +import numpy as np +from PIL import Image + +attn_maps = {} +def hook_fn(name): + def forward_hook(module, input, output): + if hasattr(module.processor, "attn_map"): + attn_maps[name] = module.processor.attn_map + del module.processor.attn_map + + return forward_hook + +def register_cross_attention_hook(unet): + for name, module in unet.named_modules(): + if name.split('.')[-1].startswith('attn2'): + module.register_forward_hook(hook_fn(name)) + + return unet + +def upscale(attn_map, target_size): + attn_map = torch.mean(attn_map, dim=0) + attn_map = attn_map.permute(1,0) + temp_size = None + + for i in range(0,5): + scale = 2 ** i + if ( target_size[0] // scale ) * ( target_size[1] // scale) == attn_map.shape[1]*64: + temp_size = (target_size[0]//(scale*8), target_size[1]//(scale*8)) + break + + assert temp_size is not None, "temp_size cannot is None" + + attn_map = attn_map.view(attn_map.shape[0], *temp_size) + + attn_map = F.interpolate( + attn_map.unsqueeze(0).to(dtype=torch.float32), + size=target_size, + mode='bilinear', + align_corners=False + )[0] + + attn_map = torch.softmax(attn_map, dim=0) + return attn_map +def get_net_attn_map(image_size, batch_size=2, instance_or_negative=False, detach=True): + + idx = 0 if instance_or_negative else 1 + net_attn_maps = [] + + for name, attn_map in attn_maps.items(): + attn_map = attn_map.cpu() if detach else attn_map + attn_map = torch.chunk(attn_map, batch_size)[idx].squeeze() + 
attn_map = upscale(attn_map, image_size) + net_attn_maps.append(attn_map) + + net_attn_maps = torch.mean(torch.stack(net_attn_maps,dim=0),dim=0) + + return net_attn_maps + +def attnmaps2images(net_attn_maps): + + #total_attn_scores = 0 + images = [] + + for attn_map in net_attn_maps: + attn_map = attn_map.cpu().numpy() + #total_attn_scores += attn_map.mean().item() + + normalized_attn_map = (attn_map - np.min(attn_map)) / (np.max(attn_map) - np.min(attn_map)) * 255 + normalized_attn_map = normalized_attn_map.astype(np.uint8) + #print("norm: ", normalized_attn_map.shape) + image = Image.fromarray(normalized_attn_map) + + #image = fix_save_attn_map(attn_map) + images.append(image) + + #print(total_attn_scores) + return images +def is_torch2_available(): + return hasattr(F, "scaled_dot_product_attention") + +def get_generator(seed, device): + + if seed is not None: + if isinstance(seed, list): + generator = [torch.Generator(device).manual_seed(seed_item) for seed_item in seed] + else: + generator = torch.Generator(device).manual_seed(seed) + else: + generator = None + + return generator \ No newline at end of file diff --git a/preprocess/.gitignore b/preprocess/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..67fde0e197356dc5dc75c4eff48000996b3a7ed4 --- /dev/null +++ b/preprocess/.gitignore @@ -0,0 +1,5 @@ +**/__pycache__ + +data/ +log/ +pretrain_model/ diff --git a/preprocess/.ipynb_checkpoints/simple_extractor-checkpoint.py b/preprocess/.ipynb_checkpoints/simple_extractor-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7325e95da1b89cf22576e41dfe4e8892d2104e --- /dev/null +++ b/preprocess/.ipynb_checkpoints/simple_extractor-checkpoint.py @@ -0,0 +1,350 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : simple_extractor.py +@Time : 8/30/19 8:59 PM +@Desc : Simple Extractor +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import torch +import argparse +import numpy as np +from PIL import Image +from tqdm import tqdm + +from torch.utils.data import DataLoader +import torchvision.transforms as transforms + +import os +import sys + +_THIS_DIR = os.path.dirname(os.path.abspath(__file__)) # .../DEMO/preprocess +if _THIS_DIR not in sys.path: + sys.path.insert(0, _THIS_DIR) + + +import networks +from utils.transforms import transform_logits +from datasets.simple_extractor_dataset import SimpleFolderDataset + + + +dataset_settings = { + 'lip': { + 'input_size': [473, 473], + 'num_classes': 20, + 'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', + 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', + 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe'] + }, + 'atr': { + 'input_size': [512, 512], + 'num_classes': 18, + 'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', + 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'] + }, + 'pascal': { + 'input_size': [512, 512], + 'num_classes': 7, + 'label': ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'], + } +} + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + Returns: + A list of parsed arguments. 
+ """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + parser.add_argument("--dataset", type=str, default='atr', choices=['lip', 'atr', 'pascal']) + parser.add_argument("--model-restore", type=str, default='', help="restore pretrained model parameters.") + parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.") + parser.add_argument("--category", type=str, default='Upper-clothes', help="category name (optional).") + parser.add_argument("--input-dir", type=str, default='', help="path of input image folder.") + parser.add_argument("--output-dir", type=str, default='', help="path of output image folder.") + parser.add_argument("--logits", action='store_true', default=False, help="whether to save the logits.") + + return parser.parse_args() + + +def get_palette(num_cls): + n = 18 + palette = [0] * (n * 3) + j = num_cls + lab = num_cls + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] = 255 + palette[j * 3 + 1] = 255 + palette[j * 3 + 2] = 255 + i += 1 + lab >>= 3 + return palette + + +# def run( +# *, +# category: str, +# input_dir: str, +# output_dir: str, +# dataset: str = "atr", +# model_restore: str = "", +# gpu: str = "0", +# logits: bool = False, +# ): +# """ +# โœ… ์™ธ๋ถ€(๋‹ค๋ฅธ ํŒŒ์ด์ฌ ์ฝ”๋“œ)์—์„œ import ํ•ด์„œ ํ˜ธ์ถœํ•˜๊ธฐ ์œ„ํ•œ ์—”ํŠธ๋ฆฌ ํ•จ์ˆ˜. +# - ๊ธฐ์กด main()์˜ ๋‚ด์šฉ์„ ๊ฑฐ์˜ ๊ทธ๋Œ€๋กœ ์˜ฎ๊น€ +# - CLI ์ธ์ž ๋Œ€์‹  ํŒŒ๋ผ๋ฏธํ„ฐ๋กœ ๋ฐ›์Œ +# """ +# # (์› ์ฝ”๋“œ ์œ ์ง€) single GPU๋งŒ ํ—ˆ์šฉ +# gpus = [int(i) for i in gpu.split(',')] +# assert len(gpus) == 1 +# if gpu != 'None': +# os.environ["CUDA_VISIBLE_DEVICES"] = gpu + +# num_classes = dataset_settings[dataset]['num_classes'] +# input_size = dataset_settings[dataset]['input_size'] +# label = dataset_settings[dataset]['label'] +# print("Evaluating total class number {} with {}".format(num_classes, label)) + +# model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None) + +# if not model_restore: +# print("[simple_extractor] model_restore not provided โ†’ skip extractor.") +# return False + + +# state_dict = torch.load(model_restore)['state_dict'] + +# # print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ args.model_restore: ", state_dict) +# from collections import OrderedDict +# new_state_dict = OrderedDict() +# for k, v in state_dict.items(): +# name = k[7:] # remove `module.` +# new_state_dict[name] = v +# model.load_state_dict(new_state_dict) +# model.cuda() +# model.eval() + +# transform = transforms.Compose([ +# transforms.ToTensor(), +# transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) +# ]) + +# # ----------------------------- +# # ์ž…๋ ฅ ํด๋” ์ด๋ฏธ์ง€ ๋กœ๋“œ +# # ----------------------------- +# if not input_dir: +# raise ValueError("--input-dir (input_dir) is required.") +# if not output_dir: +# raise ValueError("--output-dir (output_dir) is required.") + +# all_files = sorted([f for f in os.listdir(input_dir) +# if f.lower().endswith(('.png', '.jpg', '.jpeg'))]) +# selected_files = all_files[:] +# print(f"Total images found: {len(all_files)} โ†’ Using first {len(selected_files)} images") + +# dataset_obj = SimpleFolderDataset( +# root=input_dir, +# input_size=input_size, +# transform=transform, +# file_list=selected_files +# ) +# dataloader = DataLoader(dataset_obj) + +# os.makedirs(output_dir, exist_ok=True) + +# # NOTE: ๊ธฐ์กด ์ฝ”๋“œ๊ฐ€ palette = get_palette(4)๋กœ ๊ณ ์ •์ธ๋ฐ, +# # ์ง€๊ธˆ๋„ ๊ทธ๋Œ€๋กœ ์œ ์ง€ (ํ•„์š”ํ•˜๋ฉด category 
๊ธฐ๋ฐ˜์œผ๋กœ ๋ฐ”๊พธ๋Š” ๊ฒƒ๋„ ๊ฐ€๋Šฅ) +# palette = get_palette(4) + +# with torch.no_grad(): +# for idx, batch in enumerate(tqdm(dataloader)): +# print("--: ", idx) +# image, meta = batch +# img_name = meta['name'][0] +# c = meta['center'].numpy()[0] +# s = meta['scale'].numpy()[0] +# w = meta['width'].numpy()[0] +# h = meta['height'].numpy()[0] + +# output = model(image.cuda()) +# upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) +# upsample_output = upsample(output[0][-1][0].unsqueeze(0)) +# upsample_output = upsample_output.squeeze() +# upsample_output = upsample_output.permute(1, 2, 0) # CHW -> HWC + +# logits_result = transform_logits( +# upsample_output.data.cpu().numpy(), +# c, s, w, h, +# input_size=input_size +# ) +# parsing_result = np.argmax(logits_result, axis=2) + +# parsing_result_path = os.path.join(output_dir, img_name[:-4] + '.png') +# output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) +# output_img.putpalette(palette) +# output_img.save(parsing_result_path) + +# if logits: +# logits_result_path = os.path.join(output_dir, img_name[:-4] + '.npy') +# np.save(logits_result_path, logits_result) + +# return + + +def run( + *, + category: str, + input_path: str = "", + input_dir: str = "", + dataset: str = "atr", + model_restore: str = "", + gpu: str = "0", + logits: bool = False, +): + """ + - input_path (๋‹จ์ผ ํŒŒ์ผ) ๋˜๋Š” input_dir(ํด๋”) ์ค‘ ํ•˜๋‚˜๋ฅผ ๋ฐ›์•„ parsing ๊ฒฐ๊ณผ๋ฅผ ๋ฉ”๋ชจ๋ฆฌ๋กœ ๋ฐ˜ํ™˜. + - ํŒŒ์ผ ์ €์žฅ ์—†์Œ. + + Returns: + { + "images": List[PIL.Image], # parsing mask (palette ์ ์šฉ๋จ) + "logits": Optional[List[np.ndarray]], + "names": List[str], # ํŒŒ์ผ๋ช…๋“ค + } + """ + # single GPU๋งŒ ํ—ˆ์šฉ + gpus = [int(i) for i in gpu.split(',')] + assert len(gpus) == 1 + if gpu != 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = gpu + + if not model_restore: + print("[simple_extractor] model_restore not provided โ†’ skip extractor.") + return {"images": [], "logits": [] if logits else None, "names": []} + + # ์ž…๋ ฅ ๊ฒ€์ฆ: ๋‘˜ ์ค‘ ํ•˜๋‚˜๋Š” ์žˆ์–ด์•ผ ํ•จ + if bool(input_path) == bool(input_dir): + raise ValueError("Provide exactly one of input_path or input_dir.") + + # ํŒŒ์ผ์ด๋ฉด ์กด์žฌ ํ™•์ธ + if input_path: + if not os.path.isfile(input_path): + raise FileNotFoundError(f"input_path not found or not a file: {input_path}") + + # ํด๋”๋ฉด ์กด์žฌ ํ™•์ธ + if input_dir: + if not os.path.isdir(input_dir): + raise NotADirectoryError(f"input_dir not found or not a directory: {input_dir}") + + num_classes = dataset_settings[dataset]['num_classes'] + input_size = dataset_settings[dataset]['input_size'] + label = dataset_settings[dataset]['label'] + print(f"Evaluating total class number {num_classes} with {label}") + + model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None) + + state_dict = torch.load(model_restore)['state_dict'] + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) + ]) + + # ---- ํŒŒ์ผ ๋ฆฌ์ŠคํŠธ ๋งŒ๋“ค๊ธฐ (๋‹จ์ผ ํŒŒ์ผ/ํด๋” ๋ชจ๋‘ ๋Œ€์‘) ---- + if input_path: + # root๋Š” ํŒŒ์ผ์˜ ๋ถ€๋ชจ ๋””๋ ‰ํ„ฐ๋ฆฌ, file_list๋Š” ํŒŒ์ผ๋ช… 1๊ฐœ + root = os.path.dirname(input_path) + file_list = [os.path.basename(input_path)] + else: + root = input_dir + file_list 
= sorted([ + f for f in os.listdir(root) + if f.lower().endswith(('.png', '.jpg', '.jpeg')) + ]) + + dataset_obj = SimpleFolderDataset( + root=root, + input_size=input_size, + transform=transform, + file_list=file_list + ) + dataloader = DataLoader(dataset_obj) + + palette = get_palette(4) + + results_img = [] + results_logits = [] if logits else None + names = [] + + with torch.no_grad(): + for batch in tqdm(dataloader): + image, meta = batch + img_name = meta['name'][0] + names.append(img_name) + + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + + output = model(image.cuda()) + upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) + upsample_output = upsample(output[0][-1][0].unsqueeze(0)) + upsample_output = upsample_output.squeeze() + upsample_output = upsample_output.permute(1, 2, 0) + + logits_result = transform_logits( + upsample_output.data.cpu().numpy(), + c, s, w, h, + input_size=input_size + ) + parsing_result = np.argmax(logits_result, axis=2) + + out_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + out_img.putpalette(palette) + results_img.append(out_img) + + if logits: + results_logits.append(logits_result) + + return {"images": results_img, "logits": results_logits, "names": names} + + + + +def main(): + # โœ… CLI ํ˜ธํ™˜ ์œ ์ง€ + args = get_arguments() + run( + category=args.category, + input_dir=args.input_dir, + output_dir=args.output_dir, + ) + + +if __name__ == '__main__': + main() + diff --git a/preprocess/.ipynb_checkpoints/simple_extractor2-checkpoint.py b/preprocess/.ipynb_checkpoints/simple_extractor2-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..47d6f77681e2770519b98aef984bf5d0348a5be8 --- /dev/null +++ b/preprocess/.ipynb_checkpoints/simple_extractor2-checkpoint.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +# + +# #!/usr/bin/env python +# -*- encoding: utf-8 -*- +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : simple_extractor.py +@Time : 8/30/19 8:59 PM +@Desc : Simple Extractor (modified for single image input) +""" + +import os +import torch +import argparse +import numpy as np +from PIL import Image +from tqdm import tqdm +import cv2 + +from torch.utils.data import Dataset, DataLoader +import torchvision.transforms as transforms + +import networks +from preprocess.utils.transforms import transform_logits, get_affine_transform + + +class SimpleFileDataset(Dataset): + def __init__(self, image_path, input_size=[512, 512], transform=None): + self.image_path = image_path + self.input_size = np.asarray(input_size) + self.transform = transform + self.aspect_ratio = input_size[1] * 1.0 / input_size[0] + self.img_name = os.path.basename(image_path) + + def __len__(self): + return 1 + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w, h], dtype=np.float32) + return center, scale + + def __getitem__(self, index): + img = cv2.imread(self.image_path, cv2.IMREAD_COLOR) + h, w, _ = img.shape + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + trans = get_affine_transform(person_center, s, r, self.input_size) + input = cv2.warpAffine( + img, + trans, + 
(int(self.input_size[1]), int(self.input_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + input = self.transform(input) + meta = { + 'name': self.img_name, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + return input, meta + + +dataset_settings = { + 'atr': { + 'input_size': [512, 512], + 'num_classes': 18, + 'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', + 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'] + } +} + +def get_palette(num_cls): + n = 18 + palette = [0] * (n * 3) + j = num_cls + lab = num_cls + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] = 255 + palette[j * 3 + 1] = 255 + palette[j * 3 + 2] = 255 + i += 1 + lab >>= 3 + return palette + + +def masking(image_path, class_num=0): + num_classes = dataset_settings['atr']['num_classes'] + input_size = dataset_settings['atr']['input_size'] + label = dataset_settings['atr']['label'] + print("Evaluating total class number {} with {}".format(num_classes, label)) + + model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None) + state_dict = torch.load('./ckpts/exp-schp-201908301523-atr.pth')['state_dict'] + + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) + ]) + dataset = SimpleFileDataset(image_path=image_path, input_size=input_size, transform=transform) + dataloader = DataLoader(dataset) + + if not os.path.exists('./outputs'): + os.makedirs('./outputs') + + palette = get_palette(class_num) + with torch.no_grad(): + for idx, batch in enumerate(tqdm(dataloader)): + image, meta = batch + img_name = meta['name'][0] + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + + output = model(image.cuda()) + upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) + upsample_output = upsample(output[0][-1][0].unsqueeze(0)) + upsample_output = upsample_output.squeeze() + upsample_output = upsample_output.permute(1, 2, 0) + + logits_result = transform_logits(upsample_output.data.cpu().numpy(), c, s, w, h, input_size=input_size) + parsing_result = np.argmax(logits_result, axis=2) + parsing_result_path = os.path.join('./outputs', img_name[:-4] + '.png') + output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + output_img.putpalette(palette) + output_img.save(parsing_result_path) + gray_img = output_img.convert('L') + + return gray_img diff --git a/preprocess/.ipynb_checkpoints/simple_extractor3-checkpoint.py b/preprocess/.ipynb_checkpoints/simple_extractor3-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..dcfc59cac29195544d69dd7a19c2b7e55c252b6c --- /dev/null +++ b/preprocess/.ipynb_checkpoints/simple_extractor3-checkpoint.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : simple_extractor.py +@Time : 8/30/19 8:59 PM +@Desc : Simple Extractor +@License : This source code is licensed under the license found 
in the + LICENSE file in the root directory of this source tree. +""" + +import os +import torch +import argparse +import numpy as np +from PIL import Image +from tqdm import tqdm + +from torch.utils.data import DataLoader +import torchvision.transforms as transforms + +import networks +from utils.transforms import transform_logits +from datasets.simple_extractor_dataset import SimpleFolderDataset + +dataset_settings = { + 'lip': { + 'input_size': [473, 473], + 'num_classes': 20, + 'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', + 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', + 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe'] + }, + 'atr': { + 'input_size': [512, 512], + 'num_classes': 18, + 'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', + 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'] + }, + 'pascal': { + 'input_size': [512, 512], + 'num_classes': 7, + 'label': ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'], + } +} + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + Returns: + A list of parsed arguments. + """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + parser.add_argument("--dataset", type=str, default='atr', choices=['lip', 'atr', 'pascal']) + parser.add_argument("--model-restore", type=str, default='', help="restore pretrained model parameters.") + parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.") + parser.add_argument("--category", type=str, default='Upper-clothes', help="path of input image folder.") + parser.add_argument("--input-dir", type=str, default='', help="path of input image folder.") + parser.add_argument("--output-dir", type=str, default='', help="path of output image folder.") + parser.add_argument("--logits", action='store_true', default=False, help="whether to save the logits.") + + return parser.parse_args() + + +# def get_palette(num_cls): +# """ Returns the color map for visualizing the segmentation mask. 
+# Args: +# num_cls: Number of classes +# Returns: +# The color map +# """ +# n = 18 +# palette = [0] * (n * 3) +# for j in range(5, 7): +# lab = j +# palette[j * 3 + 0] = 0 +# palette[j * 3 + 1] = 0 +# palette[j * 3 + 2] = 0 +# i = 0 +# while lab: +# palette[j * 3 + 0] = 255 +# palette[j * 3 + 1] = 255 +# palette[j * 3 + 2] = 255 +# i += 1 +# lab >>= 3 +# return palette + +def get_palette(num_cls): + n = 18 + palette = [0] * (n * 3) + j = num_cls + lab = num_cls + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] = 255 + palette[j * 3 + 1] = 255 + palette[j * 3 + 2] = 255 + i += 1 + lab >>= 3 + return palette + + +def main(): + args = get_arguments() + gpus = [int(i) for i in args.gpu.split(',')] + assert len(gpus) == 1 + if not args.gpu == 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + num_classes = dataset_settings[args.dataset]['num_classes'] + input_size = dataset_settings[args.dataset]['input_size'] + label = dataset_settings[args.dataset]['label'] + print("Evaluating total class number {} with {}".format(num_classes, label)) + + model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None) + + state_dict = torch.load(args.model_restore)['state_dict'] + print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ args.model_restore: ", args.model_restore) + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) + ]) + + # ----------------------------- + # ๐Ÿ“Œ ์ž…๋ ฅ ํด๋”์—์„œ ํŒŒ์ผ๋ช… ์ˆœ์œผ๋กœ ์•ž 200๊ฐœ๋งŒ ์‚ฌ์šฉ + # ----------------------------- + all_files = sorted([f for f in os.listdir(args.input_dir) + if f.lower().endswith(('.png', '.jpg', '.jpeg'))]) + selected_files = all_files[:] + print(f"Total images found: {len(all_files)} โ†’ Using first {len(selected_files)} images") + + dataset = SimpleFolderDataset(root=args.input_dir, + input_size=input_size, + transform=transform, + file_list=selected_files) # file_list ์ธ์ž๋กœ ์ „๋‹ฌ + dataloader = DataLoader(dataset) + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + palette = get_palette(4) + with torch.no_grad(): + for idx, batch in enumerate(tqdm(dataloader)): + print("--: ", idx) + image, meta = batch + img_name = meta['name'][0] + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + + output = model(image.cuda()) + upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) + upsample_output = upsample(output[0][-1][0].unsqueeze(0)) + upsample_output = upsample_output.squeeze() + upsample_output = upsample_output.permute(1, 2, 0) # CHW -> HWC + + logits_result = transform_logits(upsample_output.data.cpu().numpy(), c, s, w, h, input_size=input_size) + parsing_result = np.argmax(logits_result, axis=2) + parsing_result_path = os.path.join(args.output_dir, img_name[:-4] + '.png') + output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + output_img.putpalette(palette) + output_img.save(parsing_result_path) + if args.logits: + logits_result_path = os.path.join(args.output_dir, img_name[:-4] + '.npy') + np.save(logits_result_path, logits_result) + return + +if __name__ == '__main__': + 
main()
diff --git a/preprocess/LICENSE b/preprocess/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..62798356f28616a2ba91c923c83c49c4672316a1
--- /dev/null
+++ b/preprocess/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Peike Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/preprocess/README.md b/preprocess/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5a42a79d839f4e1de50201e38650bbfd40e2958b
--- /dev/null
+++ b/preprocess/README.md
@@ -0,0 +1,129 @@
+# Self Correction for Human Parsing
+
+![Python 3.6](https://img.shields.io/badge/python-3.6-green.svg)
+[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
+
+An out-of-box human parsing representation extractor.
+
+Our solution ranks 1st for all human parsing tracks (including single, multiple and video) in the third LIP challenge!
+
+![lip-visualization](./demo/lip-visualization.jpg)
+
+Features:
+- [x] Out-of-box human parsing extractor for other downstream applications.
+- [x] Pretrained models on three popular single person human parsing datasets.
+- [x] Training and inference code.
+- [x] Simple yet effective extension on multi-person and video human parsing tasks.
+
+## Requirements
+
+```
+conda env create -f environment.yaml
+conda activate schp
+pip install -r requirements.txt
+```
+
+## Simple Out-of-Box Extractor
+
+The easiest way to get started is to use our trained SCHP models on your own images to extract human parsing representations. Here we provide state-of-the-art [trained models](https://drive.google.com/drive/folders/1uOaQCpNtosIjEL2phQKEdiYd0Td18jNo?usp=sharing) on three popular datasets. These three datasets have different label systems, so you can choose the one that best fits your own task.
+
+**LIP** ([exp-schp-201908261155-lip.pth](https://drive.google.com/file/d/1k4dllHpu0bdx38J7H28rVVLpU-kOHmnH/view?usp=sharing))
+
+* mIoU on LIP validation: **59.36%**.
+
+* LIP is the largest single person human parsing dataset with 50000+ images. This dataset focuses more on complicated real-world scenarios. LIP has 20 labels, including 'Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe'.
+
+**ATR** ([exp-schp-201908301523-atr.pth](https://drive.google.com/file/d/1ruJg4lqR_jgQPj-9K0PP-L2vJERYOxLP/view?usp=sharing))
+
+* mIoU on ATR test: **82.29%**.
+
+* ATR is a large single person human parsing dataset with 17000+ images. This dataset focuses more on fashion AI. ATR has 18 labels, including 'Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'.
+
+**Pascal-Person-Part** ([exp-schp-201908270938-pascal-person-part.pth](https://drive.google.com/file/d/1E5YwNKW2VOEayK9mWCS3Kpsxf-3z04ZE/view?usp=sharing))
+
+* mIoU on Pascal-Person-Part validation: **71.46%**.
+
+* Pascal Person Part is a tiny single person human parsing dataset with 3000+ images. This dataset focuses more on body part segmentation. Pascal Person Part has 7 labels, including 'Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'.
+
+Choose one and have fun on your own task!
+
+To extract the human parsing representation, simply put your own image in the `INPUT_PATH` folder, then download a pretrained model and run the following command. The output images with the same file names will be saved in `OUTPUT_PATH`.
+
+```
+python simple_extractor.py --dataset [DATASET] --model-restore [CHECKPOINT_PATH] --input-dir [INPUT_PATH] --output-dir [OUTPUT_PATH]
+```
+
+**[Updated]** There is also a [colab demo example](https://colab.research.google.com/drive/1JOwOPaChoc9GzyBi5FUEYTSaP2qxJl10?usp=sharing) for quick inference, provided by [@levindabhi](https://github.com/levindabhi).
+
+The `DATASET` argument has three options: 'lip', 'atr' and 'pascal'. Note that each pixel in the output images denotes the predicted label number. The output images have the same size as the input ones. For better visualization, a palette is attached to the output images. We suggest reading the images with `PIL`.
+
+If you need not only the final parsing images but also the feature map representations, add the `--logits` flag to save the output feature maps. These feature maps are the logits before the softmax layer.
+
+## Dataset Preparation
+
+Please download the [LIP](http://sysu-hcp.net/lip/) dataset following the structure below.
+
+```commandline
+data/LIP
+|--- train_images # 30462 training single person images
+|--- val_images # 10000 validation single person images
+|--- train_segmentations # 30462 training annotations
+|--- val_segmentations # 10000 validation annotations
+|--- train_id.txt # training image list
+|--- val_id.txt # validation image list
+```
+
+## Training
+
+```
+python train.py
+```
+By default, the trained model will be saved in the `./log` directory. Please read the arguments for more details.
+
+## Evaluation
+```
+python evaluate.py --model-restore [CHECKPOINT_PATH]
+```
+CHECKPOINT_PATH should be the path of the trained model.
+
+## Extension on Multiple Human Parsing
+
+Please read [MultipleHumanParsing.md](./mhp_extension/README.md) for more details.
+
+## Citation
+
+Please cite our work if you find this repo useful in your research.
+
+```latex
+@article{li2020self,
+  title={Self-Correction for Human Parsing},
+  author={Li, Peike and Xu, Yunqiu and Wei, Yunchao and Yang, Yi},
+  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+  year={2020},
+  doi={10.1109/TPAMI.2020.3048039}}
+```
+
+## Visualization
+
+* Source Image.
+![demo](./demo/demo.jpg)
+* LIP Parsing Result.
+![demo-lip](./demo/demo_lip.png)
+* ATR Parsing Result.
+![demo-atr](./demo/demo_atr.png)
+* Pascal-Person-Part Parsing Result.
+![demo-pascal](./demo/demo_pascal.png)
+* Source Image.
+![demo](./mhp_extension/demo/demo.jpg) +* Instance Human Mask. +![demo-lip](./mhp_extension/demo/demo_instance_human_mask.png) +* Global Human Parsing Result. +![demo-lip](./mhp_extension/demo/demo_global_human_parsing.png) +* Multiple Human Parsing Result. +![demo-lip](./mhp_extension/demo/demo_multiple_human_parsing.png) + + +## Related +Our code adopts the [InplaceSyncBN](https://github.com/mapillary/inplace_abn) to save gpu memory cost. + +There is also a [PaddlePaddle](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/ACE2P) Implementation of this project. diff --git a/preprocess/datasets/.ipynb_checkpoints/simple_extractor_dataset-checkpoint.py b/preprocess/datasets/.ipynb_checkpoints/simple_extractor_dataset-checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..3e2dbeb09e6f76d625b74d48055819c38ff6265b --- /dev/null +++ b/preprocess/datasets/.ipynb_checkpoints/simple_extractor_dataset-checkpoint.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : dataset.py +@Time : 8/30/19 9:12 PM +@Desc : Dataset Definition +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import cv2 +import numpy as np + +from torch.utils import data +# from preprocess.utils.transforms import get_affine_transform +from utils.transforms import get_affine_transform + + +class SimpleFolderDataset(data.Dataset): + def __init__(self, root, input_size=[512, 512], transform=None, file_list=None): + self.root = root + self.input_size = input_size + self.transform = transform + self.aspect_ratio = input_size[1] * 1.0 / input_size[0] + self.input_size = np.asarray(input_size) + + + + self.file_list = os.listdir(self.root) + + if file_list is not None: + self.file_list = file_list + + def __len__(self): + return len(self.file_list) + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w, h], dtype=np.float32) + return center, scale + + def __getitem__(self, index): + img_name = self.file_list[index] + img_path = os.path.join(self.root, img_name) + img = cv2.imread(img_path, cv2.IMREAD_COLOR) + h, w, _ = img.shape + + # Get person center and scale + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + trans = get_affine_transform(person_center, s, r, self.input_size) + input = cv2.warpAffine( + img, + trans, + (int(self.input_size[1]), int(self.input_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + + input = self.transform(input) + meta = { + 'name': img_name, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + + return input, meta diff --git a/preprocess/datasets/__init__.py b/preprocess/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/preprocess/datasets/datasets.py b/preprocess/datasets/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..433f15af93029538b3b039f8f207764fcfe426d9 --- /dev/null +++ b/preprocess/datasets/datasets.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +# -*- 
encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : datasets.py +@Time : 8/4/19 3:35 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import numpy as np +import random +import torch +import cv2 +from torch.utils import data +from utils.transforms import get_affine_transform + + +class LIPDataSet(data.Dataset): + def __init__(self, root, dataset, crop_size=[473, 473], scale_factor=0.25, + rotation_factor=30, ignore_label=255, transform=None): + self.root = root + self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0] + self.crop_size = np.asarray(crop_size) + self.ignore_label = ignore_label + self.scale_factor = scale_factor + self.rotation_factor = rotation_factor + self.flip_prob = 0.5 + self.transform = transform + self.dataset = dataset + + list_path = os.path.join(self.root, self.dataset + '_id.txt') + train_list = [i_id.strip() for i_id in open(list_path)] + + self.train_list = train_list + self.number_samples = len(self.train_list) + + def __len__(self): + return self.number_samples + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w * 1.0, h * 1.0], dtype=np.float32) + return center, scale + + def __getitem__(self, index): + train_item = self.train_list[index] + + im_path = os.path.join(self.root, self.dataset + '_images', train_item + '.jpg') + parsing_anno_path = os.path.join(self.root, self.dataset + '_segmentations', train_item + '.png') + + im = cv2.imread(im_path, cv2.IMREAD_COLOR) + h, w, _ = im.shape + parsing_anno = np.zeros((h, w), dtype=np.long) + + # Get person center and scale + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + + if self.dataset != 'test': + # Get pose annotation + parsing_anno = cv2.imread(parsing_anno_path, cv2.IMREAD_GRAYSCALE) + if self.dataset == 'train' or self.dataset == 'trainval': + sf = self.scale_factor + rf = self.rotation_factor + s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf) + r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0 + + if random.random() <= self.flip_prob: + im = im[:, ::-1, :] + parsing_anno = parsing_anno[:, ::-1] + person_center[0] = im.shape[1] - person_center[0] - 1 + right_idx = [15, 17, 19] + left_idx = [14, 16, 18] + for i in range(0, 3): + right_pos = np.where(parsing_anno == right_idx[i]) + left_pos = np.where(parsing_anno == left_idx[i]) + parsing_anno[right_pos[0], right_pos[1]] = left_idx[i] + parsing_anno[left_pos[0], left_pos[1]] = right_idx[i] + + trans = get_affine_transform(person_center, s, r, self.crop_size) + input = cv2.warpAffine( + im, + trans, + (int(self.crop_size[1]), int(self.crop_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + + if self.transform: + input = self.transform(input) + + meta = { + 'name': train_item, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + + if self.dataset == 'val' or self.dataset == 'test': + return input, meta + else: + label_parsing = cv2.warpAffine( + parsing_anno, + trans, + (int(self.crop_size[1]), int(self.crop_size[0])), + flags=cv2.INTER_NEAREST, + 
borderMode=cv2.BORDER_CONSTANT, + borderValue=(255)) + + label_parsing = torch.from_numpy(label_parsing) + + return input, label_parsing, meta + + +class LIPDataValSet(data.Dataset): + def __init__(self, root, dataset='val', crop_size=[473, 473], transform=None, flip=False): + self.root = root + self.crop_size = crop_size + self.transform = transform + self.flip = flip + self.dataset = dataset + self.root = root + self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0] + self.crop_size = np.asarray(crop_size) + + list_path = os.path.join(self.root, self.dataset + '_id.txt') + val_list = [i_id.strip() for i_id in open(list_path)] + + self.val_list = val_list + self.number_samples = len(self.val_list) + + def __len__(self): + return len(self.val_list) + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w * 1.0, h * 1.0], dtype=np.float32) + + return center, scale + + def __getitem__(self, index): + val_item = self.val_list[index] + # Load training image + im_path = os.path.join(self.root, self.dataset + '_images', val_item + '.jpg') + im = cv2.imread(im_path, cv2.IMREAD_COLOR) + h, w, _ = im.shape + # Get person center and scale + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + trans = get_affine_transform(person_center, s, r, self.crop_size) + input = cv2.warpAffine( + im, + trans, + (int(self.crop_size[1]), int(self.crop_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + input = self.transform(input) + flip_input = input.flip(dims=[-1]) + if self.flip: + batch_input_im = torch.stack([input, flip_input]) + else: + batch_input_im = input + + meta = { + 'name': val_item, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + + return batch_input_im, meta diff --git a/preprocess/datasets/simple_extractor_dataset.py b/preprocess/datasets/simple_extractor_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3e2dbeb09e6f76d625b74d48055819c38ff6265b --- /dev/null +++ b/preprocess/datasets/simple_extractor_dataset.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : dataset.py +@Time : 8/30/19 9:12 PM +@Desc : Dataset Definition +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
+""" + +import os +import cv2 +import numpy as np + +from torch.utils import data +# from preprocess.utils.transforms import get_affine_transform +from utils.transforms import get_affine_transform + + +class SimpleFolderDataset(data.Dataset): + def __init__(self, root, input_size=[512, 512], transform=None, file_list=None): + self.root = root + self.input_size = input_size + self.transform = transform + self.aspect_ratio = input_size[1] * 1.0 / input_size[0] + self.input_size = np.asarray(input_size) + + + + self.file_list = os.listdir(self.root) + + if file_list is not None: + self.file_list = file_list + + def __len__(self): + return len(self.file_list) + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w, h], dtype=np.float32) + return center, scale + + def __getitem__(self, index): + img_name = self.file_list[index] + img_path = os.path.join(self.root, img_name) + img = cv2.imread(img_path, cv2.IMREAD_COLOR) + h, w, _ = img.shape + + # Get person center and scale + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + trans = get_affine_transform(person_center, s, r, self.input_size) + input = cv2.warpAffine( + img, + trans, + (int(self.input_size[1]), int(self.input_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + + input = self.transform(input) + meta = { + 'name': img_name, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + + return input, meta diff --git a/preprocess/datasets/target_generation.py b/preprocess/datasets/target_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..8524db4427755c12ce71a4292d87ebb3e91762c1 --- /dev/null +++ b/preprocess/datasets/target_generation.py @@ -0,0 +1,40 @@ +import torch +from torch.nn import functional as F + + +def generate_edge_tensor(label, edge_width=3): + label = label.type(torch.cuda.FloatTensor) + if len(label.shape) == 2: + label = label.unsqueeze(0) + n, h, w = label.shape + edge = torch.zeros(label.shape, dtype=torch.float).cuda() + # right + edge_right = edge[:, 1:h, :] + edge_right[(label[:, 1:h, :] != label[:, :h - 1, :]) & (label[:, 1:h, :] != 255) + & (label[:, :h - 1, :] != 255)] = 1 + + # up + edge_up = edge[:, :, :w - 1] + edge_up[(label[:, :, :w - 1] != label[:, :, 1:w]) + & (label[:, :, :w - 1] != 255) + & (label[:, :, 1:w] != 255)] = 1 + + # upright + edge_upright = edge[:, :h - 1, :w - 1] + edge_upright[(label[:, :h - 1, :w - 1] != label[:, 1:h, 1:w]) + & (label[:, :h - 1, :w - 1] != 255) + & (label[:, 1:h, 1:w] != 255)] = 1 + + # bottomright + edge_bottomright = edge[:, :h - 1, 1:w] + edge_bottomright[(label[:, :h - 1, 1:w] != label[:, 1:h, :w - 1]) + & (label[:, :h - 1, 1:w] != 255) + & (label[:, 1:h, :w - 1] != 255)] = 1 + + kernel = torch.ones((1, 1, edge_width, edge_width), dtype=torch.float).cuda() + with torch.no_grad(): + edge = edge.unsqueeze(1) + edge = F.conv2d(edge, kernel, stride=1, padding=1) + edge[edge!=0] = 1 + edge = edge.squeeze() + return edge diff --git a/preprocess/demo/demo_atr.png b/preprocess/demo/demo_atr.png new file mode 100644 index 0000000000000000000000000000000000000000..87ce3583a97aa124a93ddfaf38178aaa7ab94b1a Binary files /dev/null and 
b/preprocess/demo/demo_atr.png differ diff --git a/preprocess/demo/demo_lip.png b/preprocess/demo/demo_lip.png new file mode 100644 index 0000000000000000000000000000000000000000..d1b19c533571896d653a081fc949df897013cdd0 Binary files /dev/null and b/preprocess/demo/demo_lip.png differ diff --git a/preprocess/demo/demo_pascal.png b/preprocess/demo/demo_pascal.png new file mode 100644 index 0000000000000000000000000000000000000000..85dcbc77ac8c8728530c5c4400422137a1867883 Binary files /dev/null and b/preprocess/demo/demo_pascal.png differ diff --git a/preprocess/environment.yaml b/preprocess/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..037b3be67a371c390e3c0076f142927a6dc64893 --- /dev/null +++ b/preprocess/environment.yaml @@ -0,0 +1,49 @@ +name: schp +channels: + - pytorch + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - blas=1.0=mkl + - ca-certificates=2020.12.8=h06a4308_0 + - certifi=2020.12.5=py38h06a4308_0 + - cudatoolkit=10.1.243=h6bb024c_0 + - freetype=2.10.4=h5ab3b9f_0 + - intel-openmp=2020.2=254 + - jpeg=9b=h024ee3a_2 + - lcms2=2.11=h396b838_0 + - ld_impl_linux-64=2.33.1=h53a641e_7 + - libedit=3.1.20191231=h14c3975_1 + - libffi=3.3=he6710b0_2 + - libgcc-ng=9.1.0=hdf63c60_0 + - libpng=1.6.37=hbc83047_0 + - libstdcxx-ng=9.1.0=hdf63c60_0 + - libtiff=4.1.0=h2733197_1 + - lz4-c=1.9.2=heb0550a_3 + - mkl=2020.2=256 + - mkl-service=2.3.0=py38he904b0f_0 + - mkl_fft=1.2.0=py38h23d657b_0 + - mkl_random=1.1.1=py38h0573a6f_0 + - ncurses=6.2=he6710b0_1 + - ninja=1.10.2=py38hff7bd54_0 + - numpy=1.19.2=py38h54aff64_0 + - numpy-base=1.19.2=py38hfa32c7d_0 + - olefile=0.46=py_0 + - openssl=1.1.1i=h27cfd23_0 + - pillow=8.0.1=py38he98fc37_0 + - pip=20.3.3=py38h06a4308_0 + - python=3.8.5=h7579374_1 + - readline=8.0=h7b6447c_0 + - setuptools=51.0.0=py38h06a4308_2 + - six=1.15.0=py38h06a4308_0 + - sqlite=3.33.0=h62c20be_0 + - tk=8.6.10=hbc83047_0 + - tqdm=4.55.0=pyhd3eb1b0_0 + - wheel=0.36.2=pyhd3eb1b0_0 + - xz=5.2.5=h7b6447c_0 + - zlib=1.2.11=h7b6447c_3 + - zstd=1.4.5=h9ceee32_0 + - pytorch=1.5.1=py3.8_cuda10.1.243_cudnn7.6.3_0 + - torchvision=0.6.1=py38_cu101 +prefix: /home/peike/opt/anaconda3/envs/schp + diff --git a/preprocess/evaluate.py b/preprocess/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..c1dd9088e5dae7783e00ac153d7b201ff437e6fb --- /dev/null +++ b/preprocess/evaluate.py @@ -0,0 +1,209 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : evaluate.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import argparse +import numpy as np +import torch + +from torch.utils import data +from tqdm import tqdm +from PIL import Image as PILImage +import torchvision.transforms as transforms +import torch.backends.cudnn as cudnn + +import networks +from datasets.datasets import LIPDataValSet +from utils.miou import compute_mean_ioU +from utils.transforms import BGR2RGB_transform +from utils.transforms import transform_parsing + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + + Returns: + A list of parsed arguments. 
+ """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + # Network Structure + parser.add_argument("--arch", type=str, default='resnet101') + # Data Preference + parser.add_argument("--data-dir", type=str, default='./data/LIP') + parser.add_argument("--batch-size", type=int, default=1) + parser.add_argument("--input-size", type=str, default='473,473') + parser.add_argument("--num-classes", type=int, default=20) + parser.add_argument("--ignore-label", type=int, default=255) + parser.add_argument("--random-mirror", action="store_true") + parser.add_argument("--random-scale", action="store_true") + # Evaluation Preference + parser.add_argument("--log-dir", type=str, default='./log') + parser.add_argument("--model-restore", type=str, default='./log/checkpoint.pth.tar') + parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.") + parser.add_argument("--save-results", action="store_true", help="whether to save the results.") + parser.add_argument("--flip", action="store_true", help="random flip during the test.") + parser.add_argument("--multi-scales", type=str, default='1', help="multiple scales during the test") + return parser.parse_args() + + +def get_palette(num_cls): + """ Returns the color map for visualizing the segmentation mask. + Args: + num_cls: Number of classes + Returns: + The color map + """ + n = num_cls + palette = [0] * (n * 3) + for j in range(0, n): + lab = j + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) + palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) + palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + i += 1 + lab >>= 3 + return palette + + +def multi_scale_testing(model, batch_input_im, crop_size=[473, 473], flip=True, multi_scales=[1]): + flipped_idx = (15, 14, 17, 16, 19, 18) + if len(batch_input_im.shape) > 4: + batch_input_im = batch_input_im.squeeze() + if len(batch_input_im.shape) == 3: + batch_input_im = batch_input_im.unsqueeze(0) + + interp = torch.nn.Upsample(size=crop_size, mode='bilinear', align_corners=True) + ms_outputs = [] + for s in multi_scales: + interp_im = torch.nn.Upsample(scale_factor=s, mode='bilinear', align_corners=True) + scaled_im = interp_im(batch_input_im) + parsing_output = model(scaled_im) + parsing_output = parsing_output[0][-1] + output = parsing_output[0] + if flip: + flipped_output = parsing_output[1] + flipped_output[14:20, :, :] = flipped_output[flipped_idx, :, :] + output += flipped_output.flip(dims=[-1]) + output *= 0.5 + output = interp(output.unsqueeze(0)) + ms_outputs.append(output[0]) + ms_fused_parsing_output = torch.stack(ms_outputs) + ms_fused_parsing_output = ms_fused_parsing_output.mean(0) + ms_fused_parsing_output = ms_fused_parsing_output.permute(1, 2, 0) # HWC + parsing = torch.argmax(ms_fused_parsing_output, dim=2) + parsing = parsing.data.cpu().numpy() + ms_fused_parsing_output = ms_fused_parsing_output.data.cpu().numpy() + return parsing, ms_fused_parsing_output + + +def main(): + """Create the model and start the evaluation process.""" + args = get_arguments() + multi_scales = [float(i) for i in args.multi_scales.split(',')] + gpus = [int(i) for i in args.gpu.split(',')] + assert len(gpus) == 1 + if not args.gpu == 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + cudnn.benchmark = True + cudnn.enabled = True + + h, w = map(int, args.input_size.split(',')) + input_size = [h, w] + + model = networks.init_model(args.arch, 
num_classes=args.num_classes, pretrained=None) + + IMAGE_MEAN = model.mean + IMAGE_STD = model.std + INPUT_SPACE = model.input_space + print('image mean: {}'.format(IMAGE_MEAN)) + print('image std: {}'.format(IMAGE_STD)) + print('input space:{}'.format(INPUT_SPACE)) + if INPUT_SPACE == 'BGR': + print('BGR Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + + ]) + if INPUT_SPACE == 'RGB': + print('RGB Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + BGR2RGB_transform(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + ]) + + # Data loader + lip_test_dataset = LIPDataValSet(args.data_dir, 'val', crop_size=input_size, transform=transform, flip=args.flip) + num_samples = len(lip_test_dataset) + print('Totoal testing sample numbers: {}'.format(num_samples)) + testloader = data.DataLoader(lip_test_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True) + + # Load model weight + state_dict = torch.load(args.model_restore)['state_dict'] + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + sp_results_dir = os.path.join(args.log_dir, 'sp_results') + if not os.path.exists(sp_results_dir): + os.makedirs(sp_results_dir) + + palette = get_palette(20) + parsing_preds = [] + scales = np.zeros((num_samples, 2), dtype=np.float32) + centers = np.zeros((num_samples, 2), dtype=np.int32) + with torch.no_grad(): + for idx, batch in enumerate(tqdm(testloader)): + image, meta = batch + if (len(image.shape) > 4): + image = image.squeeze() + im_name = meta['name'][0] + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + scales[idx, :] = s + centers[idx, :] = c + parsing, logits = multi_scale_testing(model, image.cuda(), crop_size=input_size, flip=args.flip, + multi_scales=multi_scales) + if args.save_results: + parsing_result = transform_parsing(parsing, c, s, w, h, input_size) + parsing_result_path = os.path.join(sp_results_dir, im_name + '.png') + output_im = PILImage.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + output_im.putpalette(palette) + output_im.save(parsing_result_path) + + parsing_preds.append(parsing) + assert len(parsing_preds) == num_samples + mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size) + print(mIoU) + return + + +if __name__ == '__main__': + main() diff --git a/preprocess/mhp_extension/.ipynb_checkpoints/demo-checkpoint.ipynb b/preprocess/mhp_extension/.ipynb_checkpoints/demo-checkpoint.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..79a885a1d2abd4a66d30ff9f8d672146d12c23f0 --- /dev/null +++ b/preprocess/mhp_extension/.ipynb_checkpoints/demo-checkpoint.ipynb @@ -0,0 +1,307 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "source": [ + "### STEP1: Generate COCO Style Annotation\n", + "\n", + "Here we show a basic usage example using DemoDataset in `data/DemoDataset/`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python ./coco_style_annotation_creator/test_human2coco_format.py \\\n", + "--dataset 'Demo' \\\n", + "--json_save_dir './data/DemoDataset/msrcnn_finetune_annotations' \\\n", + 
"--test_img_dir './data/DemoDataset/global_pic'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### STEP2: Generater Instance Prediciton\n", + "Here we provide a finetuned ResNet152 model on CIHP dataset with human instance mask. Download the pretrained weight in `pretrain_model/`.\n", + "\n", + "- [detectron2_maskrcnn_cihp_finetune.pth](https://drive.google.com/file/d/1T797HPC9V1mmw0cDoVOPSF1F_rrTcGPG/view?usp=sharing)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd ./detectron2/tools/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python finetune_net.py \\\n", + "--num-gpus 1 \\\n", + "--config-file ../configs/Misc/demo.yaml \\\n", + "--eval-only MODEL.WEIGHTS ../../pretrain_model/detectron2_maskrcnn_cihp_finetune.pth TEST.AUG.ENABLED False DATALOADER.NUM_WORKERS 0" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Crop the original image by prediction bbox" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cd ../../" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python make_crop_and_mask_w_mask_nms.py \\\n", + "--img_dir './data/DemoDataset/global_pic' \\ \n", + "--save_dir './data/DemoDataset' \\\n", + "--img_list './data/DemoDataset/annotations/Demo.json' \\\n", + "--det_res './data/DemoDataset/detectron2_prediction/inference/instances_predictions.pth'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### STEP3: Predict Local and Global Result\n", + "Download the pretrained weight in `pretrain_model/`.\n", + "\n", + "- [exp_schp_multi_cihp_global.pth](https://drive.google.com/file/d/1s30hj8zeYj0wuTA5Rek-one-v5uT7kX9/view?usp=sharing)\n", + "- [exp_schp_multi_cihp_local.pth](https://drive.google.com/file/d/1dwDrXHkhAe_nYtnSqi548zrjo5mlSPF0/view?usp=sharing)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/peike/Projects/Augmented-CE2P\n" + ] + } + ], + "source": [ + "cd ../" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!export PYTHONPATH=./:$PYTHONPATH" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python mhp_extension/global_local_parsing/global_local_evaluate.py \\\n", + "--data-dir mhp_extension/data/DemoDataset \\\n", + "--split-name crop_pic \\\n", + "--model-restore mhp_extension/pretrain_model/exp_schp_multi_cihp_local.pth \\\n", + "--log-dir mhp_extension/data/DemoDataset \\\n", + "--save-results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python mhp_extension/global_local_parsing/global_local_evaluate.py \\\n", + "--data-dir mhp_extension/data/DemoDataset \\\n", + "--split-name global_pic \\\n", + "--model-restore mhp_extension/pretrain_model/exp_schp_multi_cihp_global.pth \\\n", + "--log-dir mhp_extension/data/DemoDataset \\\n", + "--save-results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### STEP4: Fusion Prediciton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python mhp_extension/logits_fusion.py 
\\\n", + "--test_json_path ./mhp_extension/data/DemoDataset/crop.json \\\n", + "--global_output_dir ./mhp_extension/data/DemoDataset/global_pic_parsing \\\n", + "--msrcnn_output_dir ./mhp_extension/data/DemoDataset/crop_pic_parsing \\\n", + "--gt_output_dir ./mhp_extension/data/DemoDataset/crop_pic_parsing \\\n", + "--mask_output_dir ./mhp_extension/data/DemoDataset/crop_mask \\\n", + "--save_dir ./mhp_extension/data/DemoDataset/mhp_fusion_parsing \\" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualization" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "[base64-encoded PNG output of the fused multiple-human-parsing visualization omitted]"
mjW+V7Y917lBp14/TonQGlRCTyD8StmvOt+Vh3JkkLuYOuLSHsAV0j2vLstYA6kugo6bRDEWFO3ou/D0hGTSoFsNmoaPFvsPI6/tYuRjxN4qrD8e2dTrkJggHNiZg5qKFD92JmkGWW+MXAnEQIR2VZ1TumH1ovEakWonw8Hs2jmt86mhcoJaWsvVg2uuII53v7btRjHPffu1aGlMfO8BPesEGjdSNma4pbE453PKUxeYbSMJRmszrO9Gc5dsb71gwz3U0nIhJiYhQyMyul7Cjou9xlTiPNDIDBbSbC8XgkdmpV04gohU1hXVciaYlqLAwEgKWWBXNTrVpjWiUDZ4JMeVLV58+fn89nyl5MrdpSXshzcqgQTykfT/Oc8jzneZqmaZpTnqaUUnp8nQh8PB6TyOk0XR8frYVuz0/TdGyMPLNYj5H7t5zXR48eTdNU9byuNec6nU4ppbVcyNxhItJSDFc1Cj9fZUZKLIkzJ60OgVXlnMu6Qi//6r/7bz7yrY9dLuvV1dWbV1dPvvgpZv7Mx990J2D90KNjsgdP3/3pD1/cHq8e3pwvruVzn/nsv/xv/l9M6SMf/VAp9UNv/doXPvfF3/md33HVfEg45dNHfu3jH/3IV7/6U4oyPkSlKFCTNJ3JtLm8hCmT6Fpu9UWeT+tyZvAH7//8w592mqfEKeUMAqxCDdP06PpBCLXAaTFhGuHYWgHVGK8gUQyb2Zh+kQXUDDlncyYInAdDwTeWJiQRmahq1bUVVLetaISZqW5Gh6EM7Y0Xg2q+X0p7OWBkIaljohKCXu5blgt321JDj9sA3UvZapCQddXuTshKCF3q4YJhTYgfjrVIna/Vz5A7KZwdx5Td1QxKcA8ihQKYTzOIUOpaFc45TzEKt7f2/vvvf+tb3/7Kl7/6ta998+2fvV8Uc37j5nZ1o+rVzEE0HQMeF0ICYHDq+pMBUEuZOQlgDgubWnAZDlMGXYidqBn0YO6qtZZW+BESJrx1rd63Zu9qWvuzZ155WePxPTdshJRsYumXwHW+V8v+phrV3a393t1eR5UMNaVrRRhgoynad8HVK7HT626+uf6I0BSRvQb5Ui14onGHfS3pcCv73affV2fbq2z/bsduk3q5wfdueO/kr3i8Asnr7ubhhWj9PP7dBxYS7qHKu+0kod1LEAC4MxtaoA7uTICXUXH3xO3CfBowcOv7KQEMArM5uWvLixmAKzChkDRST+RLai/U/AMA1KuZpUAQkamOwSytjvaWv8qJiGQrSLi1KvhvtLXTW7K5TQ1Az7xIRL0i38CKITzNezywt7A+AoEb0QOlmvd7GrzhpKjX2uRjp/U2zXv4EQPjSbRTEvURcneHwRzMgwlHHZJh6JH3BiYcISltodoD6hrctY6dwrnDO6KggJGFLxhG6GlO2jbX5KR3SAxB+BzBTBlRLRriGEX/WiO7GpOwJ5Q2VyN3x9e4fMy4v7klq3X2K88z/2r3Dw3zzoIadj1ih4EEFDVUaXupbsny9nnDyXTX2ERk/fW7XRjG7dE0ShY5tpncdGbmSOEbxs02amEiaNGnDMDKur31TidBm59tu99DJMTs7JTymKjETGYjvsPdCc5MLGkgL+w058bM6ncY22Ii8tAygUhdGgCmmmmUzDCr3vIyiTsbacvg5OFkbON4W18IZxFp8IlATKOIRSesb+NYSo12cFTwHuoR8m7Hsj3moe4pGm/xulkVo7KLSd6y48BkPNa7CAiu4HCO9Zfd4uLcXTj33ve9jT+gy6BvOTs1B1cLInJ35kRE6AVDVLXXG+RpmjbY5nVdWtb7KGLmrXiID8RltoFS2gG20BFHtJuImGkou0DLXREK7kinYZ1pNk4CuLm5GQLdevE6Zj6dTrRLXLZbMLTBM7QEdL2pMRHbCPY53TYMbyebXF4W7fN1syYAwC5LBwPea9028z/Ie1HH/vPhitxjex5v2pX7NuOqF13V3Yl9gIpxB3dlTmNEHcota9yeBNVNNdjCw8byCw+tqjJRKOvEXMwvH7wYW11mSbklTzqIisijR4+E09XV1YMHD549e7ZcLvPVw5xzzlHUVNydkQGczzeXWp3ZmUGkhmWtBFvXSrBJEgCnVhbFoediAFLiy4Js2cmUHIY0CwsDQtCv/PG/+9pXpvP5/LFf+9iz2w/efPPNnPNvfPLTU87f+fJ/+MKnP375+Ecev/Hm1fXjB48e355LPl7Nbzz4H/1n/+mq9b13fvK9v/zBl//yu8/ee+fRafrmN7/54Or4uc9+8iOf+eSHPvTmPKVaFiLinM/ncynL1fRAVbWawUlYQtC55ZyXZZmOUFVfy3e+/Rfz1cPT1YO3PvobRIScWt7kdSWiy+UyT8l33t3G1IR798j5lg+mOfPH0k4pIk5rUxCYmdkMqjHdvPvWmvWn1KXW2d0Bq7WtTWpUiJ40rB+D4RzPGnrSmDbWy0h0i1U7iQYIqYvlDccOhBmPiDdDN5w1g19bRMR8R2KE1hF/1qpR8WIIWNUaWfLGEk4pGTU1yMyWUiIvonsFPHLEgKwsbopSFC7zfPSant/cfPDBB1//xrtf/dOvffnLX37//Wc5TSJvllqePj2nlI0McE4EVvNS6nIpy+n4Ueq+Su/4uVeEdmL0PB0MdicjhmvVUthBQiKktZS1DrxHRGZYq9Zqo1K0RQJ6DhwTfsKx66NrSO67iJZ72TJfdfzSYO9voFTd9eBtmOpVoG73o44GxwXWPPgvteoVQPcVjrWXvo3e3tctBGChhwHYSEZbga/x8/hTX/IWDm/I/lWo/9+eobcD7ffGZ4tUfPl1BuB8Cbm9fpS9GQP2nj9pmxwNbu2oin3HxEA9dc02AV7yNw5CWjRmmEW8G4Remjp9DjS+Uzcb9YplO8S+AQnf+zOFYffDjEFwsBGEmnKJoewREVBMGXAmNvKobMckIiwyZKy7Aw5iplS19l2SFEagREKcnEc92K2niIjTRlBC93V0lBSezOhIop45w/sYdl9AWOCb/xDoBoswrgPqHqwwBocoIGag1xik7oJxb8UqGnrzXnswtvwMoOfeCZdSW1nhWol38B7oCDCo1ZR0D6zaZDjA+2+jAcIeWqU00/am5cTrGJzMISwko1OAcJexNiZk2IO4/4e4DwDssvju/Id3rVf/f4h2HiP7t3NsmZkIGIroqB/IfVKTI5K6NVhIkG4R2+QSYYsh3DFtmt3H4drBZX+X4JHKXhC17d/YSIOW6e5G1it/9t2fQD5I0U4AhMXh7EwMclQlIkrCjRvXhAztvCnsIBHcWy/eE/buLSxdmOiwODc/SzN8p5R4WUqthYgiKL9WrbUEO5aIWgAJNQveuK97HeAkLHwxr+Hc/RgU9xxW+bbCu0q0nexl5wHMMg89abQSLb9CM9WjraX+5tg+7P/s+U8DljBM0Xlfw6I/HEcRYUUdYoWIqbUmmcwscqhu4FBQ69qBXPO3xg/LpaTI0s41krXE45ZlEeKBzQCI5JQS2NXULLJZICRpznlZLnvlb6iz4dn3nbozpoWqmlfvMXR9uupOEeSIJWVmphQ3H
PgQzSJVB3YacZI559vbW76bVCYePR/yAMZjaFS9ycSePKY9wrnUdTyLmSXd4fiNqbI5QJBqra7Kd5knpRSrWkcBiTue0vtTgnrs1vDPBNCtbk3P60zWoQdP09RBMjOzSGfNEW+vsxuCiDVNKY2wtGmapml6fHpzPV9ubm6WZWk9oL6UkmLOdKc3Lxwl3dQu5P7wpoT35urqajlfUkpv//zZg9PVo8cPrq6OOYlIi0N79OZbqnrRyiw5pdX19sWLsi4StUAyzcKmeqm1rkso9RB2p1IWg4IZDCMwoZ7Pqn59uspic8b52fn9d3+ktvzk5ufTNP34e99W1TwdHj1+I8+nD3/ko5//4m/dPNNf+9Jvwxk37/8n/+x/iOrQM9L0zre+85Of/Ozxg/nrX/2jt3/yk0ePrz79qY8/ffbe7c2zaUpWFcwpyTQd11JqraV2Ki/gVVctVw8fLctymg/GaTH/+te//pff/9HTm8tHfv2zbzx+8oUvfO7jH/vI9fXp8vxFFOExM9ollYk5LiJrLdStGwMudoDXeMboqExVWQ5RQBVYvbEnNuTmu4SlzByxCkOOhYdwKEMxNza7bwOfyXfmEjNb1zViRGN+ppRPp5P3OiJuiGWi6i2T5xaje+cYhsD2uQUvyTi928AcLUeOhZIaMBKhwt97BLco83j0upy70Ry9JBu5M5NwihWbteInP/rZl7/8p1/7s298+esf3NzcLEslenB78XVdmeV09aHL8mIti1pxUqWF2CTz9fFYLoV6gRnvedISb2GTkTOYBe5sUbKmlHUV85pZcmJdrEk52ZFhqrtFyvTNBDs2ivi77ccbz8v9FYFe2BmDfjGZ6m9fi3r9ccc+dffoRjFIvFeDu+Es+aU0s816/YqD7v7fnUy2e7WyqyP3O2roW/3+I/Lc99+//Njth3c1nrsezj2efAU4RAup21iVLa3867rFoIDfZ5mSdb9Kf/neXWnTRPeQZ7zs/fuglWrcOeOae8G9Zb5v17a0LhsBMG7egskMPdFazzRiMIcLsWlLfh68GICc9F59yci+YGYkZG6uBjN0qnnLKcXERD1k705SqyGO1Iw7XWJowIM8uVdht4NbTds7t2qvSDLNRlv3ujsxR+5OdCmnw+Hv8F6GvYd7deckiEJNhXuUMw7CIdNmjaBm3h5T2KjRD4koinm02RrvjnaB7wyCm0EkuslM/n/s/VmvbcmRJoh9Zua+1t7nnDtEkEkymczMIqq6kKUqNCQ9CBAgQS1VoSCgngTpoQH9Cf0C/RK9SoIklKARBWhovahbjaqu7iIzOQaTSSaHYDCGO52zh+VuZnowd19rn3NuRJDJzGR10isreO7ea6/Blw/2mX322VZRgohEQqqee+JoGz/uBMRa2BB7FKVQ457e1WK7zAxu9GEjNCzKcHKQg9ycJbU0whb368q6vtpUj7BJL8bnb3JB+xzLzudsIyeZ+zRsIyzy/dq65x604sbqatTjLQKMZoCgveS2knS3oMe0iHi7unGsPbwySImoq7gFs5Qig4xbBK+FhWOYUpcdiWHh7kBzPYPYlwVR2a0bLYjyLRJgL8ixXSTFTXoJTRrjLQaSovkivEXboyuWpYiISKbmR4+QuSSDciIyuJtBS1F3m3YZyGpFhMzo7u5WVff7/Zw5cw7hk2VZKipz1OAqu91eVWtVEWHhCIYxS067w+EQjpCylJyzpLQsS87TiveIrCeNhEUlIiIJoForEYd1ztx0ZQKNaC8rDxh4TTPrNRUQlcSmaQJhKafIPNTaUobjGHhIVbpwChkF6bDHFG6ryqhvIk7oKj3jPQ37z+Fmdj6fmTnJFIG+ZTlRlxtCTzcK9KLVBpQK3KJal6UVY9xgaY/SFEmuY/EZNmUYspIoxPCxWZS9u9iHYRqWsVaHXIiFmtkgQA5cZ12vxcyePXsW+DnkEKPVWk/ng7QKmDxOON5FAMKUEkFCW79VROgme6ReoIdrqC+Lw5g+HQ7rXmIWbz+lfD4Wd+3rNnf71QLTjtcRj0DWCGbjuu254Dk1QOtoW9ropfhJ5GslmbagAhuXUtxnSLm6rVD2nXfeAVCTT9N0dXXVyj+411rLuQZ0tFJj8Ki5wgAs6on541cnZhbxNwdNKTEv5by8jxfzz9LV9U6EVMuU8243ffFLX4wbTilNWXZ5muecd1fHw22p7JLY2WDVsYBZOCUzs2qFTapGHpgD2O12ziDz4/EuUSrL4WqXSjnWemCRChURIT8dX51Ptyz5/Z/95be//c2rm+dX/8n/4+add776B3/8la98Jc/7q5lu3v3C7/3DP/69f/h1nMu/+B//D//T//T/+/HHH37/e98q9ZQyyMtSzu6Uc1ajU3N8MEAwd/Ip57SbQMbky3La3Tw/nnSa0te//vU3h+MvPnlTl/LVr37l/fff/9Y3/qsv/94XPvnkk3meT8e7lHie57YyMEUJLLqMDY6B3URi6upyiklNVgKkhejLPM8D711dXZ2XYynnOPOyLN1gaJAyFituwi2sqsfjMcjDY/WIdUNElmV58uSaqNeS0Rqs+FhOVVW6r11Eal1qLbF5nE4nf+pENE3T7WuttT652avqbrcTEdQuHyWNK46eWDD8OLxmaK+y1EttlVFjBnGkDQvnzoiOT3bX+XQ6TdMUPsRpnm/fvCKS46ne3DzJN09++J0//+633/vTb377Rz/6y19+8NGRvlyrJ5nv7g5Jpnm6uru72+2gxilNQqJ0FkqOszMWtSTUotnafD21qrru93tqeaCNpsFwBrla4lYobCmn29vbTJxzo4mGCyzvdgBK0Sm3ojJXVzfYX8Gzm2a+WnA2N45UI2J0G7oHeYYN0doDa+YRNNgRS1jaG2j0V3O33wtRrg7pEdUcjqr2SbC/aI2ctICGXN71Z9hnkbJ5+RltDcr7J7qMfa1Ous2DuGtDp9Du6BwVwFb5sYen9wefYACi1i4En7BRRB+fh+Hf9sp7vfp2a5VGgGU9zDf6E7S5XPx7fOWbzx8gw37bCgWaiMg4SfvH5bEtTvUgYBuXY07jk/GARFTVARYW9Ep9cRI3IwaBU0rhKwLAIoYWoIBJUOCqGxTTPJsqiKKQWjhUSymLlm4ItS6KFThWM0Uz85pzuVaJatLu3Ggaq2ByxBvDWPfuyHNDRCZCPjSqkoHIVAd3dOjXEBFIHc6SmDMHy98teZTVQVRnA7qizIZd3/h7gZmZo85eILSLQdL7ubvA23S/BwjJPUzslHKEHHt2JsGJOPF2bFyQhoUAsAYTnoa8lhsRCQukw0iHN04dMxhITsIgh5jQBg3yem6C9/nbEcRvGAr+5oDf52geTpIeo1pdMI0p7f39BHhvP2rEhJW5s/VFhUPYAQ+hnU283YMBCaDHIQGrXpmYkRxVrYawWad3qntLRG3Y0rt7rsXzDC2ZDpwzzGFGSZKFnr967Hvdoze2dSayKPUJDK6yWam1Mo11AL6pZRolzZdlwWXOSwpwklKUGeXz+Xw+n6IOW8SjmHmed+4+TZlJkiSCqGr4XJlDuHIlSrm7e40MHNXClAb+
GfAJG74WBuk2HgOhLlhtfTALKZQBIMcqY43S2U0Fc4e5mcOnaapNEI/cndq8YZE1ewc96LTO3t7iblNKUVSte9/XdLvItdsQIxsw48zSDS/z6tW7/VfRHPOXjvzwn7VXAniIenHV6mbulZndai2k2lJXqQW++q3yOnfHENn20mitPxUDXQ8oONbwsWXGz2PtNrP9fu/ukd3k3edn1vJFtfqItUbcOOec0jSAXIzLMLLHTbo7OklahPrLakHI/rkMoDjwmHdVoa2Z6+5wP3fVxGEYDVt8rPXaN7yI+jaMRz6ubj2mOkZgsRAg1TntxgiJXYBoLQqHViUW3EtugKJYreScp2liToEJreoWb0enmVldUmzg6l6qoRqfK1FEv+o56akaMxwmcs6H/JNf/jKllCeZUk4p7ebperef55xTYrf9vLt5crWbZoBo2iVJKEcIO5mLmKNoiSKN07xTdzWFmrqG90JEzEI0V1sqrCP8Taql3JXbuzfVSEH/9t/+63nezfvdV9599oUvfGG/f7Kbr6+vn7x68fLmes75nb/4i4+FShIhYlG2qnC1CoJFtb+m26SuKMKyLMvz588//uTlO3kWmf/xP/7H/9E/++e4frYc7IMPPvjqV758PNx+89/928P5JCn1so1tzGtVd6q1Ul8A8aBRD2WM191+q+FosHhNzNDczZcO1eLn8boiy9Tdaw1fVVviwhU1TRMR5V76wi912LHZL0+nU0yuRKuXAcBSOtf90vs+FqXm8iBqGkjusZ5LIuqUUepekr6cLtyyDbNqjdkvK/8QsQJDeJ7nJLnWqtU4S5JcUaddSknMy+l0Op/PSeb9kyf1VH78o5//4L3/7F//6//yxz/66ccfvcp53u1vXh8PNzdXqjYrSjka8vWT6XB80xcxgic3UxdnJ5JFzwxKAgRHQxcB5WkiarKJ7goHbd1PEGYWYg/zkEhFz6dzbPmqiiiJlGS8BTODKZDMjDgoMA/HSDR+yz//6v7yv+oZHkTbuFs3iCAFgHsZbluD5i0g8G2Bubd10KNPYY/g3lZWYb1Bp2GMdtOt/ScsUyeQXsIgf+SpH2/eLjXyFSk+WWVFNmj/V2kryBwPtn78aQzV7eeXv71ovP2aN0//qRHTT7nEah2Mz43AHuqTdJnn2VAQ+FIPxjp+7sl5tZqbNi57RNnAgEW9vu2Su654wrwhPbXVLK61VYXo1C/0P5qPteVEtJVw7M41XJoUNjvRRkTU4et5AG/FAomI8n4XTomWE9gXWHXrLP0eP08CIo5CE+2eadyn2eMkNTMLGm2PECIS3bubiAJ8oouFtrjoo/Ap+pacyLv/0ojImYzaOwscDIBciJpufnDtPapHUAAJfjDg7+9Ef31Rwb+edskIAC7po9hGAoERah8EUevR/m5/wtGlRNv5ukOnozcjivdIThbqQH1RErVlkNDHuIo0+IH5eGDRnhtCRCwyYsbdZUfgRGaA0bCEO9eUtjm6IFd1V4oRvUE02xG1xQLD/sHGoiCidDgvOWci5DRN08QpSc7MbK5GzIycc5qumDml4GUJALcKBpGnLESUJjufzyLSytMAafBCFwu2nrvnnFW1auVNFChuOf6XmWGrEU89la4XCl+d8bTm5Iy+DcERmJm21LsunQKoNnEXmXKAx2EERJxPVS/cdQSE8ggHIFkBFQCQqZUQsgMgnNflSauEqEP4uqzZZ1itOtDG5qu1ABRWynh57i6JvDo7x2+Hnv542UHS2K7aw9xpPdn0p9aC8mYWmZ8DENZa3dfgKhFFTuPw0o3Txt8jKDo8CiL7MJ0HUAzL/ng8Emn447Wu20CDwdBIeWXijjmb+n+MTHc0OQntq/wYvmrV1jSw9nQdlgcW5VUjlACoXYiLjtFFxGRR2yfwoRMoXiwzU0pdyKKpCrHzyBAYqz+REyFi7qPHwLVRVphGf4pIzkJEU8rhFt26Blqkd3flkXqq1kpuDM0oZXU7LAUtrdmAE7IlNakCP6sWAU3TNGV5evOk1pqYrvdXV1dXU+IgRT/dZyKSnLMwi6BVB+DDqcJMi2mppAs7x9JzdbWn8LeQg5KjCSAZKou7miROnA6nu7vlwPLsvff+8mc/uymLlcVvbp4yy+nuTq0khiZOiYXYVSpiRzRB8w1XVXeYV3d2tbvbu6vdngUppTztvv71P0ZKIJpurv5g/kPO6WY//bN/9s+0Lv/yf/+/SymBbKjqqyrApRQxQUp9UXYjWPelxkLuTMU0wymJh89vnQurG6U5p7v3JMmUc0SMRwZI9wdxy76rtUbabSklBIfGQrwZOfdGI23nSGdu0+ar4XFryyC6d4M65IvwoHtUYTB36+Vn5nGfQxhJRKpV3lhXYwninGCm1Wo5BYBsRPc51Vrv7u6urq6ePH2+LOXN67uf/PSDH/zgh3/6zW//6Ic/fvnidprmnKXW5eXLl3Sd/+k//+///Kfv/+f/+b9hYcbkBng1JQiTUdj95JkJBHI6i4gp3E0IrmpGTCZhJzdJMrhr5Es2gh0REQmxhCQGKJ6OGL0MD7xXFkVHiQifZCiGb/bNB3GbT21+D6H8zZtQn3rFi8zA0egRMBmHf5q258ML+WMWrPXKE9tPxp2Ete4RfXF3kBMJNZ+9IN7n2q0VnxsEbm5yNdHGF9TYX/LY8Z+/beVSx109xlBt99Eiqz1P6d5FP0PYpkmyP4CFWK3de1cc39+/hDdrJ23+GTiMN+EpbqW3xzoQkROygCUd3YTFK2AikpQS0pzh0p9mrGxj8QqghW72jAMefhIfjZ2dEIZMq8pA3lZpIgI1MBoUhubW7lU0GgXCWzdyM7OJhCFNIDTULUJ4mpl94xmErMMVXeVlu3wPAIaxmA8oC9AqQRnLWvN9B1qlXjOdBly89/YunIDUwrzUALw7mYMagInof6ckMEcxemtjhDzq0dNAg48M+H4bb3N+fXajxxaCv8F2CQs3hT0fW/toXaCGJ7/7W6hN1VZixxHuBu88keH+4Z4ACG7+phboYurlICUbVQERQ7X2JdG2fUV983J36qJ3MZgAwA09RhHHN6EBXsO8wx7Y/r3aDJePP+zk7a9aDzITUfr+e38euXM556urqwjUhNJAqeeU0jS5iMxzFkwgNwUzJ8nIGUBIIzLz7d0bIorKB8HcJSKHT7sp59yLp0/MqqBAhiFeF7PfG/xmgEVaulegUERR+LZAjDfanx86XpG7N6kbRMAqUnpARKZUDIDPrYjnGgdzdzMK4t7oSgwd+Q67VutQANDpdNp2PXU/VlnO6+barI14OgOGlslqR1InxHsPGAJwaLBk47Q9FoGU0rLEGIrKGept+W4XpFbfEm4Uhk54iwIfanWzYptUqFqbY2DcQ1AfH8yfIMitxmgHbxZ4NTiTIpLzHKN2mnbxymqxyIaK/rw7vFHrjJRN2+Jb37D4RrAuwHBiiRcQRmrrxo2MEsAiAQgtIjbMNNwq3kXGu8gyd0udQKaKUFCMkdmUAkiYIxjOnLkHiMZu16b0iEl2v48B7O6cklWtYYmaRzKA31ebjOojSUSSzLB
RRaBF0Rlh3GsppZbFXeNVVF1K1WLOrLGgACh2PhVZitelmJnwJ1HBfpKUUnqyS5LTbrebr/Y55yi5kSeZ55nBiXYiHpdlEIMMEZAxgFQ1ypmQLKo676+XWggyX6Wc6HQuWk7P37nJaT5JBQpsAZIkYhfVEhy/PlMSEUNB4iKRfMYEAyc3M9Onz56czsdpmg7n40TT0+fPAOB8KER5nsrpmJmeffX3URaZsuS0nI9Bq45RJ2jFNk11QKxYb8fUs6Yh7LH6xetIiVigpoS0nQgeAgBdN4ggFOUoew3Prd9kYA/3yJrjGMCy1uxZ1+Txw5jp28XNzJgz89B8W9d97sTUbp+055Wcu0O9eWfQxZM3c61dQlWJPMgV1CwGMLMzqWotymxrqFP4fD4fz8s8zyjmlI5n++lPPnjvvR984xt/+ufff+/ly9em3csu9Md/9AfzPP/y7idf/ep0PNqUXzFk4t3pVG6uJiC5sTo5sVN2UNQYrKbkBnNTBSdmuOuynHIWb3JTzk4wgzURajJAI2cqVg81M2JXLdYEnMNAajSEcDvCLNbjlhpFfXfZLn3b7JL2Ci7/+Iwyyrz57z2T66+EGzfb/HqePlzH+Bkshv6jt8SjPqvdjy6uT+Eb8LyG8uLbNR51cZP3QZG3pME+/Pox9/i69zHh29V9hmEX8zR+ZV1uvu0vPd3o12jbHqPNAz6MHH7Kbx8iw/HhI5a1h2vx8onfEjCky+83lwjrZURlN5EQc+7Y09EiaX02EMMjs9haeAMgIpFkKCABG5MkkeTE7LUU2nhv4+Au+39BxRqan2MFi8E0DvAgAzg4zOzWFdz2fWaAhV1Rmcmp1fLqIcGepRc0on4nUYgUzDZSgWJJ6Aa0MhqnIm4yflutE3TXOOFasxFAB2YrJhQe78D7qPPIiliDlmTd4BkuOfSJPE7PDkRtAyKPSGHXim+3xzIiUUJCRNbDg2FR97WdL70Y/f2OsdKecbhs/j1olwvwvWjhI4H6QbGmUEVeoeP6200PhNYu3LVlEDR83pRpHFGhIUbyMCzbXtJCzc3pwgNFgIgsdjznLgXUCJB9PEhk+1ljr1AXWhu+DmzcLuNX6M5ldw8/r+n9mhNbNMjdezJMlPTjH/10YIB5nsM7HuZRBFumaRKR6+vrcHgDPE1TzjkYhqezzzNyzldXz4M+UOtiXdLTrCZkZnEv7hEZM1MvXnPOQ4Be3UwNcDcwaJRsRutJRMynR8CazRRTwlv1sHhUAjTEZILBFZkmRmLBNtrYZ+301AhX3SxbBV6p1TdjX/UMMDj6gKWefmbaFCCoi5d4pzIG1BjWPxr6sqG2Il3OFH2j8ksabQgnjBVWG1c4QF2NZSLWs/GOB1oLwyillGTqILkhMe1Kp2ONjlESQHfYzdI1S9FdCCMZL46PqwzqbAAtVY1BFYgxXAZxnt0+qWopJXIR+zjRrsLVbbzOIRnvukGUDY2z3dLlEF/zEywAoVDnlG6TE+KfZoaCkHMBJPIjhDhl6YgopAXZyYU5SVYrHQ1uV6Mo9r3uO/25nGHkyjFsrGpRItK+Q9hWbAkAQTybGyMCOyBKYyrBRJiqxKyAu1dNZ1eYm7dgIpmX6mWptaDrWbn7mbrG6YsXhZlTSmmeoq9SStM0PXnyZJI0TVOWlDJPknLk+1L2kBSnuHFxM4eRpJxnNYQDKCWmsx0Pt65gPsBT4ibRREQp5bKczJyoEpFXF5EkkwGgc0hRJaY0ZXcv56XU8253fTicQnyzWPnggw/e+Xv/AOp5mrRYvtrDDeW8HA9Pnz6NUFoSivE2pt52qFysm/2f0vXQQ+ul1jpNOzOttRIpETmkrWm2spRL0fP53ArAdKrzmERj6Ri3EX+nlMxyrbodrmMYhMvMzIJHF+8FADwBCJXmMa2HARFQf3WXpORLE8hJjRS6sl/GlIkztwmS5N63zhTrQ0slIHL3amq1HA4Hnq/2V8+rvvnLn7z/7W9991vf+s4vfvHLn/z4L6+ubqbd1TRNDLu62v29v/dH/+Jf/Iuv/eEfFPrwL374433+/Wf7/8Evfv6Lw5vDyxe3y2n55MUn1dyVWOaUd5BU1RfTnNndc87kuVYNPFyX4lbdld0SsUPNLEjXnMgNjQEQ0889qCLn83lZzswUvksimvc7dPAMVUxrHGNEFTBiF78yXvhNxgY/H1zhcaS1EsyrldD/23rl3i9XoYzHrrsxB1fu/WdkP17Kt3QrRO4fdfGvsF14YCFv5a/agcNbSnSvNsZAv/db8/S1YOD4VdPSW3/V4dWDHLzPbPciqMNaGGbovUceCecPf/X4h1sEf3EiAj/y0G8LNj7+fuPzGN59ePRYlveIykZq2wAKgozz4GQCUeQ2pGrgZlotgEg4nrY2qNG2KD3a1buUJtBWzzHvhnmmcHQHWXtx7kSpj+3OtUMCNzn0Rgnl1STgFGY6Vi2VzYVGfZ2IRRIR+mm3HddlRZssifdVAut4i8P6YzqYJDD1ZqknNLXJbTzN+1vY0Brbq4lHJiMHwA0W9Jm7BgHGErbxdK+RQAbQaxJ2NDiEZB54tH5dL8lvVXt0RowP1/nbVZE3YsgA1tUvpgaP/45twsHekoww9Gki+xfdoms4sQcYOaoamHXY3bb0eNnYpI9RKI0zo1Q14yi4wpCun4JmYhJCeNSMsEEQF60FFca0HUTT9qTuQM9KjcL0x7MaQvjOl1qAYl21z7u+iJnvdrvQwbMF05x2u11P+kKeJCV+551nknie834/T9MU6ZY57fR8GsAygIG6m1lQE+PO1G1gFQBb28t6HuPgNK5ZEs0qKhF09ealY5Az5FwKQYjgTqoaCjHu0E0kqvVIX/5UW+7QaEQkviqIRpTG19iOd+xR+74V0hvxJlZk4hbYMjgCazmKNvyGz5JXnfNuptjAMOtLjtsIOEHxRldjdHuYmcHAhBC7bRBxvQGizs0YDzhWQu8WKHoIcbA4xlfDPO0/adA3rtLLLSTu9FQievpsF+NhpImarVnpvgHDUYXSPMQeLWpojjcVQzS2hYFmAUiaVAf1lBvF31kktbKW3gI73SFqBBYWh7lL8OtC8WWzlLhb8NUurAFiRw9a17JVlWwvi5m0VFcjb6oMw+xm5qjUsTHdwsdzHq8JnpgtLiKAuWWmPOeYB0QETJh3AbBVFWrxd10KgTmxdCldVzNQdXdKVo0X5dO53SNzlvTRL+9iIw4YsJ/n/X4/TdM7T2Zh5Jx3uyknIdqxVBY6Ho/npaq2UHNKaZqSqookM4c7MY3XahVlUfOqSuww82liJlZVTgaDoZpBJAuRMtz1ww8/2F1dnxd9fvOuE/2bf/tf/Ml/+7/j5kc9JxFSZyHkNN1cX91cQzilFByfka7sG5/CMCu3f8cU077Oes9ZpSYCTtQSQdsETCmllEOeN+LPEbsbM3RMhyFpO64YDqNa6zDzOoRcJ3sP33kQ0eMMSSYi6iLy66ykLtUw4uTWw4xj+nezY11vO6F0qMvQyBx2d0DNghftOedYrEop57JM05TzfHV182bhH/zwJ9//3g/+9E+/9b
3vff+X7/8yBsD+inKa5jlf38xf+vIX/uSf/P1/9B9+nW5uMO1vX3xwJe/+4Zef1fN/UM7LixevsuQPf/nx3WF59fruzd35cFzeHI6vXt/eHc/p6ksff/xxEs95fvXyjdaym/dmlT2FO73hFTUldfctUGjRBgfCuZmaAwvktdZlOdW5dWwDhL2jRHoJb+Ai6nXRfoN47zdzqkvrbUWDWG0dPBawenD1i8qBGmeOkNpmT+HLbhlhwAvLuZ+w39hDmdY1w6edhMiC5OZRVm3FKg2xUPPKo2tvDnPtU9omKnuhHkHNBBzI+Vc0gP0CFT/skPu9Teu3D1/H4z/Znn3cXQPqtP7sQXvUDr4PC22wGMxHkMtdRSSch0Q9Nht+T4363eF6ZRA3xaVaPSxph1vIxxFAUWFrNUAjoYKYaCwy5CCyHoQUoksvBgAwey8YeM+qcWrq0OirKzMTBKSbJfWiikbIfo76jArjIOU1Zl+L5zSThhBFHZrrWTg4QiEq02okbiaab6WPtowSwjjn+jLoYpKud/iQm7DBlmH4bzcOd2dp07bzSFuzNhJaeNARYRgCuLOwNyQFx5iM96BgJ0f8Jp1cfx3tUxHs1jWzRgg3O0aMkyFnhdV9FgnP3pAFkbkzvKXSdh4eu1uPFvfzr6+4nSqi6qugeVy47+MVVbpQL5hCjxTMGOokaG7Krd3i3vNkV++2XowrspW6sZkL6AObegwsBvbAI2kpFllyQbEEwsdDzGJmak0vZFnOp5PXWuti3TZi4uhHA9k8T+6626fn7zzb7/dEnlLa7aYZEJHdbjdNE7Aws6TM7IfDCYCI5JyTpBDbICKpjewRPvvuDr8/hTYP1qL95h7kTCJhEl+WCGgpXFt2W6w91g2+7n2M/9cphdFLAWaIws/j4ZNurAlqO0rHDCt3i4irDS7KCpyCcOhdYqhbbxSoiYjG+xsboZk1OsemzEM8tXWGZF8INhcbJlMrXp9UVxkY6sxGM+t8yAvNlbht7olMgdnGb+OAocgyfhLHD8OUiALDAwg0uB1wL168GBclIpERSGwKNKMORLSl9ChiXcU80AOhbhe7qZnRppAGjTqN3fq/7AEHNfxgq0z//QyHFn3tELMXm4o7XLnBVQsRDU2n8YzBHSemCEbF625Pyp4c4bXsj2vildzIjSACDS3h7iwsFAVpADUFKOf86nAAIKDMQpIA3s/u7kHxDeak5Z5fqlYBc1Unsf6Mbgt0msTRWI7MnNIpz3ci8lMvQpjmfLPfzXPOCUlYhL747tPj8UxEznI+l1qNGqV5qVW1Evw8ybzfPSGi8/kYqWvEXt3NQHB40WKUTpzEPZKujZlh1cyWZSFJpfrr2zc06Y9+9KPvf/c7//A//G/OPDNwOh6X2+OzZ08gwinFShLRfu90XACJeNn4aKkPA2/SnRXAqALfx78FKvYODMJBsCzLmJsxvFOaROSsVRsvUQIaqTZJvSg0EgitlBJOkKjYPkZaOCCIKOccKx5a7l8DqzkRtUhjcl/r0Y+VodkxRufzudb6ZJ7XLQGIRxhKTvGrWBBEhFlCFGp0jsK9T9VgmzghKCGq+vr163/zzR9961vf+v73f/DLX3y4LBVOzPndd979ype//OTp7vom5+wsWvTWcSIWfPLh85vruxcfLYfDs6c3PnumvN/v//APvm4KdTglZ3FKajCzf/ud2//3/+s/efHixW7Kd0Ln82I+aSmy3xM7ddFUMyNzJnIjcmo+/I35wkwpJRaYVagGj3RZTtRcYAZ3dOXnlvLxoD2gjH5Ke2g5/S3aUp9y6U9Fg2870nnTPRfHX1q3LTqH1US+jwy9e0UvYWEwtSTgaDeo2nBtJxgU0E7Xf8tNBwXuHlRb3ZrDBBy3+ldoDxHdZ2C8cTOf+snatphwNMPj4O/tJwww6QDkwkwMHhy6rgZ7aHKO6TD0DpgACSTUYiRERAnwVhiBsxDglFY+JOA+jKGxpdqGwDnuZLvhEhGEPcj/Pa4SJvA4EkDX9ycAhuaDi9ONwUYARs2GfqS7m1vHdt6TgdpPhhmDHtsMA33EZuONjOPHSLs0x+neaVuhjs06451EG7+18eybk2ADeJxAxt5VlpgZzi1iSdxE7BBk0e2LF4CI0oDam/9uD3sbrPqVs17/xtvnXGbX+9+QIFZ3yUblOD4L4AfA0BREt8EA9hb3JvRkQoAcniiti09Y5uTka6f1odWam2tEruIHgfHMtFbqbpgWlel1iZulGC4YCqm/RzYpb3ki6d7nY4wNy5+6ZKO7J5GZeVJtkpPMDE8EWc7FfbAEp5xnkWRG+10UUjd3IYebV9Vaz0R0PB5evdJXr94wo9QlpXR1tfPTgZmfPn263+9Vdb/fP33nuYhETYjdbnd9fT3tZunlCnKWWiuhcS+7NMiqmDKeqiGPoS1ormYjsSfnLJyJxKsZVSBKNqcOetFSTXql5tEp4VBfKwQUhBOd2Ck0iAnoNtYqRdNpDEULWEYwre1nkatmxtRWLurorlZ36JiQA+y5rzAhTNL+gqMiNiP0oJk36Cku12AVM7uTGXVQ3bKYOqJbY8db1FfKEjw09PMOEBWtxf16r9Ve+Ht0HRG1qKwZUwp+Wk+9u5/4NDrqfD7HP4UvFuWMxh8mRzxCoIVaq/ao8rCPscYb21Vi2KBTeaNtVEaZN6XkAGeWxsDUCAElESZiElVVujCMsCHbeGfVokl8DRxizkRJ0hA5BEwYS10GQCUiW+uSI4onMTmTC7XaVn3wG8Bq9Xw+A+Y+C/Lg2Ji1TCoi2u+vm95868nIAKzFWskNhjBQa9VSzbwU63YDOdiKn3UhouSFTFnwSnhKQqTCJkwff3yjdt7t5qurK2IXkaurq/1+v9sznFVtOZ8wyfUVE9HptAAcrucgCBubFluWhfyUbCKRCDaKZFN112majsfjzdPnt4dbqih49a/+1b968vydL/7xnxzP5+v9fr+f6/EuMZ4+fXp3d3e9m70TDYaipoiEN6jvzT5A3dbrEYAw+ip+GyWLI9waYyNCfAG63J25iSd7bZSKzXhrA6CUQkRBuY8pPJxzm7Z6iNp0wwoRxyoBAlF2t9xTBNGrpHjzOyBo2G+Ob9oBFJMaKXHUetmMk4GZyda96WIJYuZwXkhOZvbBBx9+97vf/bM/+7Nv/eCjH/3oL00hksxgte73+6989Q9urndmy93d3dWN3Mxpt8s8Oajg2bv+/i9uX7+5ud6J16rnL33h6fF4dz6+yfN0fXW9v5r310+vbp7w/hrz7r/13/tHb968+dNvfPPp02c3NzfH43mXdy9evDBzIbYukRXpssMrFAz81fxyPx+Oo4YHzKZpmuc5ekBEog4ZwCxCRgLxX8PQ+VWjS7/Rdj+nbsVIfPnh9qh7Ub5PQYN/lfYYwnnYVxehwtYIKxLpv2vw8eJ56a0m7MCNwyyLUGcT+bj/I/nVcftD0Z1HMR7ebjp/GiZ8i2PiN5LTRYB3A3HcwNpLAZbGOhB9SCzt4k5hqsLJY5dwbwGTXtQCoMhzABp3rk3JzlagTaBj/Bftt9s7bbkG1
L+ynm3IvH1f9+1sIHzhIwHvUrozGH7u7pZS7osv0NbYuPntfbi3VJbWS3bBtQtRkYtoZO/n8Nh2y4yJaYRWW2+37r2HD3vPrx/S+Ml6T0Dj/gVN1CnqSRAwgo1Bao1j7tO221V+tQH124YD/1paLzG/Bfz3V6qGwSMQjcCEgTCZYDrigY1xSmZK3jLRiCOQbtLryFszvJkANw8hwbaOp9R8DxHRESFmaLhBrbs5AjSu5eWo+dq6/IRcxFcGZVpVTd3UeaPSr6pp8TPMJEkOqOAKoADISCkRNRur1ANjkiS7ZTHVWmsnvomTMO1PB2Y8B0s5Bw2bbaFypFJPOefXhzMQiUxHohfqllKa5xzJijnn/X4fWWfPp+vdbjfNc845zdNuN4tIWC1JnBPMFK5RIxQonueuKGIeHewgIEtyN7eaxKYpeqoanUnpdDoJszDcMc9TFrq9vZ3miaEEq7WqnfYTcmb3g+y/aFaPxyM5nlzt1cqyLPM8C6Wq1WsiYphYS82TQmdIJtDQBc3cnPxwN/dzqCQHKbIWoZVzH9DFnUAw0zaTCeYolQji7olZrc45Ael0PsB9mnIpBQ4RgkiX9ISZVa1XN++cj8dlWeL2CMLknFBK4RDEVy+lBBwvpRA7sUewpWoBXFhi8aha4WFOpZbMzLwsi6q1keBeS6hQZK0gsDtUjTmlNMdb0gLyKKoeFRGbK2I/X/km3tiGrJn62b3l0RGRmBAJCU9prjVTV3mlEepUA1C0LssConm3I6JF693pOKfMIZboLsyuthxOst+59xqkaLQXB0hSShSn2gBmF0o91wtjIjWobMbumbnHk52FzYWIFjgF3psmBKpLHQmX2mcyA1RpcnEjcyKDDzRTag1D9lTVzCnNRLSop+xEhqata+4tWrUsB4CJWDhHnJaIk+RMrkqrVhUlN6jq+XyO8HisRe4evqizmTtIiQxUw+EiRHRLSZX0hRLd5ZyniXI+M5f97hR9tdvtnlzTa7PUfbRM4TK2vM88SbVzxaLyLruREZGRQxfT6oS5lJJoKodylSYxneqBP/zJ/+d//b/6n/4v/pc3+70tR8qJGWB+fbilaTpF6T/JSJlzNqI55ePxaKl5Z0rRsqi7T9NunudlWbIkAEx5nvamYIF5LZUc2aHuNVgSp7LIlEOyd5qy6tSsH5KlhOsh1WqqxczhXMrpdFrM7Pr6SZT6CZapmeU8VVVhPp3PKRi2KR2X8/X1tXafHMfSXGvsBUtdWLDbTaUsx+V8Op281pubm8Phoyzp9nAHu0lpdquZBWpFKwmbFYdN08QM9epBwk+ZiGo1NRNJhOQmWl+oat7tp3nnJkzJkZZip0I8PQOlDz569e3vfP9b3/3eX/zwxz//xfvHN8n5Rulwt9hOnhmKi07zm3m+I6PklIra+TjJ83J4k/fPIfMLmeh6p36rxYXotCjxtNvJxLb3N1N5JccPFrmi9EzyTbKb85v3nuxffvHZ7mqC+/541qfPnh2O5fb2FkI5p3rQggLnXUoJbhXzLpdS5inr4lWdfTJ3spwwT+nKCoSmcq4302RlsVrmaYdJUM8LMqV89pIo0pHDCmCmlXa1MRg2WMuGEdszc8KGvqj2/gBsNALRal4My2NjlNDbUcr625670r9YSTSDvBR3uJqDj4CKB5DM/cJ87A/G92OB7RKPJbUB6Lysh0/HrACasmvEmUBRx899pai19+DOPG8/ubiry/M3N9wDQBX/jrpt3WDfHmMrdHzM0X7/bMbjr0EFQtPt2OQoDpkK845Rt00e6U0AgD6m8upY60A2N1APH7E/bqm/7fzWjpcWDFmVGAMeRc78yGPkqj2G4AjxP2upcEQp1BtUc7JSzMnNrDvggObAZREIw4wAoSTuqrqYMnPOmXohYjCx5EHslJwUDo8qiMwk7l7MyJfugA641uxdktxMXopqa+0eZEqxQyMyAFmgqG6gSkRRv7qNQbizuWdgiwfYnFwRaSAj4zQcKQzIGoEZKC76P9YBoKcgWkPVa8QmoGgfHtZ/zJdDvTmXQWZQ99R0BK0hagqUu1owHoJnwAQIkOACbwZbD84CaMkcb4OL/3Vttk7EAZ6jJ0evS/8s/reEp4Moer4xzuCTwzqqNsCbVwRqsCim1mg2Lc1TQyUcndbGIbOfco8kOiGBqplGWbWqaOiO2OHq7tUnZ3AiWLUKdZHkVqsqe1Y1BgkzwbRUVQdSrU3cpLl8qekRnMuZmSWncJqYu7kZPAmEjInYVGFRmI0NrKpQcQAq7MyUGEJGZzuAgAx3q65wJRAoAtPkSmFNRoVCArGwE5xgZvAesje7u7sLqRjVwszzPEcQ7GdnTNMkUxYRSWm3n6ZpcvZ33nlnP03TnESIyVOSKTFzujudCa1kKgGZZZ7naZqsqrurV7VSaznVEsoQVzdPcp6ZOSVmhGgKrp88y0KRiKWGadqllIj8eDxWO+cs+6tWddDhEWQLkoWkYAmytaqMnMASprbIWl3QDE4dSIMjWtk8UiVwvncyG2DuLT4QS1nbA1wdXrQavKgBZu5wnEtRrSnkEQFw9aiNQATmEeJAW1YU1OOHZvFtqSWnqQcWIqFUB2YAUGsVSfHmAvuNnXtFYmMGbdzVAyyNvwf09Z6F6J2JOnpg6wWc8g6d56bdE2Fmu13Th9jOdiK6efJEVZda5nmO4G+t1SullGQoOUWkzl1ERmQyZsy42zGLfJPfRUS1NuogrZOs0/9ibegirvEgQ8h6e5+jK7zzG0dIljeuU29FAiwUPsIptb23cZiZmQUKbcdTW8XiiYZ0qjMjcl/Hy40FYp7n1RrbuPOPxwvOcA9y4nQ6jUDrsiyn06nHtY7uTo6U0rzLU8oCcujVbp9zmnOeksyzqboQuwklMjDUXNVUoRXuZK5uzMnBqr7UpSiYX5Zq/9f/y//5f/Yf/8dMOL55s39yjXI+3N6JCMGFmpLTZky2uKhZFG5plOMIpsV7zHktdDkEWkQkYk6jn31TCmJ0Tg9H03ihIlHew0+n0xje0eJO+NKJoKqJWlGZyAK1fq04wLpmsrtnliAmhOpyRPJXZfec9vu9ns/zPLuru6YUMprE0njvwtM0MSZGS9mtT56/cz6ftdrhcFoqpvlq3s15N088/eVP33/vBz/67vd+8L33/vyjD1/eHQ+n04J6DVGDqXr1alprq6jp7Gydcu995UIT4g4Lxi1c9ITmI+AkbJSEU045c85v3v/kdDocDofb21tzSmlOKeWU1HA4cC0adJvg695bYeLtB+nGO52MiOKNovuYNveGPk3gfUeP2YWW4nQvsGVvh2qPts97/NvM98v225bP8+sFSBnYLDHerWZv6hj3LvC2a/Sg1EXE6dPudRP42pz/Ikwyzva5TrUu1GNZ3vDKelTB+fEbe9tl3hb5HMmTF3dID2Jrn3X+wZTsHSIg6/iQHv5u7MXhH1Q09U4igjsz+TqnEObydr8zMxJeKW3uW4IE0O4/3n3c2YXezHicfoGNKzPKp6Z2BrQ13GKh6TjZOgGeu3YoM7NH+Y3VqePuRoBdOFka2gqR
j7e2X20W1C5a06dzu2LvsYeM7u0/Gi/s84zSt7XPtdL83Wi/Vk/ypi7LOl+8r0g9mN7O36LajUUYqhH9Xcd0bmXW2g/aphZldZsZYK6boGXKGLyhzchpEUhvGrnS6TO1lnXp25gWw550b2ZMbNxpn26YmUDk1dxAxOBEYhzxcI/qPUJCRu50pirCwhnupqYeZYIiJMpgDP1MZWFmNnHiqAI4iNXWpajcPUpyA6VWc/eJZqvqpZ7KoqpB9mPBbvfBbjdNU2KCu06Jp2lKia+eXs/zvNvtJs5EtJCfC+Zk19fXKcnEZATVsrNWgLuiTnlYxrQsi7nupvnN3VGEUtrtOIXZ98knn/zsZz9L+cl+P3/hi+/e3NyonmE+BG9ieYnVx2BqTqqcndwYREzSaZnmBnWvFapMJALxztiM8gfQcPdRU50xoHm6qDE1ekUKoyRTQNNp2plF0TzpFg7QYE+FszspFXTp1MEYJnIWaKsnCxGRFGOQQo2g6uKdZdqyv8hqraYQyTn7sMTGpNruBH6/KV3uQ7bqi8ZzcVDyMMDVhnO73fWD8cLMUTAj2gaOYlmWZVmqaZDrxkay3++9qkddja6zQkRF64BG3pGwdTWne2vHBeVPVpZgs0Q3xRuHMbr95F6ncCfibvtQu8e0ja6N1sjo2O2GSrSaCOMpxoLl7qAmJxsTsxdaWIPS8chDCbZ3e3vM0BYeNz/Wjq1tHRAr/s6pxB1KonxMIhK6yx/rqynLNE2ZKXRNsyRmzjd7Zqa4Oa2whusSR3B9cm949nhazvry9Tf/3Rffff4f/fN/vn9yDVekfDzemSrg5s2LQRBVraFE1RS8KpyZEZoH7q5q3tKpu2PCCMzDMTEMPtrkna5w4uJdcF/Q2IlSSlE8cxwwhHa3SzNtbBF3jyqRhjYafVOQEG6qjE7JqG4Dzba7UgCK8CLVOac5Uke55XgzHLtdNjO1Amf3xnpl5uPR3IVlzkKcwTSdjuXVm9ff/s73v/v9H3z3e3/+8/d/+er2DiTzvJ/n+XBWIGLJXq2ajpEQ9X4u+IrobCsK+WR2dweZkwkJBCzg5CxCnJBmyPTk97/y9//+31+WmmRSgztKKcLY7XYiB/fS16XLZODN0kFEof01+nbMx9rknfusjK5GbIw9UkQ00rmd7hcZ/I23R4HKr4u1fosabfJ1tx9+5q9wHxa+BQZvMQawcek/fgm6/8/xwaVO2GciTBpfScsnb+lDsbf2yBt6FOlBDPbBDVy0z7RPP7MPP08jolFzwtdPRtsU5m432iVG4yYHntzuZyACBxVubG3DoWLdMwoiCPPQfQmm1hbrtlm43tC2T1o+CxGcFM4jB5AgUVGXNnmnfXcG0JK+iIgpOTOzE6MXhdOLnL51+wZTFOqwnseFYIp+LhzO2Hbx5sMH45EfGaGPnZXui5Teb5E0CJJ+znse83uH/7b5mP6m22aB+rRU3v7tPWp3m0QdFJI3ugF5G1eR0pUIFhHi8Dq0AcmAdolHN0GTOIqsqLEHmRu6wzrueLgyqTmbmn+EA1gyBw+F+q+25iI6UNxadKO5e3p29ZyZbVOTIO4DzEFEY1cQmHpB890cIYbYab05yC2ztNRkH1PLQK6rxgMJtVwAIqhqlI8fNxo3dPIad1wUqjByYyThT17fpoMws2sxq0IQERaqdZmmab+/nlImIg4qI+grX/nKNE27/TxNE6cIyQlnnncT52xWQ2KvqNaqV7tdyg5gKXq8O0Xyz+Goh6Pa3cvDYXry5Mn1l69Vp9PpkIUiB7Kp2jk0ymyLw5HgZEpoZUCIyMxBXpeFiZrPnsi9hCXaCAPDvmcQnMEjcsXMXTLOASggKQWmzjm7kqkJ03lZxKKehIcEIpFRyIy0MhUWhdiC8sfMZpWZU5Kw/lV1WZZ5z2bGlAx1xJHG+hija2sWj5unXu1wvMqN6byZaR3jUROMbTmQAyJSr6jWVmpdBkrhLjbr7qdTy8GD8/b8qlptBWZmpnARiVhoBJCpJ4MJcZqaJD06OtpWtB8TBn3vM4sQvAw9m3HAOnfAo+vMynZCbkHUdvCvA6DXiYoeGgf0OpAY3/ZN18YNxFBpdOQWtmo31daWLuXqXrF6K1YhFqKMHkiJcbJR912JVWY2hFjG48SvRm6kVtdagcpBRSJeauFjAcDwUJNiZv0IOct+mpNQlHBNLIklZ7iYSwwMZknKMFM/n/5P/8d/+Qe//5X/4J/8k/LmNYBaltPpmIjN68ClpuTJmZmC28sE187sIrTigeOdYqyJ6NRNbBp1J9xIl43V0huqHz4Od1szVK2Hatviw8zMuqk0GJ+0r4RTSr1QNNtmYPg6rlaHWnOU9M9b5iTHnBICw+MFxWBgre4wZpZEQX8K5edXr5dSVYSneS/En7y8/da3vvPNP/32n//FT97/xS9fvb4zYuLkTsfzYmawXRe40y0ea4sV+v+n5oQAbCg3W6deu1AUewGH5F8CZXO4Q1J6/vz5PM9lqUTZvYk5M69qOm3ee5R1uUDXzAyz4SVpfdWqemJIcz1s7vfJhu6N1Pjo8X+d7VEr7dc13X4DWYKPnuGzu4UeR4O0+Rv3vv0MMPbYybcHf/oPH8Vg/tlGMx5821f7JvM4WJdxP3LBHf0NtZHu1plp3Z30K/sOttmD3BxGNIxdGl+1o80IoFZPnRjscGJIy7aycLH1aAi4O1KJ1nIT1oXZPWhjm/JR/dCOBkN8hci7qP6990HNhiQDoqY8mq0d29/mUKaggQ/rJH7YCxo6MQ3Mxpdjr215D4DfqD+B9qLblUaHXj4TgA6nx+cPKJpd4+RTGgGhjm7unznAwiYd1x3mxIMKE/SZ1/2vYWuahRfFJz59BtFj8f+RDWrbrwgcOq8+3Iqb8wyrTygMcldHFiEzCtGCzUAZwRum7seIKVPqsLoalApmYLgJhhO278tjog1rvJlnK9HJiRA2CIB0vZ8hHEeYXkTwDBEwaQ9Way2lTPPzYQU6uXGLMJCuyCFAbbsc07ADmkCYO0CqDrgIR51x644UVQsWuBObIHrNlJwnbaWnnEmiFmdZTGQ6ne1wfEMO4ZxTypyY+YNffi8snpwTJUmJc85pys+e7+ZdDsJVSul4PAJWC11dXaUsObFhouUkILX0/J36+tWLadoRyel4BrlIJqFzKaYtCtztPyIIkzHIzc20SZkTGMQs6ktqRFEKMJ4YYD5bi4y1hbUDDPPqSnGfRD4CF/0dtXBWlH03W0sddksorF4yqwIi8o2ShMZQUS3uqYOxUFxc0rRDK44hWt2lxcdFpEto8iCUCmfzOtCgiMCb42CswxtYgnFj8X8PjYAxOQdeGsmy3viT6OhlUFmwPclASmZ2Op2WZQlA2DpVEhHxxpiI3BJvfD8RkWmaYrRvjW/v7pZlWeIGW0hkc4D10AlRUxkZ0GIL/Prr5q2rBt143c15nFO1oidVvm3ZGpG6Tagwqjj6pue9Oz77U5u51+G/ZSGmLrYB7njYACqljv4EVlHWoDSPNzhuo9pERKNSnKp6pNdTZP9GSA1S/HRWAItrSmnOJxF
JjCxpmtOccn1zt59P0zSFGtRuV3OeAah/wMz/h//t/+af/tP/0fPnz3/6058eb+/IFQy3CxJyaLVqHVmRq9pQ5FWCo3sDqqWtKUAb7B1P12dic+ANAV5zCz6qqrrDjUK2lIasc6cQx698w+4Y7x1dvDcsJ+v8DTMTyfET1fau0c/Aqd15SqlUL6WYwbSxqsy8e+iadg4zIzGJOKCq5XQ6ns/AO/Ne3P3nP//w29/7/ve+/96f/8WP33//w1L19ZtjNeR50qqnjrQnTDAL7sh2YnOENZt0UJ+SZoBfeF6dXIg5OZE5lGjiRClznpBmSMZSYuKoapoSOe/yJHk+n2v4EQBuicfEzMhCIQ3fZmtL0XPfeFXjbMw8TVMbtI5tLp+5hoWJEAMIp+LjrbNAVyF4e1ht75Hj0Y9vaYT3WyesPkox/Ft05P+aaHA99P7C9WnPMo7dDK23uu3fAjjfcvDjaNA/85hHfrRS6xUI67AFBIbt2Ffjt97820Vi3tY/dPE/Dz7/3G3kj2B9losgq20eBBjIYcQj6F4wu226QhJMADUNA1K48bRVNefcq4BvvZ/rhUh6ACQ0tOIWN1ZBsM0XrSm0/doWmsAk7suyVLfk3JBPT9thXzFkR140qLaN4LF5Gge1KdjH02YTXRHgZrDdQ4P9ZMGgpcsPN7384JO3OKoAkHWlcQYFmOEGaZo3Y3t+ARE9UEt6FA3+nW0E8c+lDtUPBzaw0HvisbfajzBAiKL2acQMW0TRoIHSomYFulkWRC1/oD4afzGzuoeM4uoS7q2foUuSUqjoW4QouHs2u222cXYPv203Pn0TKoxvk6IKBCBmj9pUAMxa5GQ1sJxVtZRC8374p4nc1UKmT1WDcUgbfiOAMmvTpXQMLATzNE3BUWtP2GmyaZ7cHWAwkepSrahDa0qpOptVGEGIjQtU1TNJraZqAopYUaUggUlVPy8FdI4ulkQi8pOfvUm9BR+Sha73H+73+5ubq2fPnolIrdW8Hg6nN8eixtN8vZzt/fc/3O2mL33pi7v9fHd356IBjcyKu8KJGI4I0Yx1JswySsRJJgK5tWrhRJJTEpFabge+RkcFQeFTVepVwsIWZGbiBtfdrZRS6nnYiOMFR0gwVsxal3BODGs4CgqllLTUSDmLaI+IpMSh/roxZ5twQLPqOzBrQjJ0MaR8SGKoMnf1V6IL27GD3nsgZ8CebW+MCdD/y0BLAEwpBAaHcHaPpNXSkEmY4D0KHSOtuuF8jgsTkRPc1sttj/QNM5M3TFFgir/H9B6wkEmsE1zH5w8fZzurB9AaMaUob9B/3hIGxg10plvrKBtsVR9hpMftDIoiqo2QvGarBvOViJpuNZFv4vZEJImZ2cwa8bR5rGCu1P5BCJ8xMwCRFnE1s1ZBwaq7L8sSuhdCTZy2lrOZpXlX1Uo9w5wJKaXdlHLOpZQsp5bUl9M8zxFRxN352bNnf/an/+79n//k93//90Xk5YuPd9NMREosxItB3ZIkJxStqtFv5o4gJRiZS6BcouYFSCJVJKeUUs4xAanTdLWn/o4xdm/objqZqKUgRvroWk+1gzSjS4+dhUaWasgr+UaOr/vXWgms8WaJCMIdZ7baWap+LrXWSi0ep30AJ3d3dicyghWvtRKzSHJ4KXUp9vLlJz/92fvf/va3/92f/tkv3v9AHQ4uRZcKYlmql6pqyHliTnaAs4NU1YXWB6FhdvXOMTP02jkypBFARBJKtgo3ZxdJ057315ivPF/Bc+iy+kQppergKba+MSzJzIopEyKI3Zzimxk3Eo0aoaD7SjhKloHdeikpAKMq3cOwcHOL4FeAZC1e9OtJNTy8yt8up+vh1R8O+0+/wwfLUV/B1g82LrN1kH/KGbZ387lzCMcpL5EYrUW6P5+J3OlP4cBuaeNAyDwOLZb2kICEp/WR87xloWZ6vD/DcfnwJKPQ0eP3+cgXI62ANpVpwovyMFTYRFMGNHKPNGBEQfo0qKTMwW6KtCGgUQViilowpjZ3RE3+D70GCRDhjo46A5qNmwCabGIil5BbdHcw2BG0MAmxbqVmQUe5iJD+ocaMsbjgCAA2N+nmug05bbqaAVf3iAfGgtx7OH7MuD9GmwOoocFLKGYPxz8A8GPvPT4pkYfqCEF7JiD4972jtnOFN4Xp1//2onXxaA+u83eyPQwVAngME45PaDNB0GsSDi+eeRtDcYBG0heI4WsC8D0VUyKyUDhfbQvEgIyba2/L1sr1kgQKUotZ42ruFpir7V9xImGYg4g3NqFvFHEHXrCNwoi7p+PyetiXzBzl4NxdawmLxNmJhIIkn5iNSYRzs+DNrNal0+fM3WGrG5uZb6dWJCCuaqWqalQYjxs1RRRaD9v7eLozM6aU54k5pYSNZU5mDBtFwxjupSoR55SZOVFyp1rVzEO/1NzhQiA1coWan6r7aRFR5jO6Pf3yxV3UZo1fqZZpmlJKpZSrLHe3p+Nhcdenz56A8tV+piZfAXc2JDAJ5yYDUw8EhCFoUU7EoMIyzZGd5o5gsILYnETyBmAPmUfknNHKKq6hhjhCbUkpsdD5XNixn7K7B1AEjImjcLe7s1unlZl5JaxGP2DEDmfu2UphQi1ViRAlqq3VhSMAxSI3LHx+bSQZmW8InymlJGRNaaMNAHQvRVwiS1ZVM43s0wvqyAbq3NsvrRVOXPX9YwxERKVfKEZUy82DOzPn1CzyiJC4t3qG6VLbwzexmriHrUz/+CqekbrDciu+UhZFrzQYz44W97eHj0OjQCLWnmnwoJ4GGHgr6uh3rqrcczb6lF7hXHR7HN5/66pl+5gRPCRic4XRCGkOcuk0pw3gxCgGMyLS40HihOdSQU4UJVYJxEzZoU+vnoXTR8iDnJymzMzqXVlHzaFqdFz0VIzgx3Ohwbe8O4pISmm3r69efHx1dfWLn//0e9/51te//vXb1y9T4vEuilaziLdLrUrGMVTdIBAnh7NXVyjIRARq3PLaHGo0daSNNSc2HjYoEjEpmqCLu7AQDRcYhQyvyFrhZ5wkfhW4uhUtZU7EHBCbORErN6p5e3EOb4pK7p0yzSxu5O6mqLUWre5Uo+SG+ZRAZC2uzs4sCqcYii18Tbur3TTPx+Px9Zvb73z/e9/4xjf+7M++/eLlSyJJ842d65u7uzxNkthJ3N1cmGGgspTk2a3NeuJmW40BwC3/kYmIJfazJhbi3hhexEqElFIEEEGZ8oRpj7xzCC3nu+PheDpN00xEzJ7zbMqTOEdOpKmZmToJeQ8Jtrmz4WNHJE77SzRvo24730HExNZdQ0Shvc8bD+uIYGzlYR5KxXz6Jw++fRAkJNCDM+DfBzT41tZU9R8E8TaG7GhrfIq6i+FTb+Neuwhvfcod+ebIhwvy9s4/9TTxP9JW/9U0RxjrDt3ezFvP9taLfMrz0jhhfxZ/6/GfylntxNetTfwwVHj/zxZUa7NZyVttd+qvDdYcN5JS+4QA4SwTupEQOvu+2cjaDQ1U5r7qvlK3EMKOAZIIkgAEC0xqIekYwgYN4gbvlIGW77exNhvMJBucvqD99t
4aRYOICCRtWW+Op4H9NvyRR4Bc69zAxY9+u/brGDwX53nk7y0m7FQGu3+SePi2mKwTbSCBNQL/4Lb+brbHQoWPYkI0L4nzY6IyEcIlb+S78KeskXQigoNJwOYe1bdAIGHxuqZHRb3N2FXXpdDXnIs4m/uqF2Jm5iqhlBSxpv5/jijOFBkiRN37Q91du7V+xz/T4XyHUbOCWXIrlmVHE04iEsq8w8Dl85JzTvMsHF2pAnXS/X7vQK3VqnWbiUXE530gKyJiR80NELq71wYqrGrU7hORmRCJQJSi3oZVNRuMPmZzuGv38jDluDGoajEVRAJSUtXgShEjqORaYeTMUkoJBFSK5pxJ5Hg6MhNKTRVEVKueliWlWmtVxutXd8tyzjlfXb/6wXt/wYx33nln3k1Rr5mIcpqurq52u0BEu8ZVEEGtXs8GBURY3GrEfDhnEBUrZSk0sXWrnJiJnMGG6iRGnZYKV3jwlvV0cterqytmaF2YeZ4m6yDc++5kMPcmpEnDaidFWD2G8/mstaaUWJgNtawmbKyB3mUn0CtBm1nArRH9G4NyLWEPiT9KKbJSZNeg8W53ZXCtpdZQ12yV36OeofcTxoCkLrukqqE51FgigIhobaGANrURWZIa9nSplZkFOcJugfYHNqKeCVb7GQYGow6hbAP4vLO6hTvNrG/Goyv6ecR7WUJ0T0zvvb4M9TxJbOKf0aSrXAIwa+KTETHevJ22LphZ1TpAsrcwyMjQ6NgY6+ZRy0CMqzGxrgiG8UTxw2XRURBvHOyrSlVfoXpQa+rlNFpPG7TFiswRyYpeazX3PE37/f5wLKPfAaiWUoprsciy9ZoB6ph5mqY3t3f7/f7Nq5fzvFfHD3/4wxg5ahp83ZECSh1j19oAElOK+FIQCAGE4yMi3gH2KNfgfEbJ0LFYm1l1DyFiZg7Jom0ipZm5A12XSDfSO2PwM3Pt3P1Y7lKSlHiaplIWbnVQ2+sIfjwLW9W47SheH4MHnJjNqXm8mcRjhSQHVZC6u/PkTKRi8EjMVNWifnfALz/85L333vv+93/wX/yXP3758uVSC5Gcz+W03DEnSbvTUms1Jwu9zgjPmp6DMBDbDRERCcf1iSI1SMSkN0StnU6aJVdTihom05xBajADE0+g5M5KzDdPdrvdvMu7eQeQqwpR0ZKyELtZNWuqsETksXNhnUFjNCp6cu3gqpRyOp28i1DEnkoIDezY3qnP7CCtXXrfH8F1WxXSz4MSfT1hx4Qjp+gecvhN1J379drfEAq9BDaXX221uN6OnIbQ6Pacb+u37efbJXSzfXzG5e4f0DytLWLTz2AE6df6tNjy2y/z+PERQVpNTMBbIsDb3tdb+mHtruh/7RIysSPIvbvTSAnpqXxEFHPDGoIhNBknMlOoVRSRqC7RUlRSSpwSKAT37Z5iZwA27iX6vCUGejBIqacRUn/RSy3iFpusk4QtYs6UM3k1Ix9afK2/Ahx2kgE8XG7NnUuN1jnEYGMLNAJD+kVjl5cWf/F7IPzyTYaGQnsa3n4YS0X/aODA7QB+ZFAYet3IgeVcQWz9+dpC1U8o3jT/ByZ8OA5+hwYv26cgwA62W2BQGyZs87CRQcJKjzRAbyqyHiHuVj4TZOQCBwl5MC47MONIeWC4m6qbsjszotJgWE2hyN08m6WGhZMj1Q4t6M0iHmVTAHTDAxtuzjDsrdfrHk+7RYapmqdEtdRSSs7ZGKjUbA70WJw0K9zdUyl8ZrqjnPNut1MrZpZSsvM5Ok5dzY2dwRPnKdF09fQJM59Op9PplLo2QOMdKsgxiFVmVsuR99NSS61GnBLRIhrWPzMLyAVWI9fZQDjVRUQCnQMk0mRIa10AMCdhBnso8YF5OdfwPddac5qnPJnZJhbXAybmy6JEXEtca4Kn46GqVjO7ffNBhAgkkSks6g3u5nme33n3CYBpmvb7/W63S41Tt+x2O5GJZ4b7qVZ3ZU4yzaW+IRICtOqipduyspxVFWYuyVPK19dzUNeIyRxlOU1T2k1zKefz6ZhSIhjBVGugRIAJrrVo43V6ELEosvJI4cTMpZRQrxERR2QG7iQlFgBRcC/ieEREET0GWswWHtw/BdAriOjxeCSinLNI0i7QEhMs7MK7uzvvSapo2IOJKKXmdxjjldiJOZCUe1M3iUcbQzxGTsg8jk/CMBQRhVvoMTKpmy9L/DCzeAcwaYUioaTSyhKE5T0shmFrMrV4EW/2PKZ2MLPEXhWpngSRdEGCHdBrnuewVwN1dOgSxQZHuG9F6T3/07YTOKzwMautlxTPOZ9Op4HTtt+GQNFISaUm3BqVLbJkVlW3rfGk53MjTIpIBBhjZVFtlxvjalkWTX3xCiIfObELk7NT5E6TcyKBEPnpfBDZuzv1DD2qDBK3KSXePi+sxlidUz6fTrXWu7s7EZnnPUkfM+Db21uDz/PcCNKUitbeUTI6bbfb9UHYpF9yTldXV22pzbmUspyP8Xe8kfP5fPPkSc55WZbwWPGaWjnAZ9NwYebj8ThC05GPGqM6xmu8fS3FE5/P59PpdD6fOhBtWCdGY0SqqbvlA9VEjuKidZqm4/EYjpuUJicBFXNOu+yG4/EOlHOezLku4EyO+dWrN++99+f/7pt/+v3vf//jjz9Weu7OVaWUooaU90RsBHdjYQDqBqdSIssuprxLTq6VSPLE7gdmVvV5Tq6IsR0yTpkIRDnnqksGuUFEjsuSZaeq8z5Lcm9ldMnBgrm7nHie53IuIrQsy26eS2V2LMviEM4Tmxp8l3NKKfG0LIuHkrB7VWXm5XhaliX3Ns+z5HR9fe3uu/nqYBUiqBUTVVQK527UlYqaGK5GcCTa0Gw2zThIPeTwESA1B8xWJ1GHGivx9D4mBAa59BLJcM+fumj0q1aT/o01etRa7V890gZ76V7vrV6k9YNHzjA+pE3PDCD36KXbCvvYRR/8ljb/EyvM9rYvbuAtbSTFxA0yhVBEzyny1d12Adi2WPTR8w7GzL0j26awTe1rT3H/83vtwYNcRLzp4rdbBmn7pG0fjQfantzImAJjEdyrGxGYo4AetzrsfTdX1TpUrMNPGlshD/0N6n7qvqHkZD0tv3njzEhYRLx6dWOHcIrwpBOI3HVBmAfsTu2h3FoVSDIiYh/WsXs8kTucm3pq6+FIndhAwYD+gQ/bQgVGCJPiXnQOAHem6HC2tsjnRbzo4g3iLag+3uwCBIWwqfig4QjaBhVHgcSQhQOALpEcm/z23f+ubRuBcBkn3EztwPHW0HyTpeURG4yYbSA/IumBd+8LizE0/AONEuyKmDjSXoY0D3WhQH3CBAq1BTWNRHXqjDBuWTlCROoRANwUhqEI/Rh81d0ISzus4h6WIKLL9WfNn/ckPAknFzYlgngNu5byNHfSIDWp38hx8lrdVJWsHLQVoZ58EpFIXGn2O9HJTJZyroec51BiEIQCOxNJ8baZEro72d3M5hTpc9OiFc5FbaeaZFp1QayamVdVVfOqIBGCUQUTOXG8V4/yYoCpFatmQYilNEmCmgd0Nq/npYYRpyC+IL/FiuYVp
u7MUaRRHQ6Foy4uoly5xwHOdHsUkQ8/eRmW3DSlnHPqkc+nz56EXZIliVBKkRZFcIiwiKQ8EbfAgrnnSWSIixg0ykZCGEqgxJJZwu/g7onpeFzMWl6pmzM7gZn4uCzU7eB4j8yc07wsSyRSjlU4hpprYuqiYq4MAYycXAGyHsHrlF0gTWlAHe+8U2YO27rHRlYPR1e7aea+r37ENevGzNSKqwOYphQE3d4afdGNfFWWv5CeHygustrUjTv4aU6XuGhkZvZnt8v0rRHi2z5dvwJhs866O1OUzWgE8SHOBLTgyXZ3H1G76B9a1WgtpaR19ei0EbhBhiPi3zvTk6QtGIYznGoxUxCv0csRhDZTYkrCQTsEwAyRTEQinQ1L1uvUrdbwQwtp3MnoJSJibiuLmQMXBmwzGqJngnRNHD5d29ROZBYwpwhLRsqtVnchyWmu2RujciktEOemZkaQyM8NnxlB3NxZc87heEJHxejFrMZwWweeV0EwFRspdPv4Y2CMBZqISilBKMo5m7lpEBzWsiXxXgawHMMvJWkk0moAJkl3gKl6j1f3EdKGQfd9tPuJMhXCOU3i7qHYPM8zFapVrQDEkiZJsxnfHZcvffmrP/vpB9/+7nvf+e73/+IvfvLLDz68Ox7MpuoVgJlXhTqpO6AGb5UzCCMB2BooYDNj9/ijuqVkmzEQXdQTIAf5657Z5LzUMmuiTCKZU0LsL3FSIN6RuTJzuMgZrVCQWQQnBWB3KqXItLpaqGkdIzj/AAKQt7MyMae2LcbQbyRRBtwjTkLbgun34MrDoN8jbYN5tGeqbHDgr9kuAlm/SvvrifitVLe36qb8Zq6zwaJbO2a7qI5v39aoESIe/e7XM5K3sjEttIVOG9viZ78HCMfl3nq39x92A40fOdLfAgi97xQPMLRdQr5u0K5tvFAH2FfgGBYw2i4M28bHGzBzkvD4R/WJILzESfusi72ROYTM2NxC/zPwjXe90naXRCRrnSQikqmLmXkT8ofDOmbmnMIsD7E/Q9/cyQcPxekiZBcpzdQYgNE/Y5nlTQy6wXtbe290wNqZtiLnzthsQ497jHX7Rh7NNL63VCYigtvIXqOmKNMBqWtEL9ttDiBK9+/td2jwU9saJ9yuLUTUX5OFYM/Wb0JtkQdWXgAGXOx/99FyWbgijooqnczs5C0OH26VsBLNiTkooxTBeV7zyNCikSAiDVplGGwtczAmRcQqLh5t+JiY2RG2cLOQ045T4lQZOZPkFCqRRMTOpRZVdUCVwthSVZsyCJRARLWHGhY11dKuF/kjImzMTJPWUnTpAHdEmRJLMPKYLbRa40YnoSmL5zxZdsei1W0KrlTYf+SR5aiupqqTFyJSdV2KFgNAToFBACIGc1IztchHorJ4rdUJQYRTVXbLG1QTXCOMEI2Su6FBj1CIjdsWB5vDwSAO0SA1Op3UTN1dzspcKHKbmX/+iw9Tq52YIhtqnnPOOckSQHGe5/EtEZdTG3bBcWLmlOYkUo63IDiTEbu7OtTUFJQyu8LMgi3qiOtONFGnsQXhLcnUUrL7+IgCg3GxKWetzWR3915CUEdAL0bjmDADqHfs15ZdW82u/lG3GjcoRdwtgk6qGsD43pwcIbLxX/SUwoHQAhCul7j0qkaxjeFH2ZzBL7HWNrCO8cl2aSCiUpZ2J5Hk1fBo8C0bx6D/iu/d/2aJWfufLmvF7OYrX9tKO+zgxO7dlQj33hPhplDaijo6BybsNAaKSGCLxQm05zNH30Rt4nZj7YftHgcyHHdGmzZuhpnBPWvSmgERrwgWkBTdF4WAq8KEVhq1sXY5N/eTw9mF2AyJPXpg0vPCklJmSW3Ljw4vpvEymEY6f7zQ9seW/NnGQw95UXPg2vbpRqh2DO9Y69qLUQ2kUUoJz3i8k8EyjnkR3TckrEZ3DS8MM1uotqYUyxERoeexWCc8CrXKhGMInU6nu7vj7fEwzU/BBPBStGidWCSzGy3Faj2nnIhFVf5v//f/509++ov3fvCjn//8gzd3RzOEfOt5UWZ2hzoFD9Tdm2eJbRt/JmuieTGhYqiUatNkgxxOzYvRVgCoIneP9ep5DTfK6LEx3WIUVmZ2IvPq7jmLGcNJ0RyHVRXuJAyjagrFNME5FhxyJhGp3m4Jl4M23nuowkY/OtxMmZO3JV6HmnyYcveYjZvVYMzni/L1m8WtP/X9xKwLcPhIbMf5ETP+b6rdu/nHr/4bKGXxeZtfWGZvXUjf1rZT5i2HPC7/8+mn7W2IXQ9i8gZrreUoNvezYXB95tkvN6PP8V4uf/spX/Y/vBHhLuRkNhlTwDaHDW249j2ovQAiCJHG64mvcFk9GJEzz+u7aNOEKLaoRuBsgmXrDo6+Mbp7KNMkSvFtczRtbA5ncm/osK3EUSJjCHS1kGKI31DTg6HAgrQ+KZG3TLwGBSPea/2P6JbYNPpcYDRLn8cjuruPg/se49u44uYdxXM8fFWtUHV7NdahuaOXLgCxt3sGgVZKcLvE20fB79r99ggm7MFV7+7+oOPGizAHuqyo9wkSTIE2ugw2VnMHU6wYF2uLIUT9woEORxd74ig9TxZBQnZgdRKt+3IrsMlEBiMP8U4aZSq0BW9GMXB063fcwdgZmTm9s58ALA4jmnJ291OpZsbOTJoJLc4fvomcXhYP4QQCFdNEmSW5e56CfEfVoap1aVFL0jfDfiXiKmeimI7k7gShJCKnblvTftIgRDm424VqJmHjEnsiFuEkU0zynRUPXta5Lkt1NXKHc9Rty5PknA1Yyul8Ppd6Fk4yCfWyG0ytCgi33CeQeavYrmqq0jjca3ihpRGz9PygGssWR6EbBwVxsKXP1arG5gauxnpWPy1xntzioqd499T1M0SEpeHViAzkLPM873a7aZquXEAObeKfoCxJRKiez0AI1dRa1T2AKM71nPrJQ3Ak4Kg1qVKHM0HCqA0Q5+7BFB1rNVpMPGydVf/wcsdah9dQ+evm9cZNZRRLeBNquyBS2vjnSFEa4JyImFMbJtYcKe5opP+eNla7Yj+Ee+I/YoT0RRkW1Vt7xGrc29Z6GIgRDTI1O3epNoz1dpgRoNQ6a2wq0s9TLmbvY5Sh9V30a/V34QPJhNxRQETruqbYQMpBW42vUkrxZ3+u3i1b+jg19eDAAGjroLVktIYNYiKjb9N9cRvb8GV4dijtJeLxNuGkHrK0FGkiAIwd8ARTWNAqAA+Zzsg6jscUkSwxkJQBBbuTObFk4pZXaQSu1Zt6zMrRtVFYshHGB40nhiWoS9SyrM6zbdu6EkaiLG+qxo+X1YSUqJWmiPE2rJntq4/GPZltCw7v2bgcZdNpnSNjWI4bmOd5N18tu4O7l6JWC3Gapvn6al4qXr0+fu+99779nR988xvfvjuc745FjSA7hy/LshyPKaWWPerm7uw9fA3n2GgabdvincYDBuGcIGoh4dNkHKjndkeHQAtoP/p8a5wIsbuXUpcF87IkbdQauLMgVv427I1qRaEqzEJNOotYnBsHONZabPY5H8VvI7LaUxJKKTHgx7d9S434wGBlhwFq
EadB88p7/5a9E13xoHXxi8HRamL79JbjL9rjQOtv0aZ77NIPgr2/4il/1YjiWDw36/MmV/DRPWh89TnO//j9v+2n1hERgCbK4h0W9kxC2nw7AkIXbM9Puy972+O8pQ2D9aK9NUVzBZiXkO/Ca7GOVWpWb5u9BDIINXLs5dYZ3h1qjnNs3LLOVN0EklIa3kd3d9OwHgPDhXXR9/Ze+pDYidwi1Y+cbXBbImgilJybsaFeXTv9RITIERGSnqrnRrE/CHLH6wPLhdUh3j/s6aDsgLau2Wp4RrxU+s9btQoAcIYDUajDW/fG040R/Jb3eL8NG56QAPdOQWwXpwhT0njjj81HxsPx8bt22fp8ucCEQLgEtzOlcUsilzQcpA6m2DXgl4eNv2OyXEYIAWyNMW4aciNJgTpJBd2IhZma8dAUdGus0SZEFxoNRhS1tRjM8FZ3Ar0iVMyd5u8mi5Ep0gjJ6X/+P/nnx+Px5cuXkXR0dzy9ePHieDwuy1Krq2o1K+V8Op4i6vD86e+VUpqTYqmc0pRIFYe7W0kTSZ6AosYWvlq52c3eJeBU3UOckEJcgcGVlGpbWN3drdjxfAroknc7AKXWSHMSkZwSMyViSTRNU5aUJZkZ2DmDibwqkTDoaj+bVRGZdxMxn870xqvqMiWZ55lEWtplTi2PzsMuZGcytihu7u6kw+diAIGDqItQQHEzB4eREIa1UuqdHt8bAGKPxMWmXwgb9Np5vm5RILWlFKCVJay1OgxASmmaphE/3HFk3HEOQJk4pVarsKVi8mxRbS+CEnc1SR7sqYgT1lrNoKWMaIlslCS9RbcCS0gwNo/HZWuzDt+dDZpW7M3URCOJ1oDG1hoODlWDeQiN0MTcCml0m77lfQFIiYcdHAOdiIZ0DbZJAWCA3Vv9wGAggy8ijcw8fLTAigEGrNqa++OwcYktMmw7X1dx3Zry4ZzcwsuHqGAlT659DiK6vb0NxE5ELdQcjoPNYbjAkO2AoT9OJCklEQr+cb/hKB1Rct4P+56IiN3Ua60xQvptrOHWrVG1NbjHAfebGhEJc9sNFYE0CC10746I6ZkDjopThKWImWDuFIued/ZB9Ia7WzVVneZx/0PXO1BBakOR1rtl5nJWWFeP7NqpiEKn3ePB5EIS1dzHm9qOW2bu+sMaKKWlxDAnTgB3zdXVbbKdFzHp2vrWa1HE51pqU1hNzj0DVr22kG8wr8hHbzRfAFxyyjkDXM0Oh8P5fK6mKSXoPqfZkT748PWf//DH7/35j7//3g//4i9+rs7VfKnuRtUaRJY0qZYVvwHhKYtIIZGLtNo8ox/GTAFWxR0PFV8jRyhIdRHd7jaKIT/GSKyEqkVhtZiWKlGDkUvznlKb78JkToByuMYj0aCfx40oiYgwGxEZRQ+vmmkYfpPuAeyLCbCZidwWdyJqaLCLtd3XwHi8bYKEIwj+qe2xaMC/j2jwt6M9igk/Hxr81a/VMo6GdwBDSn6kDGxyw3yDrB6DZw+a2+ORokc+3E6rhxiyez9x6XzsrevcNmO37/hbYf2htn95g3FRpsE+3XzXxca2W1vAl7YNterw8DYFo4L35shuhI80LHqsOzrbM5IGmRvp1NnYzOJ6YCaJgAuN6GW35IdVzt3BGUwTRBlwtAghCGLrwsUdXG2jgtu720Aykr4mrNsoXRALLxq9BRA2LVsKOBGSRfG5AoymWBkukos45/aWHr2kjWf4XeuNQP4As/VJEW2dGv1gdP9Ir4EWP3pk3HJwCoIdGm2ojq/30LZUiy2IunzAOk0Gu2Zca9Oo/7OqkqrwytCJ+Ffs4Dlni2gGUUopAhxmlv7h126WZTp9eU4pPXnyBMDd6aSqr169inIRqno4HV+/uj0cDrXWH/30dSlIkzCnu7uqVlXLy9vbL0wTU+WUq6GaOjtxEhHTGp4XdhChusGZ4XmaAFjIxEUYyghAVXfXWpWIJsCdltokHHLOWQoANwUs5zzn7NSwQaMxmjPz1PBPUSLzklKyWpgxJQj5NMk0TeHCj1jZ3ek48oUMXkqppQmiWJfYMTO0tFEAVnQBkDKHxVObtL2FSo2rRaiEQSmlPMnpdGLHwDwgCzbwR58cqUcC1wiY+/7qqVqJJLdzsaWe6HAGjLWz9biRQlNmEbq+vg7o2KOMeXJJPQoRvwijP6w72iy07m4GMy+lMmnnhGikKPbUnQvHydhurXH5ejwQw7hcMVKYZ2gr+BYahfuPmmhYow4qaEVozAOb0TZCtY2P9WdcLXgisiH8OAjZ/f7dXdXG3+7DodgeirmZ+LQh+PUgZGOrqluPkQYqhqOxHwehkjmp1W1AaUQyH0LE7f3YhnLjPdI1VqKLt0A9CxFRBVGkB1gCEXXz3aKrY+TXWtEjve7aHaRNqLMDv6ikd8Fu2nbjWIm235IzgRg5NlSjViuFN5KqY5N38wD/IGZnczYoWwqbvtbK7i3NtWrVamZpl7WUSJmjtg9u+m3zKlvnKqEBOQymcNeatRiu8Qn1qPV4edT9HRHxsw1rdPSSmoasKxGZeSjfLsvq0UCPH25dAGGGjPPEw4Y5BfMy3HggiITYrF3SnrV6+NpLKa/vbrVUd+Rpgj//5JOXP/rxD775zT/7xp9++6OPXxEncD4cztbMClLX6ppkkinpoQS9qongUXQFwiM/5jWaCRLkYemJDO3b5VwF1dmMOBeqtU2fxhf1Tay4j5xlWRJEpzGvVbW45OSuIfPDrRBueBK5C1T4JjrqJCNdfjtbo68MsFCtLQWtzmoUdw0MSDAjCRAIGp5Y9LkQFSA7S2VjO1mnPNgjm36z+1cksCky8fYg4e/Q4Nvatk5gX2nazrFdOR/54frno4ixf/erRiwjYnwvnsD95ka60bivjR/k0sP4+O3c++evC2u3EcLt43fHxTogW9IjDS8wX4KFur0r95YGSY0/d3/9b1kKcfaw69wBTNMUR6it3qVGBGAmdx25d8xgGjMazQ8U3xEJg52tMTM9qg86mbfACKcUm4JDzRzUnLytN4nJ3Zpw6Hg7TBjpnTzCQRH364/HAMOjpl/AgG1sdthSAyvyBpLzeCNbIZkHY+BtQcLuTIgdFeJoxKgBElqJFzTdnM2NfRoa/F172CIqdx+3N0xIm+6MaRIjjTYL/nYSPcCWb2mxO4ZlGd6NmDgPV4xIRfERnOgsp/gk58zBZe1iTkKNubk+YJfuN7MQVwhuamyv6Z2r05KWE5+YeSemqkqLi3/5j5+WUph5v9+nlE7ncjwea60/+dH7tdabm6fTvDudTkZ8XuzDX370y48+Pi31cCx3d4fbw3I8ns4FdYHvrrUqAVPOkiZvM5/D4+HuGjZoB9tGiYiCuaQOd6sGInY4TENHxLS6+1LL6cThnidzIoY5uaeUas4MMlMnSzWxoHpUbQaRmp7dkCRqQzBY8iQppXh1YZzVWiNIWBcjIlWtGqakuHupFuKcFMx1RO1PImIhBty5k9wcZM6OSRLgZuRWmQgQImLi3f6mv6eeOKRq0GoOJBC711p1BGeudtdh65pWKwYUOoGIPnr5pulMiDC
ljjB5stdzykE3DXWfaO4uzMwcGGCYqp3qFgOyQoPM1j18Gxs3Hl+9Dam2gkM6gGmxqT4Im/kYhVzGFjVOGAbxoDjSWph7HccYCMTXIFU3WPkevuq2/3rn4ycD43Vab+O3DOt8XGv7327ia4TsqykQZLUevjPqmkoRjUzuWmoZUHOYs8w8KhaM6R1tt1tzCMevompLPNY9eNZcqo0AGQVWEOdnDoJfI6vEeLgMNvZ+TjQ+3Dz12p8DX21fxwBFA+y5e6KJQOwtt56MyEJAKxayWI/S+Dn5yQNKu0ZumhEkYr9q4dAtpVipcBfhaq4OD9Uuon5CWZazbdJBJcqlbMJ0/f7DbojSBdH/QQRljDTpEUze+EHGw7t7rfV0OgXMsGAwtsdxs5avS5cugO0IHC89XkfVnsdrPZQapoS5wXwLI7sWq/fAKTMHmbyy3d7e/uQnP/vgxz/7zne//93vfv+DDz8+nwvxpBV3p0NxBxGnzMxG8FKOy62ffaLMzEQI1mVj6W/edaBIj8Risl5RdrDBKbSFswSC5lppWbAsi6pu3U42KtA5CGZmqrCiVtxUYS4gZ6CztZnhvUalGZikd6wAMIXDWRoRdCBwd4+Xrj0hVDeU5jY2RFLQr8wgwYIzNDrWhaVOdN8y6LPvYbjGsTHBP0drxzcD8YHd7/eNxb+h9rbr/trI5K/Y7l13Myxpu0p/zp//Ru4obqT/cwuf1jDCEC28V8di3PPbbow2OY33lvq33MnbHrCNrk8dkBuz1e9hQtyzZdcbaEfC3GSTO4C+KcQ/O/NzXCoWYWv7Qg/uwR1EHuyQWHaYmFhEbDh8CUN5vzma2+XYNrfXKJlRwcJb/XnzyzFw0Y0Dp0UkoJEvYzfWrU+nSZcGBgweyUB6m17qaNCpBe82V5G3v46BTret78jNW2UIuZoWs2gF6L135nqGCyfJ46/+d2jwsxp3oWCsr3iNE0aU+EF4/JE1AVvPyyYCv7nSZRoL+q7qHQ3eGxfhPWmEBBFuE9GYuRXzI5CvxQfDi7ux34IBB10rTnvov4SpkzIdSCrlhYgm9mp1l4qI3L36+fl8FhHxZ3J1JV5Rb+vp9PUvT6eT7a5xdcWgJ8+evTvvr1+9vpU03x5Pr1/dvrk7HE7n02k53J1Oy/kv3zw7HA6n0ym0SQ/H8+FwOC5lKYWj+oGrN93WsE9DfUGrObNUs5QopVRrBdyI2Y2TkMNdDZ68q4ag1Ttzd3J87Wt/AIDYnayUsiynWqszdKmuWpeFU5RVOBFRmnLDY8IsmHia5yY2WIpGZy2lxkphhqIV/iwMu2KjuAKIqJ48hLWIKGxNkKkqA+5g0BohIicCo4nluGtnZDHB724PKaWUI30zNEJDBKIwMzG8+d0piKgpTSCq6lXVIrRo5u5PU0mtVlAAwNaurq520zzP+4HNwnhNErmL1K39S0b1imG6dUs2ftsW7E1Eawvt4rq1BMdGB+Mxft49/WF564gGzHPu2+cmxaupbqz70Li91RZ3U9UQsd2ivrGBxq/iutEDkW8WZxiW8ToV21WI2NGeNZw0cf6Bo6I/ebVQu7Jo3CQ3HO5jQmJjIngvdici09SAkz6oQzj+yz3PeCuV5k3HksPyNWvRJxFxa67Z/n6dmYnXkgzUc1ljqNd6emiXjA63Tdw4ziBpRRTtXVn0Cdzh6sTE0tjx7iABXLzptZohVkzp4sbMzGRqZsI8TdOh1P4qqVatdQmY5L4yM6VHPt2daR7OFN94pmtd3MktYuPVvd229YnTSuttxnbExXRTYDAuHaHUyCEM4J0zlc7HRgdysfQzr2zY+LzUKF/RQ1vdexKHgWTYWEMlGECt9Xw+Hw4H1XdF5LAs77333ieffPyNf/1nr169Op+KM1d1tTOlnKe5nJallHo6U6IoNstZANRjHQ6LMU3MrMcDFcawNewGVe/5xuOP8/lMT/ZjApZiwS7BWuxou60Zeg5hrXVZvJQyhXcG04BGEcTr02R9EeFKc3dzl14kppQAhDx+e14W7+HisQLE8EgpSZc+7NRQj+CHt+DHOtlHis6v3QiE+9bevfbbxtv6W4xM3m9jxcPfHkh+rFEHRo5LTLjqD8EBcDMfH3UrPHpeeuxvelA++zPv7y1QcGWKfq7WVs77n7hROD47LbIvIN4BH298iEQEM0gAPVlRWbCHmAgj04TQdZX9EjYHDuzuWiKiqG8BIoAF5K7eNIjRFBSI2Tbelm5y69B1BCI8qG0RaP3jbWeK1k12QmQVbD+837Hj70ZfofFPNCfQvffycO7f8yBIQ4DcttFmQiIBBmKHXuYQ4mIE/a79BtqnBfoCu/inwu8Nt+T++x7BunDAd0C4cXsh3AkNOGB4lOIA5jDHhdm6+Srw4f236oQYPJv1c3ioEXZ+ZycB6cPDzc9//rPnz58v56NI3c9pv7s6L2cSdrfzUl+/uj0fzkQE94lFLVEm8+TIpvazn/0s5/zs2bN3njz50rv7/Pe+5G6lnk+nw/l8rrW+eKXH4/H3fu/LT589/+STl69f3y5n/fDjV8zpg1989PLVm/NZP/zlR7Xabnf14sWLu5Jvb29VNU1CSkZQt+VwC+aUprIs6jJNOzU3y/M8n43uljuZcoS2TPS8HN959vzZO0+/8MVnaRZQlYlOp1u18u6779y9fPPy5etSyicvXxPJvL969fK12VmVc84JdLh7MwtfXV2V8ykx72zKOZ/LeSmFiaa8y1fTslRzojwb8lK8uDtxUT+XxaaI8CAAcDNi1IhhWphsJzuQWa3sRvBCxmzc5N0rA8E9TAlESgoLKp+DjLy65NAzbcatQBhMRHpWjTwZRjVzF4gz0bFEIUEWYShUC5xJ2N+8jOHYyrWHekfOuzlGEjFz4iwiOWURWs7nKBjiPbOrG6Z3qgjpfwBEKiIyTYrFralf5swdd5m1moEIC0yLjmCmmlZVs61nDrWVVfRa1d1TmlJKbqQaGJJqcTMXSczJ3YlCjMQYSKlp/RNRUS/1XFQ9NB9zy6mrFOxequYiIruJzEx9qZWZY1cYCKrUyk7QcBVmdy9LxH1H2mHUEmgV6kqt07z3JgUT6hfs7kupYweVrrtrLWO4hRBV9XSyjiET8woIzWwFgG4coNFcBCkJrC7LaTdNbZt1pbB5i9almnSWLCLEFGWXpBQdMbEePqWchZLUotbwJJMhkIx6DfK7u1drAlZOOC53wwXQfK8RtiEzMhdX91LO7Rgh931nUgcjInwiLgy4uVnzyMBBWMyF7Xw65JxznqLAA0Dn8wmxtBERJYiAKFgTi1Vi6lCB2dyh1RVZ2rIIqsrZk/u8LHe7natb0VKMWMQICj/rYoxTPQvRQka7fPPOs5OWu7s3wgjduhYkZydrj59zZk4BCd0EJPN07VpUdbebd7u9KdRBzOdapikXr8UrMxfSiuotkr9AXGRKlG5v76A+yZQl1VTg5YtffA7Yyxdv1Ojf/lffSykdi5mxymRmBgOR1+pOhoiHC4GoCABRMjMhQtd3RU+FI2eJwrPKZzdVd0pgV2ek1wAfTjqnqdbDnIR5V+
uN2hPIWXFml1Tm5bijcwq7cwEvdZdcbvbvvnr9sYmKn0jI1A1EtmOdhRhSKl4kuVoOdxPvre4BqlbM1bgez3eLF5a9674uAvecQHYS/cLuSnY75ZRN1NmVqp1tn3dcnEmud/OyvEE6az7g+rrUF0yHUs/ACXlHUHIxgoAZ2aPsEpmjCJFd6DF0skNPyyFIl5wZtSXAPlJ5uy0I2MYwe5BkaI4gDgiBqKnY4bIcwsAYulHFHAdsCX5hD/K49LZY2bj6uNv1AhsbnWi9/4YHrCsRr201cvA52rDs7VdFv28BgZ8THL7tsI1/81e7H6ILH0d/sSt4oJZw0XKZ/TGt0U+74fHH5Z1TTxG/d2TLZ3twel0/pYsDvF6cJLQ4g77YAJgAsWgTgIzJ3YwM1EQNouvEmZAIgGlRO5fjLjPvLdEuXCvk3kpw9K3BA+8xjV0snEyht8/CKSeOSTCc1IOoIqGh6MzJiVjYIvckLOQmEA2P64LIjI2IfFnrTbR54SFlShFwEwd0OyZptw31DFnRVommo8EBdN2ntZOBderRhlZKKx39IudwG7m893L72xcevMQOrZHD79AP4X6eh+sGAg3f++hXm7p/R9uYv6V/Ekt94xEOvmiEttFLoEHWqLvBCQ5oaPMSjMgM2lwXZg6bPHmfLxP3kLUbw9TVtfmCHSF863BmSpO0whIeUnbOMGLnzHkiabjRHdWQur8gKqUaVB3qIsSIdKdiNXKUnJnT+XwSoSnLlK+0nN39cDgsyykLpZTOd+ej2s3NzW6a7u7ugDCROec8TVP4X+d5fvr0aY9ukUia5nRzcwOYu3+5+MtXr6dpurnZPXnyZbevAnR7d56nvSrujue729M07Z4+fX53d/eLX/zik9v6+vXr29vb169ffvDRh29ub91d4a9v7169vk0CTnI6v4H5nGdGJZMnV+LshYyInHxOU6L6kx//+YuP95xdhJ49v7l+spumvNwd5+npO893L1+8JpzKonUp8Ol6d5VzJnMWCOab3V6EXi+vlrLsshyOp9PhCMCdjqdlXvZ52i1qVs8uSdLMhrMawFf7GzVx92LKGvqlzdJNBPesddFa2DHN2WoppUxZzAgagyZxq8uMZVkAdjdyEgZTEknMvPjiCHnZNmLDJmZuTHNXdVOFw0BEyScA5IyomOxCRAwOKr8ZSlNeMGZfxN+8/rCvn06Ogdau9nMHVxCRXZ4CSQY1MYWsTefCDYlIN+d28qYNo94ifoPIZ12us9YaqX0ia4DOzIiEiEUal8O0Awj3CG71XD7tGdgUpeG47z3unnNmZmtV12zUCk8pYqRdz8ZMq9la5uEifGpm6BIpzMwsROIW01QG/m8kVebMbLaMgE+jcTJHb9ADHmlKyXSNua1u166aOHoYndM4TaHStrYQc6q1hhgrM3c2chRD7eUWeywRzqCLLKyx1ADgqG7Y6bXjbjcW1UUQ1f2C1Lpt28fp+LnFZazzZsfJucsUjZDa9ocRVOcmsJQ4CieomumGnAgiyinUWVsDtRIhW+rs6M/h5kje4vbjbY7H7IAcIePkVsdTDKNkPNH25cZLL7WaWVQBjdjV+RzCrHm89CBTrS/dyMhGbrABqs6ccp5V3czOZVEtESBdjOLDvovFrBwiN4IGtNvrzvnC0NQ+s0ZEnbCqjHp484dxo24tUy9is1VYzci0BduxLJh4OZ7ef//9a5g9v5LUKgECIG9pENTzdAhAp86ub9+IQL27BBCEgBA7sVPVgRm484c5sVatdUEnAPch2LJq3XtoEOBWaLgxGnBJZe/tU2ADb6KID0Y+bdHa52z+yHl+3eaXj/OZjNYxSTuX9dP4jX/DbSwav93t8yYOfXr7a3rY/ipbiJwooliNg0YrnCDAo6JT/+TiuZikYY+wV9q2BtRWwAohshGhPOoJ7cLo2RwAqG/q3MP4IYnYHnzUnOB1+EnUkuEmw2GdQTqEzbk7Yrahy8tnpz6kB3gbDzgmSN9ButBokFUvviICmC4cLuPvxhJaY5PYduxnt8u3/wimU10A9N38t2SC/h1qPdVwuEjGAvt4izQnajMC3Fh+hAvxZIAIDhBTzonZSbtJ1umj1khN7SdbM6wf1/7dDJsWsR+75DA7Ya6q8QgcKTbuyfR4tc/zLDdXTw6Hw5RSKeV6tzerN9d7oo+Ph9swr0Xk6mpXS1NUb8kkzsJZOL948WKapt1uSkmIyLxGkFDtlHPO++eTLJhSWc7u9PypHM+3V1fXu12G3j55Il/44v716/KFd/+ohjaGiLvf3r0+nk4GN7Mf//RnP/7Ln9Za07Q/HE5V/e7u7sc//slHL083N0/P5/P5XItqKYWF6/G0202nN3dErlp+8RNzYJrAjN//2n9jnvfv/+KDly9f5XkfEZHT3YGIMstumuGERac57ed33/nKOzdPpZzOqupGy7Lc3R3P57NTYtCHH7+4O7xOu33K+6UqSbq6yuoHM6s6bEGW1MCJhHfHGG5mlXzaITk9MTPzqqqutTNDPMkuQjW9QiC5V62kdgRCDqMFrxiEZkC7wSuM0EqxG9w1ATAj4tRtYvIkkYdJRFEHwowUrmyU7vFzTIiJ6OXLl8xN3zYKJ6aUiOAoKaV5nnPOidpyD6ClfbrDWZ3M4E7m5ABtQJGDzTzEjmwQ/yV5Z4VBHXARjgBgZ3S00Pow5TGSBrsjrW9QsAZ9DUCr1OcVXWQiCh+PurceSfMNC3Kb4uZoklKrtUQtDsYR7B+c7N4sfOpWqogAHmFCdyJKHe0AWFPOArJGKiU2gPBiWeltg5rYTLd5a6nlhQaADIAt91YqwiDE+mAFj8WiXzpGywXvdzMwmDc8hM0GRvfuvAO/FUaOpzAzkXzvuvHixgYvXf92gMbApaoaQwnbq8bqCYW32UHM7orQ+XDzPloC462KAczSy11sb56Zp2na7Xan00k3r2D8pGjZDMiLIbA9koesqCozRwnDaU4h9iXdm7bthEhIjcRUCAcQIlF3Op/PADiJ5KTuuizuLX2x9joi1GjPDeFHrD5STMc9Sq/uMG6YVqA+hhm2n0RYfphBftFw72wRGjscDj/60Y+uXOuX3/3DP/pK9IOZMSeSRD08zqbOjZ46rojVgFoTQbuzqaKxMCp0rXwtRMZUrLg7vMLUTMnVPVLNh43Wd9ChTfTW9jic27JJ/TNQ3Db5JPz997PL3t7uRQJ/nTbG5WdajrTxFrU/tpGwv+3224cJHxrrQJMT/Jtpv1qEs8eRGwJEC3qwUQQ9whEQxEvqTzcWjaGdwwZwCCHaBsIBPpKuO5yL3VVrJWEEAuykTQnGxli3Y6XyoAKlxuBgxkaILnSzySwUawZjlZnDTG1jtXt8NutqRD4DgvZOc+7R+zVd8N4ffa9cP6SVTTpmyiUs7GU6+pLYrhU/+LVaO8XISQsW4RC27bT635qJ+nemRdiOSKLQX18Lxlq9+WOUr4hpEU6ZUDeMQ2LQmgOGlGAWqrwRbHf3SBeMnTYkFEBMsVdGrT7XbYYDEVXVdr2un4duY6hpRClH3WAzS1OiLMl1EbpOzPv9f
j836RGYlqe6m+Z53oEA5nPRnOYcqpKS43SL1tvj4bRUA6oHF85LOR8Oh9vb26v9+enTp8uZztmvr685A572V/t3af/q1Zt5vxfsDOX2zfsfffjL6+snh/OJyK+vr588efL0yd59Wpbl7nh6cvWlr//hs93u6umz50+fPn33C79n1X/8k7/8zg9+vttdffjhh69f37548eKDX36kqsfj+ZMXL169fpHneb9/+ub21Zs3S1lwOuHj/Ydf+9ofudXT6Ri5e3Belhrr0n7euRqZS+KnN092881uT0ttFMerJ0+/+OWvAMySnPJ7P/jhX/zoJ6fTIpWqQa2cjgvSYZDunGWEHQKC55x3U9ai5+UEYDfNyVq8ydmRJ3gXSw3Tpy8rIV1Ya52EAEAIaMKtnZ8SkWsnN8CFCQwhojJ5EM97uJKISBkm1MLToDgfQKBFj2Nti4PB4ABs5gYnM/cl3AFmBm6ZVCklofVho1SAbIorxoPAnblhMBEeng8zE85J2t6gVdWKmblB3GRN4270CSdUi5qG7Va3VimL99QgOJG5l1KYSSDMDGGmSURSzqfTqU8eHlDBzMqizGteHzV3IEIQhYhC+kSb7iTVWjZ4Bgb3JpNzv2Ages4kBojtgEe1VVxADwBiA4SwiZgN/LNGLp0Aj+gQw6JSC5GE0xXdKAzRliCiepQZ9LHN3Ed37m5N3bS5eMdhQ/11mJjxq5HrOFbMOOxeXb4h17m18Nb3B4Si1XrbmwqKI+0z+n/8yh026i5Sk2kdoeC2b3foNV4KOmAbHT5uvuF2Xrtl8widDX45/Pp11sKS4wCzWuviLTjZZk3DpcHH5QSUNVgKNiBon2Tei4hmdTscz+16RqUu7g7mRs21NmCY2S08gz4G8HbkjH7eDjbv+H8dM5u3TMH4agm0cNomRdCmExAZyxABvNb64pNPjlZvJvraH/6+u2lx85rT+i6qLlxFpRHjfJSuwTp3yDxCkS1ZE0as5kW1qEK1mKHtlw4RIQ7/qCFq/KhqqRfLRFtQOnLtwxi0xYdbU29r/DUiWScOfR673AaFASuebFbCmOP4HHG8z9+2p8XnRoa/wRv4K7axAt/75LepXWDCfqtDTOJtv3n8KXwzkFqsyz99gMlbPn9bs3G2SzlcQ6+1gPZI3u5o/Yq2M8XDa9uHdOPJDYs3FhluFFBOglZzguAufTucotDOiORzkxyLutCIM7TLxbRt1V+DqOPmDiZC7Du9ywYIG6IdkTEVcC6IedLJlsNDFGvacIh0/l434uM8AHpl0XtQ8N4fGw/Rhpa8HcnbRpeOmLcolIyR1mQ0APyqyaW/a79uoz5ZDG3lH39sNxTb/mD7oQOA9gIhF63v5g04kFvMCneXqI014pBrONH6bCL0nFghcfeL3Ys5Ahz3xh0zU9Q2cx1wA0ACzkS4fXNHsHqurhpFh8+n08uXL+Y5f+GL70x5zhNfXe2Ox6PwlfQ6v7XW4JWF5Kaql3JWKyI8TdPNzc08z6QfHW/vPvj5+yLpq1/96vNn787z3qd0Kvr9731rnvbzvJ/n+fnzd3/v3SfztEtyLKXY8urN67tgtbq7nk8zp6fv7pjZ9fb8+vTB7Uc5z19+tvvKf/efTNP06tUfmNOy1E8++UQkqeF0Wn7+wS9KKSJyd3f3+s2bn/70p9/+9rfL8/kf/IOvvPPOLN+pL168LEWFaZqIOZ1P51qriEQi1t3hxcefTJyffvjhh2VRd9/tdvO8X4rO8+7q5unr16/Py/F0VkkmeWLiWqvXk7elCmA1cJUGM4j8fD6fmOZ5vr56QkS11onJo7JIX0XDNl3KSX3Y3AJgN6uqgnXYNN794gaHc43S0gYiCk0MMOU0W6+FaGv1gottVUBj3zUPP2Gk0iB0vJwo5cndYY2nV1tQStMkZL6UBVhCCiwOCMDDzLlzvRq80Za4OM9zW6zJmHkpJT4kIq21ujkYzLnV3SEggplh0XJsDA+WV+6GLDm786o90wu1o6ODxn4c9SfWmezB292KZ45Cu+6uzFlEghdXTIPku+3VahpZnt7EEptJHSRbM7snMTpobLVWtwvxmHHVAEjjrQ1GYtTHG1zHfl0ytHp3ZrVWCzK6iHgvltMubgTosDzGMHD3oKM0v+ql3mags3uGNcCPbnZxzhgAW/QYsbJxgHcIumWl+gbRjQff6P3YiLm1Y6jt9wNpaxO/3dgoAIBaq4B8E/aMmbTb7QBgwYJwwVkp5fb2Nt5dY0176/xaKxPBHyGJbV/f5r0Msr6gixgNViq30G6QdblB7jBfODkBLMRk6qfTaVFlZjON3FrmrqWEEUCDcwWg2sKDW55tqCWrqjbPYh+Em/vvmPlirejVEA3BPnWymG4hJQUrpdTMHswxIkDnPF1dXU3nE5ruF4GcIGDxNvDUPVLpCFDzWmthUiEnATMLURCh21gCGbmIEIfIg5spegE3t3BeoGV6uIYUtPTnijUKIhCJ0dJzJ0EbyuiDsUyX/9iIQ3g4iakDvO3x92xBbwlIG+w3Rve4jTFstj3/IPnwr6U9brD+bbNGt+vh33J7oBMIIGaJP1gEiOStd/2W/mxezy0Avve/f9XGl/TFza7XkFTzVqCjpSgc1IEHxxcMASmoUihXu0gCgyEdyFF/xsaT41jch5UTHttYkZuPr1HpeCiFBuTrHcagQRmlhBCV914Qvu9hPpBV5Ct2J+ZFDjDHxLRNNflGDW0A+wIWAgx4x4G8eRV8Pza4dmzr1Xu4kd4u5/NQkPbCueC+qZDuwWxq1Qi7AfNZTIfftV+j89h0HwABAABJREFUPe7ZaTpkrRZyGEJjKH1aix+OoywyjrotRd2jYUEN7ZuCwWFO5H0XaEqcLerRxomttlTfPlJKiIB1Vz3s0pgW844cqmsdi7Sc3hDJ+XhmAJ7O5yI8kfOLFy9ub19/8ffePR2Xjz76KCV89Q9+//rJzX56ZmaHw+HNm7vz+RwEwqB4iZBZLaVEECXIRRNfk9TMxc2PByU/Mp9v35yfP3v3ZnczTbta7eOPX9TizPzR6eM8W4tFLFDVqMm+41RrrUd18DzPMH356jbn/LWvfW2aFXx+duUiCZie7VANzDLP8z/6B19jTlGHKu/mjz766Bvf+Ma//P/9Zzc3dnPzPMkfHQ5fOi31fC7H4+nVy7tyClqXWV1KUbAkKbtJyvlwPp/hZPX0+s3L169unzx5BvqFEV3tJ6Fa1dyKG7lpTq0SjDFpGElV3d2Ep2mqSzlreedrX/iTP/mT3TR//PHHOByrltPpdDqdzuVUq0bY9/r6Ogy4aog1OWeZplTrsjqSG1aJIDIHPlHz8NzH2s2uG8KxEQzm7sbclmgz0zA/zdU0z62QpZk5h2FJCbwM1T4Q4KpxJXQHILu7s5M1iOWlxgg7nwtvGHFmTXQxhk1oq0ZQbp7nquECUOv136aUVyjCzTsoIqWUKFG27Y0InmFMkM36G6a2u0nyHqZz1ZpGsr4Na8soqm4zw7l6yFo2NKmuYdK5UbHqnXLaiqfDTYtt6hZsZVo3nWDB+kNnHo7P7xkEA0WM
AHPvLa/e0wUN/zdp+HXjs6i6Q+JxhYHYMV09I0baxdcW9h7JiZmjKllJKDYyKKkDAYoaIUPheuycxMc+r4tKDVR9RipZRpnZfoTKaU0sJ35k5E6/U6pXR2dkZEXvT+/ftXV1dRgSZkt8ZxJKLT09Onnnrq/Py8aRKAvu/HcRwPQ9/3OdeKhfNHzCzK/AXxNXoy7pkig9FNSV3NkPvca+lTo5KIaHzu+bf/ql/5JV/xFV/x3X/3733wgx+yotvtdr/fmpe+L0WZSNQs2GElu5uVoilRk0JC7bibBP40BROIeK4/UaE4wJxQ3CZ6zvypOkimgc3EMo9MdxAJSJLMH1gyVipovBmIrltd8GXg7ka2dE9MYriL8Tz9QHU+VczvRCEWesSEvjS6J/AWWJCcJ21SqgDP3bBYvSs11Ccfl01oEAvMiZtmnFEVF1hiwrfaW622iFzjZlg5pBDosTMBLEdXSPDNR+oscHbyo1e6OnHmzYJmt46jTMFtxnF446YV+aZoS2MjfiUieJlyiQ1QmLoVMoUX8wJTJkeoC8KIiMsBalAtZTRV10Ku5NYIWxm1lDIOrjlWdHfvTVZu7fqMUOCGx/Rn3+TtlkLXDUhIxHNHORC1lYCodWdlLKW4WswsuNEUelteYckUnVyi5jW4Xs/BIuIHHE33MOpif5xPmM2taIknP+gc/HkSIJxOnwJ9893E5W4diZSSOAI7sk2WTxLK/vPdz5GBUIKZbYt4JABmFnrocT9zTy3RyzF6uUCn8/cycyMpsmQAcJXOV56FZKa8tflqZgZhOHeb9erkdLy6yjmreWJ2cmKCs4VSqBd3KqbumpmFEQoG7hBphJFaIpB56M8xc3Kwqq67rlDJUKpa4CIuQgKPPD6/3l6WUlJKqpkgbRsRVC6x9Vv1B7EATiICMjdnZjC74fXXX3vqqbvrdZcSu7NDPch/phaBaRDg6ho1tEXaJdp395DxIGI1RGIqJtYuAEctFKbqxRVQd4WVlBLMJWLLQg2jlbZpmh6jFYVRIiZOibiomqlnJ2Zw4qojWHRUM72Au3vTJne/uroyFgW9/PLLq/UJgO/93u9973vf+zmf97lf93Vf91Vf9VXPv/3tKHkxO0M/VolkvVrncRRmSSmdtLY7cNOc3b37p/7Un/qWb/mWv/2d3/kn/sSfeO3B/X/r3/7tq9VKfcqSXSLAT7rlPBYYrP+2bYpPPnz1tXe/+93/8Hu+Z706+WVf9MW/7xu/8e6zz4EZzhhHgCAJRfcPXnrf+973gR/78Xe/+92mJSW5f//qer97+eVXzdF2q3azHrOyCIhee3i52ZyuIMJdooZSalpmF9P06OEjM3t4/8FXfsWXPX3v7v3791/8mU8kgvJ6HMvhcNgd9n0/HIY85FGLU2ocJE1qKCnIzNSNzMDViRMxPTNzMJkxh379FJx3hC0LoMowTv0W8HJClZgLNsRfGWzh47q5pUf9nHmaH6Nek8zVrfOXi9ryr8MwCFGwnYmobVszc7Ugdrq75cncJwKw6ujQb4m8YeyuX3v55Y983uc++9lf/Dmf92Vf8LEffN8//EfnH/mpF6+3No489lTM72/b6+vrB5ePtru9m4ukzaZdEYZhPD7jRIhwVScncUmJOJEzuwmTCDfisb4xM9AIF7CScD+q+zFNxmvJGdMqsOm1b+oyXmduTNImpZOTk4B5n/jEJ/b7/f56G8nVIlJK2W63nNqLi4unnnrq/OI0pZRz3m63EVcc9of9/hBlG9q2TU1UmLyRAiBJ2rZt2/ZwOPBUtYIdampmVtSaomPv1J9tVmfnbdusPu8LPufrf8vX/aIv/ELX8dPf+UKT2qhh8errr/3AD/zAD/3ILnYBACamlhGUfK6dYzbH0yJexyKUpJsKZjRBlZ3HDDBn4QNgM/OFD4KIWAQkXvRIoWHiRCxMInCnyoat+myxtNZNaAKKzCxEzqSzJNxi3L5x2dQnt5rXvSDlRTL7zRFf43UxOOL8CWOae3iMaSpTwVN0hW/GHucF7pZgzI1f6abp/1Z7q91qCpe6LPyzfbDGIY7ZsMepcnPIHbVJp0onb+oWGQS3NkF3T+yTMaJeStHR8uiWx+EALY4sTBzlh6FC3JSDa9Qwy14BoRHsULJ5sZzdjEynjGmMI1KzMi9SfVyKyt5/UwcGH2+3eqQWwpoH9USS1KwVsKlaqTpyzJzHkczhbqpe1K3yFo9RGQ+DQiurFz4bObOLdorfVEA4wzSr5RUmmYxJ+QVAagWqpZQSLkfmkA0I1qV7SLxE9b+Ap1MEgAPx1tt0EWmaxI5SCtR5orRGxoxMnFOajEWHBliYBIs9OikREwfCNDPjyUmZmsa05JIDbbIIAQ2LQjFJ2LPMhrgxc3CcUkpNMzFjCaBkaqqZkUVEEjHIijayIkolm7tLw6pDLgMzNcNBFE+fnCSzR/cfDP3InLr1CcbiDncXJwHYrC5p7Uo1Q2sxtKwFAIsMw4GJUPJKfLi8Pm+bc+G14VUdVVWNIC0nYcApK7JRPpgobUZtHt4f757eQf7o256lV8fTGADuSsTEi1AJ2STAAUgKIY/tbn/oh5P1aaJHh/0+pZVlYWItnoRZ2N3doqglMYtTFD13CrZTpWVQlUx1ZyEjFC/kRMygay/szIkoRbFHU3cnLRxYsL5d73U4lL6rIq4R5xw0xjZ7qaYWWTWAqeWWE+9KP2faeFZmD8tvHPYl913X5ZL/yd//++97z3v+zud//hd/8Rd/7Vf/us/6rM9Kdy54zGgSRvVSaL0CvG3Cta8gHdtiyE3TNHdPv+k//V+v7m7+7Lf+mXd/199+1zue++Iv/VIRwViQkpVcCKldFRhR6nVcoY2tKCypmsQTOagO1ZGIeCaJiex39PGPf/yfvOc9P/D9/7RbNb/ha77uy371r7rztqcxjtBraIEphCDSv/iJD3zgA//f7/rOn/rIx15//fX1ep3a7vLRdQd0d+5oPw65jMWsWONU+kxEF0373KrpuubkpOsi9CroR73cXrlp265eevX1Z557/uT8rFk367ur169ee9CPOWut061eTJ1IVhLxXnfPPoXinQxeyhQxBpkbzdAsmGlV3NgccHMgu/ikB1B7KWBdk5KW6s2iKcbi7v3kQpqY7qFaW6Jw/MxUR5VMNDKLLLJwfUXZtwrdfab3YYqJOMlKx37d+XMX7UVDNmx9HKXxUqxpV5JWpaT9fj8MmYhE6NV1lw2SZNPg8pq+6//z/T/6gx/7BZ/xwnNve6aU8bVXt9sH+10/OBOl1CV5YbVpnn+2H59+8ZX7L792/2o/jGrmYu5qcCSiKrIqYGmaYb87OTk5Pzk1K1rG1PCqSSz26OHl5bB/pnl24M6ZHzy82mxWAqETGYdhHEciT5K8VdeSbYwegSFqGrCJO5kZjDTber25c+fOarUCsL3efeLjL11fX6fUEqVi2vc9M5+fXzx7dvbU0+exH4zjeP/+/evr65wzMw/DsFqtmlV3vr4DoO97Z82lqDZ1ExEGVYGryXmHouYQY7IIWTVuDKTUpM3JyVkj+eJU2rQ/3
VwDH2b58fPzT6zXJ8+97QWCfc5nv2N//WPXhwNJo9rsd+PQF9XVYV/2u7xetRMmdLPi7gRWVWr5UPIm8fawLz5m26ln8g3bBiPl0ZQulcrJuRbL/ZCG7KCmTeiogxIH1OIq/xUMEQ99l2EAjJoWIfsTa5AdKwrGcEWkWFDoghLGUWJ/nSjTIgKIa4kVo47WG6gRUfeeOLJ7HG5GU9Jp9f2qu8PduAEQiQkxySK24tA5kMhOk5Qq0RRXiYTDSB1yYqEmuKMLuY4ZL4bEbeBMCW0br3NKqxnhc5lQX9D5jleb8fBSWQGAL4TN32r/02/z+5rpyFz5h24o5oWqCKU4g0jcGqPb7jkABKnV66ZxEj/YlKc/hQcJEAdCkaLuIzVFKs6MsluPXf9fFEA8hu2nKy+PPOYXKdPzHAtuEEDQOo8McIZF8hgBXpECAwKkqfrQG95OnuqZuoIAYYBGWC80Ahl+gA6wEVTADiJkhhnKiDJSHpIV1UyuVAqjZoBECmGI/I15ItCFLEQuZgbXYRiapiESMphhPPTu3nVdKq8A3V6R7KTlpxQgQ0MIcRonHifeUEIhuKP5F/BS/vU1XSC7aVDwRGJmAOS6pNwzGLWwoIIUyEBhKDwTOZBRMtwmsrOKGFQdSgwT0pytqBKRmoBUVRdpcWa1gELIvyECMy1UsQB77lbUQERN4iqbF9lbDs3F3RnkakIppDODPBoQIA3DAVOMDou40LJU9PL4fBqo1hyLpb+UIiIBIDE57M0sYjuL7aFGM+dNMZ42vL/B6QpsczQ9p7YssHgshLjwXd6IPDBHNGAGzfU2pk1aUq2lFgVXVIukyq9ltfk+Q5O9bdvdYT8MAzMbMI6j2Q2WQljIDAoJDWcIs4hISswiqR3DUIA1iaBN17RE5PCUqmSWVzRl5oUIbSOllMN+n9Cp6nq97jr0fa+ucDcvXsPEPmHgEOk6vsoK17l58ODBc8+9/d69e/34EoiJ2Zy5BUE8aK7VUWfRU/Por+/La3/Wl0s0DweKGpdqIdwX1uHc4TSVYVy+neV7X/ZeoE0jmh3d85BbvtZ4QREyzTkPwxBxlXEcf/iHf/jHf/zH/+b/+9u/8Iu/6Eu+5Et++S//5Z/9Sz8fKc3RBCvZzJyIpDo4mDhf7dqzze//w3/4ZL358E9+8N3vfvff/bt/94/8kT/SnJ4AyH3fnZ+XUnLJ1OhKunlWqGU4ENXZhPrDbrVah2wMAO2HD3/4wx/84Ae/7598/1N37n76p3/67/093/CLftEv7M5O4Obba+oSEgMNBn35J3/yh3/oB7/v+77v/e9//911tz3sVW0YhiEXh242m7ZduWEcx7GoKYqbqhORpHQm3LbtatUCyJpNnYiaZr05Pbu+vjTjdnX2wqd95o9/4CNX18N2V7aHfbDsjKpckBMRWN2q0uDc00xRJI0eSxWeX+J8fP6Tw5cvaznTl6c9PmdvjBPQPDyW3+tmcqxbVW8yDNJ5Qbh1WRaBpy6lrmlXK5Hk3jWJmEiKuTs1TbNer2NAutrDfmy97VhWTduAHrx29fJPv/gjP/T+uxfnxL7b77d95iZ1m5OmW61Wq4ZNmrao7/uDuydpFV4UWorWKgbCcAExsyR67rnnVquViGgembltKLVJSNVotT5ZrU6HXMa+B0lq1qUUIwYlkegEcSOnJmRKIzmEIEQS8UZi7wTn5+cXFxdN01xeXj58+PBwGEJZNMBe13XPPPPMvXv3zs7O2rZ9dPn6breLEiPDMKhqVHDdbDZt2wKYBWPiJc6UFT9mKcg4jrPWaCyqcZqZqRO5M3PTNF2Xuo5baUUkSvVGFqWqMjXuTiSr1YpTGzJeXauq1KSRKZlhSgeeqC8w96OYwS3XKbPUuqkLtyi0Vp4skYOAaXMPBR9UPyhLXUYBKjnX1SzO9Fil6z4YAxQRrkMVPIvvmsohoZTiHtI4fstu9Smi4kdWKNXbeOyE2JNi/8IshjN/mupn3R1wBk/CP9O0ivBL2DAhbVQjhFpTUkPvNK54gyV/tIhpql61nMU/N8LYjUzFt9qnQJvfO1Fox8eQ/+Qv+nayGR/tjVix6V8z//CxL79RzGL511so7smIcc65ra5jR7h46hGvkX1M03ziewII/w+4rnHV8sjxnw970h4+mPZuI0dcBIaeooCBQ81GczUrMFcr7lAjVbXiqhavKztqqQOEbr3ZtMKH6kEshLGollJq2vqiNvgbN14oWv28b0ez6OZxvz1kbimLOuCGQjCCImCDGsKPompWETmZW1FTdTOqMIoi//+GLeQeHth54sxZEvMbWVrXbdsEQWZpVC8t6vnXsNNS1sKzfz4GIQFE6qWm/8VmFNJDZkugGBSm+GC12MI8Rr16mAhLcxCPWZDLZ3D3iQJ57OXqNWJmuY0liMj0KCrjCwXw0CEIZlTcdgxolhURMUtKNMVVPaBOUzOkDWBhdiOQt0179+l7zPzo/oO+7+O+cs4IadeF05eoxgJq7/uRqDZ3OocSvchms2FmKyhVPmheBqJbXFWd1MxLzn2/P1uvNptVHnoXLUcXtc8PXgeHuxApXEABlYX5/v2Hzzz79meeeebh5dV2NwAgN5JUGaeT6EU1xad7WOKCele1TgDNrA4KGZzHUcExZdRng36Zl7jMyVmixPnj5OCppvbytfpUTSWgIE3qDm3bquput+vT7nu+53u+7/u+75nn3vZZn/VZn/kLfsHnfd7nff4X/NKn3vY2Tg0v5667qTan6/Hquj07+z3/4b//9/7Od/61v/bX/sn3/eMPfeiD3/zN33zvnZ/RnZzCTMu4WW0cGG1MkQTsSERgBwxF3WzVtXDd3X/wYz/+I+973/teeeWVVdvduXPnt/+W3/yOd7zj6be/HTBYwX6HJtFmA5QHP/XhH/uxH/2JH/uRH//RH3vppU+Q+bpr+r5fNa21yKPmoVcHkQ2H/fnpSSndWNTdNSKQoWOio4hwklJKzsUjB0lakkaak5958f63f/v/8GM/9qFdT83q6XJtu8MQa00sKBY1twnkYoxgelr4t7wmhj1x2i7f4PwSl5OaFn6f+U/Lsbo8eXkwhtwttDldxBVOqPoDx4sQuR3Z6TfuoShpXXBrMcOUWkkioupZDSzCDTMHZfpp2TzKxsX8kNWbJKnldSK5/2Dn7ttD2Y9AKvTw4DK2q9ziFUmtEQ+jDcVHozHbIRuzVAYBVcEdkDGSSKPqpeRSshAKsw9qnseCBw+3H3/xtdde3+Y8uCt4GMcREhxXVA64e1F2NFHqnImJRFIjIgRxdxbZ7/cvvfSKu4/jGKtfLEoXFxf37t27e/du13XjWHa73Wuv3b+6fn0YhmEYYpGcBZybpuGpfmP8G4sqUZpf/XLmLl/fDbRg7CC2xNQyK4EIwkhwBsQUZDSOJUkU1KFxHMWdqFk6jOZFI/zQRCGhEhWPMC01NzfOED+cMCozJ5Y0ufxi1Y06PHWdwsRZAyz8qXTDJzlhuXB1uc7TYR6fgf+bFFUgUzzoTaQa4/joVL25YAIA1ImXx93VsDwyn6zxxqPHo6/d5gll
k2l5I0UwgjAELxVEKpH40eg93odPhTEovDyTMmR9uRMspKOBdNNTc7wIL+7hLUz4qdOq7y9mYx0JYeeEiTgZaUcmTbRF7t/koogTpwgh/jXyD+lJ4bonB/CeCIiOk2OWV42IWcxMoSc+GtUvJgJ4koEhkKuQwA1k0AIv8IzxMA47cTU7WBkCDapplA1HDwDu5q5qWTWrjaEKw0RwjloCrlNkxWf+PAcy9KIRgRiGQ7BFIqijqsO479KYy9C4Ec/2ALDIEPkUawT8rIRYv0Wpr+Q6dVfQTPlQJgOmco6msOKqxaoWYwi7m5mbMYjMba4FvogQYhGrm/e70ECZs+ei2ZTjgEUy3e07n1q9caI0bzwBYybQIjmX5SfDAX9rkz5eyI+hg2pBEiHASWWVLayEqXwNUWTt1TTHyCVbJ1nshQuQAFvaH4/DXNyERlHAnRZZmO6ec+5ktbx/M5tuL2hyDhYmECwJgbnrus1m049lv98D4CSqTsx+zB6dCI3uoFC3m2wMII44ldi8650D63VHRPPuTuw12YMscFfO40mXNl2XEna73Z3Tk5OTk6s8IIQ55s2ZKMi3QK0Q4CFDMUVTWWgchwcPHnz6p3/GnTt3rq5eZLCas5NNfYxFKmax25GWuB+bi0/yzOkFMZc8EqrhxVyDA9VknEygW6MFj4WJlrYRpnBrxcghkvuYWEvR0nStiAzDMB72xbRpGmmSm/d9vz3sr3bbj3/849/zD/7B3bt3X/i0dzz//POf8Qs+83M/93Pf8Y53XNy9c3Jy0rQti6BYe3YW2b2/9uu//ou/6Au+5Vu+5bu+67v+/X/v3/uG3/27f+v/4t9B23bdathtu5NNMoAUOcMdSeA+7nZXjy73+/173vOeRw8e7HbXp6en73j7C1/6y77one985+kzz0ASAOiAPKJt0ciDn/nohz70wZ/8wAd/8gM//tM//TG3klLadKvdbvf6a4/OTrpc0/eMmRtp1C3AocE5lAlBSiAyuJWpu1V10BKchaJ49cX77v7iaz/xoY/8d1dXV2cX5yLy+v0RkmIpJ2aAg+gJI0ioEbl5Dbq7F43as4uefyMId/yVHjtyazjdAgyLyT6N6JlZ/qTmDJBN0UhUcEgiEhrJi1PN3QVmruIMVyjBjMxJzNSYU5saj4x6VSJKzH7Yb4DVKiVJXUNNoiLWNOLAaO4CF1dqs5JrGiAbFEpO3BY1dTaL8ugQEUfFqAxzgFzc7fJqK1E8npyZs5pryXkoyj/y4x/6qY994vLy4Wq1GobD6w/3QkTSTN3iXGVfAdAUuGMmJuPIBgfo5Zdfnv1ffd8DWK/Xm83pvXv3Tk5ONptNzvn11x/cv3//cDiYWSl95FGvV+u2beclNHQ7SwmpLDMFM83V6nzh4eIqMqyllNBk5pqOHvVjU9AeoG7FNYdMl8EZzl7MjctQuFFtnClNq7oRObGzEzMkUT6MTqilKkEUpTisEFfV2ZmZwsxOlEtBITNzProJRPim6kXVlXFCkuQ+Sb+YG4U+uMus0lMLOkl9bFUPVSTcHNKLdTLCHnzTlqBF7y0/6FOYYHl8EuM1gkxnLryoiJJToDqNMblwmYiqdjnXEwIceljtlUdaISWRATTFCZ8cyY8nvVH0bIpizrfkXn+ecCCqlxw0RXffSjT6FGnTSGDc8Ags7cMbwG9ut/DhdMLEwnvCN/0rHjNzaP/JODCIBLcPRJtA7q3PUhx0wGF8cw54ZAMawUEOz0c4GbRT3UcxHyuZ3NzK0G/Hw2Gz7jz3VkYyCwVLsqKq6BtHyMZltVzKoJYjD4NCDB+hscxRPjuXAwB2VMlr85xzznnod5FMvlq1m80mdRJeeNmMNh5WXpjhR3XIimAfa586MPGTj8LFthJwwxB6jW4gJ3czBQFeoAYrrgZTzQUwmMHq0hn5WRJMQ4Aj+jJvFsyzdR0JcZPhfRQgvQXElh7hWxb47KlcPsiNshMLs+zGJ6fvU8AVzI9NFCeoKdtRoPKIFfVoSi4RXZw5a5NUvqgqT6S72ORj4SEiNWOuFEmdNl1zl8V+u8QeS9S77CZMkKmUCUAaRaGK+ikydzcvgLfEm9MTEdle70zRdavRQnenUdflcI/UCndnlqk81IxRYWbCEsEtEwjs5OQksQJommTmBld3dXM1dRMiRg0Ni/jhcAhi28P7jmTs1RFMMnlvY8zMuFRrIWt3N7L1+uS11157+ulnnrpz56WXXlHVGktxd6vFJIloSuybHAFL0tAbt0CIc8NCc2j+edlmEu/jsH/5yua/UvVwHAHD1O3U90MMm6Zp3dH3QynlbH0Sox9OxJKLXl//zMc+9tP7/nBxcXF+fr7ZbC6euvvOd77zXe961zPPPPPCc88/9+zbVMs7XniBTM/Ozv7Lb/mWiz/6n/25P/fnvuVP/skf/MEf/Nrf+G9+2Vd9dUP08NXXcs6vf/ynttvtgwcPHjx4cHV1NfYDMVJKv/Bdn/lLPutXvP3tbz8/PePzE4QXJ0LrZsPDhx/9yEc+8pGPfOynf+qnPvyRj3/8p/fbKxHZrFfM/OjRIy/adc29e/e215c5D+6eUts0bFpisgRdOSrJ+ERcMzMDmVqGEQlYikGz73stFhGw7mpXMroHjzJRKbbipgrDgCJ91BVO5OKRc1rN0HgNvHCyLBeO5ft6EjJ8Alac7MLH7IObMcPlQVr8lea4BEssT3OKtrrJE+zX6lRrhImatpEuNU0SVhX2Lkk2D4vdErO4F3dXh3G+7EhP15vNqltv2rbjvicHTIoVRyHNWuDGybkhaRwtsxALnLW4OYgQsWsmc6LEAJjcQ7TVjIFKyFDzKZ1TYekTL90PHHFyQofDgTli4CMC9JIR+RTvIpFCgFvgnMI8xKxp2na73e72eyJqmubpp59+/vnnz87OhmHc7XYv/9RHLy8vD4eDu0cJwdPT09mVuFyKcbNLYxcws4nDYkcS/iJUOIfpjnuVkoGFkFLbJCVSV5RsUKj6OBZVz6M2QuzcNC3X5PWgWphXKqaF8QHAnd0r5rw1xjCJXWl4MEtUZTTLJbrZTDB5r2IBhFutixPewJnmE9d0o9gOYi2dKu4E1iXHnCg490P41DlYVvN2yzdG/sywiCmxrCcxgWGprsHjW7CoBrHwRVcSWrWwfREJJANV9C5V+WH6Bw6weSEnsABLRk7Ie/Bjpa6jCik++U6w3HyXEiCYfHmPz/232s/fFqbSYqiwoyxf8GNQkKa8wXr+fHA+weckVvcpzIib5/8raE8OYt+srGCfbCbAlifX5/QjaKqmP4XpVkCFUEAOKBBVHxSu4Yij/hUvmnM2zRSSDGNGPhhazaOV4lp4su5VlcfWXM2s2OCujgIyIiOYOGrMqagZKApKYyCwUaQJMwC27HkchuH66iGAzfruap3Oz8/B2O124+FF09E9s7jXAjn1YT4l28827G6Pllp+mQxuXFVYHcQWKNErWmerKQyOCAnGxZw8omuTm8QRIkGYFRYmf+uyYXYgTpvvYr11v5mfhcmsehwlAUhZlUOXLaUYgsXLONqcgMciFIE
EDyqxhnueF1HB+NoIchJCoGLKJHSZLf4lVAikuyyjHBaMa5l3jsWfsAxmVjIhFPCISi3ex9EymH+dYckcUXV3NwoRCoYwJ1UvpZA7wYlV8yBC7Xq1Xq/zqFdXV6UUFymlqMGc4g1VGLawjFWViGuHB3Zh4pqCHy/JmPnk5MTHXX1dbhX2Ooio4ZC6YXfTUoSo7/umabquyxloDEdDIV7zxM2jGqqtiSJh4ri0Xffw4eXDhw/f9rbnnrq4c3W1NRsSM4WCyBTuX46P2SoCsGAH1fdydC0QNUnoWD+zegSidFgMk8fnkC8c5Lcw/ON2Ay3YifOn4lUGd7Rt2/m1isjV9jqyCospxrHrusg2PN2caC73X3v9dXf6mZ/5iR/9sZhXd+7c2W+3n/mZn5lETlbdqusuTk8AfN7nfd7HPvaxv/23/sYnPv7xv/Udf8OIr66uXnjHO55/5vzevXvPPfPspz3//Hq9vri4ePreUzg9hRmY4UDO2O0vH97/+Mc//urLL3/4ox9/6aWXPvaxj736yiuXlw+HYWjb9uRkHa91v98TkZm2TWOjjttD13URJxGRsZQyDCKyXp9QUZrC6TPEcne0SUeE5n02zqrjUHZDudoP7nCSDDfjfgh/UpIyuqMGQFBtbSYuU1wZgECqWwdwvSHJuBwntCCFPv6Wb83HGeAtD77RZ28Z2cfBQOTOABF7De5YxCI86gqGzgZH8csYL1pgRi5MLnCGC5yIusTZXDWn1DZtg1byOI5jf+9u6vsx0Z45n5/fvXfvItupgh5cXfcZuH893L+20Y1ITU3HQZUTMbMaci65mCNBKtuZONRXZjYEmm6Fylocg7hotaIDS7OKZ98dilkjJFZoWlvmChYqIsI3XC1EPREJhIjUDmZ2586d559//s6dO8x8fX39oQ99+HA4BDU0oGAsj2bG3MzTB7Po32J6ViwU5YJUA9wsqSk0kUrmX+cPxiQl85SazWp9svKmHZmZnVHcsvX9eHJSclZfOYBYN5zYDLkMObsp5aw5D0TOjCgaFMnkcSSqNskUpjvuEU0SCaXm+lw0EdHBM3xCDRISPLZM98WFCFjUG4wqvYpi6hNvp6LFxT4oxEzxFYvqT07EbLB55tLk8rAnj391Er/lWIECxC6oLpv5EQSwILe427zqTpwuB0gq9uXw6lamntukEBj/LmM1mAzXI13Kl0cmIdMpTZEqd2HygS6prHFZP9bCfqt9ijQiwsJFUZ3T1TK8ESSb6KA8BxWnMHUNbWOWjTmGBP+1jJX5fo7NjlMAN4+DH7NwHp/PNWLoQNj3kxlFUNBIyPAC62EZmmHFNKuq5WKmjb7mappzKYUd6rBSbMwDcSTPWKkF60IsdNOcm1m2rDoalNjBRuyah+oQUjODqxEJO7oNC4sQwVmYCTI2oW2YV0mkkfOLkzt3z+7cvYNE3ZW89vJL6pnYRSgfgbE9iVf5qUAR/9mH4LESPQADKQB3ZZhDyTWK1IoHGlTTTFrjBLEzwUzdGQRzL+ruYPZSczQqZTQM40k+Q0Si/hMWmhqzT3b5Q9zYrSQFqtkfNCO4+SNptgPqHs9+LLJbDfcqFjJvY7NBv7QAMGng3DIU5qyGW9ZkGHBYnB83wKbz/aHuLseUpwrwBCwcquuzYTTfxgwCoyujp+J4SkmIiaVGw4J1jckjbnBYkxhwhzdCZ5t1k7rr/e56u1e3GgNMqWQVXqgnMUXgn4HIXWSq9dXCrnB3cMjkOLOw83rT9eNWFUVHcwfYCCAwU0qpSUyuWobwJe92O1Vtu9S2GCN3CHBGyPq41+AOz17e6TW7O3vKWdu2vby8untx923P3uv7fhz7xICRM6lHfJRnhZF627PtgpoOukgiv23Wi4iqmvlc11FEZtGI5VsO7vFtVBPjJ25g6UF3hx6F3XlRXyRmhZnlnEMzYw44xw2EgH5Kqes6IgpYaGZt23ZNE5pLBHrw4P5ht//ABz7A5GUYz0435yen2+12v9sF4v3RH37f+4qtTjallFc+/uJv/vpf/9TFnXe+850XFxequr26fv8Pvf/R5YPdbvf6q6+9/vrr2+320aNHL7300v1XX+v7vi96OBxUNUqbRLJW0zThx2gkqVbVx81mkxI7QYjbtg1OoKq2rbDqtu9rON3m4e3qMONsBnOzvDschly0+GHMLsKcivpuHLX4arOJ19QfajVSP+b8VAUnn1wbwaI+voObQP2m/+k4HmhB41z+9RbkW64hyxOWv36yWAIHoTTG5zSW4ksdIJMgGVOMIrNSxI1NEHQDjc5SShEeqf5SBgGamISsbahtpUkg8uI6lv6QS7fpqFCzG4jIBUkEKsW8gMTYhSEkoe5oBJGYCDX4VB9Z3bmUUVWDWmlmZSpwItJQ6ojIzYZxTKl1kexW3V2RgQg4rCiKTUVEp64lImYlotP1ar1eP/300yLNiy++fHV1NY5jsIC4NgLInUSoaZpx7GO+hMwSJndPAJ6AQz4lFJRSUtPOS+4MFwGEd8YXJWTjU4lTCQDnCpqjcQxmd49+iFzHIYyeaZqnlNxcyTiDGGQuiUQqzT7GLTNHjBSYCsi7m1lWF1kxh0bVcYzNSPi47BDBXU3nEXccujdj3VTpD5Rm5sJ0kditw21ZMZAaAAb5IjbIIHfXW+x3KpXheXN/NDIsyglisa/FNwKwGnKvbkGCgWLiHk2B+Gj8y9UGWUI4nelAXs10RY1kHKMZ4XuN6E28bSJBCJnWdWLmiNqUAhyPcwMTfgoYiG81zM4XsINADIi7BeXkONxna+FJaYGLWoI3Y4bOj5/8r7Yt956b+bHTL7YYxp8cMMTj3azZYhH9g/fwAT7AevhoZYBn02wh+WKmuZiZ2LW7eymWcynmajlnHXOT0owDa+6fuzuNtrVg0UGd1cmZzMiRNKx7JJATc5NYmFMXRaTB7i4QggwE13G/LbAsLI04ubmPZJLLoW3TmCgliRSC8FJN/jWbpW8e68BPyWbTguZTdZ84YoxI6TS3DCiZwhRTPTVomT40/Rcvz3wWmFHVWlG+qE1CfUubebnPzibW0lE7b8Ez5WdJEJ0/OJ8ff61WdYWhPEnATwRTwNwbX1Q2dHeuya83LMLFQoBbt3grXolFjIgW+p+hVNnIreizszMzZy0+o8EavnBzS7jhmZ7vYrVahZ0R3TFNmMivEJAzszspYi9nEbFcYNylhligh65Np+tV2PGqaoCQSJOart1j8DmIQTX1PqTfICwiVKuA2GSsBlWSmYhBwmhY9maaIamKEAC1sJhpNm4YLiLCnXg5HA5936eU1qebQ6krDBEbAc4R1fFbyTEecxXMstvtzs/Px344HA7PPffciy++yERMLgQDEcOYQczuilp0C8AcNpwvWF80T3LnzCBS1RBuYOZSdI4nUHU5+JKQthyp86ioB58QC5ojrsehQlOybFTTjiu3bds0Tc657/u2bYdxHEsWkXbVJZY4OXKcADCoau8yN03Tl3GzWW+3W9PcpWa73X74wx9+/tm3lTyklC7O796/f//k5KQchiGPH/nQB7/lT/3Y2d
nZZrVi5lCgHYbh0aNHmnPILW6321JKRCZVFZQcomb9rg8zl5MNOSNjLCUUHQtJyeVwtQUwlhwh1sRCRCxERIlYRBpJzOxQEWlTIyJmVEYfR408w6vdfhwLiELx4jD0BuImgWz0ceiHrl0LJwqkTsFhEcTAFjEz9TKvI/EiGnrCmm5TTZvj3J99EIucItx8p8vXerzUlHuw3DXnc2jBzaFwUiRiVCcLhdAkzEGNJA5wBSMCxWpLvm6SuK3atErSiLhnNiIiK5qEkVozG/vezBIjJe4HAK2j7bNeb0u7dmlOTk6SknCbVh2ED+Jg7xwoo47mTCYw5uTEcBQ3zzliWRyerBjwbu4+lG3JNvtNEnGk9xfoGN/dtsZSQFO92aBxgirLUc2KO6kXIgJFSmFY7YWJCavd9jD0LxNR3/fhiSD2Wf2SjvE9AXi9Pol1MiqmRFczV30Id5oiu0LEsS3M72jp/QmZ8mX12vi6VSemWsowjH0jEBhCojMlEamZilRGLTwMw5DHMYsDqPF/VZQy5jwwNUTOMknjzjsIQH7DhWRmpShZNrecM0FVNcwmBmgSQS0hu0I1AbIVscVwpcpRNyKGu2qp9TOn4DmIEaLIE4EiihPaBIwppLOJMTPrYiPH5Hah6vCqUQOX+RFmTDhVWFl40KoH2iN5vZYEQuA/J/BSJp9AsQsQKEzwBJ4sl4jXMbxesxJPIsp3zBALlhsThW5e3A3BQ9SO5zqHNTh5DO/M4+RxOvdb7ed9m2pCCFBifE2Gwlx8+3Y+4RweXFaWnzFDZT7TfITq1f5VN36Dn594zuIZY4rXXXX+a01Fhk0naw8r0AF60LwvZVf0ABuZjKKIfChPGbEZzLbb62ohF63xopI9l1xKLOLB5hIKUQBsdw+JQIk4kTSgxJycGW27iZWBHAJm5kZaEcm7nQgBsGxwNytFhzz2cCXylLhtU2pAiZEoJWkasRTqXDb9dwsEvhnaDP+qV5omcEhQQJ0cVmDFLZOpeyZXuMOU1EJfZJ4wBI+BHrLcFCoObk5U8aIZMXuEo5iIYKZmU3kwAjMNQz/Lp81uSSwoP5g2x/j1yPKLm5hs8lrq3Tw8wViWLRYhIp5DeXF1hktKFDFHUJJEBM0ZwJT+UY0GAZOZE8+QwCa1urCVZ+QatxsmyyxxGzUIZvPF3VNKIIssyRmnDaWKfdMUYA172ieN09ie4wQzY2qI0lQ9mFJiAhOJjhrQMCUe+sPZ6UkjfrpZOcvDh5cpJTAXN3PaXu+brrUZAquFuFv1v4tYLvGYIdJQTMPjTkCSlMtwsmrOzk4+/BOvr1bYQUFO7Mg2FRsV1yxJXJ0FDTdJmkePHjz99NOf+MQnOJuIUGoAqHkJc4JTfAUz56HPObddarg9HA4D0K5WOeeubV/6xIsvvP35Z+49fdjtG+HLy4dNu0qr9X63h6Rusy5DTnzDameu+M/MajQStRpdpWOFKVrmSoY8j8KwhpeDLwbA1E/VQnV3m5zf1VdBAFHVhBx9hvTjOM7vd6ngOo5j4MMwSZdCuOrGToF2lveGyaItsOvrLbkDdBiHYbTT09PD4SCJIrpy796z7n7//n0tfnZ2ti96/8HVK+XB3OFuVsoYQsDMTNJEaK7YCMCqzgO5JHNXBcBGrZlB0qDeV43ZJnQhsjs750Lu6moOpXAiiPCkq7SsBoHUVSNYTVVjsGlEN5iISGGUoJ6l8Wxb0BpMAKlPtdfI3X0sGdVSvQnYjuS6o1MgZh+maBIwjQcc/QhHD8KMFQ3LzOYZz88PtViknJnJfCpnP7vHIgffiV2Y3JlcYWByMgUMpuYuIBZKkpKkF56+t141p11K1pdxTzo6XDW3bQsnN633wE5RLEDuMmPfD7vD8PIrr/30i5en52ftaiNdB24OeyScehm0gJHEhbsTIjJ4LqZBqZwagMmJW2swEEyZHUbs7tAqYsJGIEhqxZ3UjaQaWA4oFIA5wSE1bzJQQMSealIIPMrp8W53IKJxDCq+M7OqBumDJnmVeZ0kon7IsVSmlIhnj6OFqBgRjSW7u8FLyQ6f0zXnlRlAlJ3Y7/exfQQcCmdcLgcWIvZHjx4k2jCKuuWccTgY6OTkpG3b1eZM1Tebk9jS3Gm/3wUWnbXNHBZuHSIJV4kWH4ZhIy1Qy97MUtIiCWARyjmvV9JIWq1WZsagqL0R+ZNwdy3E02IyBxJjM2MGuTh8CokHrFdXZi77fX3LTgCKlTmFnkV0sdHWSBkMBF5QvhMxmCBzSkXkS0d9b7eaWFI1io7z/Ti5FJGOACHMTpUq/e9TcuBkivoyAmNEE70TqAVAaZJJvW3jRng6aKUUUaAqVEPVNe5hHXK9q7qw3DKmp1U3YOdbzNGf343NMwBzrSoakbb6ZDQVUenKLqY6BhhzatSN828GEukJ0cV/ye0Nv8sMU+3PKX8XzFPRCJ/8MDMaJKijRMgdNqDvkTNsByYvh7HfOYYkEB809yUP7upFXeFmIb8MwBSH4RBc91JGVOq+nW5O+uEw9kPQASKBSxIZH6p7LKX1yersznl30kAITChjyVlH4+r3Ke4ldRw+P8uFOQEyDgeGuxYmbxMfDru0kpPmaUBzHog9HHOAOVwgjggQ6adoPPAxSrAHhyJSomxKuzGHurv4aF6oKhSYaTErwualQIupVleaOdTgqlpqSDAyGmKSTGbPDFs8dIGmLIyZwjNjq+rQ12rPYOLs8KTZuXyECtBkFio7XicJOchDrcLcwmMaYoYSPs8FPHDA5poHUVLFbogLscPMozC3Qs0sJeKJOygEkBOBYeRKvujssCPhQuLBigv9tDB/w8tqxzShOEWI3WppKV7UUQQQNbXmPh2GIQxZL8Wn3cpdw19FJOzcSHLNcGsb8ZJPz86evjh76f7O3ElEo14bMU3FHGm5S8em6+rOFqJvN7c8VY0AYaD8lFJIlzPDnVVVEjG1RCR0DBC7WjEz9b7vc9mklNrGSzEbh5TaNjUJpKpZXYTHcWyaZrXaAPvxMKhQ27aunPPAkvq+d7XXXn7lztn5y0kO+91m1TmzljGl5CylFJLgATgmOwHAVCCokoNuxa9vUq1uhoNq2kA1g2hq8zuakZ5OuEJEbIEobCqAtvRn+M2iKzMUiavlSSVVpurwk9jCZE5NplMdxvG8RFRl1iuZdL/rzy/O7l48JSJXV1fDMGhxd89N4+6uCJ15AQFi3AzqUAsTHEBlOgNSS5vYJLdbVTyrIuts4gMgNrhClCKG5UjinhikZNmsVs+uLCyPGE6Q07zOOHISF0x04EksqRY7Mq9h4xvNiQETnnNuZen4XMbojgcXL3GGB/XjhOVxTPBgOv3GFYhoHMdj9NiPjoaQ2qid6ZOTYoKNPjlcXUsibphNs7mTqZCvmvb09PT84nSz2bSGNjFD4SpCwg1BK4fRDHyUnXR3tyJysT3sd7scFcx3WR9eX4K3qe04NcW47w1KzOTEyW0oc8wNIikemiZq9MxjrApPDhInigTrGIxsUYiqipoE14An7jciDBXv2qb3wZELN4XVqUbswQtSXjhofOI9V
szDN/mKWDL6jjSS5a/z+503gpiJvGBou3v44GKvWl6w7aTf9+NoRO163XWtlFIut9sX2nes1yepW2mAWNanDKv1mRtRopRaNypukTPZNI0ZRFhEAnj4UcW0xkjnO18uIJjdolpzbJqm5SQhth5LG5qUCOG1rVdwI3dSAxkkLS81dacvvmISQHLAHI3ATKZOOBZGnJasJZMj3unc4WRGdNuuYr/BHa2iBUfFMudK6phzDmnqg4juVZGY4+t2Zncjg88phcBR8ONx1cS4SIm1EjjavvV/05fV+6y1Kt5ih34qNyKq26YFXStiyPR4iBjADP8mEuUC+83nH2ODOIYH/3X5DZYj3GM/xVHuUE1hLORu8CY4MdWasPoc09MakJEP6Pd22Nk42PiQmUsZ8tibj57IbSzjyEHBViOFqnkukdXv3tkIIicBjAO7wej6+lpr0XkAFn4cUqJWm0baddedrk7ONu1ZizaBDLkvljNGIyMHgaFm5isXB4QhiZnZsquWXMamado2MlcSM4e2CSXywfGpCfx+jq16zRgElAkNKlzdi7sRq7iDFO7Q4lZgIwyx5wsBzDA1WEC82L9maXeKkO+i3u/MN1nu2rPHeWZuLj3vs6t9uXMtn2FpjS9/xSRWfgSI0/dVz/3sAzYzkBOIlIUYqNqUzCwgopt2g1aTLeaSu8MdVfc2UIXFt/pNUhkTqR5zC5cP4NAouj3fcBglfsPiPJoypZSIGq3XayKKOGeU0yAKXkz0fIETkZBR17RWRrLSiKr256fPtY08urwahjHcxOaEBkS1+nztRCGJjbmCkxRWNRFYhIhSRayWUkoCzei6bv3/Y+/PYmbLsvNA7Ftr7X1ORPzDHfLmVHN1sUiKpDiUBsoSZE00bNgNCrItG54eum0Y7Yd+MGCgXwz4yfaD+0nttgwD7QdLBmy4G5CBVmuwGm21TdGi1BIlTsoq1pBkkZWV4733//+IOOfstZYf1t47Tvw3syip1WKmVBuJm/FHnDhxzj57WMO3vm+7FeJUfcvibkSSpJo4MM/CBhXixIGCo3EcX3rppWfTU4aCZRy2nNJcbIEIe87js+WZLUVS3m02M3OATpU54vfLvLDbu2+/8/nPfuZTr7z8j7761Yur69lsmksaRnWei6Uhr+gWzhozV9Y7nIgQ3F3OpP3O0UErsFDvLnJEKuWUD1wBo7VtAmYWPycU5FjuTkG04Ah5T+uDuJ7fnd1P/OwxIFvFkZ+EpPs/ALCoMbMgLDticnUCeRqHJ6+89spLT4KSMaU0HZf9fr8UEDGIzYs1PRWnVALSTEo1G0wAmVnX+TEj7bZUt6Zc1jVFADkLnN2bI05kpoAUL72O4QTrhTeHF+41GQGwkRGRk1VawhDACT08ja+1Z9mupPve7cz1tC/uyacFAWfPt18GWk6jn7Aa5ataqX5wJBuppV/idQiOtsfarpMIRGWZYhFITGSu5G5LWYhMxyFdXO52m+1mHLbb7XY7bvIgDph6md2VybNIDWeFs+ROIGaOOJcbIFh0nstkBBJRrUVR87z4VKyKILLa0e0IhBNYce/FG7Vvi9XFPZKdHjFXl++0CIdBzRTSUEYednUnPW5lWKuuNqhQEKnU+urI/hGzKVpuuIIJazeKALXOp81fJVDymiCKURdLcnzRa0iSmOMYuDn4hDwJW2HtN6JN5HAXU0rh8O82Y855mqZSlpzGV15+ze/mm9vjB+/fXl7Rdnu5SZu58HFWs4poquFGD90+BqpMohn6WBKRpMzsUREeQdTab0y2Uiqq/Va0IT9b/1B1kvrmUYdFhRwb1Pteo6j8znXt6lnrdTPrJ6PgO6qiYCfoe5u6DsB1NX/rk/JgE1xNOW/2dM3grcCZEUpjq25xBc/2FZooqGsozuCudTSFYQigMjq2QVGDD6tJejodt9UzNtywjXILXsRPOMBRlXxuONrpbGjsI99vn9gWzOaxiFUrzumcGvR7e3fn3uBZMSG98NE/v9ZiM/f+rmGVCsqJPAmCjGphzgCqJDOBCY09yQgOWzDf6eGDZf9cp+MmzWQkOhc9iBZTWFlsmRaNBAm5E5nDwBqQldEVDnLVeZlUC2BF51JmSSziUTnPwQIAVToOw268TJcPxnw5YkPADF2O5ailqC7uLhRpDSKYjBs4QTJnAmc+lLBWJdE45pwzSxPOcZdEkwIJFeG4Xvp+F733/yLbenGvC361psIbVIc6CrzACtyBBWbuSjBYYVvcDG5aZjKHa8QIVVXnRVVBbTt2SAMudvxO31iJOTGb2VQWampPPU3yogd0zzDr//YD1njR9ZEpsRjIQKrqFkJnCicJh7BFYXseRTKlJAAKRRWWMLMVWhmIhNQ4wc2jCsq6OlO70MrS1jbs7ube4x1tVlF1qvpdlVIAI6rQtfW99Q4NmoQ4T0+PNj9kRZ1jDmCTK6ZIyIVpM25ef+Xlw/7m6dOn0zRJGtR0Ns8s8RBSzo6qFMkrz6cq5p3prVIQOYgIoKpLSmnIUnQONDAQqMhGqwNy1mk6MrkkSSJkiBLKy8vLV18OOQF3p6VoZhovtptxdzgcrq+vSymmixZLLMyY55l4A2eRrDwL0fG412X64hc//1u/9VuqRYulFqVee9oAunVu5wyxjvvJz3XPvzCZvA/T3kXcdeRWwFR3h7ToA7waMA1p7C0u0s+4ThL2gdQTxf0jRa3dSlTlUux8qLiTmTmRkJt7xTiDiPwwHT94/qyUknJ+uNk9f/782c1zzyMFuSEkjq8sHSyqi2nU5gXDa2DNTojrqqwIwLlUZVhtjly/kVOMo84OFjPjlHtWs1pn8SdFZk9BYgjcgcIN4euQoXqPVVRiZWlauxoCKpvovYdI1GrU1u982Dqy7tIwt9fH1wneUs20CndJSv31epx4tzXaadkBciFytyi8IyARpSQ58XbcXmy311cXu3GThNydrdiswzi6m8FQ9WG9+s7V5UBdaxRGZjCn282mOKW5FANy2gybMY/baV72x2mapmKzO6mqk4vIbvdK9wBp0ShVNTOm9EJ/9t5ujIvA2jCmoJ12Jm+Vle49+HZyCVB3DjTKj1jQQjSmW/N924hoXbD+1h5mXw8wtJm1vtr+4PqRfV3F+brtqwR+P75qwadkjqnMh9mv/ULyhh3Pnt/9xm++lYdPw7PIVnhze1du726Inx+PMAMpIrrS1mczK2jspvWHGmAbCuG49RPDjTVzkpqvTkRDSn0nOt2yu5eibj30WRlRvNV2tGHpISwUhSOtph81HrQa/0sxnJa16KjY6rAqogZCcgo9oOZAibCsn2hQfRWyqT6Vt1HDsRhXoCaZeCMGBrgFmuuYIbh1+hdHUMKws0eeeSWqG9BWrwnDXk9uNesRx1R8YESb7PTFUwv7/nslCb+PHf0XoFV7D9JiFn4aJ3RvPMRHtWoX6C7f2t/7OA6GFjCFGVIGYEwaNWPqSr6AMkxAGc0cRWxtxIBBpzLfluNNmZ6jLPN0B8BMy7JADWRWtIR6BKAA1RkNMof7osdlngMXOk/HorO7ljKbLxsaSITYiZFzrrD3
K7q4urh+eCVXOwwCMiw6l6nYAnLOzInFSSgJMVFCyoE9hxAwYDYn0xoCS20XoNBSb2sXEQmq2dv8339BW783gjsMZBRASVeQEgqhwNV9RlQJlsW9wBymbsVNyRRutpTuDcZ2qW7qxn3DxWnD9VWNG1C3WwAQZjuzb9H2OGvi9X1P73vf+vj+wl/IOsa/qftdaIk4YEU6Wj2BujMycxYKAslg0AuiCyOe55lbHqh/t/92l1Nc3Yl0B88scH4uDMp5fVcAAlopzMSRPWBzo8D8mBOdSGt6bwIIhxCr5CeAUgrXMv0WwWV2IGr3l+MxJ7iLF718dLUd8q9/9c1pmoppIgIJUNyDc7XmalWjXAspwvQgdXYtDvVmEHhzd82sWFmWJbAH+/0+tBCbGGNdeYgosbAlYWzHvNsOXqY4yXa383fvxjw+uL7YbHYGOk56nJdlLjZuhmEEME3Hu/0NtIiIjHSzLySBqEzsCrPf+NabX/nKVz7zqde/8Ru/WbQMm4u9qjmnzWZRT324nF7UjFCLCtyfMyuD/sydYNQosjekGQV4unmANSsVDyLXTJHh9ATXjijO+zPnfM91iRdrOl00/zYmmJ5mdvtiHQhMxOrqMHKDgskAvP3u+0+fP98O291ulx3HZV6sl8BF6L/Ua2NSN60FbnBq6RWzWFXbr3Xb33w1aLuRF/+Lfuw9G9fPzLWWp32rUvpyHMXuXi1TFwfclWpBgwSVPgCYB6lEbN8ndxBBKnTmwDTH7IQxvecNrvv5n7T1xS6qzvrTiMxSFjoejxV61rZGA4h9SKJLCQ8hCW92w4OL3W6zudhtMksS4tB+BJIQM6kWDn0Cci0LKCTbgv6XCAKWWr7F7J4GnvKlSOa7PRb3vM2QPJdFGYvb4mbBXclKRCROEDMN4tBSipXiquZE0hwJQjXKoz9jfFvthYjrEqjS67qiKsvV57x22le+OYdnC3KGMwuhSuSJCPWFmCjqvuI5RtkhyAJJTBGo0ro88or9a+0r3ntnTdRpZusK8/VXuhtWaQ3MizqRjOPlbvtwu7kyTXd7e/vdG/WBacjD4L7dbR8xsxlKqR3EzGpuhpV/GoWfsURxKSVnIj5pPIgIUtK5om5i94kq3L43uVkxHTRELuS0bZzfBoMhab2D1B46V0GM2HhIBPaVs56GCO2qiOhD+XPbRPOoiq5KxGZGqCCLNcdvpD1P+vEGBFOak7uTtNK+WFaph+UIBI7MqtdRUb8S2GNu4837yANADUDeJRC9Ypi5lo+vUn8UndFYT1+80xfb91OFn9jGBHEsH5nBO2FB22RafagKGQgAAQAASURBVPcFthg6GzC/S+nBF1u/JhbANUp6fTkYirtO0zRuJpvTdLSL3SOkC11IUnKDSF3udV7KdFyOdz4fWJfpuEfs6VpNUDLhCBC5l1JMJ1UNrWx2zMu8lKkWV9uRXJmckpWi5EtPIRFpSkNKKV3li4uNbEckAhRaSpkqyx0LQaBAKTAGZ0jCcoAaPKE4QLbM8xL0e+wEDQQShWRrRJ+ZIMwCsIdg6mnKv/iw7Hf9Cf7nbN6MRD+7E3MYw0I6klCIjEidFMtM5u5Fi7opaYGphX6Uubn2naaGFFuJU0UGqUX81Zqi4BL0HHLupjWL2hsYh84zhGgbUDDwv2inrb2zeBEbXJLKPcSBf2uhVg8NeGakGgQyD5GlGiE8RQuFOOLTcXYBpRqbrhAXIjLjCJz3LZmZgwkgbn5ZnIhSSiRjv9BqU9sJWsa1XHLptDHhy3VXs/da95jjVzoJzUBU7605BiIS5lvdbR3DIJe7i+c3T7/71rvElwSBsFDMAAqZBvQcV03aeMiJmZmj5m46DC8uwLyQGRFtt9vg5QNQvGTPp2v2ap2N48jkOfM4jtYi+rvd7nOfHpkTOHHKOW2c5W5/vL29U8N77713ONwlkQeXD1QXs1IWJpm2w0CwIWU2gpXvvPVbtzdfev1Tr7719nenm8LMaKWo1PSm65hHxezVXg134xwideqBhhW8P6POh2nUfAbgxJqYIVA5sppT6UTkTMTBHHkGSMYKHu3n8Q8/izg0T5UAOo1Y4MwnJM5Eod+RoKG1aU48JJ7L4oRh8FnLfr+/2+9THqcyh+XWSfbbSS0qGe9Nv8QSiK2Vc+fuLu2Ldr5gmmst2wsnqDG4WzkVF531ZzcUKwqU2CMHdYpWnvzIyL/WFyssovOLS0a9EV+9XrXVjb/wldWD6A+oPvTzA85uxL2hax2GPjD6AGNmFlRhxt3mcrvbbobNkLbjOA5pzEJubAYoyBJxFhGhw6IMiFApZO7mGohfgAgSCDsDA+YkzD6SOpNb1oFGHmV7cXsoHzy9QUqHBepZmFzIuaiVYlbu7qJ62xpBK3NUetToRjw1rxGl6hBWi4dOU6bmexrHI51goqEBAzqtOeHbl+qDxBoIjx0h5ZP+qrc+DbxlQ5am5qoQMxebaRXCW0+0dTIwXvSiwf4THRlLp4QktZ92VZ3KktK4G8ecN2qplFKMiqZhuCQe5wnLIlrM98chH1UHUxiFRXJ2MYEUPSFC2waBxXTg1CidIy1JmspkrTbZrLEKk1nOmYTViqqaqoESUZKkqubOgBEBxuj930Z1XXnqXnPa2HHKoMLBSWo0p+6bq4AOahDYVpMCNWdd50CfoeZKLv0S6nhZAyvce6AnSkOpstJxu+Yeamqz2+uQa5m9LiT4Udk8dmhTpe9rCK11C5vfiPY6Brx+mNjAKTh4tkp/P1X4L0jjjxhFrfmLj/jcD6yH/W66EAY7kxY8jWxjUpADi+rBfWGhnDWl/WG/3N4exk1KSA4GJaqFAAQFqflcQjfCdR7SYBbExw1YoTB3VYcWtaWUxXwGjMmNiKkIlcSJeUk0K5agDhZ201lLkZSYhFwTYzOkzeWQNxtkAQDVSOullPIwgBKc4QZ1zAYuYCw6LbNmJFcaSOZlWUwRqqZtMU8pQcQrDTUzJ276rv/CN18tcVbzAApSIgc0aghBChTiQu4IuWAz0+LLHMyxVlQSN7nuiv0IBEqgmYAabQshOTQaGHVzaJD/U0PAma2rCsNUMK+u5skqjrZmWMTKMHix1iP2o5qWqYfTWV1W/73emJncAtkUdDJ9fc8tztp/NZqtJOPCSQurYqWMXGlziCjnnGRA9wbJjIpzdveoSGFm81IKcotSk+Ref+mtFpNaFtVWZDMxMQTiFPmGM0+Dia+vr3WehoEfP76+vty9/dZ3BRDJCIAmMZGom5qnBkatrAbBFRMGCjccheNeb5hZaowXwzAsyxKZxtlnLjH3KouVusOMYDCMQ460ctQQ5rR84Ytf+qEf+j2f/uznH7/0igzj0w9u3vruO7/yK7/y5m/+xptvvvnuu2/f3d4wfHEtpQzDZjOk5XAQAcFczdS++c1v/uAP/uCTJ0/283eL6iBpAUdfsffg95lDiEByhlfTknj1Ca5drdX4iUjDejiujFpHy2iH+Epg3dYMH4j3i/XH1518rFJ/wGnlwrmjEiek1VOwfjnUricCdc4gA1PkoQAvZmWZweSEaZlvbm4
Ph0NKCY3Tonn7auauDndiakDoMAGj32pGjmp1UuTuWm7yLO1Tb6xXYniDDgaKsMvuxffqMVZe8OUiIRwKMdyeZH04BXdnXVBfmnekyjrI5HxmE69/Y1V7vDbyqBHxrxeB6l2c1yZFW6ebHG2PbFw7FAjaKBpMKWV2oe0wXl1fPLi63gwplmCGWTFxB3kiSI29zUWdOIPcNNQaDARvCRgQgVKtR7XA83LybMoD8jZDMS7zMB9pmkZbssKVoQ4yVS+lLKpLXg7uFVjRHQN2gGO9ZgAGpqjAhlBNm9ddYdUfRh6k5WCSqAl0d3VthlRFWNWq7DpIiElS5PqoTqL1YOrOXc65s4z2qQ2Ag7KTAzdBZiuylgCw16FychvXz26d6l8PEmoBS6ZkinlepmV0w83d9A9+8dfI7fGjizd/4xtvv/3WMFzf7aeb59Ptc5dh3O8PED4ep7rmaMcFmFW5rfiVuqozkRD3y4hCADMTEdf6ONZ1+RGX7GPSum9FK+VV9xr1Ayy4HUI+oqnLMkCnUszTrufu0ucRek/DT8tFa1TXiig61RVvsFmFeRtOy90pTi+GvkQ7wOh8s+FO2oqWJgheQHbiFI2H0z1QKDkbOfX1pLpnK2agOvasOXt9YsvqGK22Uy1iZK+sgy9wWLUue3Ep+H77ZLUTWVEtiY53+RR2PLVWffo7AkTPvMHfFc/QVlmtnuQ0mIINUOg8H28BvbgcMQzAcS7Pb+6eP3r0EnDBSECvy+DIp6WUkJKxkIFpUFvKglJsmbWUUjMWEkpJwU/FxIHa9oEWYQ1J1UKz+uIOeEnVbE7bPI7jkFLabTYXV1e4GjCOyAkoKAoRSSJEOk1sCxn5bDYbFiNVYFk25gQlsJOZd11cgIQzETlTSgnCDb4uxEyUzso+2073L17rqc+IT8doJ7hZYQq86AJf3BezAldaSqX+XmbThcOmcRUFuZPVbaiq0bvn1HIbkfnohlC1lojq+h4MLryuu+kwnJUn6GvDmIhyPiXYsNqAeq1HfNpX44S2exHAICYmqiWFcIeFSUghqivEaxBgb+yglNYUc97i+suyxFZ3co5fMC67t9lhS23TPKFgRWQYBmYuejqDqkoeuz8ZGM54PQzD8ViV3+IyatpN2eCqxU3XfXR3e7t9/HCaJvEqq/Xmm79hBciVvxUUdO0o7rSq+KzP56SpUCtK3Wpw4eS3kCdGZh6GgZlKmSMM013llLIQS2ftsyJCwzBs8ub6weVrr732uc997k/9yX/14eufweUVnDAbht3F6/TpH/Lf9yf+RHn//X/wD/7BX/vrf+UXfuFv394+320vRWS+ncPlZnYBSim77ebb3/72j/zIjzx69Oi7771/2B+G7cbBR9U0ZJQPQQZG/8dAr7wKZyMM6zHazWJyhPvULTBUo6pyRVRnoCZYQETUygi7IiLpqXCUVm09SPo765FZX/Dp/RMECxVQ5YCpR3FhrcmpySIQScTwSDixdD+2Zee1TUNqeWwLhUw0qzEuwFBOAz7kwLyzP93v5Pa6T/LoKOHg14ylqR0Z1PbObm69qwE0UG7iqP8LW7AakLFhRbM66eNH2bmpVtcWiYX21HDe+mLUoXq+Cq+slyQ6nYHohYnfa0FjCYgXiU9Q0j78cs4p88tPXmLBmIecQ5fPiSiRDMJaJrNSYG6cOIiIjMYBaDV+PcsdPSvM7gZ3NTINZU4YE5IgsdPtQZ8f7m6OMBsXZWNRh5s6iYNUSZUyTmtad0tMEYXWhKoJaXB2cVTJqFM8IgCHZIA4KSBBVEtUg0znhNEcDxqtSJWIpQE+UatKV95/Q2oQuYhEvC8cmT53+jO61/pzXD+saD033sV+0FKF/aH34SGShFiX6d133pvubuHzy493zPLapz79zjvvjOPzz3/uXxnGi29987fe/M3v/Pqvf9154pwCPeFG7h7g8L7atFEU85FyjjhhxYlEDWcpnmQwP61FaGGjw+GwLAsJ55yRs4Syl+kwDLEf39va6r2f82NhtbPeW45i3PYdjdq479dvq7IQYq6aFu7kLMJ+Vm9/uuEYtXVeQitEvD++yulPXt3UKmB4QnN0n7BGouJ6tK+P/Uh+IYMX6ZG2UCicqzB9VJfUazt9w09x6+/V/Ps+4b8oze/5hL9D+2jP4Xc1N7hq4ROuE54G8oCEWzne3j5zm0WuN1cEHOfp9m7/dF5uh+1Las4w9061J5RSzps8bEo5OBU1IYYI3Bg5MQ+xvJkVUHFnYA7TyaNaWWdXJUogJqhw8LkygAITwmbIF7ud5Ly7vMSDByh3oBlaoNO8HM09AHu2mJthcTFOnogzzE2VmVVcSMgJBrNIaSGROBNYGnFkZfMiIubU4Q+t/U7J4U94q7lBuFMwxwbLphEKbHFbzGe3YlZonoiITE21iU8wVUFaMHPIz5ZThthaojgyB9VVC9pFdPw9VSY6aTJFHSZqjZ8fDRfZd5B7d+Gr1usp1ls2EaXG8mhg14LZVESYxUHurkwl3hGUosdpEZHKmr7iIGXm4/FYlkqAY8Am5Uox55k916IqLQQyBaAiQomgKLYQUxrF3WedVEYhZjcmZ7JynMh1t9moluScKDHD3Y5TcXfidFf04cOHT997Nwmurrf7uxshk0Qiy9VlWtSZhSXPc16KOafNbmNmx+Meisvtxt3neQYsXbHxdPXkUkBy/fA333/nMD70wecF4/aqmLr7IHlaFjFnhoREMoGZFK5QZ5dERHtVL8ZOzDRYFHSZMRg6Fdg83co2b166em+6OwxQfgJSEWUq5DN0FqYhZVbdXlwPwzhPtLl8IBeP/+h//b/zkz/5k/zo0+ogghFKUhFiYnMTInrl0U/9zB//yp/6o//H/8Of/2v/0V/RUm7ee+9Tw82777+1HbY8bJ7dTtePX3nvg3c3mb717Td/30/93vnug1974xu2fy6yE5MhpWMw5Yeqh4sQk0hiaaqo6oRK/U8mIkzE1MgG1dws7ESrCFKvDAWtzWIk1cjQGEiMPERRLIKUR+HS4E6owGC4u2lcFjNzHlKUaxHBvJhVtFhp+mkNJNUcs85IxM0/ifMnM4CpFveSB00gnMGWvcjhdiFf5mMxpSTJm7sXuGGEDiWchM0dHsqcVEowx1Jpt24rNguqgY8zq4vq/VvEZRDJS6rHa4lM4CogBHd3VjmhrfwMJVjTQ1XQoR1i0hP1qKTGxi3WH+tOW0rUXInEzNxqij4cgFJKaYtAqBr2nDIFVLXFg1qsCW6GlbuyWmsdZ6Yk3H1xIsnqro6c83a7vbi42Gw2OeftMo3jKIncFtIyZGK4luO8KKwQEScRilJcYk6lLCQCYvOk5g4hNSvzOKQEJTPzUsrs7pxYUnrfL53FmPZUntNyJ8s8ugOlqHrkFStJj5gz2USZiCNbEs+ERCiRthU8nZwsJ6JpmdaPXZwgwixF5+olgt0r9sQdwuNp2YaBnMhq+je4PwUlMpxERDQM3YtTcjhq/R2a4qKQ0yn1hzyMaNgKuDHB3bTYvX2iw0FLmZmQcgJQyszMTCi2SErbzUbLBC9MnISHnA
+HQ06J2cAKntUWLcfd1cMf/JEf/uN/4o8a2e3fPnzmi59//4MPxiv57Bdf+uZv/kpOW2Yu03Q4HHMeXXG730sezCwnkpRAUFf1Ep7PcR4NdnHpqj4fp13ebmR8Nk2EeS5TEprn48WDDb9/W2jGxrYPRhkd0Hm5G+ZMnMhKIqZigZYBJbh5MWRGyjxrFH+4xVSMwewEJFR/irw9OiKFdg8x8psQDo0ZVIJUsiD/DKwYUUgPOVBc3UBRA8qMqjXiqGlyAuClTqhaIWxeUKWDw3gQhNZbq0T1hGoKqDdAu4ecIAmRETyd5q8zDX7CSWmUDjrEoX6KHzFR5IIIKHHrNSVYRVMY6DkEbulcAsDUi07rXbQV4eQM2OodxqlI9d5hH/3O99s/s3avCCwwJFFDRUCtGaLcJMhg9KGOgUVyAy5whkuwkgBAJyZtEQkg6krPo5MfdjH/RTTRFFuWh8Q4AIDAWDAI4Lfsy6vzb3/7qz9/HI+br/yI/fZ7N7/5/PXHP3I5P8Z8nYdRYc4LwQhkGGn4rOw+W17eF9wsuM12l8swLDvMo6ua3QJPnZ/tD+9pmW1ym9TnO5ue6fK+Lbd7fSnq9BJopNFs73qgVIrNm3H78OHrDx5+ivKAVCB73P32QRgtZUIQosEXLiZqEBEbbLaD+cSymE/Fpic3L4Flhh8LbSRrSXKQK7kIe34Z2GC4dKTbJHeDPbsrg2AAZYBZWGFmJpUCI9n6Sd57+fFop+FJ/W9r9kpbrCoXAwAMDHfADbQwLcAEn4CZ04IygQxsKJOowb0shX02NdfAK1VlBqFkRQGYatU7AAlgRtO8NHOLAVg1vcFN6lkqHYUTUeIEnkgUBoe5h1nLUX8XBPsB24mtxt0j/+xeKQaI406dpQVYK96kIm6CCq+iH02wTgASV8pvqey28b7VEoRaXG5RQ5gyd1xKKGIxQ4QSUtiRqZRKHJ4aB503fvyVmZjcRUyXMpdpk2TMAne3ksgERQAWojEx2XFatBxef/3VZ8+ejYNc7DbT8WCmY8rTfBjHkcIQCuxlW7pCeYmIEnEXN2dykW3O2Zby+KWHKbOqbsfN4XDYXWzgvGgxM2YhETMbhgEcTIMggnhZVNyd2IFWF+fsQYGj7hJE+YmwMPNms3Fv5Z4eg8CIncAiECZmhmK/P+S0efzKE07yp3/2z/zkT30lXV8HdCh8kZwELbg7z/NmGKfDcRzS/+zf/De/+LnP/zt/7s9dXFzI/P6jlx7fHpY0DJuN3t7ellLybvfd77zlP/FjDx48SCkVYjBlEivqrRyFHcTeDUFrJToknOCLqVOQxOgJitbTdKsZ6OcSxYRTmhurtGGjvvASOfOekVgppzU7CwBKKVXqjarYm60oLmxFV9NjFgAUp4rHNg+pH1+vsH0lLrJrbXfs8TqsglVB4+mWz1/3s63fWZ/txW/1P9c/5y0jtz54fdoXr//FKNG92r/ax3zqgfVX7p2nX8z6jl68gBdv5J8iFdB/UVUjXxQCBnnITpyM4SUYNBOTcTZVoiQEhxRQ+J/MDA4ecDanomZmAnIDLVpYhZmZWHLEQBa1xXyZj/NiR9VlcXUzx+IW/CPulfEYH1YQc++u1y/ujfb4iFt1bu+ie4/1fKRVapN4HGYW8647bO0xGVGs1kQt9tErQckVdCoX9BVohBsbc/zZ4xfreApCKbSTea4UkDq9aqzxsW6klFhImKBIQ77YJRjttsNulMPdMyF7eH1xuHtejgdhjEN69ODhNFd8h6qJpKBxil0tkBrn+wWP4+h+AJBzzjnHsjCOo05RdCqccnGLK1yWcjz6NE29TkG4pqb9hPNsU9KbPiQa7HzdG3Qa1f0J3psm3kAK1FC48WnwuriHysd9Aja3YPU7M6yJyPhsYvax1N9ez7KG74rK1dNzjKsx0OnZxTIZqyVWScU4TyCsoXYq94oLk9pFHzENmgr52TWfrwyOj/ryh7cP9QW+7w1+IlpULnwSHtZ56ivmBQMyANOM5YjlcNjvv/vOu2+8+/WX3vzaHVjx8Ief/AguBySEmCCaaDtLsgIrzikJsmGz4Yxhi7zDZgsHsAddQR7t/BWYYQZm8+Vg07Myf2DL7e1xJvNkSHBRczsIT5IXiD19ti90xPWCbQItkAKxserBLpUNzpnAcB4kMSU3KiWEszKIzBOogCIqCSJNrMQLKO0uhsLg3bgkFUlAgrNWFNG6fz7ExvgEtJXp9zt6rUxRveJBOuheSBdwgS4gwNXmxczgOs/zhiOaVmNqnSo7mJzNtHpYqJuvyGlStL3+zGTyCrioZn9scG7oO3gcZmYn+JufQKQ9o1iTAlT37l6qc4/sN1kjFBURS4bCxU8p0UQn3tK2A0V9jjdTsjoMOefgVAUgdZNlESnWvYU6lppBU8wMXbGwRqORBEkIEJfEZmU5iNtmM263IzNSYpHs7ocZd6T7w/T2b7354NF13o3Pnz8jwnYYD8e7auhwigo1uMLA5MKVIDngU8zMIUXOMqYsiQr4lVde2d/eLdM8DHmajuM4ujvNNVMqIup+cgjdvHWGuzoTmTKTKczZwO7OQmRsVjhxKBheX18T0fF4FIGA3NS8iHtV4CJhSiBsLi7cnUj+8B/5Y//qf/vPYsgwVzcRbmR3cHgpJTFHPnbcbKb97ZDyf+1nf/Zit/vf/q//N3NR4nR7+wzzwpIBbLfbYRjeeeedN9988wtf+MKvvfH1m7vi5FA1FBIij5LXwAmrFTgFrsmi2MiJuCocNBuXnZwNoPa4HaBGE+kVGhdz8MzwxaoC0N2LG9mZDRHJkHq8BOrpDHkVIzJGbASrYsivx23MHI2oyrk7FxMmSo/CQI81LgIvDoTf7gQSDm6YdUaeG8qxs5uuby1WgvX0Wbt29xaCdfPzZmuyn9YztX/ozB2953KsX/sKOHfvK0Qf8sXVp/XNNYPIvZ87uTTm93rgo+7xe7feXWY2z3Pv3lnLMAwi7K7C2AyShNx1tx0T0ZBzJhYQwEKcJGk5OrmDjEWjEIxBnOeiFAxYzMzJoaWUMulB5+M076d5UjMSRVK1EpJQzSHkVsfXHYjaDytruvb26mnWIyXXldNPOD2H9jP0URShq95tax+A2FNbirnhQk96OmhlZZG5AQByDywTcIKYAuY9+IJVsGDt2PR7sRXZ5trh6Wfrf65hLeyAmhBvt9uXn1wOUj796uOXH18f7p4OKK8+urp99n5ZdCQkkU+9+tJvvvWeG+Wc42SmEJEoYejQeveqKR8/Mc82z3P84t3d3d3dXaLL8HxUlcWL6fr6V53VxpiqnHzCmLVO5jCrKhPNRayoOIottvpR51OpnrYHboI5qCMUVkOlnTbCQ60PzRzuvOJbDqwBxx3hhWgaCISIha1na9Vx5b4iVcynnxikDCBvwmmR8LF7VX9RXI32OQAKNDZeWCrOvxeht1oVHqkdq9d7fpi3TaE3rjxP/a8PbZ8E7+Jf+rbaUD4hbsMpcQRUyRZQpIgyISWAn93e7YtdPXryyqef7J48nvTyyesvg931sJI7LqAEj5pvgiembQYbF
vYNdAsP9fpLEAED6ALC2CaMTKqieynPYYft5hmKRTE4oPA74BYyledvPz08m2aldHh0uTGo05ySYRKYkgNQUKmSiQ7QABpgnMFQhhKUVQV2NFeSLMTAETwRz2YRdJUkGxIBCSiBN5KU6I44gRPCGww0U9/KPv4P+czh7yubn71D3GNZoS9CVNwXwmI6u87uM6miFCFFBddUVljYmZEZBgyZ953LV7FFVe1aa+utFiuOjDWyDDViiG4Q9mP6twD4Kli9suuMiKwl/E55BT87Mp3m60cbbN3QITfq0UQiIpJepwLSlWRc3xqZqXOgRf9WT7V+ytJ4zyFM5AN5ghI7i+g0kS8PH1x84TOf/tSnXr+63D158uThw4ckfPP87q133v7ggw/efPv5G2+8cTiUi2EgScd5LqVcXl4WQySyvSjIYCYxQestRSHhbEReFIIFVhZcX19dbLfvfPe7psWNUouXO5RrSNjCGMycgKYd0FgNiIgzZ7Cyz8VdFSZBLGilgJNZScSPHj1KKZViKdUOMTWPn3IGwZ2EeZABnJjSn/wTfwr5Yrm7TTnLJjBa1fF2QIiEhQAXglmWtByn4TL/0Z/5mW9+85t/6d/73++P0+MnL+3n+Tgt4zi6LdM0EdEv/dKv/I/++/+DL33pS//wV95w0yxcrKCSkYi7w+BqxRVASsmdO0cCzDQA5d0EISNuHqD3Z3xKpQSCcR2OWdu4tfdaNtFbxJ0hBTVqECZRnWOQiK03K9aoMpGgOaQ9Zk9GpBbFXNyzjDE1aZVwM6qAK6q2O7NDA6Ld1VnahPEXEl8fOrGp/9fA2fHl0zHe0ZWIiVCDPH3h6stB16VZ/Vy/mGhrk3Td+lXdW0TaNeNDWxjAa9/MV+nN1VpzCv/fu/31Rx/aPvRq+zu8qlSMd+4WnWwBzK0AnlNkAU2e3zFjTDkH1sJ5yDnnvOEiIu7qakxMwsTsrjF4jFydvZiqLYvOqk+P+2mZj3MpDnA2puJu5nbiRYC7C+CwMHtXJv79PDBeeIeIJBL8zZkM3Vfm7q+hS33EIGy9YidsQE+fMzGTUJWNJSJHjY80b6WR3FR+yZhTJFWLsItS+PoJ9n2r/1L/c02u29fwSAZ2adnI1dciBzUmZKar3fDyo+shLY8f7K52wmUvmF56uF3uPiBk0aMWPL68+E28F3GHlJI7DQMBbKD9fn82ts+noZmF7MeyLMfjcZM3RIOpTdMkmCOmw8w5i5y2JuoD+55DQr36zsz0NJ6pQ6/p1DNUtUSd2MPx693iq5m7zrzVzjQHoF5rRfqMEmKvZlzVUAXATY4C51xlfYyFfu86uFYXhNRZq2K+S/iEXJcjRtBqoa2iyDECK8CTzAGCMNwaTSiRwCvivbPR9FLDGHd16XC0gsN2EWehq7paOxzQtY9HK23EF9r3XcFPQFut6ufP63eAVnwMWisKoMo4GoVgBWUCKcqiKT148urVxSuPnjxcBr7Mj/nhQwwpBJ8AM5REDltghDDs1MEOTowEHyOWDw+raGvO6oOzkG+EmAUQQz7C5yl924XEOLMAE/SZ6Qfqz5eLwR8mUuaXPu8PHoBm98NC86CALbAD4Ug0EZYqoV4OYAUGcIJnFKC4aJAjGIsbFncFTZwmnxdDBmWnESTOobK4ZXbmSUQQagMdu15RqtZd6OjAf/6P7nduZ5BmW/2Pzt5vBoj7RG6Aks+OApvdFtOFaUFZnJ3YKWCSTMWhLTTp7tTX4RbKXFta9ZNzPe21d9e+t7bTPEmGl755mVVGABFBI6dYY/K6WXj6xeBT6MZqMzqoOoQraFbFqjIRUUrJoR38UwGi57bdyRtktpYaYj/r28TILEYQEZFGMUrIKatIKQtztfaEQcSshYmszIvOF2N6+dXXPv+5T3/x85+9vtoxnEiXw/vDMLz0YPuZ176cc/5gb1//xg/+/N/+hV/96tcePHopX2yfPn1KeMCMlLIWL3BXY1dmFg87EjWZaSUoLuFczETkyUsvaZlvb54R+eGw3w6jWmBwFeAgAlYtrORZwpR3CtpVg4MpEg5cmLTMi1odGFoIxhAAIvLgwaMhjzViTU7NyTQjgxvIYMJ8e3v76KVXv/zlH/zyD/2oH5a8u4IQCGYgoDkOLsQELMsixHDnYRjycPf++xePH//sz/7s3/kb/+Hf/Xt//+Hjl9JS7t56SxhlWTLZbrd755333nnnnT/0h/7Qr3/9W8/389X11c3tIc5BgWYiqkTs7kKk1Qj2KE9KVfOauxWC+vixljGo/2/bADcJ2zX2wJvQPMkJoVqnZgvfnE0bOmUz+oRhZoXbchYp90gMKtYGUyDEPPglV25iGHzkoWJuUVdtDgEhaIbNIHRvAvd/1++cXn/E9nfP4O6v+wR+8Zz9nXP/yj/0Gu75aetGq0hV+/UPcdju2Z2nZe7cw7l3/hd/7nt4g/duf/3mvY/6SXgYwexQDdlH92QEIi9Ojrt5Il9iKUspjSlv0xSJ3yHl3W4HSWykSiIDYG5e5rJE09ndnx+hBnWoUw/kGJxZOg65dXqMS+A8mtadDV5lv0+3sF4eqeFVGgFVGMx96LKDpUcZmJoZXSWBAQYxKmaDwxM8daYHyQyq40eR8+/6QBEyrKS5kaiJPcbN3Lp3hxZHrBMtYiZNax2IcgWIZGaOitNSTLWSlJKTkAvxIDwmTgLofjk+216NbIdN0rvbiSjPh8M861iZP+fwOc3AzOM4GmhZlmCPqUOxCa8HgJ9lMjMS3mw24zgmTlqqa5qZ634JENE8z/M8i4iqwgwhqwteb21RRxtuzTr56YHFYaBnU4lqNq+51lZKjd20MVwxOOgszXWQ1LGhHqWAZk1AlbhqUnnQBMFXYNHU2H1XuhfxvjTw6Wm8uXu7TKo7c/2U3F3AKzGeymjqa10sWIuuGUEYHIWI7iAS97AqPso9s5b7q64mgDX5REtUenS412NO4Q++f+bv+4GfmPaCN0hw7tWkH+tWRZ+aOACUYQSHLoHfgCTebH28fD7dTm8/LWnz8quvXeQHwFicQW7wVNN/E3QGDwDgBXoEAbQDUKvVvDqGSuy0NSQnLAAZxJlpJ7QhfEEkk4iDCQvklvCe4C7BX/ncl4HNxeYhIUc+ULHgYoYtYkf4HewWeuPLrdmxzHcMyhzUOAqPsi7At/CFJTkVFKOsMoJ8GbeDCsnAygRnGJtJMWHJIhmSgJ4WXIWzP4pW+GPTvJeF1j9p/SdCRiI+AQAlPwKAL/BCvhAWQmEyqKotDpACVoI0n2ClSR7E0tY9ww9tRHRa9ldvYmW5rW2he4dxVd0zd2dKDYwm632hV1FFMLNLXwTXJhH5CriJEKb3FomM0gh2AZNDuQFgzYuZuZZ10Uj3BolOnBVSqyhrdJGa5SEggUsVTDJ3ZmYYnNkaFkjNmZmKbi4ukJk5f+a1J1/+gS++/Og6k9689908yCZnzlnLNC0HLGPJ+eHVSz/7X/3jP/6jP/B//w/+0j/81a/OzhcXF9M0jbtdTiOTuip8gZMACSi1OhPkEBFXI3JmEsbDB1ePHz98+v67y3TI
wkdXSVFS78qMCi8stRbanAIQ7IgcPYEIJhyRcmcQuUbFIlyTMJFLSqP4g8ur7WbDnMoCpkXclBCZ1BoQcHK36+trMv9XPv+F/PAhGK4ggTtUNeeWulRjEZjnlKf9ftztoKUcjhePHwN4+NJLf/rP/Dd/6dfeuLu7M9Q0glkJFfhxzD//8z//r/1r/+PXP/Xqzde+OQhIJ9hA8ADSOsdgqD/FrcwkkQhVr6zHUiIB4S0eTZA+MbD2f6yN7+7erMIY7HDmEzCsD9V12LuhxWhVBEVJmCrdzXraKGqV7mmStAm2BmF68wmBKAYPa8jZK3A0uKHshZm9dpOirZN4vkJWrWfp93CB1myN6yPX76//5RWo+6PO2fuqTsxzgB8zt1XyRc/WA662fhzrW177h+0rZ2mxe+f8qHbvcce31umO/u9C1vm548cUJB5Bq+gHOLGZKeeZE82HIOy6kGErg8q4mKmrzYu7q+oxXISyxPkPsxMJmIzYnUJayCJIfLrfMy7loEu41+TDlnhUKH7j8AiTva3FLYdD0vk2CJQYqNyPQJV7rYVvoUpPDhg7kzkRWFaJvvh1dmAtVt9y7HVanWoIe5AFNUFX2/ohxqzpc8dbxLGXHpzmcl1tArkYJdzOpIRFcBSw6Z4UKNOk83I4Lou7c845avyIOMa8iLh5ZCBPV9h6V1XzUENIKaXtdjtuN6ypLBrg1c0m7ZcpZxg88paZ5cSH6Os5WvNap9fuiI3TAz5q7sZO1gNS1a+GgH01aImIqXJ+3uvDWL6a70ciLbDoHjiIAFFwqi4vqo3arK9RUJdK90Y1jLqf0726vLM1gVpdIdDQmy7xBFe4iUa23Lu5OZBVWIK56uiQ1V9rJSiVyNQAqrw5Mbpq3T6auXhy+cKxjM7+UJ8QwFrc4vvt49/Ot6HIFzE+NOL4sWxaYxaMphAc3E0QggPTtD8ceLh69OrnhJfdJqftqxdXT5Bech8XE2JWm4gL6TTdvE/HZTNkpAQokkEydAYSLAMDCIioD8whjtEwxBzzwFoRGGMx8YWJIDQSO4sxdga72gyMnWM8FmVOzBlakGaEFgKOsAP0hvKN6J7kxvW4lAP74lpgniRm84XqYolMJ9OJkgybpK4yupOBowDBQcEDlJgTSwqFekWDtZmdM0V8nFuPTDUusLMFJ/DzvvIMJ3ioC0autRAZQQM+52pqXpYlDEUtpS/mfcDzCV9ZwaQArAXv1sUL0fy84dwAtmAz6AC6lhZ60Txrx/eKPEfbRbr1636GDEIwpa18PFYYkTP5skTBkLuKknsjKs2D5FagElgYqrw4JuQWqthAEoqyCC8aTHsAQAaquSHXYu3i4e5RqWYuIgwfh/H6YvPpT3/68cNrLcvtzdNXnjxkMrd5no+l8GBDokVoZNt869d/6Ud/4qf+V//Lf+vP/fl/7y//v/6Ty92DqTBBRDKRGC/GSrBELARlZ6ZkwiBhtgheMZj59Vdf3o756++951qY83YcyE2E3V0CKOUWR4qQMMxInBrJhBmBiNkhoRoFgxnFfDLLkghIzMMgFxcXu+3lOGzdb0mVo/KXTtFTImLC5cXWnD/44IPn3/6OXF1pGt5/9vzu7p1SypMnTy4uLt5///3j/k5EDofDT/ze33s8Ht9+67uAPXn0OLkv+33e7f74z/zM//Mv/+U33nhjv99fXV24KekCLeo+5M03vvGtr3/96z/xe3/8u2+9M+1vRiHttnZL6tEqDMzc+G09MEOUkqyG4ApJWJ1BXo9OxNrRk3urwd+tfzKvQbo2voEqzlHPsIq7n+ZMmwlr8gbgZB71IlpvHuN6Hnqlcmj3K0zaqJ3imKZ+YW0W9Tvt82rtvfTXteb2fNKuj/yoF2vzvffG2se7d7YXm5/nCdfGer/ae7ewvgx3B2riYo0Rxbo+6kMykB9+Md/jIvuL9R2tK9bWRZtETgRKhCagEToN+/0eABMlGYjIOYRBMEh2MEQgQyE5qutSlsWmaVqCLtXUzDQ2Z3cLvn5nDwbGE0diH+R1FySiaia/4PFWcq3VfdUFc11Aa+19qjfF1cWNld6JyVosw1e7FjMInjgw7CQgCsrwU/gFqDm/cNKqv0SNM8ZbVWp34dZDYu3s1RL25uDdS8v3uSkipURtp0SqPw5WVTNh5kRRya1ClhJvtrnoUchmU5BOh8mMkqRltpwzM0c1dcjTYzWSW+8GZYkDqvCIkVW+GaCUQlZ6VwRylVNQ71DOOaVkwYtmxhZIiFV4yKqD6O4wI1VQF1+pj10CLGMWrk6kBKOImaViEABQuJQIRoLTEgGAHK7W1Q5jeghWe3PEuVYjq/KS29ncT+F1r6ePneaaE0pZ2rONUdlqOjgF1hUA3E73BjJaL2J+Uqro48vdG+ClXaCtfEIATgx4SKoaKiy5nKlTEOAcHUiV+YZOp/p++8S3kzfY4IXB3/ixzhM6gBMruq1LXPR4/K1vf3uebl9/7ZVXH30RpHDD9Ai8gV8vLpLIgWkpd8f30vIch2e6/+DZdEzi292w3WWaBmxfhjN4AAQsgUZI4AkEbBNGwigUSoYFsIR94i2GK/gAL2q3wC141smSAHlHjo0nwgBDolwou0MYTIE7VfgRfmTssTyT+dZsDz2UsldYFmLboRwxgPSOeJ95xGw2HjLL0QwJcCXy4A9mbxsi3XOlUNdk+sQIUNTLbZyiBFT8AqF5gwY4fIKp20Km8OJQsgVeInUMc1hBWQyAs6uycC//A6oSVP3FkwkBb9Zmz9StD6BKU38yh04f1ThknKR5iYru6MZh7RrQfxrwbgysI7/dqok/q1HFUewX5snK1GtuKLuaM7t7ahDBeL/qmwWksL3DqyYcziHlxG6JiFiECF6hNwQWFw9Odma+2G51mVV1d3kxjuPd3R10GRJ/8MH7SSgPlFJKSQBXLfv9nZBdXT/6rTe//pN/4I/8W/+L//nN3fEv/7W/+anPf2l/XIgoEZfoBbgQEkOJhQXiRJCQsIjAgOuDBw9KmZ8//cDdzXV3sT0ej8xQtYrm9ULu7B6Cc0lI61OpUWYGwY0cQsQOcVOQwJyJYGQuSRLTMAybzWaz2RKDdGYCCdfcqjuDhLFJdPPBe69/6rPf+vWv/l/+wv/593zlp//wn/yZ337ru48eJGa+vLx8cH113B90mYnISvmFX/gFInrnu2+/8sqT995779UnL2+326/94i9eJR/G7Wazm+c5JynTcbjc6TyZ6jJNnNLf+3t/77/1Z//sL/7iL77xxhuPHj+ZtNLUenXAOMpOUkogxAMF4AXQxYE8XkWKIAZCjAs6s4bDOq2+TWquhSHEnevPJBY0F9HMoluJSFdejTWmU6xMlvhUQHAU95ySN+y1u0fKsXtWZuZmvuJmxKp+11cBfmPyYmbWHGAjImEO+mA0NOCLzkCfPn0yn0+8swTdeo3op10vBOsz3PMG11/s/fCh19MbtQzkvQuIu1t/t3/aoeDdZ7h3wf2S6uvzy/YPdxpXfQVFjZT6qjKdzBWRAKNIHlTxz6IFKyhm7+qcR/eTNoaZqbq6z+7mMMf
RDMcJWCIj6O6llLksIEkpQ6qEoKQYZlA3ogR0VpG4bT2rG2w5kFOOqXp9p8dRxxWojvmVe9mSJAywgOK0xKEnUX2ziMsxmkw5MbUcU2QIa/fUgFsth7NqZxM1Hup5nvsg6ews/RhfbVoVr6EaJXmBOYkEnYjoYutYDDWh10jDppQ6NhtAKSXJjtgNWMynpcDmICN1FBEBFiI6TAvxOOSN6hKBTBYICzMb27on1wUVAOCccyZSbfGaUso0TYJEnh2+LMusUFUiEZFhkHme5nm2RtdG4kxMp3QfIjQf6xZ5ldYgOT1fnAhg9B4ewd2hRsxrT652ekpnichYM3uNbpyTiODBQNe/266rNivqFZdympKJ2c109RCpBSDsdF8xx9t1RikE7q82KTXmrkZL02+OYACHbRhqhADdN/+6cVV/3Hs8Jfq24//J4a3oyCubF7cY3onS4/vtk9liEHW3gT8qUPhxa9awnE09xeAGsE6TbHavvvap6bi/eHAFIegMYQyfinG6tDgGEV1fbLeiULMPnr//9rs672V3RXwBpfnuhkJYRthJSFg5GyeFUNqCNwzxoktZSAuZ0eY9psfAE+BKl/3++PY8v+t2vNhcAZd59wTDNZFAL0CXpFJGOKFUX4cZTJSZrgiG8SWME2MGJil7tcVhsCzzLW8Au/HpWdZDRsbmBmxpKUtOy2KcAFL4UctRg/DJT0mCXtcOGH3s56y1fDWDW2i3rdY1k+EgA7T6hH6AKrQUW8iUYO7FXUUCbRnpCWKQsKiAGGF21mBuzxC0hbfD3xQwqiUAeMEgXFt93WIMFEwzfavwoKpqqbKEZ3dq7u6R0Go7/1kWJL4efKcd1prMSkqDu+s8L8ti8I7MqS/IAE6JRQZ3d6v7vau5WzCKRrXMMAy6lNizl2WBuYhY0f1+f3l5CaCUJeccVRZpkFKl4W0QhgwRaZ4WHVIed+PDhw+HYbi9eQ7M28zbTSpWRJOLHw5HB0KXbH/7tJSynZbf+va3Xnr1C//Gv/E//Xu/9I/KMm2GIXK1AprNMnN0zbgZKyEbAuEDEYGVz37u81/8/Od+7ud+LmW5vLpYjtMyHW2ZF+zHcby+uliWZZnVgTwInLwouSdisGn45TAmCEiX2R3jkJh2YXoyC5luxs3ASCl94XOfJ8lEst1uuSQIL0UVzknYwLCBmWzJQsv++ZPHP/A//O/9dx9+5gs64Kd/+kflZFTgtddefe21V+Jh/gf//r//yiuv/Df+9J+GLhBBKe+//74R/ub/9+eur6+J/PrqskxHE3jRybQAeXchkr7+zW8eDoef/PHf+/T995ZlsWXZ7XallGG7MfPDNA3DME1TMyPhhZhZ2ENMb7rbj+NIIHMfx1FE5nkuZZnnMo5jTilk+iJwsiwFftKzXlQ9kqiAlQjwu7uTg5q7JR4SVydXCgAcQtwUVywsV2ZOIq3DuSt1WigcyllW0M2KqpmN40ghbdg8xojNiIgzsZ+Q3NSZb0LBQhUr7y4cjDiMGxVKSqmc0/cTnQjf16bYPe+urxF+buStHb+2cHzIkTiZkvezi/1UqQndeK3Xsp4+iluI53UvmNRfr9/vfxJRQIXXtnuDDp6y3+sXrtpXyXt3188QrWalbAEixBJBPYJLQBYBlHJ6CnGAEhtBzTGp2kJEy7IsS2EWAxsnrzBO9hr+dBIh5yr/E3RXIu1iGBFUrL/u0iBt1QRfVQB2o9xbGi2yXu6OxrwrqP3jUKq5JVmBSCEjoKaqqoG1Dw+SXIutQnIAEjExzcvEzMwpBn8pVorFXafkKaF5BVF+IKVozkOMWFUNcp1Sitk8TVNgLIdhYEoEgbNIHRtElHMWkXAawxs8HA697LyOWNlAivniSCxZDcWU02ClHEs5zuXZ3SGPm2nGfl4W8/1+767MqQ1RM60jR1X7uGtXq1OZGNhebPuQ/s53vvP6K1/c5K0ibqR2u5nd7I8vzebuiSXnPGy3GDaYl3meh2ETY8yD4SclGLQUgN0tlKCrPFVVBDWOwypf60mKQ0QA8hDFkRpysmDZCZYyqrnaeZ4hFSJm8xzfJUngE/2Au9ewSPCusWEFIY7pEQd3lLKvtvyUUnienRbL/LQMEpy45jOjvMQbQbmvsE/ORBBU9ohIPjuIGUnvEfQBHkZ1BWhE6k/Dh4wZCcC7wxCSjs2NrJOq+rA9efj99olskRxHyzCHF9HKj6kFsT6syv3j4T1GbApOQJFhY/MEzpuLB84DgSEb81gXoIyUKqh6HEQMIEYypv1umGc/6jRjs8Cd0yhCxHBVc4MmThvi8b33boZxN47bTPCiNE/sJqAPbr9xufmcyD7JI8kk8h7RW7sts+6zHFGOoA3GS+ASfg0bASYkIBOyg0tI5yAYFrdwdiSiXUpPpE7YhTczsCTced5i/z4uHJwglpbDcjyaFaAAC5IXPZimzfYiPHwJAiqEytpH0iV8zBq1f1uRkMG9ruKd4AJWzItZSeXgpZhZEkIKbWaFlXmahQgOuEkwdGkRorIUwsl8RVgCHpgmRMC075idjjvCr7FjRlDbV1mNHnZk5thMVN1sqbg5o9jgRHJ8Gh5HlJV1HcGGxSAAqrosS2zftipHcvdkZmYlKv6CJWSFavWuJBHNzHSZmeCd/c8leEPdYaWUsrj7kHMSlsREkBRLvxO8U6U1ZJElIUjuCRpVZcew2YybjTnNxSiJ0ODkx2kRds5wo8hGHZcyq+k0XT3gp8/vnh2W1z77/Ad+9Cf/zJ/+2b/wf/33Hz+5JGFdakllNdndtykhopdeo7zsUFjO+enTp1VjoGioYuScFbYs07IEUwWYkqqGDHq30Q0O00qnapFbA7uzWyKAKAlfXF8k4XI4zIfjf/wf/ydffePXiGgYNg4QswiBRYac3Afxh5vRp8Ojh1fXD652yR5++mUM0KKgdWlSXXHj5ZMnTzabDQjmhmIAHr/6yu97+OCn/0t/8Of+yn/0q7/ySzod0pjvnt0ty3K5u3j6/Fka8rAZ97d3P/dzP/cn//h/+Vvf+PpvfvvNiyQ5C1yFkVIW4c1mu4y5LE3AJJFQHe6qBaAY5cacWeAQELEM22CA4NRoEp04gawpqAQuoiYAmczMmqiDNkCaAzyIu3O1t6p6RPgLbgAJBwVey7dwYO4aeYZ1J9BqptJOwut1YKxfnFw17XQ2Zwn97qWs3bz+xX5w/0rmOuexkpepbmFboeLWsEIV9p+j9hNRIdnzBdT+by/upqtE4vo2X9x3X/xWv6OVV3nuSK+8zXXzUybwQ9zae/28PlW/sHuX1wXx7h3QRC8bOM0AUpxsiD5OWlU4B5sjFneK/mSmlABxMqYmrBQzi+C6xFHtrt2h5hbP534X+hl7JPspIhhfZxDxaajcA5MAqLlusuraxSgmoAI8qr6BiEj1RSuuzw2M00Nlh5GFV9aDFHTOHtS3FiZhZklZROKCI2saq1x413V7SGkYhp50JQqBxzPVyjgytrc+TtBiAYuZMdixOIo5OYpRcR/HC0q5YJrUtJRiAialFvJUN+6pe5hrhZt0r2aFIo5cn7
otyxIxVFUtog4zuJktphVa2iJEcVO2LD4ViAy7nS96GpAN1i4ikNwzexSKEB4xBCN3slDb6QEaFck11xfToYF+Tz1T0SQtj8zc4ZrcfEtYldhps4gQDiGx2RmUKJLOYOqyE+vZZwDxKUBT0QfucMY659li2OSIApDVczSiGnv2lsk3V3iUazudavwqWDROGMI9aPGRllHs+cTotCg+YYetM0gEoJUk0VkG8nfOP3yvNe777XetneWUAHxsNQlb3WBbnx1wAiXAwMIyEpFHdAyM2FEaYygAx8wB5fCC/fPD3TMtR+hhVmyPA0km3MAIUgglAeAtQOCUyyGJJZrYC5Yj65LcwTQMJmxp3GB8ACyDDapsPg2Z4HewCZpw991StsAjkcc0JIIwRsIWyFE8DrjaxAwmEARIDnEMZi5GKgQihrFcgSaUGxSGAwXuLomRHKwQzaNhkhBuQuueijog+4SU+3JE4AILTw05CnegABoVmG4zmbIrxEkVXnQxIoIWuALIIsHSYh7afmEKeFvZmq3YHKi+BbgbUdQQaCnaIunOHEnFgMwpM4dmXssodpBFr22qe1mQPPRdnjlIv/rWf7I9iKpqbd8KAZhWdfs4JrW6Bed2LgCNlrTuchXL4k7NGj/1rrfkgNdfDW8ypRTx2sxUWhfEf8yUhNAYzNBy9LH5T8uy083d3d2b3z6+87Yk2CBuOl9ejINwHoSZU5LNZpM3o4hQ0mEuxmme529+85uXj179kR/+4SGLLjNUvUZ7U1jTDjocDhrKYrFdu0OL2vLNb37z2Qfvv/veO0Qk8GVZ9vu9iGAcy6KllHDEXdwUy7IMw9A29Mg/MTNLIiu1I+AkcOfIZfHTD96/vtjlxMtx+Yt/8S9e7DbHu1tmGYUksbEgy7jdDETbxA+3OZX88MHVo0dXrz6+wPvfwcuvD7x1kYrN7GG3E2Qul1K8FE4JRKUsTEibEWX58pe//Nqrr3zjq7+2f/5szHnYbt2R07jMhci2u8uvfe1r/5U/9Se+/INfeue7v7UbhJlhnrIwi7GIsHsSiXAZVeHpZrplSgAKF2fKOatqJD1aBZFFjFyLq6kQh6luMLiTVygJOZjF3Y2qkdAMLBfmMGXQIvQSJmOFS1s8gly5lVzIPQI+VElcza0CoeE90F1LxakWB6LZZz1sqV0a7tx77JVU4bHEa24ljms3rNr950wbL/pFOM/jr52l/sLP2Q7PjlzpyJ0te+fA1BfXxfUFvPi7qwvje+/0pefe8e31/TOsr//8SKDpQPblcu17f+gdMQrgqEYk14REcwi9weqbZ4gCBipd01JK4sycOCUzC2KOYg0axyTMsBK/H3VwfQ08GwwvvMOOF01RWqViTiuyefMEuh4PyClLkLh02Z4Kaa7LP4Gl1W06yMEiAgqFgujomldchSrcY8gTHMRCzCAOqQqQmMFMD/Pk7oG0DKeirufMzJxzTjmHIxE8JJmTu4PILFR2NZLqOQ1uIU3g7sbsRGTqBQaziFw4MTk7MTyByVzUyJyUuAAALdzEY5rLxxxORUxz7lDVMMSYKaVkOscSERm2i4uLPvY6tDjuK6W022Qzm6ZJVZmlbuBWp6q7u0V009Aj3/GQqivlAXI+ndZhrQoUECbu1XCnYdxLMMKRNosTSncR21pRbRUzbzTl3mIb7A6uRXf3olpk4E4asxqWKcabObVqFq+Vie5FiShEhrp7D6AvtlFLWMGdtQsCMgqiEBhkIveTw9YLCBmkiJJ45xbOuldY1MM3ncxL1ujTj0g29AM+pu7E99uqrZhFmzdY873OH/WAPw5ttd+v32OSJMxOIBaHRKkh8ezU3IDqPoJBmHW6OR7ujlKKLWZmtkRKbXY28sWxEBEoQxgYBhpEGT67H1mPyZcIAJHszHbgR8Bj4GB0TeMDPT51JFNlX0DLNB1vjiJ5vr4aUMTBRsKUE2fAHYv5nNiCBxDYAAOwIYxM7JITJUNhMDAAPN0WmUrKNs3T7EqJwAwsoEXEmRKqDuGLc7AmC3vnfQwfMleMeg3JgUBkMIWHK6jwBShks1lxKFzhBjd2c3PTxczIXKoaq3nRcPyq+dKZva2WQ1lLb/RMW4fIeWfXb5Ud66U4Gp1v6KE7fDKTjFD18CpEq3kGQLXxPqIfWrVU0/OuxyVmjjB5bLdmpkXVbRiyqzeKA2cWWDEgPD0RgVaWgoheq6qIRAVXN5KYeWEWjUQ1Zz7RxIU9XQPSOHWWqh4Oh+lw57aMAnEdMpEtQmDBsqgqNhu6vLzc7i7HcdzQYXd1ff3gpauH0/P9/M7T/ebi4Wc+/fq77z1jYmHiQYgHK1pMYT7f7s2slNnVWlcauU7TdHfzPGUZx9EIm80mEqmzFdWg1mCAYRp+e0TKiZkgRARyZiQWjSojc3djIBOF0vzLDx+qambeXF4u03Tnerm92O/32UsiUWYRTkyJkZONyR9d7UYuD7b0pc+/+s1f/4dffHCJq8F9ch6BU7FFc3Xw2c9+9vb2liTo7kDMpZT9fn+d+TjtD4e7y+3mIicr883NzbIoUUrDuN/vx+0O6n/n7/ydH/2hH/j7m+HpfjbVLCRCTFTgVhRmwtX/SSwpCTsMxI6LYePunoeUkgx5nuclLTlnpjTPs3rlt438g7urnPIkxUlVS/i4XFEHBCQKc94RpT4tKdeyRuQgYgG3pNkKx27Vmq+ShgpnYmdydzmndmge2mmSrF2dEwbyHGm5njwfOmnXp+rnObPRz0lZ1r9CHRi5SgL4KbZ0L3cX/95/M1pfWe5d2D3/9t4t3Lt+5vu1H+trvufi3uvDdb+tf/feTwSQe/2t/hPrN/v6sJQp7o9XuLlTPUM9w8lk9/ove5UZt1h5vaoFeKrVVE4h8bByUJlZKoum4eQ/s9JpbGTiF28KOBWRNQemuQRAVAm2IlamE6mJAeI1JNLKXPvICTucTuOK0ch+whwhAlFZzni/iaLkUGTlIKlDS00HzvORTslDCc6VSPfVIeTsFdZNBDG7f35acQ71U3lTegApahyQmRKxMA2QNC/LvJRi7izMEnJK2qhKAyca3sKHTC47sXXnnCcnCrwo0ziOVw8fYB7MjGUdjoHXndgZzQsioiRkhlKIpU+qJhvlBKieSiIBgEzoTKPCQ6WHqswjupy9GRoMoA5FteocraazlpOoVHB2hSVFL5gC7sF3CwQyuf72aVb2kBLO1gc0OhmuTDqohD3MHG5acIy3QdhjyY4WHgPIbIlUYTiLTpV4w9cZvBa5hsdgZiDoByNPGvC69RLKIPPwJPs76FSrFY53Ov8prbQW7/q+c/jJaN0bPD27jzJXf/cavTCeWpiTQhQrUvyno2ghuGOJYxgiYEFCERRiE4C1OHNmGkxF0g6Y4EZQVbAx8Yh0cbkbVBezPcFSMlACGYg5b9W2sAvgWotous7D4wXleDyKWvYZuF2wpzzm7SzbwssuEkeg2fhIOC76vOgNp2kpezNLPGa5Yly4bdxSHj+NcQD25kcmtf1+urvdeIGZLu5MNCYkgRqgJpGOklYX2sLoHZATy9g/p2f1T
9kCQtRUq6MocvEyEQq8wBe4mi+BxeBygBqHqe9uCi+qqsY1BwZT117wcs8aOrWOeMLKJoycygqSWePjL5qF6A4hnU6lqpCTgXd+WD/D6Uf7n7FddmeVmUUCK2qdqY/6fhCpQYqtmE+R+2qgMIlQ5sBtelCSssDcc2KCqKq72jIbgTmGtVHLGRLIzKBmq/BtxLwBmNk4MLkyYbfbXl2MZLYdOAuPQyby/X4/TZPk5O43N3e3d9PzZ+9fXz8l/s40G+fdsHlj3F7P+/2Yk3nd2lV1KUsF6TpXSJIpg1ggwonSMKZ5Po7DZllmwLbb7eZi5yUMJm8SWGwKIkuJ3ZWdCZH2bHuYmhA7LG7I3SmYMeCHw2GZDrYZmJncReXp0+elzA8ym4AALUSzgSwrFjrwuEvCDy/l5Ucbn28xPwN2OhuPIwB0LucwQYDPfv7z77//PojmOZRDSUjG7QbkNzc38+EIt8SYrGRJIvnZ7eHR41c4DcsyJcEv//Iv/8Gv/MSP/diPfudv/113kpSa6j2MfMxjpYBnYqGoViIHsQ0NJ5bzkIdh5rTIknMmojklAMMwENE0TfM8m9nSMGnMbPB5nqeymJnBg5mGmRVeShVkm+cF1dyposxuZq7hZ7q7N2smLK3UaBI6fV8EOyL7ERZ5S7l7i5mde02xqNXg/RrJdEr1rA21e5P2Q//tB9+bvTjZRg3ltQLjrc/8UW3tbq1P2/Vn7rX1wrRu9xazFx3IF33O793uuYv3XvQ/IzjQgbj9Rta5nRfulxHV4fURMxBrDBGtOjbO3yBzdO6b1WMcBOMQwQ5F0HSiSwn1dq04vq7TwFUeoLUP7892tf2hU63MnKkF8ySydRIICTS0wbrPlWM8N8RIHeVRtNBk0M2ru0btnXqS7uQAAIrBVRsmU5dlKaaMquiw/rc9Got52gvWVXWZp+4d8apJa733iEhEihUCAGVykEUaUyDu6oqymFnUHEChWvO6ETAyU2MmU9QSShhRdzbaaOFTyS4ASrLb7WbD8aCOKmgUBlJg/p8/vxGRzW6bWco0gQQiLLCinCTIXQiBgwrJhMoBQERgpxh1RN7WE4QGYRsJpmdsPTVowdSjv0F27i1uFaRN6zEZKGJuuFMCIp7Vxvd5CDlQRmteUAMRdTz62XxvE+F8jHlglfuI7QfXy+klhWhA0BqDiDOvlST05EICDT1q4QQHtDtCmYa1VEugORbcR5++wFZ4du/94HuHfczN0X/ZGq9LBz/m3iAAwQIQkJy6KwgQzJy5V8m6e6FK0Y+mEAEGE5jA8A1sHPIFthfL4dbdRYTyWI7Gmw1qwYc7uPiY/RL0WK5HmfbT8X23cCoLVGFKYk4MzkA22Sa+EFzw9nKaFmaYL6Z3kpery51sGJgoPxAzKQpS0AJbxM38qNPT4/EdLdMmj2nzALyzKZfFj7dPtxeb2Quh7JgOz75bDjeSk5biDk4JkiEZXoy5MMzrtlKla1tO4hPUCO6m4PAGFQihjgNQ3IvbTG5uxb2Qh0pCC7e5x8LadkAjq0LW7u6u7h3AcjKWYk+MAKv7mbXTA6lYURt2E+hFGwyAaulRTjivwGdhwqEja1oO8MOFIddmZ4feMHMqpaSUqlHV8jABGqwbbrN8ECmaolV7vOU61vdfeSmKGlnkDMNBYgEIXKsc1Z1RFANHdUp8PQord5ssjE3avPLySy8/emC6DAnjkK1oShxhbbC8++67v/2dt+b55tOfefnq6sHt/nh4/ym5f+c73/ng6beuHz/Kw3icbSlWAtlk5sRElA3NoGev1KaJpMoYpCGrldB2TCnt55nYmUCgzAmAugklOKsqMYlwStJ39HAa3bWBgCoCkmFW5s1ms9ttpsORqFKhPHjwiG7eZ1Wj2dS0JIIupsp89+zw5FMvv/7kukxPr588eue3v/nSMObrx6WO6boPh3tDjl/7tV8josdPXqIwmmLzFMbN0ze/+U21ZZ4O5XiwZd5sduDh2e1hv99Lzu6uy/Fuvvvt73z7j/2xP/af/aNfPxwOkKRmDHMWV5dEwcrCzIOklFIUR5nxwJJyJqKU0pCHUZIOyjmVUobszMySASSWLKmUkkuZyxLgrqAqpyhPSlJKiVyxms0gVYXwrgXU47BFVdWZpQmwQN0De01OIcpXy5xWzpt7lZuPcVvtCnc3S5K9yYWds+rVdmZhA70krLf19PtQl+/FWf3in2t/6Z539719sI86zxqiuT6mn7z/u16AuifWlpn7p+0nWZ/hxfbi+/167n9E9qF3ur5Oa8gGIrJgsAq72rkmXJw90sZUeW47ZJQaWsHhTE7sDIMVco2CP6omsnMN3wS9/ulKghGuQsZj3XRuy3F1Tu5ddqf9XK/piZiJl36ARbltrKw1Y7m68VrSxk2cPEYt9cBcS8id+hwAkNJwTwfXFMUNdYG1pZTOhyScN2NNCba9CqrF3WOlJaJxHFMSAKWomY1ywquY2bIs8brLvaAFNequEczMWOCZ3GDuaq4+DBsiKaXM8+zsiwJsEG+bCJvBLRbV6hbVybXK/3uUnQewpfm6RKR2KqAniWSq92vuLrEHCZv5VCaR7Arx5lj3/8op0+6Nobe7f8S+wuq0w1buXFWPaEgbAFE02EZd9RXrtbmxITb1Ct1sebO+Bd/7rT6HTlT58X4r3jNFRNaqNlSE+BsudG0WxA+si/nq+Wvd6RmVrne615VaUlxqDAH3sLditJi35F6djzADA9pKELmSu8IBJlild1rBn1athnpe+NHvt49bO38u67rBj6U3CACYgQRwl1S2tj0YAChHthDkpkQC2xArkyiMkYDEznABjSSbcdyx59u74qzIatMC3xoWOINHZ3a6QHqA4SXIJeSORXSh2SF2JJ+8uNlepAT7qXAyDA4hpMuLR5imMu3dMaTMmwtgBAg0QPpqUGAiMBbdHw6MLDxfbCTvCF6SzaRKh6Mhh/N7WHR69oEf96obM/NENCZDgjNAi+NYtFYUfxTVUwsYfWwbBaiBDK5OC2GGz8AM7IM+FD6bqbuT1lSWu5uqheK8GXMSDhEgAQoUhuKVlb+ZE42HwNqKbQ0+6iuSmO4xrQKXp11+7ROi7Sy1DiugZN4r6rsy8L31XNRr4NLOawjXiQesDMLKYSogo9MRkVGp20ANIjqImFmb4RgHd3RTqCqF3YNcY8YAcs7ugRk/0dMFaCqzROm8t2QCM7MtOQ3CkhOzgMwTp8RiyYeUAGy3F5zk2dPnpZRS/OGTVx8+fPxg0dde/9xxLtu332N5m5jff+/9uWgxUMokyYmdLMhsusNPhEa6iuI2pCGlxLvddrMpZR6GoZTiS6WddCghMAPMzD45M+ecc86MJpsmICMY42T+Vtvr8vKSYLe3t6728OHDTR72fHc8Hh9kkZw8J5OBMyfijWAzSk6aWceBNhnbAYfprsx3g24LtcFU8ZX1Qfzqr/7q1YPrH/jBL6ecAUy6JKFpnqfnz3/hF37h5tnTR9fXvB2/+9ZvH/cH47LZ7J7v9ynncTOoWwL+7t/9u6+/+up2O07TFKVALEySluVQ
y476rvTnczmRyqau7btrn7CDOa2bZtAOKNsdcy8zDzZlNq9LSUwrDR2kgEjWS8Wis1fhSSJC3uPUethckM3QDrAJWdXzv6kpjju9Eqi7CwtYOjbSJQSuIso+eNsQEeMBeFCxCkTaFjhhLQN+mF/Tnv4hAs0aomprAMMWJmcXqa2bZVAoeBLTx805P+NzZwg9k003qs3nYsTjWgRmPGQ65aaJ20YNzGnTrFR01LIuqCFD03fuwFn6TiaGE8vW+dzs4a/03xmX3QvIWAPPA12Lt5tR+vlhCFhJ0iTuMMjGGHd8BkDOmUdE1d/277JeIgjb66qT5mlpcO2Yrjhi8b5lGFek8an9t55UnBy6/de3glDcaUjR/cnQVmYRbGK0KvYlNtoq9X2Y4b0lQgpFnAL1OZiRnd2YhpPbzYjH0thcokPM6mUf/Gx21TvLE9sT0niMKnsJpar/LbyufG5xwRUe8OEnM3g3lYaeh0gz0s4R0h5k7OQWfv7mF/OriHdwCQMDGbVnfA2VBhVSyTCIQhEus+4kLtBwzpOB16RAC57VGBFoEIpYwTTsfn52MFvfvee2lZnz17xpS+7ZOfMjSvLqfUCgG9JPttvGT/03zbsEGIokkU+VrTndyJClrDncPc63cROWf3FrAFwSn7frI/uIMpMUMkEUF8wbIABroFHgNnoBz03vRW66Pz8fXTKqvfu9VarW7wym58uMmuFaTE3FjnyAl+xpvAmvkJXKnCT5U3Sb5YPauRS6pgtUruQp6lOhlI4QVU4ZVoiXVGjZDiOmJIUVJovy4zBmPuED4dEFHoA2hk6d1M6UmkFsj5LodNkcY4hvRoPpXwyoTSyNRxorF69n+HWDAXEQN42i/NAyLkjrB2J1GgwhzbovWFDGYOZY6+pCFzmn0xlc+dL0ni7g4nJh58GX1x9yY6mKLWQJjEnRMEICM01YI5as8NdVGB9lHbzR6GX0jk6g6+pKdy9xSrkB2MTlNm7j32Ygj6RAaIaeIAcZ/QPWYTB+McJyGiKOhcazWvsDSWgqqKXJC8NQkrVAJGCEfOS5ZmSPTjGJNrGd3/145Sh1KJXB5iuFogG1pjulN2PlDHq6tu7k7UEJLbtr311luf+tSnvviNr/9f/k9/8Hf9rn/zt/7gv/LNr37p5u7J4fYR3ACCQb2FiKJCgjehdH196xDeS917aupqkjMc5XTKOYP4K1/5Skr5f/J930fr6qWomSwriG5ubj7quJsDlPM9CqT1cCo1C93eHrbT2TZdDocPvvyV/+I//89It9PxudVNyY8PD0+ePIop8OYiMHUzR6319iYqX5NF3S7VoOUbJ7Gq1o7LVdU15b6yJKXE0vgPZz1pyPSw/8daYmaDq1ZVTXltHnRceFO0k+XyRH1BLVjERKRO5hCh5CmlVLZgz3NmGWtpMB71V/tY2HKhKpmZQXWotswsQg3N0uxMc3d29qFwREIaR+5boEScg/mxSxC/gKgZIFexiME45zusyEeMmoFimp2cm9hmkiruzolYtdq2hd3u4HW9ubnhw+FgVa0W15oJypQImWD1tDDJYanbycysFhOWxLVWc99Ua62emCCNMa+TIKmTmzmBDcMMdgMHgrQHE7T6CB/NkasXNbOX7ZsLQ87dzWvjae1zNKg1vWeQi7iILClfEHMRBcGH2i6L2lz1G0bga3xxJuLCpVo/m2HzIrRawzJJ3CD/4VcaMR8HpEexMNZM/3r/oRUqHDG9bu3sQ4FLOTm2lU80ubVWkV1umxm8JeydTgGFFfToIuKgTZ0DCahuMK+1blqvXtr9R5p6vIhgDw8Px+Px/v54c5AZ8upxjjvFvmihg+5vqm5RuFDHgATGizyPECtGfL7NEREFUutwOLz39N3gFiLizj4a+SIRYQqGIEOcvMyxOC3sX7mAAs570GwvhjGuZcnzaI/TcF3XFx8yr/B9a3fD70WT0t1bhcmwG33iEZ+useABkDMR9mIMY5qmZ+qcDxlCNSoI7Eob2J1ZIEwTCc1u5vYOO0ANy0u9UgUgrUaMd8/I/roxApNFiv2zrtyjn15RFqwb7O1zUut2dWC05pjkWA/j10mKgnkHMM+b5aOET+CngSBGCdAvkSdXI2oxtkmOifNONjj/e3VxZ3T1Rs68FwhpVy/oOkUWHaEQdjm4S7M+XBeRGRAcE+l/EFe1NEIOvsGIgvheVwxRG969eWGIIvpWbZC9UCttMjmzjMBgOMdUKVxEyAlReLYtQgYkp9wIeAyOCgfcYAwYiN0QCX6u3hLH5TGiEJQzIhHRyWOrujuoZweQqZrZDaX33377WMrNk0evvfrqq6+9VoouywLrIZbA4bSc04+EcX1LW3GfRrTei3ll4h5AYyKOshShOSKIi/ZpBDGsP6rvTziQBISoKMPxNXfAQMKwqIrCxCvk5tU3XnvltXJ8P93cOfFTq4vQI+hz1a06VSWjSqKJjJjg7LaAF8XrnDJoQb0v9/d6fxIr5FSLm3EFk6Gqq6rAFgm6LIUrrII9XNmGSLV4qZ7r1BOs5tO2r9lG2NYWPCJ3EEShTpCzCLhVePbqDm/op1ZhKMq6tKBV53AnwAlwc/LuId8dVegb3MgpzCdCZFA4+3Dxt3t2eeLD5vEI90EHQSF6VV6HEhMFe7N7FJ223SC9HgHuQXXuh/4IkIyj3KcQxZ6SADUzOAJpDDQGz/6K7k90ClCDWyQsh4+Meu+sW9Pt6Ezh1nVooMNdzUxNI3Pa2JkdEbDOkpaUcxZmDqbKrvE4GlniztRM7FHAbF3XIXBBNlibRrd9onGjrnVBq5mBLJpXaw3DQFVBlYi0ZYFz5yGiyK0kIrgBO0gJMDTkf0CzcgxxKHOhH4QF/+TJK+fz+e233/7kJz/5xhtvvP322++9987rrzx+/v67f+yP/tDp4fn/8gf/1a9/85sMqrXePH5ViIJKmbps+SgN9ltQYn2UIScsZQreSl6Oz5599atf/eSnPkkp6XZ293S4BUEDjPERz6HLtwyzkAA3pJTI1K0uq8Dow298/f/9R/9fX/3SFx/f3j5oXZm11pzFqlpVbryaThSpcyCSPeDQqZA8nAgp+eQPjnpry7KQ+bZttW7MiH3OzGZ1pJPECnFXohRR4vC7t2AFhFsC6j6oNCUcEglNDmP3xtspIuFW0XrhwR3fZZao2DNWzmg/hs4DdecxmgyYVRh5B5z1z21Yov3rGvX92ofoDWhbPfrdzuFQvmkuSUe7+x+7ZIntA8Dh7LQXiQ7R54TNXGo9luqJmMQTKWirplW3h3Mo+mte13VVLSCknkINs8R8WJatGiVOiV0l6lOF2liqljhyiZmkmKu6Odm+0gPR79VgCBghoK0ch4iz89DDBiLf7EIaTAPos3Y4e1EE5C0coYGFJmqEeU3RIUoi6F9PSYQTCVPUYXN3Y2ImZvK2CodGOOZ0hgTPKt1whw3Bvc/yyzDqtXOCjY7nLMvSonPMexYfLmnN4pmRGh21WDHBTXPOt7e3zDvefhwwsS9Y8nwaRfTSzJhaDK0UPVc9wINsaavltoU3KfTVsOe7GHFTK
6U4aBS1H0PEzKn7R4awZfKHh4dnz549PDwc1kdEJJKYpKM5zB21vyGGEsHgEeNWa/wab0ePHLq748L08m6JCTdmadWBKqQrDlQiIVJ3X9fVLZgAMZGxjUe243NIBnrBbTHM5nHNS3c41GbJQ5dRsjHXL/47YK7NUzAd4/PyoI84B8gcaCGK+X700slD7lGtGIAd2qkTzYxUyX3HxUToWxKa3hG6fNt41MmWJq0baAkjs1bUI2+8+3HIKcqf7urBhYFmZGEHqVtPxey5+mZVRHqQMwwQd/dAm13o4O3fxsQTGyY+N1Wklu2GcayHyco2WKPQkmgk0Pr7bTvzYYvjNeU3/p2cX/s4dNd91Wacj/gwuluQdjV6XzAynXT7wosl2llW55XW3HZuGEm0NjQCHVQLe/ZoQMrV1FsdjyjDq2Wrtboai0RtCJskJxGB3XjmeGzHtnd7B+RR15SopUCSsruRJwifHx5cFuJEKVFeWIJYmpo1GN00c4+aFtYAl4SB+9Vaz2Wr733w7MMPPv7pb1sfP7IWbuzESENpoxZW8o+2+n6Fl7uHUROGSp+vHZllH6EItoXZls7+ibTIWwvutknm/mzJbtm0cp+q2ycHiIFXlhU43+YHX565HYt96HQW2SjBKbsvbI+JHoNuhQioOH6wPX/PHj7MOAPqpVbV4sbExbyaMSyTOap7Id+ACi8UvqOd6tYwJdPuLtsX/CCDZmaYi8GLSi2+akzJXcFNsLDAzT04yZnYk3UHhEfReoSHIgKO4AhdjyELI8jCGHYCaPjcGxucwnZ9tUd1HaS7J39ECKF26WMCQOQiZFapBzPdHeDmeWxohYb/j70cZ+N8TKBrFANbNItrdyfuaY3N29YKcDFzYBMs2LtjkImwV7ih6UPwXpriwi+Z4O5WrWqYRt4B4pklbmUQXN3cA28KAjUCiUVSYmFCIqaOjydi7x47AUUhe5gTgUOx5Thv5gPMwjXJTLXWzCQi5lVL4LtAbvOhOaYhRFc4tj0lYSaqDgAmcVxEHiniCeauXd3ffYFtIFKqWkopn/rUJ4j4G2997fbmkaoen33w2mtvnB+e/Rd/5Ie+8bWv/+5/6/ect+1cyrKsxIkkJ2H/VSUI/oqviHkCkGWB1i//ws+z2/d8z/dAWOQGQdOmcd7IRz9mN5zCGvT+c1W7zSwi5+cfrje373/9H/zxP/bH/tuf+Au3t7fPP3j/lUd3p+NRtdwebgKfRiRBck0USaXEzKWcfc/i4JwzmGqt21aHou/cXAMppXreWmOGm2AnyLaW8Uf72m37gT3clN6Rw7GXGvy1V4Aws6h2AGfrOy0C8d24IlVVNzZG52LCpCQxM1BDqPXTdByo1o5KHX6PMa4Y92PopnD3XYnErrQ5RYDCK+0h+3A4gwlhuXLnm4tNQuRMTSl0NI97CJWoGOUAg7QpymC36q5uW9VTTUDz7pdi1UiN2Eyc1mCyVnWFECMlyZyWlbcKTuAELiSJHCSVsbikzdzqVsyrA5yJqZjVs6rq1pEV1g02A6OH5ibNylU1MEjc0Ybe1dCxMK6lcNv7PiUKzHZj0y2YWbCX1c5ZiNysvaW7zfZCOyGyhyzuYtqHijzETnNJTNFjZvYLpHzrUdw5Sk3MVgT1YPVyWA+Hw7rmZVnWdY1l1qt1mxFiJXmHiY7kuuhXhASr1GzCzDnnw+EwAPDjTBpfcd8jsaNIOjMTjJmZkncLNpZQFOeYyKybZ3HbNieISSsHykJJcs7bts2TNTYpah1vL+V8Op1KKcuyzIYrwx1cba8CYmgZHVca2n4G7xDuHgzp4z+m0msVkUg2riWieYNCs7mNwhVo7sLJm3u6idMRXRmTS1Nqx1DZfXJ7MfP5vE1KcIsVo5MSjWUQD4z1hpddXRbtNX7GnNL4/pRqMRbqfLrPnyMiILQfeSFYRi/azt2R6m1z2ZSx3yA2PXVHVZdWDzAUa+Jha4We0fscegeYB91L05CiFz1yiD7iEQkkomaje/f69ytRU6/98uidj/UxGmMwx4ROMxs7tC2Z/bt2EfQey0A6y0pjTQE1hbV54YTc4qTgcOq5jjb0f0N0z66BaExY7UYcsS+Pu7yL+q42N9xv/DeIh2aPJIDgtW5HFg0gMEXuNtxDqUNUdWjHlUb2YHPNxt3Vh6K8a8lq27ZZ1aC0zSzdTiG3SszC4jC0qrgc2jl6DZU2HapgapFWciZSVHJyN/Kcc3ZKkEySnRNzimHathqZhIEQGXKeQEFtSj1QYWaw89/8az919+TxZ37dr0POwgRvwAEfBBzUAT1XO+dXdvlIV2zWz8Cdxrps/HkNzuMA3fevvsQ0nEyjgf2JBeNX87sVRKyIGCRhfKYI1TEx/AAXkCEXoqdEH/Lp3kU5b2AiLK4HpzeIPsZwQYU+rfe/ZA9fX+z5ysWrumpwgSpxBRlcCJLYyBxGrrDqou4WRMISw0AWVd9wuVs/+mKH9hheU8nCMmwDOIQyhDjA7RSKFUkSYtf25xaKDXYSB5hhHsxjbZ4cRAxv3tudGwAAwWp4PjqTTfMME/leXoKIiJ3g7BzyLYIZUQgNzbVhaEVHbfgMzZSF4aG1jjgE0DkChoSZj8Wr8xRdCHNztoYAoPanCYs71KM4IaI/XfG/1pmjk8zkLu6eVEvQHtRayRFkYomYBRZFYqjF1pjZq56thMs5HMzD3hiWMbHPa+F8Pm3bOb5OgTgP/LTMAaIW7mPmRIwFKTPMay1mrWKPqyLO18wE0aruXlUpooWiIfioxQlDn1AzT84eQVVDYuH9asSsA154Op24ZX/6uq5bOeWcnyzr0w/eefWV1x8/vvvJL/z4N77xzX/z3/o93/49n3/23tO8rrLeSs4kQmilX16+6n8lW+Py8ii1Z261khsxf+Yzn/l9v+/3nWsBxGrhnEspy7oCvNWS04sFc8fVcMY81cFl4CYzDK7bent7fu/t/+q//NEf/VN/8tOv3T5//lzYT8d7LRVk560s+XA+n9HRJmbmBHIyqcQtakFd940NezqdcmQKigT1agNr1QpYr36WAISPZOyouKLZ7tSgU+29RdUDVzZuZUnd+7JX8zQzcrjbUOKHQTi0doBSYq27frYP/qWCMkuEMGMcaMuYiEHMjWbjaqKp59+6O9FO3hsKvZnt5z3RyKplpq6tXQQTwib0GUZ1tWamLtRaLUtU6VDDZk5VrRY9hQGQiciJAapFH7b6+Pammrkhp5xvHmUlBzYjpcSUiN3zSskVOG7b8Xhcbw4KYU5G2LTqWavZpmSIQCVjcuuHGoAB8I4xgQJIJELBouPqPUdlCIr4D3Hweq+Ltl898jAKR0P2oXPAIhszpDZzipHRaiQcGcgN9wt4MBilRdXMLSqjxFybt3r2KSVZcimlmDISepbdmOV5CgYIYgSOKNhBUwojcF1XWbKIRHWz0IxtvrpTY9iEYWR6K3nIPPGLRJguWGSiL8Q8GhD/xlYNQpphojCzLHlZlvjitm3n8/nVV18Ng5Bo17oAOpdm+DkTiFJKBFHVUkqTAzS7J2M9B2PNbvMfDoey7YOT
GA6BoWrp/hoDWN25x57ALV8NwhzcNty3RnOiD8ugR/WNlnxQ1WVZHp5t63KTkgE2pZ2HN5oBP5/PAMBC1Kutws1syUTdOp0PWhEBtRLA5g1mQhO0+wpNem13RYUGVThd3RadGrmCI8hMvTjVi4uNLi3VcT+AXtypfYcdNjm2abKUdh/H8EmH16D7d6hHWWNGGzXs1gxgDIFJLcjWZJa79q8zs/Q6Whw8ouGqn9zUV6EzIr6wLYEgkEKUb3WHRE9aALTxinc7uzVsSI1uXIUBRF3Uh3o1E00NhonRjtC/RgqcoVKzAJiMvE1NU5bcHchCRNb5otBDYfFr3E/d0BsGJyVCwHx7fLIdH3tYGAgYoTs8WNlaI2dnfxi0aL7DODHRTxm4C9Dp+wMuZHCH1RBD1KO7jAxyphj2UMXdGFY1ACICsogPoHHCDSIrizJu5hGEiwAjE/HYYg4yJ/EgiZHADwb8LR0AqYbtrNtxq2bq6u43t7+GWZmSiFPKQsQsROK2c9rF25skLBvUwILzhiUDBJHtfF4GhLv9/5yV+Ku4NKhhxvFDMZz9BISDeHf6xDB2i3HPR+gO/UmF5FgB3TiksC3Nm9HZkYOjjAvQ1P4CEjjBF0CAAhTQvSzZRSgZkBxifit4Hf7xTPcJZ5S37PyLSd++lROLQ41RiczZnBlMTgJxHhwcLfLmgNHckesR/Faab09cpm7Yh5rKfTsEna0SnCgcMcQibuSmcCJIkuRkAw0EYPewu4IA7rS9PdLe3Ftoez8c64Fd3ouN8jRlpatw0AhAxmmupZfHmDxQzJwkciUi2MBJspkBnFJ6AT4KVWXs5G2lFOsM4eNEGMPVXxRZKtKIIQeA3/czgjncuIaGNMcIMgMIZBlzoN5DMvPwWSdEPQkRBkkSqNdahXnbTiLSGQFJRNhRSrm9W2PLaSuO4QPMGdrJIDyIeyRJaiafM1Mpum0bM+d1ISK1BjcKYjdmNq+JJJKSc84wq3WLszWitCklU5RybuEgYakaQ9NyP9zL6byLJFURAZFKRLSiAEhUv0G4kOPwDnf+SU7LsoRWcT6fn58ebu8enU8PRHS7rD/9t/7Gv//22z/wW3/bb/9f/Y7jVraHZ8vtXZbbEOCqlhqkOVSmHWLcWdrb4vxlr24kg7P4ZjD/0R/90a9/7Wv/m//d/1bLJikBvCw3scTDCW1o2MH5OXGGhJg9a+NbMkfdalqTlqMs6ek3vvZD/4//9K/91b/82pO758/eq3ULu9mhTFHau8b4EMuScpTiY4ireQtZwd1rKQDSksPeCy/Duq5Lyt5dd9DKnCM+5i5mNVZOzFRKBEjVLcnCzGY68JPakG9CnM7n87KuDaPlXsq2nWtH/ybAU0pZxF0HNUsYBufzRiIAnbcTS+qKrAOLWcsHE5HwWRNRhJTdHWSHw8HdJQVV4x7wiW2mWqRKBARilwamm3vlA3eP7RR68JIP7p4zL5bv+5qMjdPjaVG6s7lLIsTKzNu5xm2hVjLMmbjBBoSGZGQqtUpeme1cqyoVrQyCLJ2zFOwQs0RgQn1+XnLeVLFpYl4fPQFQVCmlzeEGc1EtqlrMLR+enjUtK5yqunFmTqbVkqNszfmEXUy7o0HTbTe/U8rruoaMDKAUS9R4AjOZ67Jmcy2nLVJJQzmkibSjC1YNUyd+LaWsaw4lUUTK6Xy4aUUIw4LKOZ9PWxiEYUGt61pKqcUiUYN72ps3d3aT+OfzmTrEH5Egl9LpdKLukw4zzN1iZZ7P5ydPnpzP5yipcj6fmZnYHcrMy7KkzDnnsLs4iZMROGUm2c1+M7u9vT2eJE6LVu2AvdZ6Op0e3961gQ1tUmRd1ybxmpTzKL3A4h7AyK74ppTO5zOoLr2sbQxsKUoizPzs2b0TjHA4HJhZHVCtNcqEApFLyWRVzayqZrN4eNi6x+PxcDgk5m07MUkpFirao0ePnj9//rGPv5FSMkMtVh1Fi5McDoe8LvbMVJWk17eYWGHiyC6mQpSSANi27dEhJ0qqaq6RWlnKU0CWZXn27Nm2vRpNIqJaTfYYHW3bFivHrXkbEWgChbuDGvvx4Om5Shm1jtflTpwTh+kQ4LNtsyzLlRUX34pDJ0B+2O0YWFUnY+bEwaxHcAi1FAm+NHJi2V/5JmKP1GGwTUHUiBNGm613AS2xv5GEh3q9czjzwC52IlDzCECNox8iUJ0LYISSknp4EMyuRi9ED3yCXNALoACaDKHxIWMbMB+Eew4OQCfwBDGHbRpz0T3rcShHNM6CCBnUiijGA8MLGRc6XDz+Su4ta50iXEcw78XQOgdiBEvJ4VpLCWkQbvQYdk7JawVd68/uztIaNhqzz14PYPoUt2R09AFGtDLGp1fiSS210dTULGW4O9RqhLUdIGPHtm2JRcK1DTet1Hw5QFWtxaO0LzdkkEOZISLcXbrefARaq3WoAnEU3lNlr012t4BceP3gDkhGZpxPEIBpO52Wx7cwHI/n+1M9qzkJL3y4u725WW9u34jd0/+lFhwSHtsqfhCRJ09effONj314/4CHB9w9wrapiHAa1uDF4L/40a/gkpdjszom6MX7cfiom1/4Oa6yK4x0QVFhlw2I9pPAoGjhsNgH0FLPVm+WlXB3ts1ryvwK0SsoS17ehb5v91/R8z8Qf485CLyslqMkdq7bphWQvBhqgd0G5Q83gt+m173Q0Vawb2/XsEakw0ptuGwcxb25z3v1E2cQQVg4YLZqJQxegBulSSC4JaEVhCC4mxZYePMZ5HBtzixHSK1yOrV9zUQIwGP/LxLliGEWQIlZGFKLt+1HrburWjxNO/mnwzrpiRBR8c0NZprSofHcQyPJWoRTSuSNaA3dX4zum54Piz2rcKqm5hMiiXlRqzpxTTNnZs6yDBfVbGGG1RPn/pzE2E6v9uJdQPVcHfbh2hSWlHZvYihyQIP8tqwYAYzHODKjQfiAJJJFqkhKQkRZEnfihLEXnBylYcsSM5Nbg5c7cwBVglmkGX5kRh0D46HkwliayRsMcixggQWntntaDtLo1xGjuSxLSqmUc/O4pwZ0jHvMahIm8tPxXpLC7Ze+8uU//af+1Je+9KX/9e/4nd/++V/33lvfcH7/jU98gomrno2SCAhQ062eo8Qz9ZEdcI9f1iZMKcG1njZyyLK+/9ZbX/iJn/je7/3evB6IqJU7bJ4tGtKhEVr7+CMA1FqJwSLCOJ8KiyyZD2vScs7kb33pF/7If/b//Ot/6S+++cZrz55+6FbjOPIpaheANafIfTc4CVyYJLFZocvODF029L/ovnWSGN/hc7sbezr5aBgMccUSD8XULPhJ9iiKqjIRM+dFQFabt3TA/HY1aPfQ93wSmuiI9q1Cg3q4BaNG78aOCjXBmKopA6oalZcGgD7EWbjNttM558xp5s9oQ8oWaKso79b/ZM4OtFIT4VIEAZklETOxCxlxSAYmDiREb2Ez0EFGRq3Mj7nBlUGKRFxdAggQB7wQJzYhBtVcNRETLI3KH2qhl0faTE/oghOqc1InUFU4ExtVl4JEVIYUa0ue6OJUiOkIl4naKBM
ym3kxPpi0n3HDvNI4YE5oqYBExI5wYMUi1FrN2Kt6VUGEAXzEA8e0OhE4yeKZeX6vd4OQp8/nlsQKjPSwuWGxOAdp56zAhe8w8JYj4jeKC/Xm7e+KGHtcOWezamZR7Ij2WoJQ1WAZpY50ndfbMBXauIW7x93dU5Jaa8QMvduKOWeLelj98qXHiJgiX2cOa4zox9gdoztVi5mZkTt1abCktKiq1r0wABtVxyAPi9DrJBkuSkK5uxJSF25mkg85LOHjw7NSyqNHj54/3dGhF9Nt5EbojmR3cjdTj1OpR4V8YNe1NINqNPWlv47+ysRUNF86sH190LTTX82fjy6PJ9MLR8UQsPPTxjk7Ld14oIeEA9A9+y4Irn4PzoAYWAo2fFdQBPTDlR4vclf3MYzRP4YIRd3TiGJHP9Em7LrRYShSVNGIDIBecgbAwBNiOghA1MppYH9tu6HhJX3yyxGFG857xKJVzDNEaWVzIkQRxhAQjBa5gloI3pARRMHO1wz0xII+pL2EsndA0hyP9R7Pa4NPRIkY5lHsIRFHyhRqQOxemFkgckDGr9i3QDcK+onfIfexYN3NEUrRtACYmaJMvPUjtIa5bMII7dTN3XxJOdYI3F3VzdxcoaIEVYrIFLV5NLM44MaUhfQwioqCNLofP0QcvCVONfO+LUeiVY9nUS7lWFRvX3my3N6ilG++/YHRgfPto9vHt688zncHkLvXOR/v0uB6OVHDkyevPn12jxCwOUcs2gO84vsz/nGk/Lz0Ik+//E2Y1jp3BOZl7+z6dusfs7sKOaQAXs/PTe+J7PbudYOfq9W6rOk1SW8AN9AD7H2c34G+I/TU5QhngM2IhM0LdKsugDiYSJzYieFBJBSVrtrhTgBdNGp2dtBFS/dq2BGVNYKAvCM7mr/GYAyGE2BGRkhBCxnkOy2+GvvY3d0kIqhR6TKiq4g2+sRtgrQu8RUBWAREMPNOGS2X6qcN3xYQasbw4XhLE9iD0k0dFbRjLjgjwEZGPdqE6SxucvVSXFyvAvf5oGHm1KJ/k3dvIDsM1H1k5uaGIHqMrowHRtFCFgLtBYZ7e9BoP4kIE5/BqOOOOTkhEec0NbS6p7Hno2UjsRihnhq5a+B5zKpZot3zreQ0dPcY4pAa7k6+Q62oe16rgYiuOrBtldxaUlmvz8hM7grTcEdxh5q5N9ofVa21iAQhBYWvy91jFgdxa4CDT6eTiJjzutLtejgXe/ebX/vCj7/7xS9+8Qf/lX/1B37b/0LWw3vf+CXJ+ZXXX9dSTCsRSUrSHFG2bXVZltgNdCmAvkVM/f7+/u7uERxQ++Zbb5VSvufXfo7SLlDcPUjZrqTDwNYAsFIkCXHUjPJHhwzY6XifDjkl/OUf//E//Sf/5C995YuJ/fjsQzufA7bWbJMeNHN3sFPbydJBAkbWi8s1xX/XSIbbmIi0amCM/UKru1TKL5HTw+QL3yoRBcSXRZwppRYTVtUkEnSLquooRK3uyvCjxAPjhrF7I2g878AXd+P4nNi9dvmmBmqeUdFKFIAy6ziZ7obpt1ctOefEwg222rLKzSosD53eOqZ9bIcxMkPP2LVAMvPaOPwSTXeisZ67JQqaGcDd2FWNyAtgPuVMwoSYmYXU1RIrUWPzlzYR2pEqUUW3edOJSJsvUqsbk3ByM68uCHzQnqy6j62M9PG2fNzda919WjHjQ5hcjQZ1kpL4vFuDybu0d3eabHKPAA6ZWokFkFjOtTn7I+DjTtWMfHdYDBPOmzuQ5i6gC+45dDBij3PsqJRyOBwwQeDir2UsJJi5q+qmNffTgrrPok1Qj1UCyDkvyxIpu0Hou+uII/2v+xHHJyIXFCDeKUZpMmkiTm4tgRMAgnLGrQE39rOnhWpc42Zq8YNhLY8XNfOyVO6ibrf9+tCN+5mF2aCm6mZmHUGlbj1ToBHJeKMWj786Azc3q6qeThvwKOe8zSiasWYMfnnWtJW8e44GVb30E22XTn1kdhtsnMHDbNuH6IX6UeMrNPmexntTSnOrxkobB+u8/tFja1cPny/qwM74NbGAKOIIDZwZx7q7T88f3QlX61VjIoBGtJPQ8HCrS1Oiwq3dAnGR4II9tjGMUchuXI2q0t5Jg9COyP0a+33u45iyXcjL2IA9dko9RBa+OrvW+ZsY6bk4+yti8Vz5brv6Rr0jFIntvrcJCD3W2i5yuDlzkxIU7n9mWKu189I5fGli2fiXeqeAnqnUmxTWXNOEzaz5KL2ld7oHGCdTf8g4S9zhxpKG/dzsYDUAXqtPaOQQqlZ1ODicSFoQpbEssnMkAe9LnaBag7gn6hOOScyct227Odzmda2ne5idT/fffOe+2uGV19989Y1P8s0jLCkMBqLco4LfasDm6803PvaVr371vXc/eP3V16PqBQhaNaU0HjMMw4/cVP8Ir1/hO3z877fo4DACMRQOt6RWKW2MU8Xzh/IBud0eMvCEQbAVVt2eABlwq/dUnnp95nRKK2A3oFvgllf4N58bCrsy3Ekalj4vcCFkkAAZJAxxmDd6iqumssFfCB7Gr4E3CJtQfA9tuFvotwqwweAmxPFs8ogHhFHWOBbicEdo+LGpiIjIqFKjMG1Mv4EIDUigqhpR7tkHpZTE4qFPxLbg4Ojz610aBe7JsAulcaZFtjxFZgoRuUX1sfDShGNrFB/ePXeOHdUwJp+4Ua0MIolGsuLSmKB8RNypf7Kr09bT2t1CZeL2pGB/6ilRTYqG8mlhcHYERYiJ6Fvo0yBTJ9mtuziH4riaPeXWbDyvtMdkGqrK1eI/da91Uy0w01FgMbW1b8395NL1FiJiBjlVeMtCgZlZrbXaPg9WGrojpSQBqTciUNnOZkYIMgnjRo5MZka2cxOHRA89KgSotUpfDWfy+JUnpZQk+XY51Krn44lTvj2snPJbX/sH/8H//d//r//Cj/2u3/27v++f+adLLedn7603ryFsNtWyHUGUl8OypNmD9cu6owg4Hu9vbm4AbA8PmeXjH//47/zX/43Pfe5zFzf5zL3/4uUAOK9dlYeez2ZgSYdk9+++9V//uT/3X/7on3p49vQm56pkWrRu+5cJ4HCVNl99T2B198gUczdLaaGe3ELYD4a+DPalGZ+EIlh1E8io2uewXo2Gmg+mVwgUaTpBS2gRCfpaTuSFrl5kZtTR1yNteFYNReI5ZkaSEvPQ5DoFWt9R47HzHiOCWuNUiJgIEQgknQQ1minB7w0AyDnnJYkIgXw3//Z6gwAkkZcWFnhRcRlT2aZ9nLKNbyE8HuOvLvAAU7n3teYhg9TdIU2PYAeINaQJyN2LasTa9gRNol7I2GNJMKu3PGmQGpEanMnFItygY8TYGydktFl60j/UwsfNCLTVnlSNS7N8tOHqE2aG1ohIAeCR2jRlenEnZo2HS6IkOaW0Rdmr0FKolboKR1ms24i0DBtAtU3K3Ax0L8mV9j+mJsRU/MsdGeiReMZEaC6JrsJe6JwUazT2XYe4h7occcJ+Bnio0aoyFtIYyTjkQBQ8wFetHW6X4XcM6yjYn5hZ1cEpJUopMSU30kFsE5
nbZs7EcDOP8zW2doxRBBVVlRyHRWInmtm2bafTqWxHeOtOSomLhf0dlV+KNRxvtQZLYGai7kYMJ1erwukKLOui20OAmYmoR2XrEEoy8RVFmpk3x19Ltgy6tm3bRLLkdrZ7j+mtchFgRHdbzJG9+TC+2r+zAOmvprGM58de3TkO7Ljo0swbTxv7osylI/qHPkyFqrikkwHAM+/BC0ZXSBFMXyHmkKp2AWjsBlgX9a19I/4124TTNex2uzy/YoYGiR+/2LCuKuy/Uss3hrnp7onv2YMhlX3w3rWUxq6GxlGPiNY64K38+hh5ahla7mYQdJ9Xn4ndEXAZwwk+YeE9EdocHbXhH2EZDJ/7PGgXa+PKNh7IT2Ak0UURrDYm2nafqZoqhHtWWrN4G9ZIg+jMIhiYfNRq0+Z53h21aqalnIfoCNhckA5G/QxcnrxjCTRlYJpyc2+EnCkdDoftfH7nvfunz+t3fOd3PXr923HzKii5VmcnNofzr4w/b1w3r3+MSL72ta+//mu/C96KsID74TGGvZ2ULw8z/qO8PlJb69cwTz/yMrQGD1Ez7QUDI1d9Bvpgq++TbEs6SD7cPzwXuWG+WRcSPgArQLwy0mM6H0G3td5sm6dySPV2O21KB6KHzMQk5lTVKZEsKyGDEpDAAhdcBbqH82I3bl4YTwpln8J76zBC6t9xGzwM1nQoHTYhADcQj4IkIBPJRo0ZhVranIXhBApSyV6Hqm9izB630U6mSLYKw5K77A2q6l2ViqYpEmegDBExfKzcgW+xU0bRKwAEwY5nbCac9OK9+xDGe3sK+hC2swYy9A1pVMyx6cYxFKqsEzGxdE04WIvjXFBvZFcXOFIzS077qYPmtY/hayo1mGZgSbRVphrEGP7FvcZu89NfKhygyeAcv16cCk6oLT+K3Mi5J322JPtAiod9iFZVLMBUaVkWduPq1Wu03COs0fI+p0BWy9qMlEUdjkZrif6tSLQkJsZpO2+nsiyHdV1rPZ63o9RqIH043j15/OjNJz//cz/z7/6B//1v/pf+pd/+23/7Z7/rO97/5luHw+Hm5g4p5XVt+8UNw0YfpMnXG2W+7ObmZjuf6/nh9tHj8vz+h37oh777Oz/7HZ//nGkcUbG1Wv7yt7hqKSlnwMrpgeCypPrs/S9/8e//V3/2z/zkT/z46fnzz37Hr3n+wfvPnz579OgWVpXMYjF1l+SkuwyTaXgurKUvTlR4PJExjgUTAbTpUDGatiWa+rKHCvsn3u6vG0W9EDRvBTmzIIDaOjHXUWM7J1iUr0mxRb176M1dq0YOQ9+3uz5H1IqX7JuilcHwsQnjRLV+BZ/MlW7nbqGLiywUXHPO1HiMyDSaOnFpQskoyHjJEZgM67ubgCg9JsQsZKLKVR1EVKd0nHlIm5bQlzwjeajIDoCcbOS0GojMI4asbhTk/tEGYu/cXwCcXB0RmwSiBFY7XrsbbPepExEHZx2BmaLadwyEw4W6UOgl+66MK56ueT1zp6UaizMuEYL2rAY00pWccwuBikhqK1O7/tfsMck9dCzhoBptmH/2yakxRpt6UHHIsXkuwoJalqUUD8uw6UPMEykSOpR/5lYNoou2+McuGCPj8ICS9kTudjAMgWyRlkCU0hJfqaq11gHmRxe/Wm25TTkvZmbw4Py4v79f1zVi6N6KHKpQrdU49fOi6ZX94AimMRF0g9AjESI3W7RWPR6PT58+reWBcEuN0isDW601+OeZZTyNJhbKsfuuFCV3P51ON0taaFUtp9PJrI6zRvssX0oVCaExuyHGV6YBv5jN+bo6mG1CIgy5d7WEMBmK49ykDjSaerd/US6LZO7vpesnj2U5rZ9poerkMsPOwdg6jhY83OXb9AO02S2N12S4Wib7xLU202tgSaZVQSNO2HJwXkI9yA6jPfVrDG9zv/dw5ZXdiMlsngdhRK5inK+s7vHF6VcNHBARmTn59eQSEWMUgDFxGb0Luy66ZeZEfWa7/8U9OFqHyU3QOCKTTYfOxVRq50Xch9gBUERWbTdT97Eae6NZ40DPWWg5ru04aDUqxlR6tyQBWK1RhBATM004KfGycjujdVc/R/CEmBFRlGgnEJxSANytuxdiA6bMKA8no225WR7O27bVb/v0Zx598jPgxzAxJaQDsRts0+PNt7AHgZdYdI8e3z56/M133kYxJGJO6kYsdh1qNPwKjLWPeGNcvzJL8hrO9eINV79fPXZ8fS5gMZ07DCY20KmeGLrc5IQDIInvhHsJUyRgAUi1ynHdSi56qPa44Ibp0Sp3norxh8QPIiKQrZBbdWcRAWfmBbTAEygNBG+vQ9gb5Zg03suO9MU6Y0fjBwKo5cQS2GEgJiDI7dnCy9RApAyAjEgACTQ4RxTKQxljJjBcyeAQsIUkcW0mA9CKtXFOh65+DKXXiJxIzXJ4wbq7xwetFAtBzKL+BOaKZZM26EQmIkxpVm/GeYFuIg1pMG+o8cChAACQxGP7MyUCh36YJClG3D7iaXG+kFkEA0AU1XGcaC5P5USxN80MKfqobtrr/1bTXmV1CB1ygrUioCaExFK9REmJ8LMTMIiNQzQRUeKG+aQGBO3pK7gIHMUnfWFbpz9Ow9FOROfzMey+lEK1xVa0lEKSZmnl7qoVZsuSag3W2d38VdXkTkjMiFqFYQ+WMoEi3AGTzl35/PlzVT1tZ7p/7oog9jgdt+18PJR1e/BXH90q9Cd//Md+7mf++//5b/2B3/bb/jVjUTmLKaqAGQQj4pQi7O6B825bw18mhQzA6Xg8HG6WvML8J7/whf/mCz/xz/7G3x9e67bsXvga0JH5sBEzSjm7mZZzTgKBfvjBn/+xP/Mjf+pP/NJXvvzGa6/ePFq//pUvpcxZcP/sWUrJWo0RRG0hcgtITSs/Nag1CcIkPXw4X0PX0VGXLGXuHD/M7NiBbUTE0gxO7X4HRNnA5lIIvTAQ6rui0x0nux+dmTsqlXszAtvpF76apjSoNHDCrm/tukjfgUPH6tubJCaRiAZmKfQl7VbufnpW70C+MP/EU3emdBqkqsNV3MQEO7EHtGBICiIyU7OdAHCfcWoLafLIEwDXIAp3EhYSIpiBQVGtaHaxD3d52CQY2qS7N8eStGwfuLt3zEGLHgjYSbXnF5ojTCRrLpgOJuwzEuYudarH4aOaxOjuWZi19rjY46S4WG+xUPwS6kbdkZFYymAaANWAekZuEZB4DkzjXLYwjL21krmnB0TMXOcy1pPDi3ZnHoCWOx1/2rYTER0Oh/P57C0i1RIyW3N7ylz1oFZK1hdtrGpmHpl+ROSOQbQzW4yzz4V5t1LQ/Rc8hQq9lYIobaCojVs1fXh4ePz4seRlPt4ozM4wU6kpudEDnvIV0ZG01FCE1WoRFoI+PDx88MEHhPLo7jAmOmKJZhzpUDLX2eOOIWdSDzpzinmZ5SYR1VrP504zRnQ8Hhl3OlG8jrMGl4dusM110UGGnWpSZGHmej7NIm6s1dnAG/Glea3SpZNorss6is4Pc+Vq8Y85mi/qY4JJZI3uzBFLmlxpifJ4LPcjmjsue
pwj4wAd3dkbQ62b48PmpiGCmfYyGIgRDszFKNhoBuExod0HtY/keKZO3DZjkKMJ0Q7usm6s5xApfkn0F0RKV6Man8yDNnr34shjirRfLezmyJkmxT3oOffBISJwIoChZuaBJ+/eHSA4qZnRqtjPXyTskd6rJuXZRzk3CXtpinGsxD5CLxMysBIAyDtYLRChcaZ37yM3H7PDESzQxHtp1nlJkHQrlxAVuNFyDUJht8tJ3KPoQ3i2BzqxyKlUhWVwUCi/+ubHQQku8BxubwM5WORuQCj7LPxy9hXTJz/56Xf//t89Pn928+qrYCq15s6F4RiZb79Ku+4l14W7/6Nu8Jc3+COjA9b54adXYH4RXb7UDZQMwSxDIliBXNXWw9221e20MbNkYfKyle3sS31c/OTJeHmy3KVb+liiV6Fneudduv+AScmMDSgKoURgXkEZSACHI8dpGNcT3WpwA4MihfvyCoGsDdG5m4UGgCl5CzyYM6I8iFEiL3H4t/kKJQQkUQ26ZRmwuSKc8Q0KHgIeaEYSWemYdjRmXSJCYirNk9Uqr3Qhg5SDqfNSSlDHSw7ZIuGrq8X6d92dQk0eR8M4eXEpAGf9x5u+t6MwxnvNLKVVmCOVA7jwMw592xtt88VB4O5EUQsnetrqmjKz94qkHkmAu/QJH35s7qhy2A3Cud1D+ymlVN0krUSkesFyFrE7IoIr9VEbA8Hm6sWs2c3hnI5rCQxKH6AhoF86guNPQc8AanLHrebDnZnBNDIrzRF8DKFjDe6Q+fzj6aJe0Pnm7s7MmKSUAiNmI2etW0qJ3MjKs6fPOckn3nj1/vkH/8l/9B/8+J//a9///d//L/7m/9nnPvf5u0ePIEycaEmmUbNFSKL0CgNQeHqJQQgAh5ub5x98eFhWqJ3Px9/yW37Lb/pNvwkie+GIl10epXipVXYiotNpY/iyrtvz93/sz/7Ij/zw/+etr/3iK3eHj7/+yvH501I2hpOhqllVThfubdPOlDtlERBj0rxtLPGxmm3C1zU1FF1ld88pkflIu0I/tqnHIoioRw8E7hE+8u5CdnQQa1tpOsrQx8qsRfsiDFxC80wPjzv1cEokSklqL5tXr89e0omlY1+HE+MF0DxL4wjkCfkZPKtzNux4bLMnaVSuQ9RZZuaRHDwu69fQUXzScTEJLOo6OpkDLCMOP1Axza0cv3RXFkeJHpAQgMhyCdvLXcOCDMnqYWkEILl5v+GujoAztblmQIlUEc59d5ee6eDuDQ/hg6Rh76BP19y7q9toqJgAYGHpjfEfSzECcoml1cQSSaDAYbq7qQLV3as6oMxcapnFzqyZjR+G6SUi/sI12EA4WJfNVPX29vb1119/6623UC56NNbJ4XBIKdUwR1WbnwuNd4SIzufz8XhsTtBe6Hy0UDtxqPZLpMn2kGPDmeITvT56JMp6OGX44KL78erqbcBTEvUyRmOctaOnQ24TUSTNtgdmcvZt2549e5aTD/fQYAZmEXXyYRBO+46IQBScGfP2iVdHcNU6WiSnTMHYJDuycZ5Nn9Lj0XNHxxtpsrXGBFHXX4fhh8nC5EsP7rxo51+vyGb88jgfbZilKCaRMv40ebtoNGMWBWMKrpTyYTGSNOHZLAd0vq4p4PbSa+4ymDG9Ak1nCAZAhHh3dyOQtXI7bdiHyIwvEsGd3Wvn1B2bt2/wnsuHVkcRkyBtJtDETxY5Bi3TrwfQqAfzrnpE/a+zJwXYsZeta7NncBJNRIKuKfVVQWYYebNE11bfPpSTPBk9enHWxrtmSDBNxqFP9+z9cqfOBLBfoUKH7TpsPHNzZ3VVTSzgwObFnTHWF+FBn1SmedWNvxLmYvT7zgqKZurCeTyEUOEaafZERCz5IABDHSmBiAXVUSso4cLyaTN4FYZ64XL+1Ld9+itvfe2tt97+7OuvOSCS45QKK+RyjD7SMPvo68qSfCnodEIbNjKsF9t89Ylf/s3wwnPpKubWbjWv56qbVnLImVhY3PjZw32tm6OmdTlgEV7lcLPkxI4DveLyHrAZsuA16C3Kw6PHn96efZ3sGaknuDCEICKSFkjulS3IneCMVmoo8g6+5WhddJFBdjlunbCMFJAerA4WKAEZmuNb+uYeGTFq4WsmiXO8vaFvXu4IwcE3gL5xwpY1arUBzE1o9/P2cxpNJ+zuJ9XGba5ahhB2926MUQ9AtsN6tKftGhpqT/scL3PzzdewSsZ5NOkPHHmR/Vu7Ek6d6949GHHC6d9HAxbuomYZgloOIU9aTPxvdYuCpOoXKOCxpUfeSHxdO+/quI2avhIdQKQUA010ql1YNkPoJBaV4YM0UKNDXXyJyGat1XsCTEoppdW8VQSipqKluRZCc035tdwMaT5JrabW9N6VbaullM0U5lgPd3mF0el0ckWw8EkiimJDQs+efwjgs5/9zNd+6Ze+9rWv/ZW/8lc+9+s+/8//C//i9/5T3//6G2/6EbdPnnjkX1typqhu/i1qdz7c3z965RU9nf/u3/t7pZQf+IEfoJsbPDzwzSMSHqpR18ABoJ896ubSefkOh+Xrv/S1v/QX/5s//+d+5Is/9zNP7tY3X3t1e3j29Pm9uLFZrTWn27Tkk5qZcWZmtjjoCApnuMFzgM32cJaauVmayS3GgGPyl0/HZ1tf1uFYQysah1zc3vOn2Lm5NyO/cMxpPN8m/AztKg6bmci1UO5lM8ECpiwieVlK5wQf63/04oVl3H7QogBYmj3Anf2Wp2DRUNPNDPAowgEgkqVsio27O7rlwMyhQMVgzO2ZWxJX6Jc8Ofj38eye8uZyi5RLkDpAlKTdFvzO7ADYhzOJLkINHIQcDaXWrMGIkTY5G+AgsOsOgRojBgXI3bv4i7ZaXw+OgNUn4kQcuQGR1OsA1Eg8qO3DMmUPbIgPUTikcOhpVwqTmakW9JJ9sZJSSkbGzDnlbav1Eg1leyeukQXePSzzig25N3hB+sw2ZpqY04iAhUH4/vvvH7eXGJkAWtG/WoZOPC0hmNn5fD6fz8HzHApWt0v3YybnHHDNVp2iP6GUImkZ7xqeBXePfWFButOjWLP/QlXNsMl2WFZmcngdgPwuUL3Xr2uLP0RxDJfvfDm11uPx6OsFPjP2JkveqhUd3px9X/dYNL2oahHRVsp6k9fb25ub1cyOx6OZ3d7elvOF02QMtU+XTejNyCHkJKOKUqmbO8lllHWIMusJipiE3ouvm8d8FoPjGvJz7rhPFuzslQBQTWfJMEvFqy3Qnl90lki+6xD7z2EQDvEy9v7oCF0F6qnxf9Za13wAkXWXwXgyERm1DLfRTndn2TWE2cIfpbeupkkm3s42kl1VMesecdqdueh07e1pV06Ey8UzmhqbGZNkHvbkaMlQyPoDrlUdm8qQzBOELjH2NvjF9eKqGNe4YZRI4Za+PvApL0y9+4zgdXdyeM+Noc4mQxRoJYeaupuqpVYqCWjqFIhQbeBUx5oZ/FVjnVzN3dU4eyeZG5kj6DuCycu2HQ4Z5OfzOef8yuERJCOvzfctEAYYxV6SjzapPx9xqdJrrz1+/Mrb77zz
Wfq8qpPQVuuS0vRF6w/7h7nsW9qEe4MBvOyG8cmFZUjXhmJ7+KRyXTScktZyLqWYkrlsToc1LcttzoXIgqzFkYvWIAmGo/rBcWfIwI3jUQJANzePXsvLLc7PHCrMizBEkhCiAh7RPBm024RXnf3ocWgZf95XzYgTajP5oES9BG1fsy3/0IfOKQFBIgjBqdd5I2aYurnuen/b170sEMGdRYaz6aJthKj0p6oD8NWno2X6mFrUj+Hm/W/3BVt17+MgR6xjm/eR6Y+dCpjhhX00xNHYNVG0b7Kz2kk96yQ+eQmZEomi8YIbyMyqudI4f9vL23OSUzKoKU6bEnGEZbWqSI4sYTMrVs6dfVuWxFUMLjkCLAtxcjBLVjNvRcOzOVyJKoCFA7RnlUgyc4Vu1TOvpBlbAoy8ZjJmUKbkZGpqqkZMCSQmALPLouZNgwZVNYOTsPkJACUiIWNyMgdIeLNiQsJZJAtnd08G142TSpauEgU7tqm2Un5ETuSNucYqs9/wDTOvyw2BtlolHbByVS2q5f7h8eM7JzmdTsuyqJXnT58+vsvMnOjpl372b/z9n/6pV+6e/Ibf8P2/4dd/72/85/45Yk7r4ebuNj96hHUBALeKu1rrkoWjApBVgGB6e3Ow4/3TD5/+J//pf2yKP/gHfxs44fbJhoeFUido6uedw0uJZAdYnAdWnz3/yi/+4o//+R/9m3/zb/69n/07bvX29rAd7985Prs9rMyHrZ6rOcnyoMYOuTm4yHYuunVzTjczA0NIVKOOo0SxbCc2kDoOKRx+kESkwiKJxUClnAEkWbzlZyIz57QUfd7DgN68LJJrrRXGkY5IDtRiBPe0poaUVotwNchU9XQ6rXQgohEGcTW0HCQGQ904C6d0LmczWw+5aK2klTRzYqZSzrVWrSW7UT3lVRYRh2YGct58S4R1XbwW1yTEVtVAxct6SAZXK2ZWySkJgKoqKYV/IaVUStFSiGjNudZSz1tmyXlxrwxa8yJk521b8kFA5rCqrnCHas3LUouNeGVCy0hJoFVkTUnhBA+eNtVqxCOnN2gzw1wMqyAxYKa1AE5kicBpdXcKstEuU5r4GA5Hh7ObmauzcKD/qDuLvJFMcixgKBoMNb5elapJYiau5aza0HFLziKybZu6snCFmZWcczA8YY4JdCV4KHneXQApJTdUWc71DJCk5Ea1WJQVNTORbGZFt5yzm3LKOefGf+CkVswQRJ2n05FZiN20GpwY7r6VKhJaPncGUSciISaCsLiaKuBk7kq2pORGWl2YRJIwwM4iklIgqdTtrOd8s25Wf/Hrv7RZRSmc0nY81XXNNzfMXM8b3fr5fK61mgIZ3mZYkixEVEw5pZu7OxXadANADMnyULeV7CblCld3hT97uDdiIyhQ3baqvNVSawXlnKLKQ5wyIhnA8Xy/risju1MpZTudk1CWdLsevKo73d09ruZ3t7cuy9mELY59JiJzSmjroRS1KLMhYnQkERPb6lZrzb465w3spoX4ZKSlnF2fn46VnFehs6tu7kpOCSau5PWQVlVz8Ol0ymkFsQhD2Z1D9dCiESw9LAeqzkKrLL7Vu9sbEVmWg6qzWjH98OF5Oiw42wbbXNceoAjh40CtxmYMuEftayYSIQa5w1nSWI3jkEaHgEaoeaxSMxP4bBV4o3XNZSthlqFpzI3ixOBBCtS0Bzhxx6VH7qXpvBe4nMPQQSuU6tRKlMf7hDr3c3eEVjjDmRslnfcVwEJk5iBL3PC6VjdGamzsWptq5V63vbaQu/vmAQzIqQUbmbhZeuYEZElWlUINoWBmJgotVk9N6bGu0kWWfYkaswy0vEoRgYhu23h1G7pR5775swiNK8DJDNQoZV07TpUIROw+4AlN1ATMRBt1LsyJKEuCu5VKJE79JeYAuZE5WFJtFUE7uRYgeYGqgEQI3jwuzaBdUyiSXW13NHDmnqgN6kXO1MTTWDxmFlWdmPl2Wd3VtWqNkmUIJY7CMFZlUJs4dzcjEZRKGNz6ClMGiJuaKyLkDlMmsDBS9lK1lgadH9F1NRZhJteGLBBTmB6EmHay4pQS3FU1ZRmGaZs4h7vf5IXIzbSWTUTWdYXwtm28PBzIuJ7ZkJlWyp6oPLyT84LVnO4UiYxSRNYqaugIQHN3XqjzNBtaFoHxGwbL7cdee+fLXzm//3R95RWca8op/Ai9eDlzAzJeV8j8FVy/kjISe6t+mRTIy5u/xV+nyMaFTexIvL5ys74yfydCsbPdE17fsAYSATgAB+DGI6E3W31Ez/T56weuT89J0ln4xOl9ysJvHPLdIhmo7id4JiwYuNDGTQCET6LnRr2sHyEe5CqEzjTMXUXwl5I5zFs0vsZbAjvjsAynCIjBO80eXFW1uiuHkIq2hEekQ2zcI77Jcb5v2yacRVLAia1YraZKnohTboa+mZsiEvwTQGSm6ER6zCwpefMM7rAUbkmbUC21VocGR0BAH5io1kiVSiBqKXIQVU1pGcpP0CDE1Kmqe4iIxjbHTKo68+EtS6P2IG4VDuPgCpAsXIJtYyzH7l3SNMMhuOOgjNW8ogfNQBc+4/ny7hePmMx4SLxAVZkIIFU1txFUNDO4Moc2z/CoFWKmBnVVJWGmxMxkZKpa3eDWC7N2idPcWG2eVd3J4zB09iDBtMg37PAb6aEYd3cf9BLoiBrr0KmwD2ut23lj5pzWqOlcio5kHumYqNltxjmdTpuI3t08Wtf84YfPvvCFL/zEj//kenv72e/8zl//vd/3a77jM6++9trjV195/fXXn7z6yvLaXeKEQEtrARPIIFSefpgfP/7rf/Uv393c/Nv/9u997RNvbk8/BMnyOMeap+GzqKXWmtZDhAjv33//y1/+8s/9zN/5qZ/6qb/1t/6W2UMphUxZ6P7+Xmu5PSyHJW+1eFfEB/iYpoQKZiYVdArZEUImjLDM7mHtK6EVQdohSdYAObEq1Oq42buTxHpd0cnT7WP91KJazcxJ2ikYa1JEzPp+wO5Hp+48DZamIHgcGlXfGDurpLvHhBI1RSSIOnLaC0kP7753qB4mHJpfJvCMbUJ9EBqeGfumUG2A6jagkyu6uSqGb7i/N6CzIhIhR+mLL1ItbQez7WhJVY1iYxGxIWaYe9VQaf0SpRBpw31bEU3Rj+FzGk0V8IsHZwxBpLrTy66YOJ3yc0TEbAPIGgjZWqJFo1RuIIcODrHhPaXJaJxHGy/zT0eP+oq7gNu96F6WVnBWbQITjgTU0ZFhHoxF1fK5O6HzDH0MF0bYw8Oi4NT8/T4llbVe9DyxWZy2+e2LaLy3OwIu4gwYIt6dmXPO2klltNdcQof9c6dRzTkfDoe7u7tHjx6hU5WO8VRVYjIf66R9PlZ1I7bpykEbgQ2upupRgdcs8v02ogHVbjBXcwqhCu/sO0G/A4qHj1lmZuqB7uhLsRIR1CWXYJgaCYQpJZ6iTGMvx9imlMJFWkqxQT3dcfne0znG6pqEjF8tiXa/6SguPO68nt9piyRJV0s3erQsi418ih56ArAucc7uq8tbNSbgYlM3PDPx3ux5y4/c0Xk
x0wiO9eBhMHTxFLKOO72jNH2UQh7/xa/DoriILPYKuvEWAG4XKNCORIhxIzPJGf2oaPfEWy5xFuMHH+szDKH+LjO7CKu4+wScvuiCOxOZdqDpNOnTW9zMuG+95E4z2rYHDIlavfv5vehyY19j0592acZRNWInxblcezYsMR7a5RTu8CHBYsSYJXqnhUGj8IOZBS8Apn0BYAQeZc/OuDiXt22LnW4d0cAdONZ3xMUpYBPMHnNd7LEp3GEAVavFWG07UyqUTOC2T5x3q29kndG3juyZGbN8/OMff+ebb7/99tvf/vgxsC+EX+bL/+RePuSzizBzorSu6+3t7XZpAhC9WE3zV33RZbr19Z+iQe4959DQJQIAEIe4jmqG6ITAiXOUiED/eqzdWYgR7Tks4XZXVY8FjM51V8/YN91en2ZvcNcP7SKdh65uGyeXw5uvJETopa7UJC125phxzZba1Sj5VIcWQcDBHZXtNn+F9jyXJv3mfR2i7FrRCaVtJEeGOayqZM67WbDLiLgGQZxPzHhoxgCJSHjg+lvcOuCVmeHcSI/dyGBwNjIzMAGRX+A9tMwwB3FUI2hMJ+QwGFlIPiJjonAHQOFeOZY0EU8sgrMgoy7BzVr+w9ALvfOC9DuDWMhnhWB+ZoCYa7XTaSPwVps1881vvvX+Bx/89N/5GU6yHm4/9omPf/azn/nkpz+ldPvmm29+93d99tOf/tTjR7f57gYMbCU/PvzpP/pDP/zDP/z7f/+/852f+254We5uEC5e65kAQcJzLuV8/u/+6l/92i/+g5//+Z//4i/8wte//vXj8aiq2/F00odlWVISrZuWEqng51LLthH7IBI0M2cit6uVgB6DVlWWa1kwHBjzCMSMtHXmGpWqmFnhbE1BcXezXdNyd3ayy/x+u8xOnN4rRCKSW2CKSCiNY1JE9AXHwYuPHe0fKnIc0pmlcMMHRu8Yu9qUUvJwlEwbGV3RNLNeNGW/Ago1lnrHJO9YtTj6aUpqnbs8VteuzfO19Iz7qRkPF8XWQoTNEmR0f3+FT+I1BMrOJhW7qyEuvCuX4xX7cb7PTrgjd/1pTDHN9hsQA+LT7Ezb8Dq3at5lHVW7Q3mFSCeVd76//0y973u054WrzYteVhf0YV13S284j2rPeRsTx6lZbDHv3AtCbNvWQHFJxgDOA9KtIx1/9eBumUgymRkkVzuibQGRKFQYNTmHcjbKXehUhLPZAC/kQEY3SymllKjzHiw4Y+V4L/+AMX2Ng7QOCtO27bs91pz5ZuqqqmYEb8yoOeebm5vTcXPfNHzBNMo0hdScf7WxWuet3RwH2Bl7R18AEtnprCIyc7W/Av1KTU+NL2Jsz6HWv7CcLgzLi4VnjXi5hVPaOXJRpoI6GY9P7AV+eZpEztUY5/GKqKixmxM2Xs3uu/G1t5kvzum5/dKZaef2u/so0zzPMi4340AajUS1KyVlNH6Ikb7JeNgto8EXuxsXLRnrf4jQ6yfvXwwp1snPu3Ua7+IoKYFmHaF3c+74mBVMb/RL1O6OnzSzyUxqFKzYO9IWxmVKRTeDG8q9DxYABLCTLiSTRW3A7toOPZII8H5goCqGzda3CRF1wROPsbAPAUinxekVa6MMxpx8EZHe9ihHS8gcG2fceTHsQ5fb/URg3qVx3BPJILXWCK0vy7I5ARCnVugeDquwWrezpDNLpciQ8a7c9WV0aRO+5NrRg+6PXn/91Vdfffvtt7/9M5/BktF0y/8/tAb/EdhnAIDZd+mNziTltBYSIumj+/Jg5v8Pozp0pC4SrG+myCokh4dt02GosYNiC19oCGjcNuE2qmMvG8GZEu058+6I9GY4RCQCz0O4xFoz3bUdAITIe4R16H6ASOOvzQfkNA/dVTfjwEJ/k+2Jf7uOBKeRxzR2lpnFIbuP82TRtdSPS4i+mVXdZmVpfNdaJcXrRu6F6dmb+ThrS23zR5YUhdN3TzgcN8xdGvoBd8i4CJsJyHm/3HRKmp8WsaTFSonnIIgmKZG4uqMVSJPew2S1AkbOxM6chOAwspB5ElXP3CLp3ENeRcOmE253blELbDaHdNyzLEtw59S6dKHGOedt29C3SrfFGcD9/ZGZc05EVLZyOp3hvK4LM5/P54fzSXJ6OB0/+PC9v/t3f9bdP/mp73h4eG5eX33y+Lu/87Of//znXn3l8cPDwze+9vUf+7Efy+vys3/7b61CRJyWXIttRKfT6cP33n/77bfffvvtd95559133nn69OmHH354un/eLNhOzVprRebT6bSVk5Z6WNKTR3eSVyIYnBSQETy+tmzHmMR1Pp9j9HdK8W4Q7kstKjR0uU2dHwX7yfFyvSFWOcdBw+2taJ7UJJSMgscJbkHNClO4UYSRc0ruvm1bKWXJCxPIwa3AlLoZObt5q8sED690YgFf5D5Zjw+P8F3rqfe+dJAqM6uZwLY4KbvTl8FjW1qLuqTZjTLGatckJtD5vM/71T4JWZBSI3oefx5+o/HGtrHDc8wN/OOh5ZgFVvDicsalaJ939BzpopGZDZThfn6ZA/tqfn2Ksrbn9xuGtJlFyosr5KMEzjxi4+feq1YgpHM/NAV0PKqJHmrma/fqxeJVdwsMWrjV4htmoU9STJHp1KM2oRRmlRvUa/cQIQLLIiLrisZsXIbVJCIiaQcvjKJ/dt3TcYWlh77Lcs7rug6DUPZcaK09mDBWoDRsXkvEHUs9cvyeP39+yMubb74ZcYD4q4i4XQyydatv9D0kf1ieWi0EvlBmZhFQeFIcndPIAzm2rmsxJ42aUUTTXvAm80FEqqED90BKp31ycgEiTXdZVhGjIJVhjjL3jUHn0gBD2KjVnGt8uCzLKCGAaX+NI/NiQ/ZI8vjr8F6xN9svxiGSOW2qzznWdbO4bM+lnA/WGPlhl44/lc2aD+dSYrds4jE+ZC1m7xfX6Iv0cil4wRyl7gi+WtsXHyKENTFP8qQTmYzhilsv5NtkpMU1Dpr+luvkvXnorFPUDFl3tfGHlIjI6egALQuCWbjbitQtQO8evRmjwq3+mNOO0nMzi+bS1cVcti0IxK9adfXrNFQvEWLoDjtyZzR4d18bwIU93NlZqmLyq7p7hDV56fBR7ZnrcVKkYAELnJ2NshNEF+y9Y/o0io66B9p89FhE0ItOz3I75zStNJu/EsKwj33rl6pHxTkhilw0YiKyrdzTdsPLDVIiAii9kC/IPLFTxid48RJBVbC89tpr77///vHh4WZ9tWzndFhfcvM/oddLxu10Ork7A+GmL9XWEAsNm0BotVheTnf/y17UUNTXqkJnKuNhCobNz61cB1Fj6e91q+GAujPcI6SBLuvh5k7o9ai7KeUU9KVhlNlw0AROPx7pjT0lgOtdClH3pLi7DbglE3sai9C9LVO65PpS1dnVNU6Wge7pxuSsd+2gyyHzh6y+lNg7IAjNCusAiiDN6bbfSybiUr9qmCWeOFeiw9xLMpgZwXnXiS8c2+Oh4fIJ9XF0svv7d8Np/DDM3/C1je4RCTfLQJqy5LVWJTAxDMNfLhG6i2dwELcTm9VwGzALs5
v65MzYB3Scsj23FZiyPkJSXSzTPnPzURfCtpRSioa8C926bKrVbaFgZrp/uP/ww2fq5gZKElKUk+R1Wdf1F7/4dx8/fnx7e/PhO9/8i1/5hb/xV/7bx48fJRGzesicGP/h/+3/+sprr6/Lzd3dHYTf+ub71LW3yKKJlqSUtm2LqmtWS4BaVfWZbYAnYklcze6PDyC/PaxEpKZWQL2aM5zhcK+zL1Agvb+D18j7D2Zm6AVYRGTTytOgtfHsGg8z49JNHrWh5+EFQOAkMjgGNLLXiBKSRtIFnDmZqhpiuSrczGqxWkxYifag+aAD8d3qu9gYY+2FA6bWCjWaahu2dasNtZVTirOzGb3W3MOzacR99VAH+6kqs/btYLVWEnYjVY0s5CE+hmY5yZemPTctv4cZqXv6Z8XOrHGZdFqtbjMQRWGoedvycJnPgY5G09DbEBu5eaB3ginp2h56SsfY8mOPMDM782UYeV4AYyvtPwBuhiAm6f/F58N5iBj3bgDLtNhG9nY4L6KXo2utmIfbVXtC7NDEzjLQ4GNtv8RN1rQcAOCpdsVYSOZaa61awxqMx3LwMZp5l4phsKlqMTUz5tTSbqntC5niSNZ1rCHHxuUdJ9kmNwaEmg/ywiDvozrUslmkR5BQVZk7hhMQkdqIRlNfaUa7l0RSWtwtDELew7y6shBRInKn5liRRrpDk6kDxJm3PzYQ6GYay3lMYht878aJe9RbG1Krny/ZipZSarEZtbeP10TkG+LUCOOYGPuOLs/1cf/w1I4xbA/vccjo4It2C3ARIUQguLsXY6z4w7KOE7kRfhOhMyRTV556QGmnNR6Ls70IerXX4oZLodHMUe6xphBxAGDmE3vTGBAfHhZ6yeDMzXDbT0wigupVI2NE8BH6yljJrUmXgnGWHu0T3vt48Ux3XCB9plPp0gk1xmce0mGbJRG8EDJtVKOXzSYivsxJGyNz9cnlxMU+JbcLNX1IgH02QcR7kaNAsiSWCMWX85knyjEaCrFWYgYaWBTD1zXKYJjRJZCkb6uLdqrGyUxjMGPBJ9/3xTDzY8y1MyHP8so8c0uoJEBBRm7iKrV4PXo5Ulqb1UfXjstfLt1u3EVWttc/9sbXv/71d95559e89mp8/CLR5z+Bl9FVPcZ2aalN5VgW00TVJS1EJJJbiYU4R170dPxq3h5x4Rc/pmZzvsjTY50fJf5jR4nlETLJ4dRAE0bN88yjuiCorTF0sHA7Sc2IOj9qlxtOBicIw5108rkYAZ5S4v5YcrhT2B2YotitiwBRg+gH3rW5xEfSV9sO8zFxPapDgM9H/CTVfZYM6OZlU+zb3btiOZ1f13ixFGILk2aMS3FgZmmW8rgWYV1A1O7q3v2jqiqXitR4+FAFxkuJCISuwSfJS87Z1N3rzJJnZkwpSC9UtT3eHUbODo2qIWBhgHsN4gEpbNHOuTGBERr5PH1kBVAiOp2PAWQidquh7zWqn/l8HZPBtLgVVau1psSS0+H2Roq+++67RsjInNiMyrmWci6n4+NHd3o+fnh6BlMRWjjX0/396XR7e5uInn3w4RuvPXl4eH4+PpxPz58/f7i9e4VAZkZabOD1icp2rqUoEZy2bRsIN4rax0xRJ2PbNiYkJpHktaoZKUuiTrrrKS1WDJ08WrptllLijnIZg1lrTWIAqCttoeAAIPLw9TlZxF72Y+zC853cL5Y4MwvPXGS+x3UAYmHAhUSE4oRX9+YEkswopaTLmBt3ADA1jNxFqGrahPtUCvZlOSyciDg1JGGE6SZRNjYndxgqNcjZyKnY3fztODS1KTIwb5Nd1Rh7sId9OnZ6KJ08Ojgrpg365kYeldCJmBxNYb1+3WQijL8G41yUGPGhe2snpSK2blnIJFCoq2j7sAsPB9BUx7KPw6TwzftoVpXmpl5bwrsi1RswzQsNO60/WUTChu+xJhlLHd2BdTU4Vz8PfZGZE7GOovbUJJh3os49iuX7lNWyAUh5GRp5rfVUtlOJ+uwq0saIWYC9ZO2QNmOch0IfEKzgIYwwHUcB2B4F4o6NZxHuH/olZrWrfTZ8FofDEvLcphBE8DHEiTNPdPMcYl/hbdBabiChb0BhG+GpRJmImlfYWlZXU4h7ZU0iYgos9O7p3PdUSmRFVbetnmu52XMm2b16VdNxTgv2Lc/DwxLjoKqRqD9vOncP0tEX1+FcL+5S/rd7uGeHEpGqjxzIvkFGnuS+PuejZBxS88OJiAOV3s9nIhoA9ti+tBtvFa3+Swu7tO90lScqRXR1nTmYfjtLATpJjGnUG5tZQL2/emfcnZuNrgC4T3VN2xa4YiN0nwotUMTtXpDMRD07qH1jYuKd70ETRhT6wHgIEbqDb37yvNPn2ZkfOORq24kdSLyrCt6me3Qz6uuMPU/djqXmloK7U6N4bD0KSRhtDdlECBJ9ai74uTB9j3O6QYjV1K3R/KArMRSl2C7Dv2Rx+hnC74ZWkdmDr0/N3aV7tYZ8wOSuRfcg1FoHsG24/Jg5AgPTim0AubHCbaLnrbVykl4RDYBDFSiOEyXy8mDnlfMBiYDFAQO/zAQcpgJeNBG1Vgn0/s1N8Dx/6uEhP7p9mR3yP15ArGcytTLORHgiqiLSSjghRYxuip1j8g//Kl93GSpsgTdHZ5W2TosQka5mU7m3asREYjAHN1cykTlxc/ZZ6AdmNqoLdsoR2+OIwUVF3gsSOQ/zRAhRhgo+neauasuag/YkBIrHQ81S4rHUJ7tul5De9IHG9zv2whyBd6Na6+DwC/Xmwu04QUkBuBtPjnh07Wj2WvaDtW3kjnBpPvThTk3cMOSdSrh3e1juQ0h6sEhNaVKYxM1g644WDOMq7eQZs127jxNzy1xrzlADwFGXuZfMcbNWd3IsI+6QCYYMVQaIAm6IWSelRowfMssIEwnEuKjpXhj6UCjftbqqq+q6po78aTA5VY1CAjHoYhKGjZmdTueU0u3tQSSrainb6bSVUl55/bXnz5+fz2c1O9wsxBz8CuXZA4BlTYnIqtat3L7y6PGjWzOLStbf+NrXP/Vtn3733fcT8badyMipwdLavBLcqZRSO1qpamURFnb1stVt2+C2bemwLEvmlDOnzARKQsW9BRv3qKmIRKwXPcPTcRG2YhLu9PeqxV2oh9t9aJYEN6M+wkS5L4J+UOya/c7KPZY7R7gSFhR5aDEE75uNOq39nrkqYGNUrXPsYmyPKw8K9SjQknPpVWJEGk0UtZN4zwEbQTkJbDuRoB2HYuaTo3ruHfXwgnTG8Oyec1b1c9mmLcDzN8cpKyLWBdlYnOEoHas3yP2m3rVfu59kpEF3tWw2YntYYrj6hi9tjFKLbo073SF9AAeMYQZ+mKubmcmlSjf2l3tUiCUAmBOJL3WXIWEmiebuvgfryKgfRETNZQD3CJvPXRhLN4aUqo7t0zIYfc8JHarerA/ZlFI4JKG7Ry6S9YSEMYUtCc1JRFALEaW0xHprTZLAbfbScMSDxgZMBNEegRkNaG/vOYQXoNBatV/UXe/Vbbn0U0xUlbsMvEo4lLTPe0op5yw9yVZVl
ZUS5qnxl7DaSBhvRJJSwuYREWeylNLhcGiEKHEoT1A09OpJfXhBvTB97DCidigRyaCXYmbzPXMmgsA8FV5nvjg1Aoh7ZWZTOA2nmu99Thu44GophkI8RnJeb2MzzsVImh95Mub7vnS3oCGVedVRR9zMIiV+7Rnso/0y2DtiGY/tA8Ddhhk895eZDfsn4/k+BVplCkm9oFv4sNXHuNBkm40dDVxUk78SCEQ0+Ne5J/5RB9OO/dj2r+5wmL5gmny7ePIAgLQx6XofgJexf80zO1/74AwEkzsTjXS7qGxI/cijiAe+6DTH3trxWA/B1X8ekzv+6u4gY2JQo++ZXXXzhMpFQqnFwuoSMvhXPVheHdgRHmYIo5o5EOyBrg/3OQAKHdqMLnM4RwOYOdSz+RNmLnUbYrPPERG1shM0FVKjAHRYrAEyNnYfGno23cpRZZFyoJSH1Ue+s3rO0dfLAu4XUwyAUgLwxpsf+/DZ0/c+/ODjj25fOun/ZF82BeKgqqFFp2YokUgmCEEckQcUkcUp3vMP8e4RKuyF7M17lcJewr6BSPtbmmMl4mqBdmIiwIJFgeDECXCQcyTjujt82v8RrgBMwYRewn6X4zEO1Vs1YOrZt0xQAOy0YwbBxC/WiZx2x4jIjcLLkXtn5LMvqYniF7SmEcobIbddlwDcESWIhzAZMmGcdP1fuohjAAFoG+1Mfnm1rlDj/aOWsddapqrWXZuz8HL3w+HmfD6PnJPxqBef3wcRvf9gZmupy0Fzb1YNXgnsTC1AR01WeVfTk1li6WqWRaWyFnBwLqVgOl/38TX0Cou7YzK4KN1bAsysJDE3uqHQ2FJK0kNAfdC5TxtUNedGRlrUiYSSpHWB8MPDw7Ztpdao15yYk0jOUktjAoRbKSUxMfOjR0+27XRzc/dd3/Xk137u8z/90z8Dlrffe/fu7rHWYmbVzd2Fs8FN7VwLEQWRn1bfaiuvt22b53Rzc0NEcCuqiVOL6oiY5SDZsc5hSCTDdgoTt60qj1rVFEU/uTuUAZgGUnZOY9DqHN/S6kI0JzVdqTjeFEpvR5ijZ/o1Q2bWutwpklQmbU+Yyd2tNKU4DKSxUYkoNN1gU/TdHx4MMTHybqqemuXWCYksGQ+EZN+WSDkHV63ChU2IlWjQGLo7T6d1bJnYwGPcwjjcapkHY6zPEWZvUaaeuDXyWqlbs03jnPIx5iFNOSHcb+7tBjdTTS4X7+u9i8KV4F0puVLdduEQ2iDt3+0hB3g4n83CSIhh5A423iHoTefY0eNzS8aLXvwVPZzYa9xbD0k5MCmncTbRFSZot7pbM4i8izZcBDFawXTp+ZMDaRbRcuvsnb3uEfrMo69wDkkYL+prNZnVnFeazgAiTikvy8LMwtm4DC05DDyRXaEXEe6Lf59rt9l85ZycSeHSXz0umy5MVq5PpkjYbHd3d9ZdjERUO8tIrTU8c0NNt06iGwbSWCre68R6Va2uaiSeUlrXNSzn7Vy3skXer6q6ixlgzQBTVXdKqSEku/0Ti4F9Mr63Ush8GIix+GqtterYTdEkcjAn1bNOV590Fmmqhu3YuD2z2i+DtD4f3tNFRNrfO3iqrzIt+9R3/wgujvC+VHhEIMfn1iKWc/3VkU7PZkaXzuOxX4aPA1MQj5P4ZYgyBiRK7DalpGexICdc1lYeLxCJsgpdRRjOFxnOjotNSMQXH7X4WRPXGKEib3hEmcykaX/tEzF3lojQ7Y3x4Vj84T6YP5zHZ556dxfJ4fjzqcdEZFXRw2vz7Ez123fNB+g11EYbu2ct/ouiq3scweFePZAjvlPpEpOVenl0jnc5AdKrILZdTDxYrC48I9ZsVjOHERG3agsKCEtEYFgACJNa9S5bhlODuiNVRNR339mYwXHYXU4TRi5DeJ/7uJHWChiTW2BPwKDQWSspvBypHLHcQsThBpGpzMMIsQ5Bb5c2IQGUBA4wodSbV155/Pjx/f39IJX5Hy/vKMy4qJvZidhDX/IG5kpLppTA0q1Bnrk/Iprzj65dQbPXK9d74D7DJiSHxiFJZFEgazioiRAEY6olAa1QYWTMRGM9ETRyfpr/LEKSGuddl06qLQ2hk7F5aj4v6uzBZj7cNMwMAbtbvSCRGj+LdJylJXCr4MCdIrWN4S469jjB7LukieRlPpIQOR09gV9E4nwZRAPjTmYGJIq3TQ8R5gi9IjHztm3rujIz96BHTqm4WqvO5ADJxFMHIBjtzufzMJnCGnT3CJ01QHlKFIzeE3BORM7bFs7xWitgoCpJRKh6TcjVKyes6yoi51pKKUOJUVXOaV3hvQaUltYGSmvOi5mdj6dAT5ETszAnkACmVqqWJAQgpwVOpW6YcA7MKbSHXpMwcIkU/DHLcpBEtVbtjIXMfH9/7+7gQV5HTii1ai03NzeHw6qq27bV8EsDh8PBW4xIzex43NjFHXrciD1zMtA7777//nsfsuD27vGyLM+Pp6JWiqZl3WohH5WkUL1YKR6oaTNzr8GEwaRRIAVuZhsArcJ0WLLBT2U72Prs/gzTsNbipGy0FqBK21hVTVOEpBR4v6hZxe6uVWutj548Dn1i023btrwsIlJUg4zHTZtBAmUHIApyEvcSuNZgy+1nlfkENm6KC5aUFlU1Qwqqnlozi3alqsmPfgLZXmJbt21zNDb8dV3P5/O862LST6eTmeWcE0vtGlKcakEddD6fD4fDsixDe/aqDEpJahx+IIkFY54mQGNKiVrQu2nP2mFLQd6YuuFXu7Klqg6UUg7rqqqllEQcCyaManSXTynlfD6PhTrkwkhE7gU5LIuklIQisMKsF86aJn0cIyKxPyTICbyxaA1dJxTHm5ubotWscaWE5Kq1StpJdEJINVMcplARWZYlDAiWpKpJJLbSEGcRmwqlYVmWeIKNBDat3M2BmGjuZLB5ET2RmQdJ5rqu8UVErTlmFgmVr5YQR8IigUcYaX5Dsxk1lK+M7XEDM0ceOu8BN049QS6m29SjlkMMkYgkInc/HA63t7cppYjaRd/dXXI6nU5JFu6jp6qnMvajB6g+1kDYYIe8jDtjCqIjtVZCyXll5jqKFqzrs2fPQlmMO7dtOxwOwyyMh4R4J6Kbm5uHh4d1uVXV29vb08OJFKOIQtwZJsSIcZ3P5xY5b2iFLaWUBUtu9RXWdSXzdV23cnr//ffv7+9N6bidMVJT3M/n87relFJAEmEuIpr5QX2QPwsTYGZbLfErM2v1ZVnqqdnk2+l8s95Ff6O1y7IMvbZWazWqOvAydKBQckL4UyfQQvfsBGbkRZtk5FjGaTgO43F+dx26eeWK/X/J+9MtWbIdPQz8AGxzjziZN2tisbgoSr36/Z+oB62mSElkFetOmXlOuJttAP0Dg23zOLcosoqtatFW3nMjPMzN9oCNGR+StpvA4uT2h+2MkEpgCN2+X52R4WhK0uEaSUSZ1QvQR5UKKWHVNmJSITJTKymztaFNQ34DOFFG194GbdvFYWCmF92FI+8+VS4zc8uYLs4w4JJXX5kjPc5md7xErmixAM83FphE3H1auTiRQvt09+ae
qwFyd6vFT35YqWbnjquBmUqxo4UX54EybZqJ0ujYsHzswrjz/4GxbaBKbVvihzWvC/deFTtJJHvqGTEockq7k6Q4WIS3Lfxe9u0jh91R9uNoI9DdRbjYb5YCHcexGnWrFR0OLGYGn/Kidme0errGaZnBxMRwTAUJmMyAA07bTYzm8fxlyEY/CONumPD7WQP9D9p01dKdI2vUzFjwr//1v/5//L//X7/93e/+6q//BeAGOwM7EVt+dZb+X/8yt8CPN7NqSGLupqpvb3efT58OFlPg/Z0hwACIIFX29v2N+H4K75++Mne0qBsgd4PHmeUyXTtO6NFx24ghDCfKdhQGRJ5I+QpiVyvjGp5WQW50o8ZtW6xFqtLtzi7eMg91V2GEbWxT80S76dKkqlg9tTMFZUrEgTbTMFy1+IBWX4M1T2SMAZCfNTLUOlUsUPQlajdQ6LHuXmhkqRKH9vVyVPma9nKRX9IpndeopSQmdag7sbXpg2+NpF3mququMaVwucX8931/u99zmXByh9CZRFqNUHMj8unzhC43Q/U2jFeIQESEArFgmk+Ws7bBXfWIQBbGGAJSaCUquLBEUDW62iX8KY9wlF2t8CvmRPl2UwhFl9pa1pimLlgI3x7P0Nvc/Tiez2Pfs5z0Eu5oLml7JF4wuSvgamY4oH/913/1nPrH3/3+28fDDHJ/CwYupOEVjL11yjqQIxM4JEonPPBVqfsrahA/UVQczcbK7dUOGx5UDZTcVxQPd4+cM3edhWJaUJbcxDDGcKa4Zc6pkRNyyifdNnH3cNC1Hx0AEVSR3VEi+v/ilTenqZ4kwbL0aiNzC3uHAD2c1EHEkfXEQiwgYTEW5WipAPbkBUEVrWQwMh2XR+XmgZrmiUgqX9cJZB7KqO8HVzsBLGpiQy+0RwMVtTg003Tr1NjKU2JVF20h1zD+1je8HOakiNfktKBOgIxxiR6UqsHkn59THm5mZrbUNNLy6X9b/+mbEwss8gsWAusb4D7doQZBTTx4vUXELpKbAJR1piVc4gaKf70S1EN7422zCnS35dzlPbxAROQ4rZnY8Ci9iRRUSDvR3bx80IFF5Mh4DgEBUpgetGYRESdsEzGYQ3vuReTx+BBiM0zP2JpW/M3doxKo0h6ZqzHASgx8RRXqwxuWc5gTxb2Tfwab5eWCMJzJxCuAELb6VpdXMV5bODWFs1bhKlE4cj2inS6RVG1Gunr9PM45WVWd0/RQcwqjz93f39/f3t6jTVmcaoCJB/Mgckem3Z6tR0WomExI4lg+e6qqhi+uBznnvOe6Sgk+MZsAtm2Lwtaol22i7WzuJvIms8tJ7+PjZ3Z9na8U5z2MMAg9leMtcLLC+6aVRB2CdT3WbqSHrXHvHhicO0LbpxdlVXJxjPiLRWIhn2x5GSd0zst7M+yf/TNO/alIq+HsqPBDkm1WdB2lWrg7VB2zD0tSJqd8r/ee4d8wMqnSLq5Leq7AOjtfPO69EVihsK+rZy0O6kMgHWR57nDp0ee1v+wAXMmp4QF7hFQt7cPRidBmk5kQaLCYJgpNsf0M2stgoIS6G9wDOgZrluzyb235VSV3DM42jFTcIAOAajCHTatGFIMYzBkJ+RSJTbBFz8E3tc85CSccdy9d/DXhl6m5Yhe7vhbwux2WfyKPrfeAP5u2wyZMbZJsQ3D/aXTlkFfV2qfrJUhocB7dNh3jfvvpp58ikgFhzqyy7+Oj/Pdw2UI6lLVBQMWZq5ArjRwogMA5/1OZooZK3/3HXQQkcmR90pWifBqbHvc4hXUIJQjQbeudEm+GqLAFSEacphq3IFy4x5H1MuEEi0GUW3C5zjygFAcVzcs/cyQ+X7LE+xQgDmPefLqlQkbk2TFzj1IdWU5WHq6AL2kDr/JcLqojgEjHIqIxbivu13n6rAHJci5xnYhP1v7C4jUhyDO+oWUw4LRiufzEx3EQ+e12WzNZqcIU9YqTp4vIvkQzPV1xZhSw7aZwNt0sIOzITJ77PsYInyOZ2zGtfKXRDNvVpqXjaoyByk/LtyS30qmzk4WC0Rm0lQZaXI9JL4VHUuATcJutiaY6qGGIhscxzP0hg5xgZk7Gshnc4OnlNiJnIWYmvW0A2I0YDCd2s+mwX759uLuaE4khcGRTU3ZASzk0dw3R4WTpoo1m3gidg8+we2wrwRnmLGwkcbQ8awzI4DxOG2YlFzMTSaDh43j4GYg7k0ZCrnoWjheseUKalICvE8JEwlt6BGYiE8K4lP50DBNXQma0xO4ufJodjUPONWm1Ll5OlbMEqw/MKvOkKrvQWZ1IAVYuUhnEUDO3/GsOrojEfaret0GL7iUiI+Kufob4m6pW0c6ZmDeb5azk59er13AxOc65xDf65yTXxH8Dy8X92Yc0zLxIhVq5RngDzI09cYvLHAIqRPBilL6oLOvn3qA+C12JyEY0zcmcLLtiTlC0PGVH0BQ7BCSgQWxkIiIyPcM4p23g7uGWYma4uqK0EBGxWJpWkWHqWR2hJJeCz9KETi8PVxJBUyZXSI15BCkEFdE80ZKI0tILhnOmSyT0S7BN2se4zQlnTZSPKlM0i/JaL3jk4ziIT4iLtAPh8dhIi2hnzbkmBBGJFvNpiaFarlVxbPysqs/n89u3b7/++uv9fl+fg8qYZT47kdiJiHv262vz1QoPKjiMiWe02W3OyLrp5NtNxEjEjcyeMc7jOPZpxJs7Edv9foKymBnqlE13EhYeIsKy0Qk/G0ZpwGkN2Lhvm4D2/dnxmXBhrSeLsgfncgoWJ+l6Hi/W13JUFxGO/uJa6oxFwAPYajHzT8Wdtup7dB6rOOrLe+l6nF/4xsvgaZH67t5m1+eJvDycKHsoY4lb9l/503j6T566xOm2MDMZZ6L4GAMcrr9Lgnp/vQIQl5nGTq1sZH0jcSeHZYwUtSxegC69SsD56ssrzgbrHMG6k1dXTeTFBHN3uxjq61DPu9LlSiTCyBZ5/XZvrXFRH/NR5P2+/lPfcNJJuXjRkc8CuSLAKwTj7mROntDx+BOOgDCS9wVOZpmrqeqQi6Oh92LxVbVF3zJdAtbilBF2EDPAWUHBLg4WH8yuavZQd2MByUaE2w8IQUncqY7fzVPsmZjZYIEjzcIx/vJf/NVvf//7j+fj9vYW4BFmJvSPt2H+//hytMEUYnJ6lnI0FrRs2wZeSwcJZxeh8/onTBtFkDewvISXN8SHmVxKlHBZDAPInCTqfz3xQ4vzmbuj3FjVOJTbffzqHBdmWrxmSdsRCminDAMeWupxnIqciARGDnE4ryMKY1xIyFg8rTm4DCNecCVwsrETpQWlAzRrWjlhaA5mJrLVE0rH7ss5WjqvC57dDGN0q/riOQS4O87ut1gHGt8NFeT9/X4qHGUTVkIgAWyf+sAiNXhxcLQQYObDprpigddjZmYFGci65BQwMoWbM1vivW4ilNmPoIkG3onY7dznfugMq2ZdArcTQqAGFvCkUS8nHUEK/uwFI7GO0N0BckfEapkzktPSOKau8OzqQRQd4bao3YJFk4oAEDULOFAJpYVdj1l0Q3AE4ArcXeGWLZe
LpAJiLWw/ZMYR3EOop7N9DEKEQoSci8pDg4zT3lNLF6Dqhe+3EZgNqTnVBfMptJXK2J1aRquJy+4TETEN9aPPm5kxmCCRKtnemVVmE+Bu7tWLjJJyAGOQywaEqyLNeHePjOXV7L/Kra6hKkCXRfq2xhwWCzOTEwkbwOqNPbM2dOqTSZU13YVMfb4WG+aiyfUNlnk7+ad2YZx+6OQm+VJfjJllGOn0NXcOyq9kHjrPYG4SUGkzn64aYX7TS4dru8gLPRZLt7p1nfsU5OdFD+3rWpSGK0deqG69Mx8TYOtjsKTpKyJ0pmGsukgCebm1oUhckBum6MDjOqriUSe3XWnYEpis/OIL7nF/xd2tGuI1JegVCWaWNtbEMOc8Dj2Ow+4bFW7QnJOrrGjWFTGOThmV7ISuRGTh/7le0218T8OL50+1X3755ecvP/zN3/xNHxBbKkJLS18VdCHKSrn0phlFG2B3iCOUUnSVYyY5qplFiaxIQGPJdCUEz7sRxGDEYgYQF7hRou9kk0OR6XYjMPM27iLSNYTA200GEW0szEPVo4NWrtoR4qjqcnOb6HMEQyt3Xa4wod81jYgoWM1KsX6NrhR1LfG6q7XTUZe+f2VTq/69GORYN2s5/tG/MdgjEaX1BbRIz3OUXICI7GrXhS+AQMwZNA9ZozmAMQau5LQekM+fdET9hYo6x7XL8fLzxfi5kOxyD4otuGcvS2Z+ce2vzPbyqHCZlf3j7pHLlH5ApFnDV51hHTkWgsnPvT50j5zS9NRGkJAIEXhElDLFzUTMZAaf5dZto5MACn8yzt9Pv8VZcn/OCdCI1bwuHemJDpyaZqmVvSm9FFjI6WUX1iSFleT6tvqVrisHZIOZ2l/bQcOMjYXA5JluOkTEDrgxqc6P/cEQ3gSg3yB93OSuRuypE6Zu8HJ5WBRmJBLS7Yeffvr511+/fftGIuFmygl+X/T9d3B92iGYzbmPCvCGhL29fcHtXtZg+Glfv/iPtwbzAC17QRCQXY/zWXoXYKdOWr4n19PMizvNwNJfd3YzB/PShoGiHjuiI4V4GjQ9mEHEpYYVzTMjK2yBaN1BZqe2huS2gys3ldJcUneYIhLQUfzf3cO0SbWQaIyxJuidM/8EyS5Lc/ic4qIKLsJIGjzGm6k6BePrO0ek1eJ6ntcTzpzpeVo1VC/ZrlRab+SIR7HT4/Fg5ixNZPTc4hWhmtfXl4AGk84Ya6ydrf5pd1c7IuQqg2i6mbKL6mEGvjHzNihzOLXgYUTIEfXPSuRvb+9RQNJV+znrBX2+VzBAVty9bM7mpQ0pLmOkXtv2YfRC5CEywsAzxaRRnvgwPFi2cRvEB1T1iPry6LfgTA7b3u6Px+Pj8TQzIrltohY1Zi3quJVSq+ifu6trrvYavfFMsciAHQ+3CSAypAeFcsyjcPqbZYch16IuPuzm5nNO3phl8Ni6ZibACb99+1ZvdnPzdmS2D6NMa9MqEelTl4tvAJyMOCBnhEX6W9u2tS1EBRrhpX6ZGZ9Q7K96QJ+HleZbb+tfVwdMnwvVrOhTt+cz638kysaqaUpq/wkwd0GcbwKLjLjWjOMiOiG/67CsHoemzIzDtDZ50eFKxWNmyh6BJ4M8p5OqS6aBpSpf+kzDjZ4pkXFKSsj76Z5I+zDvJ1YYEQlICWGXn+4Ad48s3NBG1ALGpjeIv58H+2rVxBIxxDhhIbZb1nTpPFQHV3XfuvWr0pNaI4u7T9PGPKh+QSi1Mlc1WNwL2y16A/OobFmYgdnnnI2fyRywbKlAq0bLdLNcGCYeJDHUaMd7AW+kSk0P8mk/30rXwYSjPX3coGZ8lurUMVmgRgFE5kUHFaMKLdj7ly9fgk/2ZLMcdJP14KwHMCZoCpJTIPnRHL6Wwsnd93yvE7LTsVXiaxNwHTo3dyJnGoRylMRa1ZoAfZQQVPTcZ/h9Y+Tzke7LITfhjcom9yrFDDAwu8adiCi6JKyb3qzgu26C1YO7XHy73V7IL/6wtq9AnayV4F/WeX1oH0MsjNquXZ1WUvn8oovLQzLJ8MJqYtbtbvyUCmUv823PVHfwSPsHUgyNihWENRIpo99btMuCv6zP+pV1xSLSxcwv/L9TfD+v4ctb4t8tGtmXAw414E4Oehl0Ssmy/fu9CMDnBcwm5XRkVLdnoV9BuLwUqTNY9f377lr1yCv31aNNxXem/OnrMc5gCGkKFvX115m5QwtNbyt1NW/s89sU6Ipw31wpJ2tWmALt+UZEmknbcLhNI2AwIGA3O77ZN3Yo/fAvwNGDhKnCNATBn7gC92uabsHxjoPvtx9//PGXj6/HcWz3GxVefYzsO9bRf1dXEICqqt4TVCaPW1RNlzVYlLlc/4SxQVr7kp5J8mt48PzZHQiI31ThQhMkBxHS3DKkkUiCtRMebPpU9UiJR0FInU6Q+NeKKKgM1hCm1A2l6oaotihvvrW6crt3/14FoiEWE3mtKno93RPIJ7QEJLfhF0HT3DjO2YvUaCa5MvMIUa6fuLtrdueKA3siQMQVuDIi4jOiD7SGPlD8K9SayLMPLcQsczhb/c2iyaX4Hlc+Ygn+Ti9DXKddDNA5G40kU2z+Ev8KPDLK2kPs3mGi8O4lZuAaq2nr3N25SttfGO6cu2rHD9OfHXAjASvKHGGGMpYGqeo0DDMJOcHkC0y8uwkNkXEfGzO7GUHIjAXbJttgYLjrMY+Nib+8u+HQ+dgnAUNoqgAeUWwjEodByUMaRfsjrmw/AkCSAso9MJnIFK6BT00OJ/Ngp72ezf1rL8R9EpF7QQgsBvCcM+LAsZKzcokDyTf3FGrXIPi6BShV0stbU7QedQVFyOnsW32QKJlrqqrHDKssCl9baBMRcCnEKmo0APex+dSOcJ6RkDrSHe529+M4bi6QsZ69nA7Ai57Qp6Z9Hyj7KlJto4ajI4F9BHqclfqY4etWQOMlcUit8xzapdIDCF5A+dcsj1pe1FeUXTeP9HB8cH7TCYYzBnYyAWGD92at3z2HcTIsXi29K1M730tLnPM74zQzs9UzHWux8pYuAb3dRkM2+HWQFyKMT7Lx5KXUc11Gq/T4Xt4XAgheGjHIuKeLA2nRjYjS4dVX7WQwB3a3oFUQjXFRzftQxDjD79YPb2swGkUUPgL1krr7XBBGXhhyP6dgKSisxNoOHmPcbjdyUt37Wz22RaM6SyyCSvdPhlNYejYzH1vEilGDSI7jW9p1jjkniOc0Ndzv700b8M5VhgwhmB7+nMe+72YsIvf7/fGxE1O2n7ITNEhkXXC0UGhNud8Swx609t9LU9DPviMnYb+Qx7prqMreVqC5/CBnrd3ih1oPi2cGbK6ear/o1MKxAAasX6QqH12PQD5KLp4Xd6cIhVWk7kql1MBXOceyCrAc5/NAES2GYhUi5finrJZncpvsfxjUtFLLS/TpHFKt4EqEZiaV0pLdGnp4dA1XXbkhFbPyNp+YI3cI1zO47vHl+Yio3WXdvBIx6NNBftmsdW5+QTRweOLuEhH5q4rSYJkUeBgtR2SJfZU16+6RpwrLpp/ImoUK85YB0CTkJyKovx
B5+z6aw9Mnvl2Lf359WUkmAtOTSJQD4JqMCKAN/nzug30Thim7DzLXj/mh4/4gEfANIdVT+y/FYF3JDCRWxAcIRC4Gfvjhh8fc3f04jm3buKxu/Pd3hd7y8pGqRtWGJ8gKNIpCd/WNF6L7b3gREWD/2T0hCEj7GMO5gJ4sg/BgotLzL4YmAIQKY2YyBoGhE8je3SnUsoJjKlxwClNUcOhktouJZIESnGTlKBAKM0tHerLuM9CS6UsdRXE3SykT8qq5TcugPk2rTK+H2Ovw8maOnO31T6sdeUHSK2cWjzGOmQ1G4/ltPiVO8ZWp0aL2deYSEWkVLlvZCVHl4u4BV30VfnlyVz67ytEYTv8XmagwFUqeFEVwJeBTv3eoQIjAQiDSpR9OG4RmNsZtlTj+STt092qqu2CaBc8u3AhmFqHAdQ0VxytByNQNgGaGqrvHSm98CEBCY/D9bRvMYT7tXx+3bbvd3wH++vEw+2qGMcbXjyO84yXCWQCC7dGqiwfXlmWKIXslgCaRuZmqCrmbCcHJzYyr40MoCrjmIzU9+en2wJUWT7DvcAS0TM41pHMZ12216GbNY/XKxOfMHFFTKreGmXUwas3/bDVXVUmyI1ypSl7a3aln+KLzxeWV1R0P70SyHn8hEqlHW5SiDWYWdatSwMECvhgA/dhah6x92ufRp3r1bryYzZe1Lsq83HZhb4uGF4VnbpzqO2Lztb7exVIR+7NIvlheFY/XAs2PXWbmSBmlOrPxxgD1oJLH/mnYsVa0QFK1z77f1T9wAXU2SXgZ54F+icjqZlkVkTSSPTMzl+rf9LoVNuNJZrUayYsi/7nZ66nvfmJ362qvk+0xc8dvm9caQG4KFg74ljos7uUwaoeL10NWft2v6DxGL5DJzijOVEzOuO7L56vLI65usVNPm8dxHMfx66+/fvnyY5yFE3l8jF33dQWuxHdyxfjBLMj+ouJ3kLIMs0zIF2GggKmcTDEcLFEPDmSjERCYpZx8ZohKqIViqY4YcYZV49dt25hFdffKagFQEKlboIny4jz2VBROFXwlyM+UEH+9j1MRX8jjlOIrnWABj7lwiSvL9dWXqmsbdxSY3nfS9uqT1/G8EOo5C3eYQbKvSNsGPYY8a4sBDGTKKcpE/EwWPZLicuQVHCMiRH7iVb8513YxNV9WHsty9XKICDWCZVBjf2XJrseyRNpYd+1DifujFqhWxsP1luh/V+Mh78n/nW8M3hKRumL+8XwPw4wIXr0jz3khegWhfJ9ASOpXOszdjPwrP6/zTwtrbQ5wa1QM67pHAp8NG73k8qpiYlHhekcCw7wl0csN66uD9a1b1zcIuZfnLuDnDKQwGEYgUe1PYxnjTV19Ph7fvm7322DCiNDKn4wNJg0QsOCayjYAsMhvfvObfc5g+JL4Bc7yDz7t/+LXuUGBJCQiyzlNKVnXf8Pw4HoRLXTzvZcQiDCc1DEcByUwDhGE4MhsGwoQXSIqJJdL7OHyPoSMqbT2cMFk4hwVHkzw2zQC8wnC4uha4pJBLCensoqWOSXcRurYaUKe3vGg2Iv+ExddPeBIhUr7npep8JJPRIvW3ffU8zM9Z7RuZGYiJwrzGCM+EZHuNNAnvBULIopQ4dvbLd6977tVG+UXg5CWDLSxXXJkicjdVoNwvXxhbS8rosfu7lz4hwgUkDE8s5Iyi0gk254RD1/sgVfiWzS8loIiZ02RmVlCDjYcCGkVCDEz9CRcMzPPliMzEPANUSLWeicBTM5M2+BNREYwVvqX/+Kvvz0+9v0Bku02fuLfHDrd6eNh4e4zysRIZnYnCk0zlMjAfdWMQXmkscF8KR5K/SOWYoFmEz+TxNY7e5WsmnoHvISIND5DlR2H99/bjwBUY1D/nHZ+0Z5XAiPvEl64u5pRgpp01vUC4sI2QUTkBJFEvwj9I17SZ6DfGDGW3Z/uzktKc+iO7hCR3tbqVHnWOsJ8EG8y/Kwlc/fuCpJcQK4IHDjR+bQB+tou7UXolW8l/twyu3p3VpdousBWrtECOyFV2hyKTC2uDKZ8ftWp0PWqT5iEeTmAScF0GQAsm9evlNMHlsqAj6Zz7crqWTfrWImk17OxNDl3/8TkEGIRcT3h9VVfmwJRdjkninIyg4hsg1tnen9/7+CYL/qNLbHx9Zm9DkTkrob8Sth7zLzaX5EVLTLCIAwetW1b6CVJ/JYHM5gn88YiZly8SNoA9rT3DECAykQzkmm6cRrbEdhpjrdeSXJznqS7LJRWxSMzz8P2fSeITaNxoY3rOpx7bWbFUF8Nj9qFM50G7j7VCUQcszCDG021jYWIRThc1O7eZdmx7HPfIXJnDnDUyBFuL8YKsRMOJr16UpOWWIPy1idHoJLlVUY0va3OiF7AXmosgiziunHFX9sbYteywOUcnWv72dhbCI/iBK1b3N8yszNVoq784qc6E3fHGkFa4pme/SakzcWVA+ShWNwK7s5jlKWUIoBFwEzw0/yLWko3PZv6IkwHX+wTWqLc6xK1gb1635II3X0xv+sruYO8NFnRU+uqXQMBOKKPX8wiqMizUcSqH+Bkla+bSB1pVPU2lohOsqiJAAhAXrhjMLqstEzNoBh0XGJ1kX/SWHorPxOVme2PY4zBY4CE4xiqk7vpmVrc/ArlWOzVpoVL+yeRHelCvezr0eAq535ZH0rEg/aDkBGZYxOJwmKb0x0sxjA1ejy/GSkJCzGE3BW0GirfuQwYzAEkytsGd4Pfb3d1D6zvP7Vi/z1ehFZg4oPwl4dShDE+m4L/512MghVFBgOFElkU9WGcmaWimLn7F8WZ50zFOn0ouCgkAnf2gQilhv4G6HFEPs16PzHT7caqUIUzqkYsKF9YavXMEQUO3t1zLjoXRERWRRcFExClByjrw7vt0FXC9hrRtZwNSH3vRSKoZe3ScFISP+aTGep4HN+Y+Xl8E+ZpO9FGNxbife5g3O43skPnw4117jqfpiRDWd6e+0EMZgbDWWewuY2IRojD/Xm4QfjmxpsMtykiW+Y93c02VbVpAh0yDp12TNlug3lGFOiYR2T6Mj3n4YAxqeqGLXzOQswQEFSPYEzbYIKoAqbEg0Fz6tTHJj8SgRyDxcwOPZjAhE2yMtDmE+xCvgmePvbjOY/DReQmg1hV53N33iEm22BmOuh2u7nR87FH4zPmARczGrjdBPt+wOw2xj6ft5uMje5vbP7k4e7HbdtECAANG7cbQNjdxW9vm9xv+zyez6cP2bbxPHbnp1mcWVLXqaYOHuzTnQ4HHYGxBFcyF4dpgP3TYGc4bMKV/HZ/fz6+6TQRdmafh5uJ0Be5Pdge+8Ndx2AytUOJTApocRvjtklkviVEjQfoj8McpiQ3IZo6dWpi1iuIaM55u93IjYjUDp/Kt/H+5W5mj+qGxMzeQb0hzHzQMbErHSQC2JxqBgamGzOYBjGISP1wOsDK2NyguruH9lyeEnJE/xlzPabzYExkj3SoqrAJiTEDYUWTqm7b5ky7zl3n49jv9/uDDrnfeLAdAEzVDjsA7rzZ23abb2/uETSlHcN1ym6D9bbRYHvoT
vYxBLwNuo2A2FQ9dN9ZcMw5NlafTqY+jWzcx677/X43Mhp0j74gpgN+6DTaiheBiKLG0uGbiLvCeQJupiDmQUIk7oAC05yyotiJyCILkdk10uzCg6xi0bB7SU7LYHjwG5azJbMCoDvbcTBYZItW7IfOUIOGkLsBtm2imowwFpnYx8ZmZn4Q2zF3GTJ1j2akx3FsW3SvEYA7mDZVmWex+juJ82AojuO579v9fh/jhvm4y12nqivTBg9MYNzGxjSiRMx8EojYt5s8nzszb9tNjxnB9NPAU3PXqKZnZnNX143MzMgHe/SbZzjppP1pLEI+3sYwmTTth/uNiP6472RsU5/fPnC/j7c3chzPSSz32/vHtz/Kdn/uU2QDMYgVvol8+6YABotPMEhozI8n/5Q4gSIy7fh4PG73+7FDcIcxY4PB3URE5yTgbWziuLGQ2g/3t4+Pj33Xqbb9eDfD4+O4395Y7jLe3r789Gd/8dfgm/H45dvzxx9/ImNzNoKMAaLHVNV5k7Hd3oCo4OD0uFVkgwhDhLZ7VIoStuOJ48NwY+g4Dp+7VVItnIlATr7dhs4DxLfbF90Pm25uqiqybWMTEbU553TQGCzCB9yhTmbw57FPZXV/7Md2+3HXKdsgjsqq6djHHTzkmE+QzbnL4KlzbMPsGLIp1NzIw+SOXr2+P/V2uwEURY9jZE3EkFswwMZ9jY3QsCjKKildAUSgIWSkqjoP8YT/gWYWdBenhWLdNYdEJGenPh1D+rH14enQ6fAvqv+KYzaAUw3JAUgP62qw6b77am8Ih3jNX8tUSPuQEu0zzIgygYhQqaEiQRhhaAgxPLJzHV5oDMzC3J51d02jPwzIAtTGxQsWOUKR5Up0IrkD5Km0GawqR5h5zkllvU83irp8vuvj2esc41c3mBtB4eLm7tOUwpyTs79e+z/cndyjhDiyNnv9mdl0xjKijJ4wPn1Wy5zuX5IwxaGU8mJkOuAIpIxFTc+lIIKDeLG9IpkmUJjLuxHNe5ghP2zm7n4AIE7QDbOppm0KInZmMBGNOQTigWTnzsTTDcDHt0esWJBrxvy3MTbJVPlqORn1Cqq6mzIiFUIMOGyq6i5v7CxgwXzzAwjLjVlkVwbAsolsZrDHbma/0f+F7c72G9hv8PYTyzvDFDeDR+kLI1baIiYpvmEJPEREm0EwfBlvLu6m7pPIScIx/jnI/Q+ZQHa9iS4fv0a0QvKmyzWr7+tP7oCBAj6FAYSr4oYb0IbHd0Nkl+H9lxq1dzzN7zu9O8zIhuJmuD0ev8Hv/vLt4+MB9Z8+jHT47sfmd8KtJ7o6Njj/+ceaixo9A5andWuVqkEyUCEjkAPPMgXTu42MGYSv+SCwu3r0oiCYOYgg4q7HrNaCwq5hWU0Ag7iThhjTERKEjIwsGazcwoOL4OHubm5qKnMSB5lpAuo2q/DqTwOQ06Dh7NNnGK6UOe6V8U7EnL1ac/qEMUR1du/lObPNcgMHWEESolyEWbJYYL/BBGJ2UQKWwgv3TZiGjPbwAacLHMC+7+6YNJ/fPo6K9anqT++3jh92z6vShjviesYHIg7jDQBI3hLlInhaoJqCSSBRtcIVYNm2rU9PpNk8ns85p1j1HCNaG3dEfSNl1RaLyNTD3VWn6gEk2+rwixULZWYi70jrsWdAr2VqMDhmdgs/elers4gcdlC7WBYezYtUjlHd7/f39zd5yy3Yxun+//Lly8czWjtssg3m8TyO8Fi/vb3NaapqTsRMZIUnlgm3kWISAuZ0A3Mp9J6pcQcfc07+1IDu7GMW0Z44mgsEazgbwiAcYxxHQIfQpaim6K/CyNLbHREQgrQ94FUTBTpVq/Z8HOThGuEVmwRwpzF4jBGQvt6dplySt5abOBZgjHHWfJZfxJYSQWRY4KybQgApLQ2dI8xTQjeJlojcE6a8fauqGr3RaNti3EH8RO3amdEZxZdGKcR+TLXv+LZXaX95O5dDPRiHVyapRR7vMspc2GutUcDrXVQBt0YKdfdCWW+i9swSk0v6Fio4GQiTHjy7xhlPbj4VrDzGGTU/7lYB2DwdTWkXLdB9jKGaPUU7+IlrGn0ng6iq2ZLia1FXkO4J6+TtauPj7oFGE+kT5tqdNuecUpWovDTFziVa3hvjFJGuhqeA2ODQjtx8RuRqzvl4PGJl33/4kYhEeGxsTiIDpoDMuTc9LPue0UKR7NQkIm9vbz/++GMXyPVJEREROo4TDCNHHaXg0ct+9+dTVL/I2OIJf/zjH5mzJcPjsRP4N1/u27YZAh/eBc7c++hjY3eXQVMDVTGkoJlZY4oRM6JtNTenzYJJATk7rA5aEWQgLefIW+enM0yz3bZBkESEzm6Kt9vt+THVzvwUM5pz7vt+u3EHWnlJe6lgESNZYnhVz36DnE13L9klsbO91EEn+JTkUjSfX4yRBw1sLM0nUVbE0p2VVmbl7gE+1Dyhd1OWmrFeWBHRqnjka/wzb76OEmYBxN8r0/zEP2UhRkTrZCw9KjslBV95l6myGyI2GDEZVeczEQsv41kYXb+l39XrSYs11GpM28nx/evxqfEvK8aFB+hm0Y6tzWBmdnImRtVG+svAAODkmecUaEVPvbCyppCXz8/h4Uw/69UGgBXdhXnuO/uF3ogo/Xc1cQK88kqyqVJqpTmEpW6/kqFKl9OGc6wjGTka19eBKLQAalAu7dIhzkIkFKOmhLITEXGd/XwssfF93+k8+5nnMkQc05VtfvAuIMFdIENw5qSGT8LdyyT4nomHtrGiiqtDnZ9TmP7LLBy/vI3Xzz/d44uBF7OWnoLnAL879Jc3Gv1TR+28tC+vbALEDo5/dim1nsvpXuzCPWIKWNOdFk6QvdYIoWaA6rC7O6JhL12ATvLABpELSUKjxYt95SQ5pDrgq4Kw/ql1BSyenZUJ9FkLlaAZy3rbygN7tFSlEM1DjiMAzDs8mK8QiUbBlp+k1U0DZx7XpGo1vrDaTL8ZY5vzOI5DVdw3FCPwJWej+5AGwGBMqft3cSXphfSy0yPo/RBmNl+yYko73LaNmaepmQ2uX+d8mDFvlvktWBeiJKi2qGY/55Uap6sD8fmcM3IgLTE8oBp1196Noqk0/pjCseucMyA3o8+jiNAkM0MiAVhv7cvGi8i4397e3vSpc+4AbttbKFLbtr29vR3HMYGuqNnnJJLbwDyqjTU4wVZKXzl3xAWksRDhnW2GZIrp8ziO29iIqK2sivjg+XwuBJ2oP7R0ge9ZxDKCbX11aNJaQacYatR6hV0nmUfDYI5GZPHeOWd2z1j2iIjUU9Vb0MAYAKnKkDFGBN9POoSYWRVMWgftwwBotKHQesP8YI7YIat6NGYRkRAMdubHBsKN9Ynq1ciFXQCKzAI6VQHILTLPmYjCvUyneXlW14iIMfsifYlIiJ2sD7yZQc3lO0IiXguE1xfFIC6aB32PbQGY3ZCqbnNOE5CZYafRm+arxHhQKQ1Va+rkDk08XviirZ6ccUlp6LNwHZ6vI/TFc180eTJEv3qU+p6VNfV38+aahSZgb+4dmv/YheH2z6rKLEE46MLx
oRZVcNL97cqQVKIwKoHa2Rmql0E2UkOgBq+pFFP6EeitAi1MHM4Da7pc3SDKQQwEHJuUa5QytlROtEEkAk7Xt5RSHmPvoXjmBKPJW2ycshxzxa32uts0L1bvJF7IYOO2BmtJ8Iow46nzli8CN6M8LYolGoST12MxSNT/BLfnuN98/a7W4B45VxIvN9cAskM95mZqVsPuXNunxQUiIQCzslETblWnUv27t3HyLwwTmZaSnlen3Z9/3yzFGLm3P+8PT8i1/84ne/+505bbsBVM1KKZfLtu+7ujFz2UqplpKmlER02+sLX2MN5sTEplC1Wmzfqrura06w7M/P16enl+tld6eIbD48npclSaTw1WHOPR5xt2NzUnf8qYtAu9/MOPrXSC2vjmA+R8kZXX1zh7sc8vPuFuM/3iu+Gr29IrPxjRxIMIdt1upgUY75fj4lNKQHDmgS4xReMfEw5QeZtwFr3mOBjmiIBLdMnZmFHpkJJspwtQN9eQ/Nzo/A7ERsHfUXQFS4EBrW3bz5gyaH5+kD/iAqzDGepak1kICJBr6aR1M1N1ucKKaquDtqpahuVQfoxx/f/d3f//Y3v/vmstWczk4J7E6upjVaagOYtkf5EyHMSSZi9xS9v1JPS3o4rY9renNePj2vn39yfntKp0RvTsuayXbzqkvGmhOxVAWcnCXTYtXMrJqrcCklAZSEXDd4JWWShSlBWPCwpseFl4XJRRMAlEIGGKiob3t5uW7PFy1mlak4NnYIcmamBPMdEYY2ITYmQtTHeBhUSOQxe8uUAx46p5fLBXpZV7x/9/TrX3/5b3/3zb/63++UHyDWSeeerv/E9Y9rUMWNVO8YOURkVmHezFoAgTIwo4kewoQBRDSfe2Ed3ToOiL4lp+bwNO4cA8V6JuAWxXfwMCZ28Jb16S4lNVeQejn3YB/vb5tF3Eha3D4+Zsl/vBkIlNH7NQGthSmWYx0BvwePbFhi47oVf9xx1eOLp6RZf4Z5KYd8oSmOG1P+Oo7ouq6llFKiDwHjW8aDjQWklMiOxN1xopExoFbA2VpcXunLoYO7SAq0TAlO6b2eYxOPaVrgxMyb9axIh3gBHOQpcTU3b14Zcwr/99Tr7+Mpmr/nxq+22AggJAp7Fzkv67LkLD3WSzAb4rjUbdu27XKVtLiRGzGlmITTHgGqWrVOSTyCw8seG0Kurd5jRLjZj0c3C7hVTFnNVt3hXqOqVtqEyTZfJL6l1t2ncDX3Qiwt+7pmEXKrUCtFt22zUt98+oYrTKvWjmPGKa/5aOAMQBgid69uY7pTEFFUzrXnax4dB4LsGFzp3uYYJWaCWIxjaHx1ZGVFhHqnr5lFIoTiMB0ppZhGxMw5L0tOXnXjLYqp0VIlRwk0w9khxImgjIAvm1jUyTFqEgQ0xESnLushlRaOsYC6bS2Do9LsxuihQEbttIvJVoiwjnSVPzhihIfuLKpZVs68fMduY6QHWkxrZCdoFnnuRGN6io3ZkjJEB9AwaXJuc2XG5NYYPxChuuhNVdWUOLoy7uQME7F2cTS+cV4zPDbfW5K0ZT6XREtODyuvp/Rwzufz8nBecxZmV91HYCVldvdSyvl8Vi0RQjqdTilJ4GSsnMO1O5/PKS2XyyV6Qsx136/7vkeepNbiAAjnhyyyAqj7FsEdkcLsbx6XlFLOK3PSatu2Xa97KeXD9VpKuV72Uqu6Uc+Wm1UAWcJMSOHgEZGWPXDSWuMzt2Ekg4kIzTX0vkXjT8E+psRy71oA7YhrraRppJY9fL6bos1ml6J3ebXgHRt5Y+34ZgSYpAe2Wbs6oi8PPo1LWq+4uXsSyTmH3As5MDRRZwGL7sihXIdDO+45LfiGdF/9ekRqvadBQqrfaV/vhvtPvj6uO0YbbxhLpW6vY+qPoCnHcq+qAcBqbZCDZmZWuZfYSMpEYkrqMLPrZfvh3fv3757+4be/Oa0Pj2/fPL45n89nMF5eXp6enp7fHw3533zzzS9/9esPHz588sknf//lb9b1nJasquExgqOsOlc3NzUFu0mtGwBgXVczo4pAsdn2rdQCIKdUiprt7urQDx+efv+7r/d9P5/PKaWcc63GBouxSedT8v3uwUWk98jdQCwMuTfLq7FXcw/bOE0ArWT69gjmA5p/nn89PuLTPIlXcbfmHdqNsL2Trj7qS197m7eLfn2T3tcUFde3didh5EoOOu8EjClsNBmBPV86ccG84E6MPu9hUyhoyMOT0Lj/FbesRB3jYPxJVYngFuijIfCZnJ26ORenJhKOaxKGiFm9Xq6/+ru///nPf/Hh6Xk9vTHIy65upFzDMBpKM4SdgFgoMTPMTYl8EUkpvTnR+Zw/e3N+8/jw5pTfnpdPHs+Pp4S6PyyShI0WSyQiKYswJ43iBTrnk6pv25bUsKTnZydVLDGscN8Kq6oJ5ZQyy+mcH1ZeF2GX8CtsEYWroyquSZKwELYN5iKumwiJnlJyElMh82upTEwsBtLIDToRsStBiIxqrdUKi64snDMZgWhZTtd9+/3v/vDrX//D/+H/9Mc//6/+zURg+FM+4T/+etFerTi9clfcDKCd/iETmrCN7HS0BHe0qQn71wCKyQhHsr1hRR0sgBGdmQRFU6zeFGsXzjcjrME0sllR4udT1Om1uLCp28KnC1MGqH1vbzJPwzL+SYUXDqGqqlpANcbicm6To+OzR3bvPv7ajLnhd8XupZREcqB6RFdISguRjh429Pyku6uWWJsI0HvnIsjdPbTW69+gNUnmAO3QmtwL32fZFA14r0mhf0tk4Zp6MKsAB5TggHRvuxmHd7N7AUtLzMxJsFcfk9nQBsWMmjcaRT6Em/nssQlMoSliw3POp3Vd17VXfDi5Skoi7O61bNHGllKiXv3i3YaLX9WKNVg5ibCQAxrRSgBtbEIDlQVgpao7C7glbJ3YgYSOTGNmBNF2Xq6qSEYpxw6H4y2cjVo3gvdcsbQxid4Sfo2XXCSntDw/X8Z7Ym+X0yo5uVHMo4yUuDPBnQ1zHyaFZzW4xQ69ElIViDFKIFDLARITM7mHBemmg4C6YR19fdE1thBsL0X7dDP31sEYysum6jmiQ5VyX2Hzab2GqxT15bHA8RS3dDk9Ti+8mQ63zeekAzbKZ6HADkVUet9rX4+RhHTcc3xL1IdGZCOA4ZppMFYCGgHD17bnzE2zTOoMZW0Woh9RJBwBpoNVW2BiXaRD0XhLvLsQm08GSqgktejtHDspxJIziHOpIjIKnjDtYVgSw38hImZkIoIvCefT+vbN8vjm9PZxXRZholKLBp6t24A0AJBSiiJtZn7z5k3Yr+7+8PBmf/s2JJUbCcir7nvdL1erhdzJNUnilGutbgq3nE7rutqZ911K3cyMXCVLznw6SZLEzqprAKO/edlfrtePHz++vLzsVd3dwACpGUkSyWAGWN209RIHlmc7aOnVya24gZpkU3VwBN3uYa8BmM09EuPiVnV/+C3HPge1D+MhKD6CBal3BAW9qDacqvheVTW7QdREBx5Dd9vurMnBaGjpGTOr1mtGGnFC+0SSm8LL+XlGzJGmc
vRJ1LT3cK/CmBXieOf8L4AxCHReLU2DwoejOyvWO85CN7LHY45vYUpBkrcPxekY5qne6+JUlShAxbyold3fvf/4+9//4fdf/+HLL3+b8rqeT2/ePLz59JPTadn3/en5A+3r4+Pjvu9f/+Gbr7766sP7JyMu6ntVYnVwMd2rmTm8GlxY3F2dxUWIo8xARJ4uTzHfKBRiXmQ9ZRF5//5HMGKM5L7r09Pzd9/94O6//+JrEWGhZUkpn8hIrbBqzxL5Yb/0U583CpNAHgc9/ztv8qHi3fkWdXO8QUsZHx+HOIyTG35pfuDxSoSfMWQ44EwTXRw5gWjcR6cMV+XJpvTIZk0C9idpuK0qRgbR0WVgfdLM/Fx0692ho1jPcQp00zq+7e4b52W8ZpZhT87v5Ck5M3Y1/qDz9w7txa05IvQvdRpwd6vu0IWZ2rcz1F9eXn7xqy//u//uP/zil3973VXWs+1QVwepar21oclBzFYLRIiZzBmemM6n03ld/tm5PDzmT98sb87p4SQPi5yyrkJuJsyLsEs2a2EadV9yCgNgyUsPtXtOK8Cl1qKW1dK+8r7ttbhTzrSkfF6Xdc1LbtjVgHNKqnqtuhVNTCKUhcopY5MLoSwiyMu6EDd4qlrhIk4SQKlQ82bRCDupl3AIiSsSS6ZTFoYw81785eXy9e+//fqrb//8f/d/vDnXW5+K/oT2/ydxiRy9qfQq7jlfCpdetBQIhO4+IikHv0xs6N5CH7il59kEGxJm+ALjbTR5Z5Fmeq2G4polVVxyWzI6q9HxXd6DsO6eBlrUUGNh7s/6Y36SaI6fHUK/9VBnl5K6AzP8onlZ8YSxlGVZorM2yULsoeUDFct7X36Yt9xkDs3fRd0C9tvr9apG0YVM2R7rKFjznqqqgqwdjLtTrRWFo02uH/PQqW3HZGlWhao6QMmZJbNcOkiJCKPUWnfzNjNjWDDtth1Fe5y6EeiQ+2DmZcnRlFVrASwxL6cFgJmaaq2lBzttTE/Q6cF7sHwQWWyqCccUigh+qI3MXgcd8ZarUXewEvhQQaoKVMKoaB3FHod2qbprKxkFM/s8JY+l1lrU4CrkKaVTXs/n8/sP70optZYW0gaqutsxERho86hwq3qbopg8K+t1uf09xkhmFh2Q1JDXbWTYWvyga7IR+3g1kJPMzBsoFKtqKSo0MAATTWD9qlZrRRUfp9vzFdbbtF7LoHtuZxotXrbbeKLYn1koIAz9ANVwtEYI7sU29zfHvHt3f8Wkrd09Wn3maxYC1DdQiFNKI8QVo/nmkzo+fjcUaCpnYmYiMMsgwsEdLDAjNR1N1aXcTL4eNzQz4sQiyZv40lJrMEWXSGam7EQNvA5CiSDEwrYu6fHx/Omnj588ruuJl8yC6ubXvZ1dJJCdiJmXnCXnWgnAej4/Pj4uDVUc8cO2bZfLZd/q9Xp9eXm5XC7PzxfrSR4RyZm5YN93sBObJFskPTwmkbfBcLXWFHAGxDBy4/NJAHz2Z3/+fHl5//79x48fn16ul23f972os+QYMRo9AmOwRVp6eT8Rg9osPxyuxVb2PWa09O7u8e84wWgHmKl0EKGIkDH80AiD5nGr8IjJ7NBBzOwEA7lbb6gYIdcjXibSTJz5hrE/Qy5RPfqZq462jequY4zepGsPQN3ZMh5UFD+Mntj5ibwb+jwVDc7MOMh+MMtwSMamjc/OOm58URSWv5YMQ4XdcWIrEcfRKRfLC4fQrKp2cBF3ANumcK5VX671+en6x+/e/ebLr7787Vfv3n/k9MLMnNPDm/PDw9ndX7brFw9fqOPjx4/ffPPt+/cfnUWY96LbtahTNgUgOS0dk+Zyvbo7+VGzlFJa8zK2SPoKc87g3K60gHzbtv26Xy7Xy2X79a9/Xcr2ww+ffvLp27/4i3+2rllVCQWn4fpOwuS29H28TkSj0mTsdgyT8O7gxSLn7b05waDqHggYZ4delh1/Hac/jtImIGuio2EhNEVjlsOrnSztydwc6vVOXBO1WjWeHEjMLmL4gV3mjqcz+MCMaGKt14WNlpOZWmambkHQbn/eaQeRHmp+FcWYd2Ywxd39j3OpZf5GbtXnNGAj++sN64EZtTog4JZWfXl++fbb7/76r//6l7/61Y8fPoo8uppBOC8xX56IXBjUPWQHEZIIgwQkKS2Uz6flzZuHx/P5L88fzsvy+CgPJ14yrdkX8YXUMwkjSnaEsxHMY7wnhQkdu7Wuq6oxydu3b7e97qUW05SV85JrcfdTQs55XdIpp5xYmABjB+dUtKaiS/Jr1lREyDfhIksi0u20JD09PBAH/v/CdC1Aqd5Btpu+S5TUd40Aoe/E2GkT5/X0oKqXy169JEpff/XNL3/5t/+X/+b/xo+P/7OLRf8xXtSLrTBlBQkEGG4ZbbDqzedDp1gHFxmqsJfGNo4bNl+7/z2QzPhprlDACA/1NzfXoIOiB3DA4A6doPVn36cvM/TvfcnuUAG41WjBU2M4wU08iSbVJcxZUtRhpnxEvOKdqlp2VdcuDYNzmb3F72uvcaduSsY7AyZ+WN4i4k7RqwYyosMijKSctkFwrf+Ne+9imdT22NmhTe+0tbvr1D0YH4zppXeqou04SA8vgtQKavOXxuNEvkJVCQCMU3Ms1Rokk4gkSV3ZS+TcSilRQhMQqdQLWd0dfE86s2Za1ygVS0wUKQgRkpzXdb1cLi8vLzHwgIhUSyllWcP3EzOPIcsx7imQ4ohiZIECHmg9vhvR7PHGSIhW0gMyESZYVBkxo4wCvFvmGXQlwjNuUNmew8nsre3k1E18MwKC0lR12wozf/L205eXl33f4+bVNGrzTqdT2yKO6cStijLKwrvCQEMzbWHRo12BO1WX2g/Iaq01ZkXKFAYerBhRiZSS1epubWRFD50AYJIorFPVCAqmtNR6+I/uHq4LV43aToyYTbcjZ2V5IzsAHGA/FMglIqLa5lUy8TAh+vNGzrPBJIRXz2DpBtCssOMN4+uGKKj1DiOkXfJqpnyjVeGxw3Dm7hB2k+uGxdz/ZNVT/54ObUIxya2F0OJERvey9sHZY0kCcpEIuDTjxgkMSWn0xLIawsqZ10AW5Y5omDOIyaBv3779/LPHLz5/e16ZqZBXtaKqOT+GJInG3dj5GGOTc46J2zHiBUBK6fn5ed/3p6enD++fAmnjer1eLpdohw42CQ4NDrpeX1TLtj3nJT08PLzJ55wzyN6cP2n9LuqmTiQppcxZHn728vLycD6dz+fTh6d3Hz58BHQrxBKgvtUAbjDLwgnY2+BKR2LJIonbnLHWALyxXi6jeb0WzBJpPs35WL27KOMN0c030YyOU47qB2cZofRJE8UPhwy/U9s4AgSNQ6MF0XqmbnwwRN+2YwQLQjOgh6X6TW3S1z4rXbrVNUOY3GkZ6uG8OcozM/Xtx+9RuCMSte+ll0Uc5ElEajf8OG54FwEZ9+9y+Dgm6g4zEZnpwDfv+hFEXopeni7ff//u66+//fK3X/32t7/Py4mTGAFX2sp+2QoL9n1/kz65XN69f//+et05La5aVaHVDPteS1EQ5UVS
SlV133eRRVs1RKuwWZZFV7WqQ7DkDgO+LMuS1uv1+ny9ZOFlOaXERHS9XssfN9Xy29/Sm7eP//bf/pt//pd/dX5Y5XQqHaQkrqbI7JhyOWg19qFHwGkQUuwXdasufCfumbSobpl3GT0FLQEB6j6aCYXa5HoaLaMeIJU+L2PcxztoxCwo478jfo2pBIaZTVuMv/07HeVx54lc6KeW5JNj5lOGUFWZePBU89K7h3zHqmEKM7eqovnmXZgccY1xNHOWfmKBIw4ylhrvSAHf5QHYETaxOyEtzR5Acx2JostrydK0DBOZqz4/P//xj3/89g9/vF73ZVmI0stLreZJTkVLRZsLHfYY1AiejNOahcBMa0rnU3r7+ObTt4/n8/mLdc85P57zacnLktYsS5LRs6oOUzg7cWKwEch1XVd38qpEWJbV3WuxvK6OQDDNxGYiUpM5PWbNOYc3mJMkQswsNTMgMaWUTNQA3rdSUNYlua7ruojow8OZUgYLp93Bl1qr7nM0zQwBpq0xJ7YbBbVW1YjCbwZ1Sl9//fXf/M3f/F///u//zX/9X///KBb9J3S5+/wUvU4w6r+69gn7Y5IbQwdN0rXHeoLg681Ai6HF5gDVpMuUZcUUOkR3TGxKyI1IqHfvYA44hqi8Xq/jDpgE4Dy+Yr6kj6wY6wxdmfxyzWbE65WkuLJL4nXBuiYQbZterZQK2HIy52uVE5u7kZZl32Lc3Q7frxfbzig7ETELg8WgMJhStMBajRSKg6r5SlnYCNVUIZLzypBSCzmYTVXBiBntZpWJU16I6vVi1ZRIl0VEqPq+7du2W4t3klndlZjykjmXlxcRdqLi1dg9SwmA8fVN8auqYy+nvKiqaeUmnow5DStTREgSE/TaUNfXNZ8Wrl5LKSyVpdZad4MQ0inV3bZtWy6rguBJCCIpc8yR24Up6pRSOo1y2eteT7nBNripuznBzapblLRBzUxFpGV+TZ0r82pWrvsemdVlyc7+8eOHsHRLtXD8RCTls9puvTjqdF4fTueQ6apkZm6eueU8ba9KKiJAZUZewIpa92rVYEwKAZEolIhkWVPOLLKQhbuuqlrdQSnJKjlAehJxwHkwtxTup59+GuwXDVHMycyu276mrIATjNjZzeBgGF9eisjp/PaBiEop2K+qWqs/f3zOOT+eT+u6JmaGEzGIP+ruaqCGvlNrFaHTaa1ahVw4My3OJefVDOpOlN1k091UlyUDrKV6MdTywHytpuZMSdUvpZ7Pj379SFrd4QpyYZbEqOSOi7kSc7VKdOLELy+VwFy3lLN7BXM+r65iLMSJCbWaGLsiERtw1e3x7af0DHNd1hyQ+ufzOUZ42V6Ch9U2lpMkKWUzXB9ZiF3JHWRCRgQPbDFe0iLE7BbIe5W8srNLlMfWqi1xJ71cIaYsNlHWipcCIghtvErrbCWHVSdiIQaUDk2ia4rRh6R1JzOIXC/P67qyoF53gYRhsSxLKSWnZJJVzUpdUgAdVYMnJBGpdY/C3T1m0wmX3T0vFZRTTmtL9jIIMAEnYQCJEmWBPO/XUpTW9JhzKralzJx8L08KIrZd92qVRCC8aSUGJxQrVN+4VZDlBQmFbVvF3pzW/82ff/rpW8l5W84nIO2FiBfdNqqm2356fGQiU2URteLVzuezqrKg6l51e/vwCOCH77/9/vsftm3bthLomFpp33y7gjm5K7lb1VpektR1zQ8P5/ffFyumRbHbSzHb6ul0kiUx1Tfnh4c3j+QopbR5YkzuP37+6fLZJ59+/in94Zs9gRP8Ifm+X72nZ813AFnyuq4vLz+ktLj74+Oj+/727duc6XxevvjiC8nLenr821/95ue/+NJdsp9th+oLjRxFT2kyw6oCKNtORFbVXdec3F3W5EruLJxSpl7rq+EAu5nWnTgFqtmSicUv5WXft3NmNzbfWzDba1BptSLG5N3xfimLZFFk/rTuGWC1i9Ilr1R2KluGffL28dMf372si6h9EGZ4Vm0T9lLiUOVlN/SeZ1Dg0BjIU8rDQkVHixnBiGHLHplDTq0M1kFG7AyNAvgWoBn/xlWnjMeUqkLOkcE7GjGGoUA9CDIr+IhHjMUcjqLvkegQkZSytEIYYkjZquSHh9O6ay1F91K3bXsBpbQ8Xe0P7y5f/+HDL/72d19++Yd9x5lErE2R1cpJPCXxIt+9PO37/vTy8WpFFlqXB6n15fK0ntanjy/7vq/ryna2omYusvguMPJerpJSYk+6c0pLS+oau7FacwVqeYrGWgalJDlnNr5s9eHh4d2PFyK6vPiSvsvyyV/91SeXF7Gkp9NJhOAGU4s6ndNKMaQFQATOmAgCJlfDMQE+jK84vaMIaD41WRNagNgcIEQ9PTy+CA64JBLOzVcMfConImEmRDLdY/gbebTMmC9rJhHpg5dGQu8IjLnD/Q68oRm1BCIyDcwFAhHUSdjMrNbhXLU7qPsE7BkxwNAsI3Kh7u5qMBY4nBKpqSxZRKqqM9W9LsvSQJjNBxwV4CTCQm2ey1TysO97BGS71SvSJ2A3bgECPi2eYN+vzNy6nQP5vCigvOYwjdvexNBMMHpuX1XRS97AVNTyssRZyLLart9+/f7Xv/zq5b0/LH+2y+Xj85WALFb9iWh3FWKYV/cqyR3FzVJKq6RTkofM5+RvH+zzN/tnb+W81p89fp5SOq9LSimxsCDSeARfUiZyLcVdE1kodEsLAywU3TRh56dM6+rrQvuOUPefutdqtda0JhE7ZaxrZmbXUTgm6qbVd7W0V1l0fTyVRa5b/f7Dj2sqp0/O6+qGfeP9mZ6f7PnlUn7YX95BPzpfsRRaK1F9eG+GjJxlDXvJFZb8w34hYuYzuYqcdHv+9qtf/vbX/+2/+W/+FQzYFesJLtteluWxsxbQXUXuP/8vLyK16Qb/k29r9XQ9G1cBdyiROypQAQNVwKileq3DMURvCAHOEMAICaRNihNHmoLI2dsomihohySB1lqYe1YfGsq3VZ21nqomu82UGhipB2hNEymuQfT1ehn6lFpsujXL9BxYqOx27mtaVLWUOuR/2ep+3XJM0e0CIoSGsEiWaQ4Tj8QMNyBTQwOntzDIU3xrrcfXx4pDOg8PGDCRZVkW1cuozwmeFJFREGWB5x8BS+5BLDPrcwujqSmzmB3B/nGflFK0C0piTjKWNPSx91wNQaz3O7l7sQO81czwqpMS1hqhWDilFNMIh5efuM0B620t0p8OLMRCxOg9Sj4AJ9umiwg1RhWRqM05YgkjWzJdQ7vLVC6C9jOIjv6B0UswLA83q6WF1rh3XNRaw3Ucjr514JzAsVyWJfIVWVKcRQC1uXvkmsZqHdoj66I1jWBGj1gMBWnRApCWDPRymh67ct+Z4arVqjmn3txoVYuVsbzYcCIPCOt4ccDYhtNIjlprVcqSADRQQvckKeIfROQ5ZzRbKiWpXXP2vA265WHz43hTzELkgVhevYVnepLHRXsCodWyHdCd8xEDMf0cTmxTEBRTptRH1DlCMnfNuv09OecBl6L9ggYXYHCZKoZu+Gl5GZlEgpvG4FKDm5u
wMDM5YtAgRzvo6CHstUgYQWgcVdkz1/s0YtSmcoVAGyI6iCrocNQBzrwzL9jdBwKNI/rEWj40qL8xeB8EZDEUKLEwL5LGVtAtNI5q6SO7LFq959NBrygLA4zMRQjMQg4ztV0S3r59+8/+7JNPP/308XxqvWeGWivnlr0JwRXd3rE9nPl0Om3bFpuw7/vl6WXft/fv3z9dLutyfnjzBuByrdv1ip5+Z5YkRIRaLT7LzG/evPnw/LRfLgCqtYTnKfGHDx+uzy/n8/Pj+fF0Oj2cTu0sGFFx8PbtW4Ks6zmv33/48PTy8hKE2VIizDnnZVn+6q8+f/PmTc5rlBicz+fzef3ss0/MLK8nSeu3f3jHDLWCuhWluSVhbPigzHG+Q5WoKozRCsjvO+LGcXiHkzlescgnxLz7ABMlEREOw6jnFhCgUF6pp2VElmV5sj2grUIdhirRmH3XTt/ctQd2aVkWp55OBMIhpF6r7xOU4iD7QcmDIzANIB4cNKQ9psqcO40wS/i77b3jEe7pnZ/8OPVQ5viiy1Zm1raOVaCqgZKmqvteQ9g+PT1tsgDb99/9+NVX3/zmy9999dVX79+/F8mqPhVZmCTKObn75kf6aF2Xh4cHd49wT1k1brvvFUDOy7qukbydjY14qMvlEq/EaFwAtdZ9363avtW9bMxMtDBzKW5m2/YjETEs58YUz8/Pj4+P//xf/vn5fAaRVheRlBmuKAWtdOrAYHB3MswnNW+s6n0faaOubrEcXtYc4B/o8/3pCPn1abanvn2FogyeabinwyccwZcblTHlE7yXWjS6iiH1Ig2IYRSOAmk20XvOFDju3x/Wg0ytapfbx7yTmXrvcua1Vu714SNKYr2IZmjeu8TgLJLjK/ogn6N+LRRxZMK7yTvaRrjLcHf4LKTIvGjTPuV6/fGHj999993z83Mp5Xq9BqBXEFv1sQNNv6QocQqflpETn5b8cJI3D8ubx/V8Xk5LZubcNBsPSRitTrEAEWnLdaSUSi+IuyO8IUmG8AzVT4mYeU05bGztnkAMQ4r3pywLluD4sm+n0ymv68PDY1oyEZWHh8c3Dw8fH172XT5e6MPmz9WV2ZfMyX0PqIhBdU0+7EqO02lZloWZ9628e/fuV7/61f/9hx/PX/wZBLbvvDysy+qwvVgEsP4zXf/rgEn/Z2HZBIA7vXJUQwc1EgyMQXdz7XOJyd1clbshp6rScW6ZGbdNAWa98aAZ1e4WU8raK3MW0aec4fh1NjXna1RuD75zd4CHXEXXOG0Z3VTrz9hou3fhHXU6wSMpZakFoDBlulHFbtX6fa3LL0+Ziw4V3tusmftw8942Nrl51p9w8HykOLWD0MzhTAChWhLdM0y0EqgfxbjBVyYe+PVD+sRd44DNbLA39zQrM+fEa1qZYcruDdtgEtlBW+SuiVdp2LJhjYbHD6OpVYCo1upq0hrujj4QM0NML5mk//A6ZmAj73QJtOEh7Sxa5pqYmCj0hbs7QXJaclpBpqp7Lb3kj8YBBR2HX5RzTizuXkrZto2IvBf6cq99ZWZYFWIhFhEIstCSeBT5eI9paSnRRp/91EWhSEIYf+PZI/4XVaMwN6+05nBIpI8OH4fiVUtp7VghhqxxUWSswvhfK+1WKyeB+a5VAwV3WVLy8Th2mGLoT0dRJDuG8zVF5SU6Cc3M6fjGQcwaberDVZk+a1NlZkppNyVmnyyGetsjcSMCJkkUyxq0QdSSt43vwioAyCEgcgQOJPdGoHaDV7G40I7hHY9vYBy1cYNJYxTdwBNur8f3SmQnbuANqT1RDL/mmA0OGMii4IJv0auGQTCuO+7GLTZj364Qt4ebcZgQh2DTGOA4fwURJWKZvNZgiGmfezQKRAHT2qrDhMldFWQifnp4+PyT81/+xed/+c8+SwnLkq8FQdW11ki2nE/nqG3b2/h45JzX9Ry4ozGXcHu5fPjw4Xq9llIC2xNgVb/u+1aLW6sHW5a0ZHG3qnuDSAWvD6e8b9frtZqiKLCldVn9XEop123fdzcD+Zpy+KXh/MSInM8+++x8fjyfzz+8ez8wV2N4wGjVywvWdQX4dDqVUt6cT8Se19XM1vOJeDmdFhuQV3Z/iN0gGxWY906+qpKDEQUqrqqeDo2Aob3aUCJ1d+FEFOoKklkA1QZ7Fk6sCFUNyWoD+NuseX1GYOaYMl/dqmpRraZBo/OyrZVOtMGtdhgisTBmgdabqOUgp6Fi7yiZp97meZeiYGFW/3diYf6U31rMgykASLqXJzMvvz6dWOdoDEOPvcIp5ojsqqq67eXHH3/86qtvvn/Zmfn5+eWbr7/7+us//PD9u8tlWxZKCaoaY0KizpnolHO+7HGDSE21Z49S7cD7vVy26/Vaqy5LcXfUZA2jTumYPsVRUTLCRuFJurvV7Xq97vuecsMZqpViOmtYRHy5qJq7v7y8PD4+Wip/9Vf2ySdvzQxOkiXsMfgIbYdE5Abl6Xy3e2PPh9i5OYJBusN9cXf3wG8DEbpN9vpc4tgnPUIUZsAErdRyg6GPpiCXTSWa487uzjE/cDJG0c3NIe0CaI3ppm5tXnyUpN0u28bbuq/CNqm2mcZmeuaeQqDJqO2v4+5T+BOXu0vM45oaehvP3imOfsOxqWPHqAl0qVWJCCK11nfv3r179y4AmSMOIpIIXmp115QSuSP0oGpglyamVfjN+fR4Wj89r29WfnPm05IX5kCX+cln4dZsGbKdrLfDRMx5OITzIwflD5COEAJR0tNijual00/QRzj7zBnDYnxkESJJaVkMUK0pyZIkpbTu1Xnd9PlZX16eyx5dTQ2/8GgxjYSYJNYScZwMwMyenp5+/vOf//t//+//H//P/xfyAquAOVhNl5zxvyYZ+J/hupOEAzm8gcINln/dN9g+1bNzRB1/MVJ5bl7NWvs7JpNvCCjv7Ul8CzA2LiJiljDeZ7RPAEAbviCTvpgVzXyh00O/J4+Cz+5GdaSoVsw1WrQOuErMO3BEJO9D5Cnn7AZWI2pNBa2ykcNOjMc4ovIpJaKjhnW0Loy9oLFB3NBH45UOa3YTS8bIhBD3XGUVEVXG7UTgod6GSo6T4BzFlh7dIEQUAzNwyEeaF1Y1WIncnYkTcYlsWyBvsaNDPsebmFfmCEAB5m1YeRtEXt1izkELeeachcS71xo7FlMt45B6tuM4Ajefo4zwQyi3jZrezcwU22I6HDlT1+puZK6TyG6tEY4GKa6lbtpSi2Y1REzD0nQn97Brw22a99l75Bu9Ymrf97K3eZWlbMycOKdESTJnZk5EVLdSmQcOLWL4KfWEOCem1lsYDxuRdW7Tbo+G1zUvAMKLU7eA1qxmGRKt5Ga2eVH4ajmlVKMZTwSuEUUzMyJvsWF3Qe7Ub+5atUbTFOecJbn76BpthkuMvARUc6SvJ9ICbtF74w/aESzcXd2oR0x9Gt93CIY4WeJonnmp6lXhzsQCyiy7iIgUbZGYIIzEAqYMZj5qxAe5u3vLBPbPEBGoBRoGd5B5K+MZCszRUocA9V7W5mObDaOE6Ygg2DRIirpDLhPO/vj4vC
3jou6fHs/W9br7vWSMv3HIE+IaBF/V+iyv8OepMZG5amBqdGwhU62KRpOmcLLsAmYiCBMRLe6FTNjevnn42Rdvfvb5p599elqW5Xp9EmpDVvZ9r1UXEbfWJlqKXq6X6/W6nE45Zwa9PF1K3aqIiFwul6enJ5g/nM8Pn3x6vV6fnp6u1z3kFvekDRGltDAjaQJV5hTC/ZNPPltOa5jFClXVbdvevn2jewmq214uhS9Lzuu6aszcQxFJKS0p8SeffLIsC0B7rURyPp+jpi6I/HSWnFcze1hPqrqu616uCuUY6kgcnYQpSU5ZvQZk+aDhQ45NHs5QFt6askZC+CYcfn+0bWYgTfRGKdyyVkcgIdaYhDnMbqHeZQhAY0xprUUrETm39rtOakHqR6WD90AhEWot1gtDQEbUcVZv9dp4cJ4ydbNRknuMZuzAUOTHSqZden2feH8UgMzfOG6Ln7oGG2JS+eMOMkLSw7ZmqdWqqTtVtffv3//6H37zt3/7t++LE6QUff/+4/c/vnvZrgpX9xoOWyIR4YJr4er68PBAPaKnGqxxjYhJqJWcc0AD1Np6BB7yJ8MhHFsUAeXxgFFM1CtEdN9rrdVd4KWmSkQMLMsScLRGvFf78PRMxB8+fNy5mML/OZ0fVkYClN2oY7vAPaiQrM94nUyFeZ8PQ62HZVtjGB+5lHHN75xfBGDHdAebEgOB9csIJmcDERjkfTTadOjjTGcCGP7bndJpvBnlbURg8q7LpLPZuOFBjTiS8/27GnbijFI4SLG9GUyvKJOoj5HpRuedmTt+CMosE3jPeEwAPa46daQjehTbI8/vV9Oo0/IG8T2dSHAQAHetvm3b5XJ9uV6u1+hY4fr/5e3PmiRJkvRAkA8RUTUz94g8qrqb0I2mJZpZGlrMy+w+7q8H4ReAaHdoMIO+0VVZR2ZkRLibmaqIMPM+sIiomHsWBsCgVynIw9zcTA8Rvo+P1VrZIaC2kgHvLRcEDAQp8LrweQ2XNV7WeF75vNApcgrIdCBCoYGheJUUqFGMWosZcQi+Rd7MH49E4hsrwkEVcHpeECFgmrGyBzqUmYGCmgAG7OV1znExMVJAjqWU0pfoTKtBOZ30dK7nvV532LKoKJPTjJJRU4EioppSupcWYvPogKr+7ne/+7f/9t9+//33//P/8/9FxCBiPuF2UAj8/+fQ6SfYlCUC8N188L5s8lW9JhN77G8wP+KwlQzAbMzGayd9EOPgZow+Bl9crzWjpU2oN+8KBqTWhkxeNYoAXofYAjpEYMajt3A6BoE17fCYrujvNxfRRh1+CyiMsoLjEVzSemXciPIMnR4APG5bzMBATLvlx73gx4yAiRBQzYSnNspxAEApZfh+zcSBAyZh6PjW8V/q2muc5lUet6uq3O0G1xAwSRZfiwYD1ecC4TRx2AWWThk8P5hZlE3qVoUBYU0uZrTdpyA2H1jVcx2uNTwFUb1OEg0IDPTYDwIgMEBgbIOG3+zoAPYYgaxObFZNSSeas5YNO257uIjUZDERIQdQrzLPZiZSEFGbgB7hNM9cRT+JD3vwjmuHyAshnJaFiEopqpUZlyVaHY2wfitti58vZ+ghAAez8cd/2fc5UNhFHoeVABZV1T5W22c23mr28yhRb+A3ANr33bOC1FqMskcTNjNXzQM2zcVTVQiRGJ3uW5G0IlQty7KEEGo1Kd5BAWYO+ipIKfa288HD1E0/x/4BFQAPkbRNOj5zeEdHEMT9HyhOjc24dwdz8xB438qGQ+Mxe2a2w+ywKenV6RZHGgd7vg4ARjkxIY6VfxB/c83qRIz+ORTr6P/HrFXswY5BnGNNtHGWx4xbthBbxhoAzUABjdjtoV+4n+Efaq/oG/djZgSdx9qtoI9M1+noMlHGAlEv7iUzMGnXU/MstFnwRKhW0VqUWLU6Z6uqj1IIIRBV9zm5Q4whKII8neLlsv7q2+dvPj6dVhKp1+sOWjPCXrKI5FKraVRExC9fvtzvd2bOpagqx9Y9a2aEwRRy3q7X637fUkqn0wmZPMFSijCFEIIqWHH5BgAQY0wpurCtVZHK6XR6+nC53+9fX19dxe/7/qtffV/Mx6+BI22o1m2T5fJERG6F7/cbEAPQsizMnKoQ0eVyOV0uxOg5TER5ejovMQZOr6+vxBCBz3GtJhwCAK1rQjIGdiClAel+0NgvRUYHKY5f/cOdlToO0xQWHGoJgADIB9wjcqdh7JBCRmQunBFRwAhVEcIUYFLVEQe2FtdQRAQmqPouHS1m7BrFadIdQmym7QPj42MJ/Ru+g6m1byjs8aT6LkMIj47czDjYI8H0mFoZcMa/uNTzc/mvVTzm8uArAmAIsJcaQjLAzy9f//4f/9Pf/cM//vZ3v8fzt9v2cr/tX7++/Pz5y7YX5qhgxKSqoiBakEyMeGcOQY1qrblsIuLdM2ZWq8N1OLBWOJ3OAJj3KlWtwatNMAkAALCua9uM7iiOuxUVMBQx1YLZiChEMsSonBKHFIhCKfL56ysAFBIGFrG/+Is/+/67b8BIRMhgCD2ftTfR7bH4M0n75DqzKXOCPSkJAH1zB1V7IHsQAB55Ax37OXNCy8k48hy2bkPA7nB2kTc2mvq1xuKg2zPeNPEYjpld1oPv/OvduxhnMzOEI4M30w8hIowsLo6H6uXTD8Rsw1PF41axAyw1XpwgDMc9zBw0fh2XOD7jz/VYSjdWdOKaQywBIGg11WqmOV+v1+v1/vXL608/fnq9b6ARgUrJVQUZzDTvRTEAqoEQWmRKDOfElyVGtESWCNaAp8jnJawLRiZ/VgKvPEI0MDsG2PS7YhwTgtF66MAQRwscAgDzoMk2apXQYogVauscMQEAYgBAVQJQM0JAQyTqilUNQqTAxIEDxsRmlnMWBTE4rfbhoruGHe/yusFePVJgAIjGSD64GWp1Oiml5MyDhF5eXv72P/7v/+7f/bsP33z7r/+H/9GDdwCgUslLF5xW/anhv+34r60XddoTQJ2rRs0E0B7fOXzFnkgUNIfdFgLEjkMLZoAKvTVslB/SSM2JcIxW6qz4yOH6RY+QdvthAIgQAAQBzAh6ygq6N4udd6aMzqEv/JO+C9vuvbWNj6jNhoheTzE4yHr+0FFGH5i6lTTm8ZnxRexDh7zyExndd8QQfBWqgYCha9NmCksdAYOj6g+1lyoRwSFZVLX0tmYAYGbVt3FTPy33tuaU0pA+MLnC48Mz8yM6Uou3Ox5Z1LZDBuDVMsBqFc0IIIUopUrZts2YYAlMjkoh2jfP783cGFarCEpoTECTpANQRjAg330v1jKfkNHjyoOR4IhSHRszv27P4gV+HXXz+K4BNCsHCQiM/H+1xreI3jRCSBamC6kaoAZi7GXuDTQBMRDFGENwo5pEOBAToIG6Re5XrFK9y5km5FwCS4FafT9x73SralJEKiICxxg93uX5Gd9GM0CfbGbO+TwYQLWqhrG8Axxp3/d56FyKsTWjS0Viim6DsKpUEQNQq8uy+K0qGKH54njeWFUVlWEq/cJe3CtCPbpDFABKr4FsRiGOINAUyDnU7VRCZ
t6/ER8aCKX9bBwYY1QQKsdS+yIGagBCeOg1a5wPLVUPZiYqqCB1GKzNeumC2PrvXhdLRP43xOPE4GVR0IPMfiFAr9tqrNcDz7M06eLugc4H27z56/jiHJoZrz1j08piyXwQ44jCBKTuIXZYYFEtVZgIMDISeALpaPECNWzeMrgEQ2Ds0jwSKmJKmJLFUKu09ldUBTAwlXpdL9/+2fcfv/n4FNi05orKjJFJVZ0XDDCgIwPDz58++ZhsRCTmgMFT6MuypJS01tttq7kw8xJjCuGH3//BDGOMKa2moAqo4s/GrQ4BQghIpgVrrS/7i4iczgszXy4XVfUg8Xa95ZxNKwE+X05rWnLe7ve7bZRScigg0QoizNH7pq2V9/OaOMZYA5fAudzYWZIlRAf7AQCLMToq/em8EKGIVisixbPoMB0uabjPE2tK1A6QwDfMMr44k+JBXcDjdUOtMERwDGGo1ZUcNwFvqOroF+QhGE6RAptDWEFr01Jovv6sLNWhNbp/6/jP7YnaZ8zAVNrNDAaZ1fasjPxFlTpez0/tOMkz5fufRhT1/WqMpZsX7Y1/+P61PTqrzPy2C90AAESMKHBIr/f7D7/9/d/+/T/88cdPgPz6cr/dbjnXWn0ETwCAnOv5fEZsuSZv3q9Vbrfb9bY3a4khhMCtwjzkXM2KLywiLukU2OstwNANXp99YmiEphR4LC8Q+tBLUenFSYAIBhUAEiMRCJrXvSIFBC7ZSi4Gsv2w317uXz6/XK93NPrm2w8BfTQLgE9e975B9LGrLTIxpPeR5ZsAjcfGHVsyefuzfTbTgx+ij8W96Aa8mZl1HXfod7PA3eH3fRpi2TMJANgjCC4Tj52lOYXYS1EM5vuB+U4ejzeEN45pER66HsaLN4zwSGkPWsNpfKbP2Zzz235zhofzeIxHm6I5+GVqGUSyGTvSOjQaAJRSvn79+uOPP/7hxz/+/POXUiyEaIRiKqqBW0zNyFPVFIjXGNZETyk+n9Lzsjwvy/M5PS18ipgiJiIECRyZgLwmu2tY6MY69SkdQ5+OuxubOEAfRkBhLC9OFq87bUcELaCqkgNuAaARBGD2OVktd2tGzKiqimQogSgSLimdFjgtcsuSq8rAj/CTh3YPpZQQgmNBExEThpDu9+uXL1/+4//+v3368f/9r//H/7vHRbyd5k/R1b/A8ZAefPf67a9zehB6lgh6GhzN2tA4EFQXDKCqNprIJp3VXL6G9nk0oI0dR0QAbQnhcQPWJstPRhYeKCcTK80RwzdaZrw4COkdo83UMlgPHv1MbKJH5oKCcSoiCm6sExEAOaiD/80bzJhZTQBArRIEIqhFvRNxeCxEJNMWDCPAh1yPOjH/6YWjkR6mG4/lEJGeheidVD0H6lUlVY9yUGx6uo6VMvfsoanAEZpDRFMUM6kGZAEJQojMDAhoKqq9EpeIDMRxBJAIyfq4ql6Bg4YNBQi9I19EQM0zEjUXswgjTToWpEMmqKq3VfYb65hErWXbhV2jjKNqZdARoueg2mtga86KleLalwSQW7IGAIGxeKLJl6FtBEBKAT3FasCEYKgmuewrg+ccqBUt8JKCV6kN+nMy8z1aT0utNWdDZzZ10I+KfUQCtGHrAGiqmpZYi3cKuW8KZkAYaq37fmdEwtZkMghs7LjTgCHU7FnHUGqNITjfimoWCdRILuesWpkTMoHWSFFVnR2kwxERo9aHMMwgRW0e3KF63lP4zGad02woPF9AL+SoKmmGcetlPO0SagRt0PxRh2kwMm6exB732e5HrbYOKL89m2/Pua/Zti391ZxA7P0kNj2R38YYWjg0t4GY5wGtIiIxDEjJvmg+Y8pbbCtwxG48UU/0uRE/W71DbM2iqz2dTwIkQHV0F2JspYJHcbyol2QQEiMYGXS0QIfudu8Tmh1jRBgQkIGUkIj52EFPziIomiHYmuhp5XUJjGC1mFWKEDkAWCm17plCXE+rG8qvtxtqm9xFROwPqKa1ChGlJKI1lxTCeV0jBzQwkXVd1+WMyKWUbdsrYAzDjBCrQjFETorEzLLLz59/+vwFP3z48PT0ZIiAGom37e6UIGXftlsKnCKbxi1nUCXKYIRAFIK3q0jNtVQzY8aYEHAFAGL4eP4YQsg5g4bI4XJaN4ZcNgMFRQp0Op2ISKQx4ExjE80fenFWMI2KTEFEkYGP8P+swJwZOsRROydzQADvLSQisgDgE1a81ZmrFhcRnj8SUu1E3TkL1O+EpiZ2R2vWahY9lQEGHmgZxtwIqownHaw6xPjsA8yPbFObPnQFRET7vg8WsKmnfbjQY7mGnNFp2u2xpPPA8skQP1yayYAAgMABe5VRvzqoV+8A3nP+3Q9/+Nu//4ff/PZ397yHkD5/edm2jSgghZAWZCEMpRRRRQJDEFOfy5xrue/3+616yQlzIiIA7K0ExStFETHFdVmWdV0BKG+CqNYyfv4IjMiNm9EDiEhEIqVWxT4HbwSUFEgACckUTbFWqfUYsXt/vZUsOdd9z/s9//Vf/9Wf/eq7p6eVfbaooXneqYtDnf0raPaZmTm+1Ruxj/3ziEff8vj1zRZ46o/6RCuDjsiErfBxPNe8a9JaJ2jQwOGU9uqy47sTJ46rm5n1AhY7ClabsJ0fZxw46Y4ujz2RAuMMx0lG1VXHW3pYQICRCp3vzTyuM2nz2ZFut4EP1xosMGsN+yXHFXsy1cwMBQBNCRENwYuBmEPler3ef/jt73/88dOWqynXqqreGq0iVUGYUVQAkRmXGM5rOKfwlMLzmj5eTk8pfFjTeaGFMYKCVICKS8NTBO00Y27cSmCHPzfr/aieKhoZwr41Pt0RugY3d/HehH4oAGIQERA1M2VwhHbxYCZ4MxeSSrMx1JxtPfiFCujj2sDQBE0YLDIykGozggCUrLWi1Vovl0spuyowt2IiM5SaP3369PPPP4MpIKMJAMfA8F8I6vLf5/BL9SwfYk8Pep4dWgdsl+rQOFqhE4n/pDHry8BxFsgdyP5JBrThv4zNsZZIHOZff9t4aqCz40LNIWzhKKdn66DTjyGaB1If15pcvrlXEPpOzWgg8OhAThw9Umg6Zj7REQ5wYxtDv56aZ1g7OmXy0EWM1dqbxA8jdMcDWGtCPdoA5scbZG2TB2y9hKFtr6rPcnBsYiIijuPzzOyY2tAEhO8qMR2PTUfbkjFgD1oQdKRjVUUAFbFaEPG0rMsSUyDTUkRqKRM9eb+vuik5wmM9HueKoTv9cKgr14IpLcB9eKunZ/o0yf7JWcG7hCePnjm2kZriVGgBAEYIchgHbm0y92AAuCeJrRkTURx9gQP3g1rJa8v9KSg6YlUtHUvGai01l2y7L2YIDSuImb3Z0O+lu3l9zB0zgaEF6Y6qWYvCqqpIIcaUFmbWSqUUJQZ2e2hIfiSiZz55k3cIBBa87g4RFcE6SGAb9WZ127bIobWbJouRiUhEpFZK7Mnqfd8Rzee5iUJKqdY6umEFTB2JvLMZAiwhqmqRByXExMhM07iqN8Q8qH3wXlsiAxHBwJqLG0nBW8MRxTyjPnzmRsOBuE5J
jHFvzCy5tHDBKFJF85rbXzza4r4L3Q1and/xrJr+0uPMr8eawGQY+R1at5uxNwxrBxaGrv7nr49VcuOGkEBtrhYxMzgWWUDaPUQOXicMZmgGQQEcxwkAABWhQ2g0VtW2IyKCgtVARXJpRTidJZHMXJb+2fcfL5eVSbVuYBXJUL29ueFVPp/Oz5cnRH69Xe+v18u6llISBxEREKnZumsdOSgUNEkhnk4nEy17/vX338W4xBhVYd+JAJVbfK0Vb7Rec0xpBYDT0+nlH16+fv0aiD0PaSaWaF3XdVlU636/3l6vMdCHp+cPHz7A9dZWXjGEECgxkZlSTP7EptVEwYfZMJlK4IQxqcrtdjUTJAghKAAAxZTO53MIlIsAIAfWd6WSYzdh8mHGvqu3DqjXAhxQW0N0H9RFw2NB88mintaf8jB+hBDEQLSqh9UVpCfYRaTWUoq7jr24eaCDtLpBebyBhmcDDAQBABAYUFw4h8eRpLNGw24H4+TRSS81f8+PNIFCj4CXm3EHzU+dDkPgjMUEgNpg+uHNJbyM4oF9nLu7BdnjKVxNpdp6vmz3/Pnz53/659/8829++PnriwEyK3AQQIeg23JmjutpTaeViJjRzKrkEGhZFpFyv9/hVolCCCnGSEhVcsuimzW4cyMEjnHxkU5Sms0wzBdq81RleMvQcxTbti1pLVXUhIEjR8fsAaOSBRGtVCggeZc+9hMCpXRG4D/84UePGwakGH99ogAAgIYGwB4+c/f1cMAeqfGtn/MgBol6PLqlX2fh7x9uhXP0Tgj7xjFTbVBJXgVIrdm+p/6IoE+pbjE1AOzm4GC3WTLPF+GUQNW6qwyd8ZjZ7wenutyZwGaKRQCZoMvHtRomyiPewdALhEfUAzrExb7vzDiffNhs0z3YWPC+nEcaDSgAkXdnWXfj/RM+KNz1R38GsVb6R2LGKQWM27b99NOnl5crIle1WrIqqKlaraUoVg4IaoQQCGOwJfEa6ZT4FCFojshLwIUpRUg05heIYxYZkRkwYreSGwt3jyIgmoh4oc8bZn+zCzb52ENMNQGjVr3hxsDI0BDaJAMiNI5spSYgI4Yq/nUOAQGqmCHvYikExDYYbUmxqPcMlgZa2uHWnFT2Xfd9NwvMHFoRH15fv/7wm//09ccfP/z6z4Ad+0eR/qQp8t/1GNpn5L71Xd77/8QzdeJk7AoCDQBZvQjBUNW9NAYExJpzs5A9czggox5ZT6cyxjd8BFPQZJQuEbfYkD6OgYURYAohvIsJmlmpLQE2ugmGKpkv15Xsg3PYU9BlNt4AYKCuqGpw4F1ElCpVbXC+jymvWVNKalhKiemEiKPkdL52rZWYvXQKADowjLiUH2YidJORmYex6M/stj4R5byJSCkQUvQpFN5c7mBH4wwiYhVTSqXsbuOKFm/QckfIb8PLW5goMLtUXdcktWGwtGVCcu8EVAIRxwgAPibLn8I90n1vF8LWdqvbtnFH+0HEWmvDaKEmxB3bIHHkELT1lXGMaduze925apu/1AjZ3O5FRGQyPAjCm6aYWYp0VXpQnnmZGUaPBjUXmllUJWeiNgzQzLRWZIwxRg732zXGuKZl3+/7vvuKrdRwPk+nU0AqtdxrdT/T8f38iiNnm4FNq6CJiamCUQwhLMnMlhT3fX95+XJ7fUkpPT2fn57Pm2hKraxOxOtcTKqs6yoipeh+30IIKTLEqKpIdL/fRcQtnoZOTpBrcbyZfd9LIcdOjHGxei+luHFQyv76+urDwbu3c/gwRGwG6huXmJBqbeiOHnZyohUAN6SNsFHCNJbdsyu+uSml0s3BnHOgEELYJVMMLcv7WEzii+k3U0QGR0DHqt62LaXkbzLziOtA95eGUJjUBkGv1jNHNu8ZZqlVwCIGt9UGJyL2YSePpo/fpPRp9Q57bT3nP7cCDhN/vp9hHiGiz9Kw7hY6Ty3LknMWgICeJirkZf/MAEAGaVlEa4yx1szMqKalZsgpBaJQ84YGMcQQgkkFZFVNS/QijzWGV6IQwrquIfLLNZdSliUGJAFKiQBul8vp9fWmUpe03LciRf7sz3+1RPj4fLrevmaADx+flhBi4vv9FjnkbX96ejqfT7fXV1NUhCWFyGFNS605rmldV4cyX9c1cZCy5/1uoo6aCWaJgxGeUowx1qIUQlixcMk5L2nxJDwz1rwXhHS5xBiXlfY//3UKBFqvL189wFEsPz1fzuezllz2O6iUPb/CS4zxvF62bfMpntxkr4QQa9kDgZS673eRE8AiWs3ssl5E5OXl9Xw+xxjVhIxqlcvlct93RNu2OxGZSYxRpAFI6rvklb85T8MDLwmhIOWYUPL6+rp+ix6vcTORiCS3vmK3zkMIiLmUAkYKWEqFNiAAh6HpROWi0qTdRg8pwrIsOVnJpKql5FKStubn3QRyzgbCgQYxS4UUo5hVqdZqB8xbCeaSzsEdQy/QNMXBF8SDSm/0o0vUuRhshOpsApiZPYoY41jn4TvZu6DMOOGbDOQwLEAO4DdErGZSTURuP30G5n/8p3/+//6v/9sPf/jjtmeggFX3DZfUZp9+/PjRZ1QCwOm0+ECI+3Z9eXnxoujLhU7pmYhqzd6IXqt6e3nOmZkRHNwqiEgpEuNCrKK653y7381sWRYkElXNDp7VEAT80ZZ13bZ9iQkg+I6jp1k4xMhlz/tWkGxYCGeD5+fTvuV830op220HoCUk5viX/+rPiBCIAQ3VgNssxPdRbF/qKnnQG0w+WFwWULXJAkMiIOKU3PgiZk99el+X7Ptxfn/fUQZ6vE9VUY6YIIyUgUgHVjmc0mGPjhgccUMO93ieX4gBtRSABjDWRL3bNiqOg+rY361s1RoLH49fm1LzC+WcEXmE+XyPOshiW6Xxonab1W/GO+f9PMPtB9eqIhS4oRswtbYDwpBSyRmJsDXbsAEgGKgAExz4pS2HYR6XGS742E1ECjHVClVeX19fX2+1KFOIEUupiFCqbPtNQUJARKy1EIYUKDCkYCvzZYlPp/R8Wp7X5bJ4WZVYEQ2IJGZK0ZtmKxEtrZ/FVKtI63ZJfYYKIhLxwNoYOOrae6+ayu5FpGP1VNVExVrZC5FH/6GaIUHkmBKKNUA1ADWAUmqpDsRFzTzedkBKIaZUI4fQy2zLvlMIMUbqVWwMFmP0cA9R0JprrSklqeqG6Jcvn//u7/7OU2b1voXT2Yej/Qt5hD3kMt7Q+UXfdXXvvGUFzZAOkCTnLfAaSOT+XU8dCigAChCBKpi5d4TaSqoGXzCgVxObqoGpiKsqmOIyIsLdlBqxJVXwljoaBZVitUoIAdKCIo5yIR1FzIVnmFD9B2Fgm/H74DpOC3W0Mwzz1ee99YYaHTGa5g5YFZXB+IgYXNcQAZEyHopqeJ+ghhSGTmJuwG5DVIUQkrWpsoheFSWqWj2CG+MsMqDHe7wJcn4SFxBztgHGZnZ1qKruPDNFIxQRn2isBgwMPTlADpRSfTbfwWkhBGm9jp4yRnPZCgHVR7UD9uGUQKRwADrDZHYDckgxxggdZJ8pMHlbkRrg2MXZLEb0lRvICr3
sxNenQd8DAIx8TTXxKQiKGA1VFX3q6UQEvrCl1hRjCGQWrbfmA8Dz5VJrdTQf7PWFjARoa1qYMedtv98BIKUQKLDYui4xBSJCaAmZbBsiSsllVzcNI3vtgUrd1xQi4wa5lGIGJuWW8/PzcwhhXdPpHPd9zznf79evXz8/f/MtIjJa6qF3qWZaA5NyEK5FrdYKal7nat0OeKD7at5VRX2p21Bjs/MpbDnf932J0QH2iRzCNDXjTxjxwBF13nL9bm1Omg3KbCvcO0vNJJcMADFGNnVDpNb6JjzmW93sG+n1A9iYZRjTaOYzJJpB+Ti1yaM77nkS0W7q3QB+2574VtWe0H57NIYdKXlnImyNO48kbdacyOOR359whHUGO89SSfsxCH58wB4PnQ5fWwRoRZ7mAtpUq2Jw1FwHPW6nAqOODWhmpj1cTXi73ZoohAOXQlUddKE9gkCBqkYijm7VwOtNCpmlhZdEpzXUsoHq+fl8XpJPgCC0KjlEcg9w123fdiI6peXD5XK/35loXVYVvV9vgHo+fQSwsmdQu6Q1Rm9RocRBUU9M5/Vki20bbdu+oy68iojFtrAighADYwikUtbIl2XdS67bDqrLsgSiumc46bIsz5enfb+XUkLgy+UyS2YAyNtuZsxBwdZ1Xde11vr188/b7frNN9/86le/ckdiifG8rjticVi5Xtaec962beyjGzSDooZCMoOhHYZP0ij5gOEw63NxAMC9ixBCCBqCgzSJ+nAIE6/8QfzlYSqqrZIQwKuXkcwAdDQE+hqqghNHP4AAs9gosEE0AGWOTKyihtjzIn4VU4Uxfu0NMQ9DYShgeNTfgzWGbp7197R0x5ln1hu/jo+9Z8yZ8T1eNt4Zp40xjVs1B10yMCQM+I//9Jv/8H/8xx/+8Pttz2JYS922LdfkPdgpJccicvt+33c369UqtXJsG5Me+5orAARORYv3aJhpB7JrrsLwct3q9fOLiFdwzKTlj+NZ8VKK1OoilAk9d+EotpKl5iIihEGq3W4bABGoiKDBp0+f/+5v/+H19VXrv/nu+w8fvv0IAcEEtKKhWKtKGMt1rC0dyz6vtslhP7UPqCLAnLOynoIDVaJZODceADjkp5lNIBRARtDzD2Ot8E/PJTsojR5810F4IwsHANDqlbyf8rCyZ16mqe2NiBz0wgOYg7VV1dHphy56//X5tNbM2alye7o09USxG6zWG9fHs7eUoyoi+p/ArRjsL9vDqZkpHg0+QNAgOrtcLVKzaC62lariBiR5Vo8YQInNGIEJFsZT4vPC5xSWiAsDm5EqmAAjEcXEDFbcMSBigg6XgDQNL5UpCoyIrrOGhBx2NR0VcIeQAQBEJkAjQjJvcRqrF7yMDTzTZWho7yBkfcCyF1yI6L7vefPEBkQmNdsIHK4NzJCaX0oGPprIGgwyiYiimsL99rqk9fPPP/3N//Ef/pfvvg+r52laK+4vkuh/l6O3Ao6f2t+R6f1xuP4yAAMcH3BmNlTvLGvGj0OMqUhDdjlyRQZTkfObK1Cr8gV43C8OwdqwpUmUhYiefpzkc601wDEjdLBM81MmcLKZoXxk8Uwnbkm5fBgqYAiuqfcKZsKrtbqS9csNrgwi5lkRVRVtnpiqAjS/AhAJlFq1j7d/6JQZM6/uGDnT+Rm6THmoxfcL+5AimSArutRz7dWc2sEz4zm9jCdwrKaeOIKuDlWV6fBm/UKqimooyMwBQnEEHRv1BoiIAUlATEFBrLQ0iylKtTbxdOw6APQ98xpuczgiJiBU82L2jpQDR98U9bL7sSzkxesDD8/veRLxbSO61Wut84QNwfDoBziIDNVRggCAmc/n87Iskl/8A8ycQmDGQEyMIpJSMLO8baXs67peTmciOi90Pp898WUNN1z2fY8xlrJ7fIuIDOTL19f7/X5+/p5PJ2YiBjZkigBAub58+blNpwy4rul8Xn1Q2F6qqUNAGRMRBSUtiOf1lEJMMeacS5aDl2IcgUkdGdpatRqoGhFimyPfYU1IFUSUyFLL4HfoP58f6coT2bG/OITAY0JDq80wM30LBmBjYf3D1GfVq/aBo51CrFvkPCp8qOvmqVzzIGyzMWWeeves10a3X6GhvwxyQtc8NhuaDxKrnblb1NhEAoDaoDRql+sU3vSq9Zyi5y7mRUBvtHLqHoQKAH6bj6mOg2Pec/EQFNTrYYgIW3TP5g8cbEdGBGJHVgQRgdDaFNn++N4YxuSuQCmFm7xjAFAxcVwxs1IKEUTGUvbA9s2H58u6Epdctpjo+XxCsroXj1Lv+346nda0gImUut1uy7J+/PgxMWUwZkLQ7X6tZT+fz46AnvedANK6BKTQSBTZ6iWEyxpDCFsILwiJwIDu9zuTmVmpAmDexxsDaZHn0xrAvr5cr/ddc8kGWiWFqFUw8rIsAFqyEXGMkTBEDu5fu7gupRAV5lAwI6JYM7b2/S5Saq2+n56SMrPSsQoQec/1ft+s+T/gRYNjT2erbt4snTJmg3bmLRpnkG5ew+RNddHteetBCdJPL6rspeuIqFoZGNEYfSDlQyoPejcyoLrQvkuZ6da5kjAWMyTCHl4xkBkLbuKAQ3nDhABBPcB/GN/984fnMC3FrNrGarxfzHm5fvHAR2Ni0pI9CNILAQDAkXMAARRq0b//+7//27//x5fXWxGoatteXl9vcSGPxTGzFwWMJy2liBZ35GqVnO+11oiLiIRIRCHnm2f+AeB+v1tv+GSOIQR32MYqDetEe9LJfUvoNopvJSdgwBnDXIkAi/kkQdV9zzlvAIBMlHeksG0bAIAoAb58vYLAy5fXmstf/dVf/tW//ldPT+uyxrgmYKJS4F2pW1/tt2s+TPZ5p5qcfDdpve2Cqof/YeIOBHB8tV/e1u7lDIbxb80O4RtaguY1PeC19JuZKg/tQDIAAJ4uAYfVB/DIyHxoHEZmzHmcf4zKnR/crHVtDV02EeThOs4L9SZW4jfT/VhCRKaGigAAHRNo8gUnUJnpWq4dCIrjfyColq3mXPNec7aSqxoPn1NREdydE0AK2KdNnNI58hqICQNZICCkBiRoZAxQMvXhmX2ZaLC8tkmnx+IwYNdH6EN9aAx8gjbwyaYtNh+96IWoE8ZHlxsGgAaGgEjqrr5OQxfMTMWnEYYijXQjc4yRs0KtjiPo1a8D7RvRwMBjf+xgziLMzDGUfH9+jj//+OO///f//n/4n/7N86/+HKQAxTexu18IUf9fOZoW6AlAn47V3oHx2kAauGgn6umL40wy31zbSTPQBnpCjyg0o93XP9fexF8Am4FutA/mGScxUwoRfMIcIhMStylxRAAHvMhDE1z/rk06EQPzrFvHn2Y2tOPwbnnnuzDrGmZ2U2uEJFwdh0mFm5qCUa1aq1auqgBGgKA9FNRF+YPuqbXu+76s6yEf3YEhQ8Tci9Om1TEzSym12c2us7nprVLe1tTCBOYzVIj1Tjzv8RgiYDBhIDYv1DRQPKJ3kYKb180QdhDGFjhz+CBDBG/eVzVmBEeSARMfhUw+6AIVTM0B69G75GstKa4jSz3Lvv4KBz
/3W3ioPFZT7UOB2mZzGxNvjXytUSZin3niyR9lZuS387j3fVetIYTz+fx0PiOaVqlS2DWIiEUmOJ8v64ePT6fTaeFWi0VEnkoF8JqHjIgxxrSE0VcjIvftWupuZu7FhRCJgpo9PZ2PTdQKFFJKxPhn5w+edtj33cW9VGMsIBqQOC1LiDVVkWYsovfdAXguzolhtKmYD96JcRQ01rqFEFtsW0G2wgFDCP5x4kAcqYPrAHi22XF52toiGRoRGLFDmxiYDiDp0+m03+77vhshPz15YDt3NWlm2s3ctkR9RN6sJKz5fvym8ycgSYocg5ZiI7Q5FCQa9Ds1BNMez3inYg+W6RHWcXvjY+08XmKjk3yZZ69ZG2o/IiyNSlUHvsh8wjfyaL4rfmzCPKQeAkBHeOiCewbOaBTOzX/OOdeTRIiOgebyx1XXYBmjg79UAI2YA3NAoB5QhBjj7XZri2/1vC7ffPt8XqOWewjhw4entIT7/ebnzDlLycuHZyLYb1vZ7wR6ivHD+bxte4qRiGotoPLx+el8PkvJjKS1pBAiAqKdlnQ5nWqt0SChraZr5AUS1BhAjNg0c/HmfiGkNfESQ2AixXA6Pa1LJA50zVVqrbdt//j8oda6b2ggIaQUYoiOX2EprYgl5yxijAQcVPV2u+WcQ2BnDdV6v92+fvmSlrOqllKktngHYiN2IjKrIoIcCLWIIj3gbU6HDHN5Vk5mZmieG4BJe0G3/kc3nSsnEfGNPuwSRYdr8Iilxx9VPe3eUDoQkbk3PAOiQYMRNlCrtVYpVauQOREWaB0fFaDDEWsl5jYBoFcZuVWjE6jG4AJ4hJmZdZPNRsNM550B5xfDWHzzrTenGi8AoE9zfcsgw/R8o3FqqUPlAyIYmWlV++NPP/7TP//mer3Gda1brqUW0RbJHrasFuhdZIBWasMVq7Vu9+x7t0YfWH8G1G3LMfK6fkgJAV7NLMawLKdlWdbl7NEZAaum/tPUyNo4kK1k9/qcNwV6OeW+u7oh8ulXbXiVP6zbHiri41v2vaTYktiolmIEMalaq/7TP/72drt9+vTpw8enP//z7//VX/7FcjmZtWlGb6UoouohUua/DhfCQ+4078F0huO7AyYV6HGX7fj2lCHsYblGNkOQzhQ17mqW5zNzYZ8U/T7BPn9+PuEwOXRKcbs86Sc3nP5USvFAHPOhxdpf33kDbyh2pufRiGhm8+12hjp+d6eu93219Qc6YD2xWergiA9t6oAaGAPY9fX+9evX19fbtuV9V/BxxBwYoQ1PclBxVTQlhBhojeEUQwyYAkVUQgqMXligqjkLIkZ2wIXAXqnSkwFvHtOmIAh3WFF4FCZviAea191ftyy7r7D3WLMvuRoEgOLfJQOBVmmLTIgDiyswpATrSqluvBVVdaMOzBCNgXrVhY8DUK8WIQ7aM/wxxhSZmbdt++1v/tOXnz89f/8rMAJ8t+X/vY9OHjYNk2joHv56/iQ+vvNwzBZIKxwV9IwOAIKAEniVinVeegzYmfWBMS1OTTimyhHVfWu/8lFaYp71mdPpntgBEMk22YTj3rxFzh6rQKFn/IZYaFEzptlleK+Dhqoa7yOZ9kTgKNQXkRDjAlbVqGrVekg0r4pOKaGRmJoBGAF4ffOBacYcUkpVFm9uaXTvpedkbmqY2awRtaNNdKMZR0R5/In4QfZ5JtAZr5Tq7SUYGBFFjgb9Wisg+Rkcu5/h6I9CAFPlCB7RM2MPJXikq4ejgCgwh8GKyEQKiGiKPmNewQypyG4taoACpkBiWMSYBLFtWysLFnmD4nUonikeDAYCpqqeA6FeU8rMgZoEUdUwA8sCoAkh6QT7zpHR1My27bbv94AYQvA+utNpUdVd71btfFpNpICelhVPFkJgwMQBUbdt83QcAIy+NfcSDcRfxxifni6/+tX3e/aySQUABfKEQ6v7JQSAUsq+b/eXmz/UvmXxZkGBEEJMa4gQiEQqKAIAAS0xWfBxxnWfQiZDkpqZZ5jh6HloxUhMngqI3rQjUFFQEbxJksghSckUVUCqQer8Yz2LYd6eNwKcD0p1EDCA5eydMzMIRMtWzQGUkbLWnuecOZOmtp+x74OrsXlNOJRBNVVVEVAVMHkfXvVDW4oTiWgodTUtetj0ND33LxoQPSZC2BszDke3PuCCqJqKiSjEBxSscWPzcw3Z5JLR+ophc559qrIBAIGNqEd/zOa0izVcVoEj0dSNNgeStiK1w8yM+Lr3PogPaNJalRjRzufT89MaCDEsPjCwlKyqp9NScv78+dPldPY+xrzfVcrzaf3mw9PltJABu+ZmRsQYOYSw3++msnBgYtMaOTxfTk/ny/V6vXiuWCtViQSnxKBczZ5O8QoVqiKSAC8pxIgx0jmeOYZaRUolwFzlet+lXm+327quGEBMmTktiQkc+yaEoIA511LKupxjjLXKly9fYfEBEqKquWyquizLn58/IKIW8+JSiuShB1NkJm+KdmFu1eY6lvmn24V2jEVigD4bF3oPniHiQQbz0TSkUe9WACd5J+ROz03Ie29BT/UjI3Lj6jkyaj7RjYBjm8DrYWANAbA3gXvIWVVrEaKkAEdYF9Xpug2t6XGQ9zf/htpHSdig9mEFvmGKN2sIU8BlXGvW6PPrcZJxrdleGSJlvCki1RSREFgNapXf/fCH3//+93uuy2kVhaICAGk91VxUqaXTtfYMM93v+7Zt7puVLKUUAGDmbds91aRi+76rRjNzawYRQ0gppcDJbY6cy17zoCKX8CEE7zH2yJpL+GEPwTSq28yKiYiQoeTSAhneXs5sAKXWfd/NEgCgoyzKaoYIpAovX+/3+z+HH/jz58+q+pd/+ZdpCTKhwo4FHz/fWGnzn+ZUHkw91QclNLE2wvkuoOYKf3z4783RO0rmMMR8D28u9KhT2qjrGfoC2j0bjXem4w3NeAEIAUhtjjeAUBtZ3EAcEGjwO3XMVX+ewS/DNRqi2964sj1F40s0FGIP+CIigknHagXyQOS4eU974uTV9/HupurhfQDb9/3Lly8///z5er3mnKUyMIISojGzoA+gBiIgxUCwxLCmuC5xCZgCMUIg9L47AgSEplwBA7d/3S6m2SFERO4lP9b6xEDVEKHFwaAZ291IOCSqP01tULyKMNzB3hJMRkAiggQePxmYeV64hugDz9rAc2BKKaUKQNlGRV4RRGRA9dG32sT1sizSh5f0YJOJyOUU9u3mU0NLKSACITal/AYXDgBA/8XqSPXNa+uhXujTwv5PDptSgfOLiZX8Y23VJ2UHakTUqsTn5OEUHPRvt3ZPMK3VpHs3RJGamYRzjGmKpaq+fQRrdtcxphVHaTQ81KeMk+AUVHr4E5n0w0/u0XNmDMysbKRGFIgqAPoWIhJzgqQmaqKzfwJgI0oXY1qWxUBw1JebYisGP2AG57UVEaGW3mFmouDFVK0oxa383uqNIyXYrY1SqogwKRkhYtUiUojIrQLq2PQqqqrQpgsCd2fVf47REYgE5IgtAZqoJTACIiYX/d4YSQoGYGLKregDFAGQFd1VhnEPs16Zx
e5MaW90j5/SHiNk41kcVtG/0op/QHQ0M6gi4rquqrUUIVrWZU0pxRhDCPvtC4CGEBhb4U0pxUkTVGutCBow1D1/zXutmZFKKaK1S/kKAMy8romZidsy5rz7E4VIXk2wLCcz27ZcVWNMLkq8wAJaraPFGL98efXnOp3TupzXZUFgEUHkUiTnbKUJNYYQkPZt8/nyJgpqXqaCBiEG6PESDzq4CjmvS8misi9rdIgLkTYO3vG9mkruSQgz+8X41rx3Lg78GIESqCXn7EAgp9Ppmjd/LvvTyhsAFA8uoAkmF0ZUHkx9PrqZgIEPLw+h5PxwKkICMrAWK3x38ypqHb3Wv2YDZHk4ZoTqs3Gn9OBxBThoeAgpv2F7E+V9fNjZOoFHk2VIwOPXiTuaVIUuGXvgfLRkqGpYEjM3vCURHzDvU0DH1QE8NjpuBkUEKVDwBT+eyLSa2prC5ZxOSwK0hS+qmsu+bVtiYuYvt9eXl5fvvvkW1RA0MdHp/OHp+ePzU2LC02JaVXVJKUTa973sOyNUkSVFRLQqS0zP6/m8nkDrMwgAMFEAAUQLZIlykbjEsu9ClZkVjAhioKfL6QMIEO9bqevCgLtA4MQcXl5f7dtvY4woZuZzcwBJsfp0lsiAYsSIKaWU8OPHjyKy37ey57QuBpZz/vz5869/9RcxLESOpKopLEigKi6AUlqXdOobF0QEHuvEBpvQ1Bowqx/v6iYi7Am4mULG16d35rimB+mOHEWPiFeaUsrmBhCqVlGvCDWptaIFbo3EadvVVEspbXpzj9dAc3sUQBXQtCvyNtWts+079ezBqUG6R2p0YoSDGqeIz/ynmf3xnTc4s8/47vuvQ4vM8lj28XUzQ2jwV1WFOYQ+P/p6ve5FzGzL+74XFctVSA21MsYUaFlSCGHbpGg1Zcm71WKAkkVrJYDAKYSwW24CqhR/QRSWJTqOrneK9j5DuN1uW8ldtQWPmqo28JQeeVciE3FfAr3yG8xG6M1MY4y5lrpn9yd5QvfZ8r2dH9AUa9Gc88YhRk4hqsjL/vV+v4cQTqfLv/pXf/GmNW8s9RyShndi3KzLzLkfbxJxY4Pe7bKBw1qIwDE1drqudbLrX7R3ztsv3libLfRIIg+Zh/7zOFvXgLMp0vmieXTTVRCnsCwAMAWewCb8Sef448NzdRi8ccX5Z7+WjiaLfnfH+A3s4KUHo7X/VHHeoGnxESiuoGAit9vtfr/XIkQBQ5DasNabajMjtoAUGJcU1hTXFJcUY6DAwATMTF6nDgxkiBRiiDEGzMMHGOiwHocdkeLpAY/AwYgRz1tsvcDNev7Hm7CRRtAa0IA6oCCBukWPBA1+FczHqKm27LfH5auoESOQTlPNlmXZt3tRMTNDUmtmLTEPdAzsTSJ+h5L3W63ffverhpfTRmLRv6TvB9ApEKZ6UADwfqDpU+p/H8ri+NeaBhtp9I+7s21oUGtBRGrNhz15i62b983NvBHUZuYIVe6cubyFR7BoP5vr2SETzIxDy87NVhwi3u/3N3bdLASGTdUZ/Lir4SvCL4ULAcCXgucEpplqM59CK1EwAtDDNjZiZpxYGhER2dQp9UHhtbyHWUw87lV7Vae3poxn08eaokNvURM3tdp42vFIMLnR1HtwhyEyuNETmE1IIamqdNhlmoxFVeeaipiIGaRl8vwDjkZA1voSzUFd2sN2uUmkCETBzBzsA4AoxIgUEH24JxERtApdYvYc2tjOacMAujRvyUwkaKih7WHFHB1ZAUCtlU0e7TetZv3oPg8p+tynlJKWV8cdqbWWIlKqmhDgtm3nNaUQRUviABwcjrbW4pICW+lp8sK8z58/L8uyrAPfpVFSjGldYlpOKSUzXNdCFDhGD/oWqY68IlK8KeWv/8rPMLyCprPJKGBlwAy5ZAcoasUYzmBNcI9kchUpVVWNxxYgInnYY/AAEamaxwVMSWVYS0RtWvehMt8zz1Dopkot+90WGUwHt4/w/0zVfvifPHqyxBRCGPQ0vjJiiqptGtugRoBWAGkACqaIQD5OiFxHU6Xposedv0nFOAuQgc+Jht4hSURIpqACNosf7CHwgGkEj1VhtDB4UAq7oYCIRK3cdDDvvIY4MeD84C0AhBgG0HtT9DhWxvPS42x9AoXpSLwjhOgt+w14iTrgsmd4VAFEQHvjtTbH3oxCCKdTujydljWZCWopNas1xIv7/X69Xp+fn8/ns1plXr777jup9bSspzWpqome11ORWkqRksu+mdn5fAazGCOqUIrffHhaTykwPp0vT9rQIA1BAWhNSHajIqrEEAQwcKnVpIRAHz48PWtB5LIoERHesRREjjGpKgPWWlXBGmyShRCoFSaEdV3NdhEBsXRa/uIv/uLl5eWnn35yFJmYuIq8vr7+/OnLt99+y8yI7NEfcEw8IkT0AvFapFZhSrWPbJ0F77w1NoEGtV9R6Z2hMOxU7KaSerzHv2KiWkWC56mwEwJMEQdRJQLVwF5urCYOBqzqpaGtp98M1EC09ej6uMpHk33kLQkJaYyH8h7eDkb3LgIyLLb5fZz4AQ7bGubXs9ZTbZUdb958L0zGck3K6CHf8v4OH3ifyDPje8mvr7fPX6+/+8Pvt20TgP12u912CKHWSiFFMDP1fyK1lFxKQWyE0YW5EDZn4HQ6xRiJsNZ6Wi8hEjOntD4/f6y1ruvKFLe8AYAZbNtW9BdQW/0RmHld11GC6FTEoXUoiFb3uUIIXh5SmQIED31aB4wtpQTaiQL0Wce16E57rbgu6ZQWEcnb/unTp8+fP3/33XfLKbzZ1vESZnjPkfJibmm3R9+7S8g3LXww5//am/5PZkO2UYh/aHYI/9SeHn/qy0hTg5D1GusHkutJjFkg+xdxspJ78B0AwFQDjVJ/GgIZm0PY/PDZgfnPUOB80YNHTOcPQLfBnAaGiWXuUro2B3cFJi7+pRAMtkJ1Aq2lFEe2s9nyVDQDZRMR0UomxBQCxcjLEtMSorceWGVk8ngpKlAgYiBjDhwTSgtGj8Uhagk7AJiRRbiVkISxVtgToU7A49mlexfYMGMa3fSlIwCoeiTEWh71QUoYQK+4AgEkEXETwPkrhMQs5K3mtZRSkIM3W4pIFQEg6PB6RDhwR1QLEpdSrl9fEA0YYUIOgzcU/9/leCwVQRie4aMr2Cjhl/llHBND6Zs3rQfEZ0Idn3//zvh1Pjxjoao4uWee0Tk8QyAihiaybL7EYKU5bfifeZZO7ccn+R1sMkyqZNxnA/iYxhL6qcL8aaKD55m5IfFVUUCGZrCqaggcQojMTiU+L0jN1lMCN8FbiQIwM7RWtMnAtUPUOgCmqkJ8i7c7HtUFwfTwPVFJ1HowZohXJREBtdPpZGZaqqrStJdERIDyaLCqKgIDqgpUETNhC4iIQDC1zECb+YdHq4n5WE/RDvgBE0zObBgNKTT9/IWdRkQ+0FydAz3A2ypbvFZn3JI/PlFrMzudTufzmYiu1+unn38UkW+fL16Dh15eWytxE75PT08EmMt2WU8phbaYak9PT8uy1AY4garVhwgRkVrtuDLoIvun
nz5dLpfAOGAbQwg+ZsPMGCksIUautfUlDvyAnEszZ6tItW1zmG8WEamiqoSBiE5pcRPEvaBRJnqsgGLD32wBhToutN0diYQCOtJd6zdDxFqrmCJArdWc8Drgp8MLBjpktBlYj6sRsYjUXNjUZWsfoXH0Dc4CgplLKY7tS0QxxtHbPZjO6Zk6AMAIu1q3DqtpKQVMcJoDORrvfvFomqlFGrpOIm4zi8EUWqyjT8RAxda8h5PaZmpzaAYx+77P2OLDenO3fLDqvA6z/zC+ZWbZBzG3qDMONg5t9KhPXFTuNau9fZQ9RwrYQqoOKWcGXhhv0zzr1r1j7hUHAyDiWisHRIoxhtPptCxLCMSIt+vNUbZPpxOZfvr0SUT++q//ek0xb1sI4dtvvi05o0EkLqI55++++67U/Y9/fPV0cYwxxWiqkdCMUkqXyyVyAIDTsq5F0rqEEHKtVcULgUQ1iwWEQkSMuZhIJaLL5XLa75wiWCCORSBXKQQp8a9+9SsDuF5viJBO0QxUhYhMYN9LShhjzLnebjdTCEta1/Wbb74BgJ9//lRKqZI9cvTTTz91m54AqFXhVqUAIJKwAfOIaOCDKuxRfgIc9Pxmf83e9hS51vTIXQiBqAvMnpTwDUWu6PPD/WTcghHeTAMmjrg7ioEH6XLoXTpmbgiG5DVvwMyyebDJRuKRmTCQiM2xGGiFxw9j6HBKpMgEoD/CVYiIb8CoHvUdTDHNIbJwcpBGxsAmh/n9MWvSmdfGOYcAIeSWPKyQc/ny9fU3v/nhN7//49/8zd/8/PPPYT1vW7nnfaFgyMuyyD17WbvLydvttu87c6i15r2aIXNFxCW1tjGvQymleLIuhJap+/DhQ845peT9ng5AKiIceAhhmLS5qvqQGPcJVdUtePJ8JZFZcintas6/q6qntCCid5gTkWrNmAGqhSjrqHBGohBCXNdzTJzzpgLX6/3r16/fPD2POxmHfwlHYm1aYSDCR4sQftH86sBdOkUnhwVwRIKR4KiMcDqm6Sx/0tUfMvaB9uztAQe6WIOFxZ7ht9505w9FflcdzYXIw/Q0ispwxGKmsUM2IUi5iTlc4uN5j9cPXZR++NzOwRHQg90HJx7xI53jCNitMK8goT7f+WAcIiSEW7Za932/3+/btuWcS5EixnxSO+6/SgUpEOAST0QUmKNHTk2O9magQLFVTplVVaw1TFJrfuQmJayNjR9sDj0dNBxCX0yHIRh69uALsUY1XYw2RICB6t/TOB7xYWZtMxQMkRUBwdGXlB1ZMKVlWUNoY8aI0arknDlCjNEjCVUkxoWIyEA6rqHf+XoKFOO2bZ8+/dQwHWqllOBf+OiPf2QIBwDPf915OsjTAVyACISuRzxIoFMV6APnAjQ8+gcPBdtp3S6VNs4Ae1gHrCH0RG4xjnEzZqbd//d3xjywGTV68PKbi85vvhNfD3phVjFOY26rzFzm3w0v+RY5VMhVNkRInBgFyYoBIlZkYyAiw4K4n9a6YSyllFLxfE6IZmgIHBKZKFBVZQ6IqmBoSMglF6mKHGJaOUSUrVZVCabRVBBqCMaMwKpQDVri2oxAwWuq1ZSNY1qRZNcmj4wQgKpUlcBAiJZim+tiZhjo6+uLaTU2IFAVqa3noe43JgQDEy81JufIWuu+ZZFuC3oJitQUFyAlqkAFkARZzKpqMTWmomVd0romIg1s27YRRvSYdBUUwEABGJWA6bSsr7d7zreQokDe6w0DSo3SByJ5x7wWAYBaPObkepd93JCZbd3yRlMiCkQIJIha6nNKnk+7vV4//fxjzvlyubi9m4iu23a/3szktCxLBBX54x9+XpYlBvp5vwak8/l8uaznS1DV++3WqApURAmYAAlIxWrefCSj+vxJwVu+ff7jl1JKWML5fM7L3UfMXS6XUja3j/fbq1sMDUgmi4qel9O6nm+37dPPX87nJw8/V1MIxsQiNde6BsagO9ouctvrVhWRVg6kqrBxgEiac7VqEGJVNaIQIgZWxL0aghJBZQvEVfbbvps9Pz8/B7YMGZksMxibkoq2OjQDVQsUETdEZCRkFLPIxIHOHOF8eZGXlE5CfM3ldDrd73cwWihJvi8eKJEayNZTLC9ZHCcWCc0CEnAwUSlVyGKMsQqHgFz2kgUkIQeke5HQJkFLICSQEFHFEMXHahcV8h5WeY2QI2pWUQNFRiRVC4uJ7B6lBCQFEwExXfionUaXGoqIZCMA13+6hS66cZtNqj6e0SstvKfR0VYNAcgMVUg3IQYLIN5xEZAiYSJUqRFhlwohIpEgQYhbLkz+FFZECZiRgVVEALHUK7GJ1bisZkpEqmYIbcjkns2MkckbHZFrqTFGIC6lureR7/cd74E5pZhVqgoiikpAWEm3fAMAZD7Hp2/OT0mTSFnTN6TXxHRZwn79uuD+4fvL2fb69frrj999XNdYysLMAUV2se3PfnW533+utZ5Yar3GEM5LsPzKtbLgaUnfPT1/mzBR9dK3p3MAEDY7pagW73lHEKOIADEQIlcT1v2STr+KdKn7nyVSq1X3FOrpW/6a4s8vr19vd41BKb5e99dbsXo+n59CfGKLAe6ShQg+fvx44uUffv7ycr2fT2t6etpLuZxW0OdPXz6XUolISpVUrreXdV3Eto8fP3LA+31D0hCSGO1beX7+eFqXbXsx2FKCPR+lXIhgPc5aVYgICNs00uBAO5VTlKxmEo0EYC+lAp9PpyqSRY1D1s2I7nuNiZDCXb4iVwvIcTGIUsjA9j17MZoXPJooABJFxCCgYFYA1Pwfl0ql4n2vCEuMkYhDSDE2yN8YQKohBoLIGCKnkgXQzDgi+eRGkQJgHMwYa8mH1m8mndveItJsQTODHjD1MF6z/xit1y9lnwvX0/J+BCY374bKH34d9dzLrOC14SojEQW38kRAkalN2WVkYgZr/mqMkYBfX64c0571N7/98dPnr7/9/c//n//1b/7pN58Uz9eXets2rZrrbU0n2mtIT4i4bZtDd9Zacy4AxY2/nHMI4ZtvvjEot3s5n8/V+HJ+sk3rvRJDFsVqz8tJrcaI9/vL/b77ZKWSBRE8AIQOCc7soy8/f/7Zi0uZIQT88OEppfT69eunT58Cp5XT+Xw2wvv9vm13X+f1fAqJW4hNS7lW2VRAY41VmIh2gWuRE+LpdLJIgvBlv9nCT5flclnCErPaLZfvN2AmYAaylhZ2nHrymqlWRGxAHtql4kljd+ceeuSQiFv7nJm2jSOO0E1JVa1VESt7isPMS65a5RExGgzUK0BDsjG0Aj0G4q4Cs3uSpooIXuRiOnyt5lSqWnsoNwQ9KDkaASazVIqYmY/sChQ8yqaqZh5Jce/l8AZ95sRwLIefk3NOSzR7yPW9sVm7qerFU7hQMDERh69gRDQEpoDMOiYts8NEGaAcJSrDHUAkQqkGhF48QoERCQy0CCcUw/XDRwyX3/7ui8CpWjawLHdw0DhTBGVMaKsZxpWJDUCYlK1qrgJmgYpJCEFC9b6QNRAjEle04DqWAcmAe7YM0RJGIRBDpMhxNeIqEmk3Q2YOIVU
Fcb1LCRlVVVTEWhaEKCDySl5SB2aIngUBU4VCpBAR0NxgFkKzhZd7NhNBLagVpM2dUFWKgZiFhGq9xP2beM3wk+UvL/ZEi7CZ6p7FOKwhLkYKFMwEfEgd4RIY0WqVew7ntAoyxvgf/uN/+Ov/x79BFQAFC2At8zIBk9OfBHf5U8eMau4rebQIGoAC1hb4NQNQBO2elyAAgKDK6AAHVG/T8Q8AgEEF9PDwaMpso90Atb0g50OjHp8xM1EhAyRiA0EjAgwIoo5w1gBawYC5O4EtIe+MD6ZaCzJTj86oKZKBYuQ0RP0SfRx3SctyhHWseb8ImFLUVlR8RIJmv25okJaLAmHPbbXwf4s1PDqTrvWqqgYR4QmE0K38UooXDY4LdMXWPc6pBsbvZpT4w7sEJfRgnmrxrL3HEcd5u6RgM6/j6uUHiAguzqiUIubj2gxbXWibmqB9+gVMad8Qgs/JAfBZNC1OA70dTK3Be4xMAjMjttDXSPgguzHthYg6npqZPbatHd6tepR0SdprkBTb3rgV0CPfPGJFIlIKwFRQ3qqeevcXPIYBbCpOmEfMeYlUjEut9fPnz24re7FuKSVxKBq0FbegiOwq67IIWM65FjQTEL3dbp8/h8s5NSCZIw6hqppSooYxCKp6v9/dRIjQUFvPT0+n00lM970Uqc/PzxRYdtCtIHBMa0oKAOfLc60e8GOpsG25ij09fXh9fUVEDBxpVJNWVd03LaK7yC7goyXJYXo84QTUCjYRiFj0gCwiQvCUBwAAlf1ODGb2+nptKwYMhuu6tl418jIzQeIYj3iemXktNU7z6FsUs782s46l0TZR+pxDACCCQL0IqsqI6Y6IpssbT0KKHBULzeWekEjfBIHgTeXeFEhW1dAqpb1czoeysTcu4mNV0jinTmfo4tgG40/feIuLNfPRYPlRo6T6oBUO3u8owWbY2hk77K8qKaD7G0jVK12JEA6758iz01GMAUN6+DHk1cNXqI2UJYJ1Xc/nc0oJAVX1ut1rvhOmGsCT4YpQa71cLiGESOyD0XLZTDSGYCYgtWz3WnNKaUkpcQDksCRQW2JyBKbTsjpAS5AbEUcOiFyqEhE3doYQgpqhWuxA5pFYtcQY17CuKryFqnLP+14EmBXDRghaS953jiuyRx+HGBHVWuvtdvv69euHDx+sl9utMflCOc7ztm3runjZUpOE0JFUetLJ4+KllDGXbxZNADBG0o+NmMXXexKa+WhiI5v/6qh3Q/W8uWLb60dytR7yX5ZFawSAKjIgsoioFBjs6TfJzEjcxyvO7NAKt8azjJt/c8VBddh7vOdFeL8aMwvLVFFyMN27pRtvulYVqd4STb13btynx6rNobOriTaVer1ef/jhh7//T7/93e//+OOPPwZO4rVyCrXWWtQUU0pWWpfH2BfHJLvdbm1qpOpAglmWJStUlVKklIJirmfv971WqXu+3+85VzMkbNOhdNpsV0/O43NZhD9Iw3bKDY3Daqv38e+e15NZ8l7iUgoqBjyMCu31IA2CGEhMDWRZ0vm0uK6vtV6v15yfUkqMCL3RCMgMAM2A3NdoVQv0WB78hgBw3qTpfe3Qr+OuzBSh1URg23cCr4zzUjhrfk47n/9KhG8gOyYpOhih//qWkKxbqNYHuLmX3v+qnUwBHpuvBoIoTrEMRJxLPWfGF63wjk/xEZ7ngZseaRt7d88bTedvQXOGcaRtH1gGEd8LHERVzVkcTqYbV1hVDGGUqUOf1j1kHQAwhsDgcBRoDrYPqiqAYCJQEfFyWkBtTK308xMgEwIRieMaOrQLQKsajUSkAki4LIt32Oac3brYtg3VTueB0QjISEjICBS8w7yIQVUFA8XexIpqJgZxWbFWnzdpVkS0urwDMyCInAItS8OAhDZx1qeYmamCCLRcsja/1MgnOI69qLXqXb9+/frlyxfIGemh6Ppf9BhWE3QDY/AbTnn4N1w6U6Oax1C6t9YPIvL5rNa4v5GFdhSScS4/4eh7fiulrSE5DX7UUVc1AHVnRwnaZItxBurjyt4fZuawW0PRDHEnUwn6JGqsDSx5FEHDi8GpBc9/9UY1ZjZ6qEG38bTd8jvGyz58puvUUt6WQQ6DeMgdRwR1CaJuHYuAKjOGOFB32Ez9CugFONAaDLwSsD9Yu0K/GXXDhQC8+ZgABR42ftwwAFhL5rt3Id0hdEfsgOUZCwGPBX5j3c3M+Rl7baeIMLPDgVA3iHGymN/bB9THLvkUUezO8Jt1HhedLYNBQEuMRLTv923btEqMMUSKMa4x+Z2HEHBxmNQqoqVWFdlMEwcOCGb7Xmwz02Xa2Zb+5kBAuOVcazbRfd+3beOA67p+/+u/cGwVcmzDKqWUaPz73/9xWRYfYAhAMXKKq6perzsigmEp+6efX3788dP9viMiIYcl+ThUIjIEQAbUbbuKQTVUbZW0wcdQBq4qaApEQIE8tKtmx9AOIkLrAfiqkiia1et2J6LTaW27CVhrBTX23LQIGgTPOnd6tu4Q4hTj8L0fro5jJ5DL5RFN8JWnVmJNYF6r3EaegKjSfEJ8dLRMRGt1X9PLYnACMmui/EElH7ID1TC4R6TgKMEI8/0fn3wIFj9INzNzj3omVx2N1P3Dh/TpJ6Rep/RYS3VcwsGx8bBoW/lPj/n6NKcexFO1PmAA1Oa27HG5wbPq/WgB+n79shHvMW9mOp1Ol8slxih5FxGrLbWyLAvaibSuS3w6X87L2SG41mUxs+1+rbUua8ylioO1lHJa0xoDofnQdQJMIZ7XJTnFMhFhMG+wTWZolrmZYIpqKUTpAoSZyYADUdYlcFyTgw/nfcnLgopZoAKvIQakWqXmIlECAIdQavUQhhdxZakvLy/X6zUuaVkiwGnfdyLaa8k555yv1+vT04Upuq1N5FU1OMJSviCGJCINEuMdqcxUMehBRCzYEIDjmItkxq6pmqoMXK5xcuw1yU0Wm+E7ZF0zG3bMaMIxO6qqx6XnEUrvqcLV6ji3eivBOwK2Xjg9TkuPTXE6qfw3L8ZXmhiHYz2H7829HmQmcuj+Xq21GYpT2d4Qm+NCzSnKFTls1+2HH37/9//4n/7m7/7+y9dbVYwx7rXvgpF0NNEidYgUd6gclUdVL5cLAOz7Ppa31qrA27bdtvteMqLnCLKjtuy17FVUxNndTWH0DtWOLKWqSOBF1POymxkyx2URn2lUiqrmnLUKEGupSszMClr2nLddq3gmYPYq/aFyzmoUENQqMy0pxHgBgFLK6+vr9fpkZmdfybZrZoimlbBpgb4LOhc9+icPymmgVjYv/ti7N7TanLYen4LJrIKBazQO/y72WQv+ekoCzOcfPIjTu/5JGvZfD1I3EX20ob4T1v2E2lH9Br2JVuxdhcflHkPYs6lzEPAUtpveB2wtf9Pivne/zQCaZ9tbcVyDIFN0tVoJaH4QREQspby8vNy2XIoZMDINI1PG5O7AMUS0ij7z3uGpKQKYh7ANK0kIDMpAQF7PbmYDn0JEHPEFfUo4IAVWVUMFq2Ac3R7gBABVKnQ89m3LOddlQRHZ9x3VUk
qQyM1UNjIgAwZDUatVS61FDGRCFqQWqaHApZRaai7V8RdVVZGcXKACIqTA5zU9rcs1BbgfQ558Z13KiAhHQiIGFtFqwt2jV1UT27bty5cvJef4dIL3O9WO/wakmTenGiCr/iedakeP6om+1QaA0OADdAQ1sJ+nK5C3Vf2N/g8EBG84N8edHoZfr5xqVpAN389gmOsDeOUN9XIIoAfqL3TKV5GxQPaYUX9ztOWYGg3gnVp5c2n0AMojDw7ZOL44r0YgPIanW19tRBQRn0vSw8QP6tl66+S4jIcPsXVDtixtL+tXA+c6GK0jAA+yu98T9QZHdIdt3C5zBPN61JaXAwAT6GXrh+He7n9k2w6ESFNVimBmOEndvkyHJ+1v+uMwM4KNp/OBV9oLQLx5yZqVH5ga4qVNZbuuXMXBvJF8hjd1q3eklfxmZrfc7+oNAR0ZGENmChyGi3+73fb9HkKIy6qqIdDT09PahwsTBIqoqlqtdhWLYBpCYgYGIFTVIpWZewcXAAAhIND19ValQU4vp/XDNx9Pp9O6rkyJQ6xm+76XKlmqu+mliELRqkT29etrzjlyMLPrtl0ulxDS9fX+8nItRVT1vmeiEKvkXGOMIXHPxVmtCkjExBxWJLcDImNgglq0FlUHigA12KuEQKbGasYCwIhmSkDW55KDGey14IYxxpQa4guoQQigVlopPzoYX2e8Bx6btZr14OLBHV06+McCYUHk0StIBK6AVdWMkUGFDAjBg0r0OIXCus+PiAjDEkYGtIb6M/fmwRiriv2+++sWNvO8bt/Z45h5eWYNtaodB8J6QVS7WfSRQIYABv4MB3wrERkQ2BQM6+hwZubWFbZK1MMWISJGc3yUEAg6vu4ENArD4m/xuYlnB+MQUeiB3nbmHp0xj/CoEEKM0RsIsfdzb9vGZFqTVQG1EMIS05rSuixkDazYpIKJx5elZlRRKQi6xhSYyWxdEgBEDpf1dF5PpyXGQGgqpZ4cWYgJDDHEHIX3DKIisoRYpGazwMxIYMpIKSKTsqlKDVbPIdh6DsafX++RcONwjnEX6gR1SHlFCEtaL+fw+lJrfXn5coGnZYncR4cjouSCiNu2+aQZA3TeBwDyAcphGc0MXhVj/fzj53HFOcc35QDfGJo2tbo5pfS9Q9Uy4vSISEhG5HdlXZkf7OD7biaOpA/mFA09kgqKoQ1GbyFXF9eDFLv36Hi27t15wK619VepCEfMZeaLN08KA3dUcb7VnrcAr/mHnqjxUDIiAh4s3zUdD9Tc2bHsYscQgaw9+1iQdiddZDStD2YQtNrv/vjjf/ybv/3tb3/35cvLniWtT7lI3kuu0pND5AmQqmA++9WwiEmRXLWIhbSu56fT6ZRzdsDGIvLl5bqeLz5UtqE0kbGFWuuyLMyRqEBIzrbcgNY6P3Yn31sHfS6ub5aZVcmlZtESiAlQSi2llD2rqhDXUErOzrZ1LyaSQlCiUso8c1VE9pLjtkVlOi01ZzNNKSxLTCnlnG83/vz5MyKu60pMAK22zPGszfVHDzgDoqNRTpT8gEvR5dADO4xg/yEAFeARDBCAoE95B9C34F5+HEMd3DZ+UBCzSnr4LuLxxTfhCbdS/FciUH1rirdtehvZHw87jxA8HOBO8OPn8eBdL4yTAIDrZTpgIExbM2NLAbUbboY+jAHU9kDqR2cmT+DY7buBiWjLeyllL2gDApTgaEhDpP4+GXUXnRAJjPaaA5ISqkJVI0Fzj49oWU7o2T8ARmOkEEJkL8gyIkBkVQUTt/wcu80IjdBUzbCUps5ETBWISEGrtchgOp29pkzBqlqVuhfdSr3dNkf4EhGXTy7NVG6tAqKKmREDETMiM4tqrVnBGOCc4vPTueTn32XTagFJQc0coMad8153gMiMBq11VkQCRBc4nz9/fr1+/fbpo4kgx1+gnv+rx3+u3BQfEUf7m+YA0WatXhQahbz9ZOt5gPafiBjI4HYzo57LaQK8GylO7TRhSj+el+AXpxMdkv9tCH6wxmBnVzf2ePiHvaJqjgQNL+P95wGg230weNB/hj5LfDyRH2H8LtVExKZc5GBnf9DxGEQNBHe2AMYlxeGDCbuea5DEKfoMU7eKicjH0LHDihIyoBGRossEP6cTIQKQeHF1KxhzULjADA5kMvIA2mPA040dT4FTQGvkVVoWZVLzvq3cajsBEbkDsfgCvo+luWrHnifEaeXGNrRMYGAfbOq7wsBemYaOnDCkPA6cvGN5AYAjewUUuegJbWLH7XaTUkvZHXUNHW6Ukw3fD7HVTxq1kL+4HUMK5Li5gFhEgTBQQGY0qbXmWs0kpXQ6X2IKgdhb/1s12u3ruq6KVA20CgDmIrf77iAoInJKS9nz7XZ7fnpa1zWtT7XA/Xa933cR45gSMXILQqvqXrJYCMFEpJQdEICQKRIHQYQ2knFMtVYnXeNoSIBcawEAAWBBi2bGSkYCAJhNTCwEAqN73lU1pVbfa2jM7O10qiqm0FEoiASkixVRoIDAYARGpggN4yZs2zaw1tCdfWQiT1NarVVrobAyM0g1VcREANThXpybtEo8rYMOcToaeetgXXS7BQP6xSY6RN/j7mv5PZEYiMj7WoQha/wEMOlyABi4rLPun9XtLK0a/QOKGqI2n9VGRq5bBv1Wf0Fu9vyMqiIyNYSD5pCMetHBEQToVT7oE2uppf0HiNZIqPbUIwioo16lFJ8v56fz2jEpW6snUXsQNAvMKQRGSkyRU+QgJaPZEiKDqdR8u6lqZAoxLimSQWB8Op/qnpcU1yWuKSwpxBhATRWX6H1jCIAphZOl277BzcD0dDoXlR23QLQwe+1oiiFEQiukdWEK68oGpLa9XhXxEsK+nqjqrlDKDhvRAkWLitaa13V9fr5cr+dcy/V+oz6V3kk2xriuK4QkIg6muqxxXVfwgCwBocOHBfBJWcTY4c9xcmBm0epC8ghaIaoqAc+iFYCG+ejvD4DbmeYPeoa372BTUo4ra2bqYOMAgMg+cQRnQE5kREX0aMVhxR46YuTkzcyM2gg3FRHuUctZfc5aTyY0KURM7P7PW0duEP+b92dwpnfM+AtHo1L2sj0zr20hFnH81LY9RGSEoBpCer3ffvjt7/7hH/7p55+/iMKW6z2/FtFty/tevP3XE7Oq4LrDb4mnkAr3OZPn89nv1lE6ooGID4tyi4QAoEobxxjDAgHM0Kv1BMDhZ6RDrSCBdxKmEBHblHMObeL8vu8LnszM28sdUK3WmnfyL/rtLXF1YBspKh4p7nOJt21jpFKJGaXsIrUji5T7/Z5zflospfT09LTE1iMDR5Sq5fGaedD8qEP+2JQhdDbpKbfDwnsj4hjQjpK8TtITOQ2T5c2+qwjaw/tvGHDQ1bg6jNRijw8ikdYesD5iFo32BsM82BuBSdWsOEDliNf0cx6Ziv6th9t+rymGOeSHtLAIvfnKYIf+S1+rEaf2DSJEIghsVT1m1Ze0P0sRhECBc66OPohMClBVkIgoIAC3+YdArS7XMe9H6h7NEGM0ZDGsCiTme2lmVYEIgZjR1MBRaFCNO
XgkKiBWITNjBCZUTv5IIdCAo3PmanZyWlW1Vr3nsiy033YiUoSqUsX2Wra93PP++nIVg1prFQ9JR2gmhGeuzEwDUYwxRk/DtOweABDjkuLHywVqvbzsJlCKsDh+vaOCuPkJIsJMFAIY+iRYDw8EDDnnH3744Q9/+MO3f/FXqvYnihz/G453TuAvDBXU/jGd3vlTxyxLe53zHBzxjLIIwciHNSLzlewuwyGczUbJmJ/hwWSCieahn8cTbDNhW88lDJ2EbnVMQZOZU+Bd7t16rhLxoYJ0cBlMqaY3f+2tOm+PB5RRRCQMhCFwQmAiCCEpFi+gJwwwBoA+hnmsA+ureoX6EeorpQ1SNBAAv2kRKTYZAW7c++f7krmsAejxYKagYMzRbXBUVPBxK73yBJEc1glNSi9kF9VHQ3Ysbl9Zj041ctaGKICI4VjBBoxeVaD1JGCDghxe5fjwiCUjojUMISHmNuswBORDbI0P90pafkMu893OYtrMkJCplS3VKlL2JSaiUEqx1S6XiweN9qaAKzUFD8wcKS7L4vWfQKBgHjYjolNMqrptm3T/JkSKHJfldDotRFRrvd23bdscxX59/h4picjtvgsYOuDbnl9fX2sVMHg+FwJc1/Of/fovLpdLFvj06dPXr68iomJbyQAUY1zSyY0ArbVqMVHPQ3GDLcGRqvbwl5bcq3RAgQgZQ0RDyy1JomhVvSixxzkFACGEFEKoFRTIkEGL9dogQMTAVGGw2XBj3jA5tk6eloLWabZelzztRZMsUr26AxuKgEVCASKiyBwIyUBFfJbX7DtZjxj5Oa3Ha1U9LfcQD6beWD3z/7CA/QwtEO7UpQeZtZV8V5B8yM1Jr78RNLMcUAEIQkIIhmpgShZsqqnjXoANaERAzNYLb81MTRUU0UopIbikViNDavQZggV/aS27wsw+PBMAeoTPfEyLUwi2dKX730BGaBYDP51PHz58uFwuKcaBOXk+nZjhlJYUSbEyhHVdE4cU4hKX4LB7pimyCtxv2/XlKwHEEM6nZQ1sossSz2vaVJbACxOhEVhkionNDCWDolrx+oBIzICgFQ2eTieRugVeYorEhBiJmSkEDkiBCdRUjEyhyG1ZMnIROycV1rLXW8lblRhWIFSwveTT5fzhm497cWasXj4XY+wd0bgsC4RUSsm5vLy8ID2t66q9O0JE2MxnXtVcLPCwzGbNNNPqLLhmykFAgIOcVJ0bjhZuM1M1EdEOq2xmDdoXcFjPQ9W5jgAABQOfM8neq91c03U51+LXAlWfAO2DVWWcv38YiVnVb3I2zpvEfkPhfrj8n9kEH0pbH7TMG0k+SxLr3uOb1Xvzlenz0xnGVwhNAY0ATME8s+q+xX3PP/7xp9/88Luffv4iCjEmxHq93ikkA/LEIBKHwNAiKqwKZmrmY+WNiFJamXnfC9G2risix7iMlqqSa1FRBFULJiKhFLndNvUBMMCqyszm9us0mpiIQuRlWSKHlFo9Dgc0Nydy0VI1mZkVqUWqAxepqpOx17gCgIeKtErNRfvCEKKI5FwJ7sFn86isp1RrzbXc7/dSCqA9LXB5fvqY95CYGFoAGKairAax6GG1Zpzi5Nu/Ifg3rDG2GL2sA+Ah6Dbi+J2JDtNqEImfRKRZtu+o6A05vaWVX0ohDmU2P8X46puTdKZTH0ClvdEGEYlshHv84yOXaD0+ON/eMCqwp+iLine2+8c8kEGPtWPk0/D8djrMY7uEAWDrI2AAZEKf39u8dysqCFSr3vYNETmycRA1RXA9gshqx9CISEyAASlxYI5mYobEiyj6X4gTxRBDjIGYsSojWDVlREJkAAasKmeOAj4Y3ml2jLZjA0DzRWm84PVQzhrMDEY551K3UvXL14yICs4CWqTectlzvV6vilR9oDQcAdAIoWFAGBBDFIkSU6AQyNvJ+iwNPYUgy/LxQwK1su8VfFqFgJDh2OKG0z7a3gZ+ZinlD3/4wx//+Mf/yeshf+HQ/0zJ6J+aD/Eu09KquPvZdDh4PnbCzACcxv6kT9hKSdtcPRxo2fNnmHlCbZ+cqFIREXsC3w0MbOYWGAL1ROWQybNVZgf7/ELsu13slyLscyXXG4Uy7FLr9rCqjtE7b2OR9HDdoanf2KvYvdBgZmDkY4UAiLurg4ieygMjnYoc3HXB6QL+JKNxv2vGhpJifaA8ERlIlQwAxEBGagKtmLUBDbfqsxYnMx/9DgAI5BtGJIio1kb/DZMCJrqbNbRNCToTNQCN2mrgPccCx+xEgKPmCoYcZ/BebjPT1oWvRg/ZG0QEIAdBoXCUiIwVgKP48Fh9niZ9j6jbuPmHp5go7L1InTESQghEaVmWMQ4Y3IFSQ/LvEpEFXlS1FCllJ0AObToNElX1FuciIoE4JY5hCSFcr9d9381s3+9mxkgi4lBjr7fb/X5/fX31GxYRtXq5XNwpejqd12X55sPH7777puz773/86evX123b3JtN7IOMl/1+BQACBVSRltQO0VuyWdGqVu8JQlOUqlIYvembqwAwC6AaEgU6cgOgLVtGChAdfh3M+1MQodaK2uJiWRW9BYUB4Oi1bYpncv4PL7HvZkSOMUKX7NZlBwESASP4XCerRVXBzOsNCIwwMI52dkAwL1xhZt9TZ6tAzMxgwtOjESAQ1ke7ZNySqhICA4KZmBqAwQMFvjE9qeOkvznbeFJ/8dbvffdhA7AGptSE48wmHTDAAHxiOdoolRpQWogO5EAGqCaqWAUxNBA8Pw80tId2WqbaowMA4N2VvnoeM8Le503QZtulFEK4fHy+rGsKofdy7HcUQINa2SqjQQxIYKaVichHGZsSgFbZb/fXr5/ZHOLi8t3HD4gIoud1OS1J9i0GSoEjU2QKhIzo/jZ7H5BDpqogKAGS6ZLSEiIjLSEyEagxoLJRQA5ImaAIBrQYauJvns5Xga0qMyVEFNOyq2qt/iyw73fVp8vllPOzi4IBmeBMGohTiEKRV/aEyeXp5NvpSdJaK5YCAAHZ8z8BqXbojjdbP+y/maj6J70X0XX5wzwVGw5hBy30FHj/Zvu+9pZx39pOwH5R9q+Qg24eGhjGPTze0ltOmSMms+BlYmAKEN6cAcCjyhScGMmzj8jICFil2iN3vP3uI1vNJUBDVQ0z+o0ZYWbuslap1PHTzUyqeqlBQ5hHQLNSyrbvXz7nf/jH3/zzb3+37QJEBgEpcFxiWolVbd+2DQyY2wSpXCoSp5SSVw5zAIC0rDHGl5eX233zmWY5lyqqBnlroNMGWmsVxBA8Cry3xnhQUwVwbI/smUNnN2ZeYvKJLH33axXHwd9KyWay12JmpZYsVUyjYQsCV1CwGJi8qkhFwTiGAc/AHZtBwVTg9fWVGTlgzvV23RAxMiHibUv3+7ZtWwghpRAX8qI9h/xs9HzglRgT+07ACNi1QtAmwtr+dllq1KsP1PQx9v8W+2LqBzmOh6jKW7lNffo5TBG6wZ2DgRqxWXMOmyliYFNLjuoDoNjggtEvas3YKHMLtz8Y2ASA1/nujd0yHs3sMVTUu+LRY74Tg/zCOkyS57jP
+[... base64-encoded data elided ...]
V0ZJz0wnffpcbkCo6hoy1MIgJxKYVF0D0oLSLUeXh4WFI+r6dzTmTetAnhhw/vEUDyAkyPH99DlJrICYCAo/1HQEDa+7QQ1U4KbvrIcBANgQe7qAHQAE8NLYg+YeRCgIDkQWnf5wm/oRiNEAXHKhw3MHNrOGoxU4TQ1N2B7uvdOrpVFhN6DOEXD3zFR7OMozrTC/EphZzF8g8A8L6qoJOl9wRH+qDvDLR8rOQxc2aa9n/24TW29dziz95RmD6uC/9ICEMfiWgO3NlhP2FcRJAV9QvpBFR4VDqLXQkz2YP7Jp4PANh8D41WtQ98mqqS9veo9ktS1RnjcpxEEDNAcM1ipBZgjgBj7BAJUIgbcWR0ZtiauQcBdG+bJEQRIbMigojNTA90qx6R6f2Q8bwF0wgCoCmMxXUj8I3ZFsRw6swppWQGMUnvdLs58Qp2U0S08WgVvLQqsbo+anshwd4T0bJvWiuRmBo4CmAM6AEoIJATIhioN60Y3afowtn0Coi4715RlYCt/fnz1wUB2vndSpl1oUD2JkRvQF4Nm7dYOxPo8trQIYkskp5fnkVEqL6WC3XuJXT3BznlvMd9yHkJc5ZzNllba62WUpqqB+MOAm7PF0ZaluXhtLhZ3S/CvAiLiIJxc1CzygDQWmmm5jzl1cYsmRvG1jUYTXZVdUciSiTuPn2GIGSRGEoZpJ9RbkFFVIXwymEehFhr2y+7E5khUFgONx8JmAc1kQVFahbKiUVIANCt76GI7uRwZDM+8/sMKqzAUfBgxKBBZBfQuFCiIDNIKaE6BiBkkG1wZxa1BMCUAgzaS/sdeBmbV7s8TFM1zRZ2qDYfIoC3fYxv9QK+QQHNYLe/PwJQvDvIX3uF6ZiGwsyEmZEAKOdsWiKUyTkD3L70eGRmJsYZR3ZO1ujUsgIIM4uQ8+ARHUurLNaiqPYsNGS/1lKKmZkzdJMJiUUSJeLY2RsnIEjDjypYQzcmJE7rmpdlFVkk55xzTmvUv3Yre315vWyl2bKehaSUVrf94eGh1M2hmTVVV1UgbNXaogYOw27DEOCUUqlq1sretM49cvk3v/3E6K21/XJNdF6XJSXmvJxzZ5nrLbUtGLB024puG6C714TCVgHQHVJmGwO9AICu5hB1YrMO7InKgpmhGyINhjC7XC6Xy+X19XXfd6Jlpjrzhk/fOZ84jFYhisQgiQ82ERgBMREhiUgGaISJEIgEkN8I2Ig7e00Qkc3AXR0ckVpr5ITOzoPtrI/vkFWIZmDT2pQBQERUY9Km3S52DqOMgnE4fEQzUGvtFrweFGc42i428686FqbNPHn+cDxC+HVVRaH5z3lXAWDbNh+Z5DGdSCkTCHLs6W3TZyEiI0lyoKTNcs6CpFqB1pSSENdaSzMAIBRTMANtTmCtWW0GDkgsIpkZidRtayqE5CaUQZIDuBAoO5oCtAHwTgjohobogO67NdoVyQWptaatIKKkFPGMmTWOLgEhsrOAc2m6FX2+7K9VX0p52faXUr5eLs+X12up0IhLcvfS1My9adSMqjqV1qoxqjAQeGtWSpGnlcBMW87548f3Vf2X59ei5XK5sojkJYLvn376KYJvSsAohLht2y8//+xe371/+vTpQzwnMAME8AEkc4DouBxq/POZ4hvA8H01bQoAHkrq8/1Tm+aOxzdagOZAHt/bWUl+Db7B2Dey+NBB9Du5Qhhx4Wyf9682BHpzPlMmpxl/c25T/edHSqxuJwREJ9TpepiGH+o5ar8bxDOknF/XPEiqfN7Y6XzbGAX6tj/ZO43Es/AUQ0VdVa2BE7oK4cMir6+xSajt+162tG0tJyZ3GpxlZE7shGTeYucngXvTqtYJXRySJJn7oh1iATsiSrDKtY5WaK21qq2qNlO/0ad5Jxw1M4sefsjkXjvrNREJYa1V1c1g27bn59eHh+3p/TszMwUAjZkjEcHm8bi7zRnRKQEYOAMjQkQOZtbmPScGIIVOPhThEzosSTKxqrbawHVZFszg7uvDuTCtD2dYMoCrW4UCwJ3gBpTdoDNfRGljxAEIEDOZGJOZOjSqP8+bqAMAWLjFaG0DgJlTXwN2H8z07P8mzTAqBTAAyeQGfuPROerUbZ5tSDmOT4XIzcLfrGWYGR7YSSYEoEdfI8ab7aI+Q+Nd4mflYlyFRwY6LfybIO2NayC+I6MKWbrXym5neqQ6oC7uDmATuTn9NY6AJ05YmPnGFDxuDwD09tTktDkM6th4Ya9c4iwC2TBVNtY74gCkjdPtasBjhfTxansoNnbaVFOACApv3CTTv7qPGZHbM4D7TsWdge5ek1OUCtws4JpxwjkH22fwCYTGomoT7kM47v32BEU79rW9/RJizqd/xUE8AcBdwZprs1bJDL1qaUGVsbeSgSFa+QhOrODVDKxpIWvNVRWC8RoQgRxyzuxGoHvZ//3Pvzx//vlP/54/Peb/9v/4wynDOUsSXhIDigGB+2ujupfW3Bu4ErguJkaLkaFkJ2pa0ZGBW23btl1Ag5t7FO0kpXReT4+PT+7onGFNrLDv++Vy2bbtQRjADCUlcnMrRZjxtO77NZauSiIKxn0AACitICJyT6tataLNzGJ4EJGAByGnIzqoadxnYAYRZ8KUWVDNeMJIwtJ1fgpHErOKTqflTI7bZbPzohYsNh4jlA4zSusdjCkhBAigbmqGRG83pfFh8BV7HcTc/ZgQTpXGsUZi6sLxUCIyNxzPzjDi4ByDrkGqWq3agHgFQ6mxR92x1EaDvzuCDAxfOOLI6WfHuX1jFv96lng85TGN0P85g4O/9gore7MJiDjYVlXVa8VYGGN6DAUO3h1mYVVVgyAcOskRAXjk1S7CjCwEBUYAodPKmxkHORCiEEeQ7oSBUEgpLZKY2bQCAJqjOWVABFdTK66VEZgEmU+nh7ysKa8pryxizpdiZvZ8fb68vLZSy7Uw0UM+Ifq7d49ffvmFmTLL3umkQNVa83t7NReCg5vGHK1pI4fEkmXJkqzUZraVKwCYeTufOKddQZAkkbsnwSWvgmxmrmpG+34tuhNBcjIAdAWglFJpplqJRYLwWVWD8XzVJBkG34yrgWtaT6fTKWCtLy8v1+v1+vKqtQEl7zFd7AJ1CEQ4EwAQg3uQtDqSz9nP/jAOewh7SaX/NVxz/IsAwPsAT8jfzZKHPzMzAzU0kghEbupGHYWOZS/WDNGXJeWc67WUspVSpvwfVdiBojgJfRXbOGEwq3U+KTxkwnQY4+HDSOEM9eyQM7xxc37ImZmo72g8+Czv2XKvDeP9YP+9eiL2ii0jogAisKb0cDo/PDxkFgUMyop926o6c0rCiLjvO3kizEH/HlPWDqjIqmaXnUsRxGJrUwcAFlRkM4i1hYpYrdbqq4Q5J3TDiLu0BtGA1qqqiRGpLx+vZrUqohEJsjiRNnu51q+X/Y9/+ksxf63tZdtftn1vWk0dkR0D5j0vn0a/0czMG0QZdBQ0wz631rLI+6d3n7+8tlId8Onh6fT0tCyn6759/fr1y5cvAUt2NRIkorpfP1+fU+LTkn/z448xOw7ugLd0hUbc3O9/U
Mj0aPKuoNZ/vicK6gY03JbdVOPuv79WbrtNEh7eTAd0HR6iWABAos42YW6/kjca4jDod19zywmPsdO8hKkvMxk7CHY/RimFBuZrtjsAIBrRR/8SdzWxfHMoH+HH4dRmoKWz0widv2fkNmEqADFm4gjAHZEEU4JaOpev+5rl4Xw6vWwxquOOrVlpZkqIrh6dLUfyGcqZWStlOa2qamBrTgGzSiK9ANTUBDHYI8ZVRC0yvF/vGag7dgE+UJf3DuEtH3ACJ2ZgZmzWGDJ5SqmU8uXL87ZXB8o5ozgAtKbMLILmUGvLxACg4HzLDBAdDQEIo0Oo7p1rBGOtSLQso5XKIsyEggKmtRVtdWFZ1xOal23/ul1akr97XOGUg2hKtS6cFNTBwcAt2J7MEdwVRw0ADW3khA4tLtMx0pUbRJ8OzC8+cadooNGDG0t9Jjf8UIop29BbZ6MgMveshEi7HvW0H82hjaTgKIdzhSAzIxEQQeB6g7fsWJQZcst0Z7HNrO+AOhh/PARdIqzapn2bvyei2Qj1g2ofJmcdhufDwR4ckfOMzYjI7AaQcXcYqyDtZn905oQeayf8/nX8zTwPGxyS5DD4tTpHGAESYG1qrYNSGGOdZr+POAr2ADCmXTVwdEcXODJJMDNHInIzBUpRia+t+L01tOHacRIbUOxFv0Wybx88orsRMrMbiFrRAywnys/u0GKFr7kpIM/eIyIGB9v4zWifAtMYeiG9Xg0cPfgqURAbgLue11yvstvuzRh0TcspZ47Y1cAADEHdHJzNwd20mikMeH3ceUDQ7QoILExEreDnrZRSrlv+l3//355O/OHd+cPT0/t3j+8ezsuyEMOVnratWaWclizMDtDANv769ZpIERt4ANC1tbbvtbbLsiyRa+WcCZW5Xar/6ZfX1lorzd1FZFmWZVny08Pl5RdGQkXcSkJAhxNSbWDqgkBszWLDB8Y+xn3fISidEdVRVZt69JRrM7MGyMwMgyKyueaUUkrEwIyYkjI7QWztAIA29Lw126uqKAcdpfvT+WFdFmvghmZgaGwW6EYAiMXJTn3Sw8xaq62RSiIAVWVGMyC6cZoouMzhz/sK8WGq6mYfptww88TeqCpE9uiE6K49puwYgwZRNYThG0IPiWTQ1w0lHQW/kBAbTUsEcLMb4Mangb17vTGLR+U66suNVKPbHji+7dtXvwOIOFigu2VE1D6AaSN1MTMjm1PdhwkrtVorISTGAelB6rS0eqvMICM5dZW82/8WW2kQ+84YQQqQnoGWUtwMKRaWYy+peoc8eTIwAu45CvTmMANlA2lGbavbfnkN7vutupgkelhPy/mspX7+8nl/vRKAEBKAWkV3cAJ38kSewnBHEkgMApQXOS3pckEiiHUnhkhAYF739scvfzmfHiUnFN4Vymulq3KuVtv5YSXwZvTuLAjMCEJ8Oqdm1WsvufZSnXef0aylDCKSJW2Dl7KX9txLLZOLa1mWIBZ297LvWlspBQZUcsrPXzOz8xUFtGNk7INl+2bt3d2tNWgV0jIIpbinQ+Bs1gvKZnZgpQULcK/34VEAGGCcGtMiDs5CRGDW1KqD0gEydHfCkwOyi2+f1QjvfoyA8VBPtRFAzHuyLMt9YfQ2djj/Ow81Msmbq526rKpw2NM9Y2siNGsRuhMRYqJwQ0LQgOOSmd+9e/fp4/uHh4fiJ1UttdVW3dBJ3JCYa9FEvC4oIiK57n1I6bk2REwKAKBW18v28Hhi5pQkpRbFlpSyu9eqAPqYKjMntiAeh+DZN6itag2uaWEDoKBd01oaESGBM0HDrdpfvr7+8vz65y/Pu+q12ute9tIcAYWFE0Fr2kxHkdcBAIQZzYkAoccbQs4IwqiqKR0AkGoi8un9x7/5w98v53Nt9peff9q2bdsuxezh4aFsl2VZgla01eu6PD0+Pj49PUDncdXBjdHzOBgUFBjlZ8JJNBqGb9jIaXQRvFOTHZO6mc756D26++HDB4vaOwH49uPYeyDD5xztb5Dg/1qLEltXXnK3+78COPSlZNM7THENzZ2Rkh9IzoaA99vSje8s+iAiYhsR4Bsr8e0/f9WezPOc/RD3uxWLOG+OG0T50L3fzbAY6OAmSKclL5LOp0XVG6kgu7EpmjIQakMzBTdgUNKunKaXWpckAk5IWYQAa60qwspAPggy57R/L3abGQDGAgwzaOqYMEqZkRBKfyU/JJAwoBAA4GKZ+gCXGVyu+/V6/emnn89PjzGlEg0NlgwArRkyIfY9hsHx6oCGVFtlZid0R/UukjT4NSKQoLmfMDrM44mnZVnyWrbtsm0/XV/e//aHj7/5AR7P4E5MpBb4HI6QBLsoO47crIcgRp0oJiSBewjv5HgsLt+HImjhnZsP6HV0Dq1rkM7C9GgMYi8cKACQH/KBCBdrXzRPfhS8u2pd/D6grnP2DwCiem9mfmBhwYGgfOMIprjG0dtYcA9Do8e3w1SueQS/QT0P6tn1qMD9y316xtDcWyx6PMjhrmrve41LHpT44EG264NqHA+9tchwIk5VVRskH8EaiobHCreI1AGmD3E/8AK+bTvMmMAGP/iba+5jTsja1Aen5Q2SG43PMei9l6MrhdGUe9vomGeizYKBkgmsdF6cJHK9Xtw9JYobCkCSiFlCfQAcAH20OABxbiUyBImokSn4jkwdyXDMX7IqIKwp1TV7LbvVE+Onh4d1XVei63U3g+rW1DxkJSgO3fnWEY1Oobv7CoqAGZCZG4s33hr4DotkrfT6i/7bl58Yf2LE4GaB9XtXZaScWBCgNQRbmFrZrRVCyFlEwoAqERleYgwmpfTw8LSkzMw5r6fTqda6bSUWTjKlCBmfEp5Pp4Ztb5aIHlNCtNYu5zU7OGlUx6pZg0i9BrmAAQCQujWDpgZApWpRQ1bhBAxEhtjyaQWMlgNQ8+JKqoiufUU4aOsFwliRYg8LmGpRdz8t63k9IYKqmiMgNDK5uRDw2+BEf+yuSp4kU1GjvmILDm7vhhwehdI75ew+d9rBbjWAiAINEktqZ0B80MkwKxIY6W5KuAvArY/UEz8cBurWWBindyviHE3MtxbkW+341deETMR36ghe//95Ha+RWaITAgEHFTneVXII8eiFJ7p9RVheGPvliRERzZtqVZ1wowiXw64p9PouAkDisc5EDZi888poXsQ9eNsgETeR1oKFvTkjABIDICO4qTfQptau29fny+W6f/368vXL61aLGbz//ikxl91++PjhfD6TmVtr16saAKBZUzVyZ0pEQpTMakgIIRIgMS1JlmV5WE+t2XapCBXMzMF1t6KAjMjIGYCb59qgtgaXVkp51wisXK51f9dWoTWLSDanJEtKO6A2N68VgMzNgVWDnM1EOsVIhCCzTtFVmzkw/NfXi9ZWDbZt27bttjzwULOcwqOHmbr5Hh88E/Gkqe+0M4XgTJcoFUfWp9oseC+QETFgte59ho+Z1VpgR4PEJGqLAtEcu4llfOm6nnfTrezbdtm2i2oTofN5Lc8vcACzWBDJQuu803ADD8fl5WV5ozIw2LCmkIcpODqyo/S+
0YipF8f3zBs7Xfh85/EN8UWxKoCwj3FEj92augMzO1AGf3g4vX//fsl5Xd59/fq1bTsCcyBmoY0HR7H0FoGiT4JorSozO2YHvVz2L76dri2ldD6fU4rJwMZc3CHk4d1SmHlNOSdeSJhAwAmwVbOm6EEPV8TMTFtrVmpDBJRGrTo+b/WnLy8/P78Wg2ttl+t+3UupCgDE0qhmcTNjkuA0sNCvvQFjImEmJiMwMANkQaplO51OxNzUtFQR+d1vf/y7v//P7z9919T+4y8/79ct1jMSeGsNaiXCUlEUifHDh3ffff9xXVdk7DMugVdCixiAmN90BsZ/8fiY+3+HQfZDOf/YUDtq0xvN6kfy8RXjUDBQoCGiMEsS85T6Ozs9LxxrN/eFP4cb+zpi59MeaKdxhgcZ9sOZz5Il9XHZXieK0dz55m7e7/HVx/+2FizfvevQXUZHx/yKFgT1qI0Ft9jbJre5KQB3m6SO7uZsBmbgCGqS6N3j2a0llsRiGmsAvTUo1Zlir5hG3o9I4E6gEGYnxMEDXQvkgAMHF/15d1It1oKQogU5IiKZQa26l9ZaW9fMo0MIYzKZmc/n81w9NxvgqoqOPEju1tP548dPrf3589cvW2kPT4+n0wkA1MH3jYiaNsaOqUEmdkammfUrOGJ08XvfWMFrbcwckcZR8GbRlomZxRFKa1spuObf/t3vP/zhb2FdoBXIwoAOjcEJECiWRCCSxwQhWzd3cWSO+WwY1WIgQMdbxXpwz3TRi38pDDadvp8+0tmjHI715sdLIL912OY59AgEcNJxxtpKSQkOeo3eSQpwkBrAoXn4bciEg/oVByjXBz1K16ujIzj8aONLZ83RxqTAVNW7AO9NRjNmCG+/OXgTdz9W8A/6q3QYKTr+IFPyOroD8fj85vsC9ubuIuwjf53aTkQppVLY7luuiAh9qNVC7WP0ziwIOfhYaupaTfHtvaQdr9aapM6qpKrBlTR57c064JhGPQcPUxy3pzAu2/oyFo7NcwCHpYWRNKqCo7ET+QgM+oDntKpxdwzB3ZvFCmDsAYa7myVnQJQgtgEkhPOy7M8vzeH948On90+JOYElZwOvqnvTrTWMGSkMyEDvX3k0r9GJ6NTZ8E2LW2vmtAM2pYaJHLAZuTABEYKzql6+fM4558RobmUD11OSc5aH89rUGDWxOBNYI8o5S0U0gP1a9Hp9vjYiAqeU0rbtKaWc1+AUJdLdKu26JXvneM4LmZ1zImIFt7LvtTCBIEAsYnRFCg2JEUFXdwNUx9q0mSOn0rSqsWQRtrpXdXfnUqNHZ2YELrEdA9zdc0rUkYSE5kFGd+Y1uUFr7i7EKaVE+NqakziAeWzaiLTGwV1dDV3AY01IihlCYiNi7lwyIx8jIvLmU4EjVz+q6DRJ00XVGpuvll4oMYg6ChHV8pbjGG+9iFse6J6YQylERPRYsjrA1cY39pLYnQG6sxQ3EwbfvN788viBnjePw7xRrm+PMzuEMIya9U5hBw6E5vKMGEaBKb6TBzAMYt5osNF2UHlfRdjczbyZiVmbFr8zqKM5aM45qrmlFBRurVhToGAqnok3p5SwtyhjhD0eroFBa82KJdku1/35+fXL8+vr61aqiuSc85evV2Hct7pIqgtr3c7nNT+eXp+fCQBc0QFZmFKiTE5hnWJFZs9wmYUQwciB3AhQiDImc3ZHTqem8OXnr9dqLtmItxLrjeFTsbZfFvTvPjy/Py2f3j8+nNYmwDllWNVKOCYgdNemPWZ1NUy4plxzRYco9plZa7rvu7uLiKtfr9c//elPROTAz8/PX758mTQzfowpDxJy9Fgz23cP3N1NtEL1essLAZERbX56in1sA1SNTa5OTHqQJR+Fv449wbEmrkNAtbUCs1FM3Xi764jV8aBr4O5qiuiEhm5R9Tkwk/vwMnedk6OXHIUJjz7qm3syY+L5kRkHWLvxntsBhs3MMC/nULwnIiIxa7dzm5A5CrodBvCHh4enp6d1zenhcd/KdS8dEdp6+fjx8dERW7NWNbidHZlQLqUxe6yOeLlaKfvrZiLykdZs3pptWwWoalZKM7PXdMmS1ixZ0sKcE5+SZCaM4RkgdauDC8DMSdWJq5et+VXtZauv18u17ApctcX4QO+2qYIqCeUsMbXLzGZWS2mtEBgTJKYsnBMyemYR7nh7JjE3FvzNj98/fvj0+7/9h2L+57/89PLysu87mKeU3LXtZWECAK0NGN8/Pv7d73//+9/+Zs0C0TBznSC0/qypg1MPqSAAots3YeK0nD3+AaLeuXoTJr0xuVNypvwdj4YHSBgeBAa6DHQIay/4Hw249+1/8+chP3eZwHCM/VM6TmY2veEwnkC3ZUL9i7q6dtPT02DvewUBhh+xcVEME5x1C3NHpfBwT4aCIM4I+NY/x0Oi6wE3781McHeoVVUZGbSd1+XTh4+gtqa8pyizozZs1Vt1YDJBIgFoOFZrIiGLrDmNx2etVhj9EmudSgPcWmvbtumghKnqAGiO27ZdrtdSGgAuyykRM+NMCHtcYZ1i6uYi+1RU7+NVbUTy+Pju+fXqv3y97hsyIbKkxOBuLbhaVDViUwJ3BALv+4p55Mzu7t7i0txUDQj5zUAIAAnXvs4HgVABSdJ6fjj/+PT3//U/w7snKMVbcQLumMi5CLATjkbXRHreDrOTTeB++yLrD7wLhs3ZVkQcVDTQBXuIDyJMFtMkfTki3L/cvW9a/EYLYJQ57qXspoxx+iMku40G4KjOh+rehUA+z/nuGzHEaCw+nao3XzfLcDy9+xLMTd8R8SbyUToZ5FJjlAnH0Gy8x+DuwvGv1yLjB2FmG3N0s/kZJ6GqOiaaJhvB8Em3bZg+5s18dL1ba9HqRUS7tdFvaBm6x9tMH9laQ8nujk59p2kcxCwYa+JkoOfigUTtiNi4WdRt9d1UBt41dsRMS2lMkcT3zkNQyIwb10/PHSNJBuqEq8gUJodGt0TdcGzcMjNyUL+VmRExeGXA9Lzm18RW4MP7p49Pj2XbKgKvCxCq47U23K9eCporkjEG7ZMTgvZ5Pmam1xchUkBVQ8e0nIiTixhi37FqSs0I3cy0GrEhqWoD09Mpff/h43fv359W+d0PnwicUIWR0RA9gL5XRVV9eX59fn6O7dvaHEWgwa52ebmqvgAAYd+E9rDil+dLIlqYnk7r9eHhcckCdr2qMGWBQA0D9qX2CWPzo7mhAVb1UltVq05bqbUZSSLJ5p3i4sJIANECYqYlCzMSGCPlJWVJIU4MWGvb972mZRE6I6F5a42BI8C96YDBoWo7hAWMhJaUo8DZWhkZCk2TjQOK0IuCHcVxY0kdXrDHf0QUlO7hSaf1iMwQEYs3VQWMaVUNq5Ggc+rYPVHbtm1PT0/LsngfMp61IhqCHVpZDQBobJKc4n/T+ZtkwiEOmBbnjaXgObKLb23Zr77cZ0BxZ2juTGTYQTRVlREozKYgA+ohWpoOcvrIYC0LCxOHUqhERDGNSz7cDM870GtPajGsnyXVsnteiEiIqsL8CiLHvooueBOtlbrXttf2+rJ/fbm
Uok3dgNWpKjx/eX339PDwkM5P7zLpl5ev9foCrZ1PyyIpSzJFkezAZgBOiHhDXGcxb0JAAK01dEgpPT0k8wTApfq+1a0U3duXl9e/fL1UJKdc3dV9Xde0nNtVn+ullvKc6PL6+vHdU3o6pcQ551K1tU5gAEhNnZldsbXGTVNKOeeQs5C3WmvdiwdfGdjz8/PXX76IiAO/XNvPP/8c6SIcHOeUmemQjq5oCtgxsJsyEB3FwH3U0sxM2+3jHMy68fSptwVCrw4y0D3SGEEhnFhcouvrq+BJEqfMIizJAGAv27EgMs5yLgPsEn7UArW7mf7hF+4AdTPIdvdYaDG95BT4qY9vbqCbT/ASHHr+pRQc5mJ6WBEJckVm7hNLZgge7rhHTGYA+Hg+f/fdd999993Pz8E/vJS9NbWZuLp7a16x+v2so0Ku1Zqrg15336sVa0TWfvp6Pp/N4FqKO5pZ2auZuRQRySkxQkI4pfR0WtclPeSMYEJoverPEg69qTlWrXXfXrb6stXX677XWgybBnwAGSaQmxLDklPOCzOJsMiia677te67iKRM6yKnJRM04bDVXkphdUc+nx+f3n/38PieiC5fvvz7H//t8+fPzHw6nWJgd9/3lYUZU2JJ9OnTp3/4hz/87ne/o5zB2zAj7qM1BAPuNZ7dDRI5n9GUHDy0B2cm88bewkHYpjWbv6EDj93wMTeNOx5ivg/dAW+41qP4zbM6ntu3L0Q8Ake/Vec3ktwH10d6dpwb/NU78+aq6ZYM3s4T7uhzb/lwOIQ3J+y3SBLHfwf0EZDmrJcgFF8fH3/8zQ8iZCgiFbF5A0XX5u7owDF6gCiIGsBHIk/cN7UwkYDUWhkQU9JqbgTQAYGtln3fWyvoLnkNTuOmfr3u+16ZOeclpUQesmMAveWOiGXbA/wSd2dqPRghRyPEmLVXhfOiqqU0pOsZ0RDAlIxEZK8l7I8juEJzsw6xIY/ZPrPmo3NhnbzAbxFRfzrM3EplZkmZJRNxXs/v0/Ldf/m73/zhDwAO+4Y5OQBHym/zMbm7KVkDd7DYKOH99wcSpg7JjpcCDFqHOIGexmBnBYEDuNnhjmV08O5OIYmYLqhmv3VVEtOW9/LT5W0SpI2YysxE7tZlTUE1HdwW4xvj+DYSpTkUytD94+1k7u72nVrN/PDNmc8HJExT5gFgor4PitnH3btC3TMb34550KOjURJmbgeTdCxkzoPOP806/TzLEe92UpnJ3obUQZ7gNwrU44XxwIDNQ3ViJTn76MAEGFUkTzwq3poJCIDob9neCHFiy+GbV3xv7Mt1Hv2BWzpqROQ995KcMwABqY+S1VjgeVv3cfPr3zRz+ykNuUTEZVnWlE3q0/m0rGl7edZaEj2QsCIoQm6yQ63QTyxuHZr7uBsppe/ePRKn6v68bZddFaCZl60AsQdfnpMQJ2ZgB9Kc3LWV0rLgD7/9+N//63/57W++Wxi//vzndREBb7q5WhYmAtW25veIyLJIXkopZW/gnlKqf/qLu0t3SBzPupZ6uV7aXqDBmuDT48Pr09PHp/PTsn58Opt7MzRUUldQVTVvTzlHFANOjmTmzXSv9vXl68u2bbsaEEtCxOZgZldi4hA/WLLsTYLgOrGcaw4CPWZmpNbaXpp9/fpuXZd1QYNWO38j9QyFFRvGelT3jgRmdHUHx84uy2Za9m3N8kZy9JDaRBz5xpf7EIgZERr49IgAwU91VwExMySc8S4zxSL1UKJpqkzbvu+Pj48ppZaz9fUnsU2YQnFux4SeEB4EcnhTAD9En2988BvRHZfzNog5BgTfvswsMvgjKS8covyOhxkHuRmQw9BjWANCICAe3r1XZM2IPZZGRLe+67WEO+8jEONPncI7seScj+wUY4/fiHtsjCLjZP/rBrq0Wkp9/vr6uu21GEtelrVV3/b6etnWh7Tt9fnrq7t//Pjdyti218vXX2rd15TX05rTKedzq/p6KWFhRCjnvCxLSmwNLojuKkh5kSd8NGfwtBUt++u+bZu1tJ4kn0hq2UqF5syyLOn0sD6+b4z1YrW1L9tr3a/75fUDfP/4eM7LAMoCIUtESUysBq01CUDRYQ1svFprQEjMjrjv+3a55pwduChdLhcd2wVvKnBPLjplaaZMqkqSogZjZnSkbiJKKeWFRWTzSFaISKbYH6WUmT3IJAYyx9EiluQOeAKwW6sBwIiIkNRt3/dSijuyYIw2BF/avJBwYkSEMRMAFGA7PyaNh9B2Wv5j1jfvQIR3b/4EYw/Y0XvGP5sr4t1BbldxSDbmaMblUkOEEHqRVJiCBmx6MSRZluXTp08//PDDv/zpn+Ljqnsp1TEY2izaEYmREDmJWwsDlfNp369V1d3VAFzMGZz+9d///PT0xJJqrYhsQJ1dve0iWkoDV1I9Jan7+em00hMIAggSsDMjIksSRr9eqpq2ul+315fL875fdtuaGiWNjq6RYp8udTACAbO6X4t7Tqs8PGQh9GytETgDCvGahYAdDEyZU617rSp5Pa+n7777DlD+4+df/vjHf/vHf/zHvbaP330np1NMtosIt5pzXpaUF/nuw8ff/OY364cP4BWs2wWAgJRMjNbcdnCjgKeJdb8v7U+b/MbGHhVnCkM8azvU2X8lhOkUwY7f/O0Wjdx/8s5Qm0dF20dm6B4IVELsPB39tL27JD8I7bTAUSE/mvfg/CaiqgZ+84A4AGjtwO0xTHQAFjoBoR9eCjcMGhzSQvfY0BWfnu6jmVlKCQYGDWe/CAmFoZYIGgERHh4+fvyYUtKYclMI79maqTqzqzqAE7t5IzQyZ0Inia/hnPnG0Q9b2dblwczcTVVbrbchQDNVLaWWqnup7p7z8vD0ZFZidMK1AYC2FgPzrhYJofuNuAgAaJBoxD+rajgRVd1LMXdmTj1+MHdUC/sgEbOqWlOtpqRk4D6HYod3Q0nHeB7QgvEPABBIBJfltCxLdGiY+Q//8Pfwu9+CKRDB6UzewtFkSdD5S8zRzczJ7H5lPNzlh9++NMCjGFn9/aLJObtO95Tp2iYA/lZBdnc+zI7dLCoRBPTbfQJEscsV+SzBE2OAJ3GUGA/q7O7oHoTwfQHSff8tPmRjh0RXrHGXpyTPjxydix9s/rzMowoP6Riqd1jL3OPJkal1feHAvL+FjAkHkvquvOLukshey+W0PD2dFkSsiVvO5boxEpoCMDEAKpGLoFk1V/OqWq3TAxozSk61VkciSZIXZFHVtmvVesopxr6btmYWnXbkW/H+FjoTIRM5ns9nRHRQEVHwve0ovSEZzBxelZzdfU3rXl7AirWGzo7JUcAQHBKmzHnfdwQOOo6ICLctMntXc0JGSoq57DXnJ6MdANiJvWDgXQiwAguCOkBL7nutBp5PK6AJQmKM5dXWGnIGonZireDuzmBmtZkrJhGrtpXn7949LLYvWD+u+NK+cv1qXLKsgoik6dQWb1tTystruVbVYl6jiU2InBF8T7Iu/GFdfpcese3by9evn39+3UASFIcKUCRXXHbPxdGA+UqmmhF+eID/9H36MX2B//hzXk7fNW
yv5kgEuLVaU8mnBIy0vaacH7mlBYy5cN23zdrlHb0SYGut7Q0R13WVlFSB24/0Xr7q9afr5/9xff3fX17pT/CU4R9++ze/ffr4IaePDw+njNqulNiUft6J0/r5yyutC/D689fXUv11uxosW4OKjikr0LWYMsu6+vNPp2U1s1orYgPf+8ov8/P5nIRSSqfTCc1F5OPHH7/s+3OFP6tQOvHj+eu5yrtf2r/+myBZYxdSyTtbw4p0JdqlLeYKAOf19PT0JGReyuPjBy27KalwK9UcUVKpNRjBzKBsdVkWcHEDSbTvOyERJpYFmKQ2x2cDNYRr8xMlYFFrbTCjWm3uviRmPlXVqDgySNPmblF1YSTKkjJvuzHQsiyZBdzqvrfWEgsYa92Xh6SAZt4MAIBZHJAl77UCobp5AyJAQkZx95T4aFCOHtcPad60rcgIAc/2MMdAY+eP+9hlExOGiAAg2ABAXRGQaDEQhayNSLntFzCVptAUiSmJgyBqSskJEUgMW2tFNbi8U0pJyN1rawAQixbVxQ0yihoBERDvbX/3cK6l9paJGydJKARIDsKOoOQlu9Xry2J1fViery+A1bBUqCQktIhI3UstRZY8hhrIvarGfHVFUqG2PKT37x8R8fnr68pFRBAu7x9PP/7w4WOuq13ePT3YadHHd/t1C5OVV1kWtBOb+OX6KsKEiFW56EoZYd1hffWcUtrV8pOcH5722l7/4z88Pb/7Hllzq3Wl9psPcn7V674ZYCY7I/+QLJ0//ke5Pr9csyyF5KeLln/+1/Lhw/tP73NegThmJJf1jLTvVVOmBUWtIqWFpLXq16a0w/rADgSWmB+y19rSIv/8rz/z6enxh7/5//zrnz7v9WtaL+oMUZ/pTii6OjH1kbjP5xJRNG6JYG+NgymawFpEM/l8Wq6XrZQiaWFGSajm7pWYgmmyts1gcUoImXBB5NY2ACBywFhl44BEKG6X6pJY0HArsO8M9F7k+w1aU84L76UXjgmVqTmLs6A05sJwxbYhvBN/hKSeqqGp7eDAzgirNQyugpnd0QEKHvExHPGfFhR7puoBtrtFxsLQkXIO0JcSGUImOSrjvLFzo517pwZxgNqaJDerpXbHT0IGcC07AOTEqq20TUTQ5PEB/vt/+7t29f/xf/7jXi6PgtBsV0PKzb0pEGFzAddLQ4CFOL2qwuvnxIyIe1MBBMC6t2KASi9fd5JmCBaD0G4AoA3BCgsuKS1rrkb75l/r/ssOj2v69HSSNdGCKdcsOzNsP/7h559/+vNz+drSi+dXBSM/n+T55RJmCBFJHCRiN0AzVkN0baXVS7VXWfI50bo2hEJK15fnzO9fX1+fnp4c8kM9WWtA9PH7T++fPuhWXi5f/u1f/uUf/8//6/LyRQH/7V8vD++e3n/4VNUfH9/R60uSdD6d1vPy3Y/frY8PqhsvadsaEUhQdAD0CBgRWAYz9B1kUaSTqURoFvVCQLhfut4pj7wpHmZQYUDLwD3waAA+J9ZmvBc72rqQDKp6ok59ahjOxdxdRxsZHEwbAMRaQgff98G9EWIYUykHeH/s4jIPRmRLhK7AwQphO5IGcw8EPx8Jj2XUzbTPuiEAOLq5WlxeDHKHQ+kskmN/bVxV4L2nvnBPThEGiyQaMrMIttai1kPjbhuwo7s6xaiAu3kbkbQ5bCjgpvtlW5YzlP3p6enpfGr78wOpoe6kqra9PF/ysi6ffGkGaK5WGpmRUN1JtwKnnNcFnK9VGckJSpQXW2Eicy+taVU1cCUzU7Ptqo7U1F+vTSSn5QFwqWWPCSNVdSQSTK6snRSEA9hpfYenqq7r35S6gfm6ZmGo+1aqrw/vLn/5CxG54/W6t2aUOtTi4ZTNUQ20GQZxZtxhZnLXWPBrk6GKKrRjb8ZVAN0VWi1W6iJJ941yFgQH/09/+Af7Tz9CKiAC5uAXQCZvGU1LIaK+tUMRDAQZEYn11otGBjIAZEQ3sKi2BPIO+kSJdNIw70MBoES98djPECKx7eSyQfM0J7/GVAkwp1vWF8gmdzDT1mB07boGHbZA98Ss1Qnj7xSaQUR/wBCmJAAwSvOH+os5qAFRYhEkdydmIAqYdTcaQtETAQCtGpnUPIHJrhKdLzgUWBGRkN0D1RO/DCvErYUGRXrbu2tmDmATfSYiSNRaq1V7RX1apHF88VEr0r65q29nig1OsyR8DBOPruuWWYrEm2EAo+Mk7kxe/JcQYQK0SLWDiyOvnfDU2P0yYs5Dx9MRMcjW7pqezBydq3l6t7ZkQDoR414gIkfj1drMMxGmXQaY+FXETGx+Y8kDQldTVUIOj46Is0jgHYDYAlRDs6zoLiLilBOu62lZls557Q5NEZGQGKwhiwiZm2pmASQ356AMRuZAzpgK4brIh4fTOb+n7z+W31z2ff+//uVf9wZfN2jXYqLLiRDTtWzamDAjlGX9AJi/PG+rG2kl5X3TvbW9tEvZjQ0zNauvL+10OhGBm4kIA7oDo/D6noiwGaSKDs68qRf1xWomOuXlh9On99he90vZKjv8v/9ff/zX8x//89/+zcP/8z/ntEBKTvrTT39OsJRt//l5g4u9bp///PNnNYvBm2YKTJKWBnRprRpJXs502bcXVWXGZVnAHQlOOW/b1upl32spBdRUtVZ1h3cfnxzo9PDu9Pj+8f33nz9/NrPT6XQp1RFvdMUApoDoguQsTBgCHJ6ptTat2Njg2YXNzBgP47/WiChcQvRYBCgm7N3d1UiW4cJv0V5UNM0suJ7n0VT1th/qdqJuY60Fcae9kiBPPAh/KDxVPVIOjBfgqLQdjReMevC3vzy+86jpb45wVO3x11GjQsLDPoChobcicVSFiWelvLsoP4wHuzvTzV3NCcM7K4QIh0h9/omZibmZqvahl+OZLMPagvWStogggFnLOee8EEPdybU1FhB3ltOyLsvycD4nWT69+7DvVVWXZX98fPz04eOn9x8elxMBem1a27vHp/g6dat1L/uuVuHehBKRD2O7pNwAlcDd15x//P7794/v3QzT41bb9bI/v74+PL8+v+4AxJKenp4yE6F99+n9dx8eWivXy0sp9S9fftq2y972jx/f53VBt1pB7QJzjx+EGqAHBDHHXmkCUwyeG6KUkhOfz2eWFMZKVQno5pVn/mM9ITzCVG7hRe829NrB/KXZ7WcdxDaqRqSc5Pjx8azNej3TCQ9Qo/uXz7ceZ7QMzCxao0GLYwMlq6rB3tPMkMxJDc0NIMoZOKZeDlc6NSrK9senGRce+M+jRsy04agpU8XebAWYxy+lHBWNxpT7zakdVAAPYcSEuJ/P5++///777y+/fPly2be9mBHZVpuZsEzK+OhRmLdSeuOC3MxsL63WHikYIOfkQIaxmD1+G7P0bOhqXlr1TRvCRmVh0lJR11X4vAq4IIskJLK2tzXl07p+fb1oa4QuIoCUUpr9BHIeJK9g28acT+tCtDICIyC5tcrMEJMcDoGdiTnhL//2ZzP7m7/9w7IsAFBK+fr16+XlFRHBvbbiQKWU6/W1lNZKfTRdn57eP70DAURcl
oXX1eoeceC81WiGEW/J2xbH7T3f9j0Qwd+2haf5+0Z8bz/SN0eiQYV9jL7eHhYwaOkpEkWiAD4ctWgOZmMv8MVfb1ekpgNlQiklb3Wqlar2LVBzpQQaaKyJG6N09tZrvJF5PJRXANDd3l7FCN6+9Tv4Ztrw4KFCQ9+06xHRVeNm9iHG1hDxdDq9vGzMhQXFhRyQoLb9crmsCxKgW/NaySpFwwwaurm1NWXCvho0SjbX687MTLfTU3d0DufeakeEuvvlutdmasXM9qaq6gBEUpIzc61VhJiZ+i40Df6Un375XLfdQZckOYlrNWvoJiJh6iJaJqIIUGLth5mhd4Mc1YvZgT0KDyLOhYd4mAg1962Uuu/LsuS8NDcSBqM//fyX30DPrMCs90zcAYBzrJANUbA5B9qnUN2DkHcClILbvT+jGSc44L1xp4OhmzJylJajXCF2qHl3N4eG+RSJjnA+RibQ5W2+E49G+WBv8f5L52+O4t1HYMxC0G+C+msuIE512gcf4OqhBAeA93jNdGy20CMhjId+K7v0NBKmmZmiAvd7kt76NRwEPvHu6fnsAImMG+g9lewnN10UAIA50MAGtDa/2AK7fNDbEbfdjm9mNUbJgcLKxxd1HpGxlQ7HOJbTbYBy3lNEFMlEVLSAec5ZwfFgI44RITGmlMxaszYy2GSt+jdbt81skCS5Uzc6zaxqW+SGyou6nSGAe0esauA3Ydz0mC4DAMw5i0jcQDMToczihLrtpi2JLIC7Wk4Lm7G5OqjF/gtigFVwEVzQV8H353WVEzytrbXHhS+l/fy8/fsvzz+91JeXL8DymJdaMgM3syxP6OcvX54L+Iu/ePP92q7bftnKrhUTmdj1eq38sOQaHbnY8EqIKfGaskgWYubTkjLnjGZMrZbPbptXx4RPKT8tiz8Y05Lbv//p37b/7X//47//8nw+r9/98Olv/u73V3l3+fz69fn1y/NLVX9+eX19vS5LArcshOiJiJFIhDiBJM5LwtZaA8OHh4eHh5Obuevj6bxkAQC0vllbGGutX79+/fPP19fL9h9/+bK3fz4/vf/ldX9toAopLYRojIAY228MFAEBjQgX4USMvRBQ3UCiOmtuCOiAY5n91C9EH1xkyMxohuSEfT9eYtnHGMAs5TLzJPIdEanHn0RENDYuNPeOyO9Wwd3M9n2vtWaSox5Bh6rfGC/MrMXKRea/NvB3tI9TfeZ/59u+jTngGP2MF9zb5RlzIOIxxPHDlL+ZkfSTxzQD6xt6fpp1v81G9slpRIybOh2wHejH+++Hzbmdo2O4cHBIKZ0QLvvV3bXWEptygCjW++5VEBZhQDMCQlhFFmYzO5/6ar6clmVZQGHbtvxgp7ycTychttadHwuel4WZnXDf91r3METrujK2qF9YbKwiSswiwqYL4taq1kYLv3t8onfi7pdrfTxlfTrX8uG6l9fr1tSJ5OnpycxaK+npxIyXl+cv7KrrK+2tta+ffwGwp6envCZhYkEHSgDI4sDWmcZvpYR56+LmC3EBOJ/PTfG05nXNAJCyyFYixj2Kirt344zHsQ3riZlE41ZxDLmZWa369Hi+DYUeMKKId8IzNeggupMT6s4lH/x+R2jfH7n/7IQ2uIVD79zYoHe9YnMm9i5HpyY/6gsdXkP7bPpQuE//jvpyFMipd3CYlTpGaW/ec9TB4298Dr0QzQrmjInXdf3+++//63/lquWX56/1l+f1tBj418tu1q7bZclrLaXWygCEaGaCtLtra0XbXmrTIINlIgTiZiNQQiQEJwQHbRYbx4igqRt6cmTAy9aE60OxqmiUkNjQCT1LUmnkUK5b3UtKWZiLGqFH6QAAmJCIiYGRHh4/LcuyLGlJIoQsGPit/foKAERoCO+f3r17fCqtllJ++Xz5+vX593+HCLy9Xna119dXEX44Lb8QaqkKLptsiNXMtC7Ebd+I3+clSzAjqLZSSQA7y2jssiKI7pYdhfyuoOb+jcXtNfu7INLH/PfhPZFPwv/khUTHr57h4JQNNHe4JaXujmYw8GXTQfCIW4LEExxm7yL0kTwaS86IMPjL3N0MVNVVPeGUWwd3tdvucRzokUPOfK/db7zGXXXn+Nc3cg4jIn2T8h1V7Fs1sc4v2plAwcBjhnyRWvd1XXJet73+8uX5Wq6pZhbHzuHab1xzIFPpJJZoFsbDC1H02bDVlFIsczdHMHTDpq01a61dr1tpRiSl6l6+MDPn1Forrbaq6hbYgRnQArggsSAABN3Tf/z0l1IKgZ2WfDqvS5IlcRbOOatGHa23fNmYiErU9ZKwMXNQ9qAhWCk+Gs3TezpFf818ToQxAyIQcJLPX35BZhCu1RT8fH7EJQV4B8zcDEc2GK1aAI9unbv3PRQAPpD5NirFsTgAhW9ZGeC3dXAHx8NK4VFBuA0T4izKH6wrDbMvSMe3TSEhP26GOXzdLDPfJ37T9eBc4nKsdPza7MxsuHWnEAHPIWeBoynAWNTSGViOKjCbEFO2Z0D419TqaGfmSU6VmcXESbxy+M28bwdGo/lhHlvRiAg6F6jN11H3jp4PR4Qabf04v7g781MGjn3M1SIvsoG1HTeRVCP1CqqbXnmNvuohEHRVcHdDMiQed99sLCB3t8E7FzcsLkctOoSIyNrQHU379o3+7MctJgZwts4rRghgYxKTiOY0tI9H1x+IA0FfvO7q1tTNCElVCSD22oXzZmZEyimty1Jba7Waaj6diROVZkiC0Bd3EqE7AxLiSSCjsje2IibixKSL2Pd//zfP1+3Hrf5+//GPf/78j//2p19eGtZm+MisWggpNeNyVUd4/fLV1XX3vWltzRiTsDXYC9RFtIk1bc2ZeUksOZ/OD9paAWgKaNyQlQQAGlJeqVpr+0a75fO6pHVZTuvp6fHvn0r9x3/84/Pnf3mm/Hz++fXHrzsywWt9fr28vr7GlMrj6d3D+0fydkrMfUspNGBiZMnEiTgtSxKRx/PD6byElVyX9O7pobVyysv5tCDiui7o8PLy8u9/2V4u13/7j5/+xz/96cvL86WY01LNOC3uDh7TC45o5IyBmmKURCzzYUbF8W1jHAAIkQSPWjBBCkQwsQeJWBILUzCt11pVlaRHkTFucTRkRAErUBospoff80yHbhp+CDKmxbTDC9H4QEbqo2B2tBRHtT2a1Dd256jgb36Y5+OH13yDuaEDHojpEiEMqEZkda01AOl6bbdg2u12/tMuz8hbD9wYMLpMB/N64OHqfm5U70TAQSjBQkU7xyYiAhIjILIg5Uw54GruArAGFQY5Ojw8nLMsZuZacUciWhkZAbSV11e9XjfAxLIuyyKpn3OzUYKFNQkwcXV319qsubsLcc75vJ62WgRpkaSA1lozyBmXvL5++YWw82XIyqucAAhIUpJaa3EXrIyMGU/fvT+dTvv+6fPnz5ftNbPEk2HALMkcjRE5uYNqLFoF0wqQHMGaam2u5uTeSACJaF2X61ZzzqfTKRF5kmutze5q/91l4O2JTHk8PiBzJ+jezg5V7SEtROS9IBjpBrLbjZvUrCVmR6PY
soyIAA7kfV8thpG/PwHoHIEB8wZnZpEo2fVJeyIiZiRxhAqtN1iAERgdAXqU7Af+s5uIHpRudnvMzOmmm0elmJc8nez/5OVvIKMHK3S8b1Mp4g06iB9hdGzWdf27P/z+sr3+yx//+OX5KzE+8Lma76Vd940ASyla6rquTIDmnpILqapG8keIWSSlAH5BIzA1cGaKoouZIRuRIToyRKkAiJylqdcGW9Vtt714TuAOBbTZ9vXLl5//8tPl5TUiaFCzWpjQo+4LxkwimBiZ+fG85JzXGLkVYsagFaxrIqKUEiB++PAh5/x8ed330r7uP/3yGQYRlwKi2/unJ0R6eXm5XC7eGrTqlYWJ3cH165cvj0/nx8ffLsvS6t6uG5B1vgiHLoQOQLelzW8eFoyC/fFPYXWjoozHGaHADR/wTR5zfADwa73BN8Z26l3/YGSGYwE9DrH0Q1ltSsgox/dr6AfRW+sAEIlZsEdxpi113gTC+4SVQlPGqdjEo/x6+/4uXDwILR5JYt60Uuf9nLZ9Xrgd7t6b90+j1J+FIRJijJgCIFGQCanWlGSVJeW811JNc6KcpWuPB5CC3dUdACm2rSsgOMTsTFgGMtJYZg/WCfVrq1VLKe6wldaapYVKaZfLlZnl9Fhr3fd9q83MkEgkU5J1zXXbW2sp88NpRcTXy/Xy/PL1tWor7l7WrKr4+LBmicVgrq3WvbXWWjFrKaWUUg3oaQN3d4Tg24BAFgAA9E3gGp1+Q6LeLIpskIicyN0buhL8cn3xzKe8PO/7E8LDx/fHqblo7ET7q5USfar5jAJnCUMgwck6J1I09n3IRV8tGE9Nbc4Kmt9nUGAOgxDmGJbM96D32Ug0NxpqcrDXaA4id6KlFkQPMKIdftukRO87wO9/i4hjsngGSwAQm5OO/qJ7ilF26W87tPrfxFrH30zJn3+dcdFU88iVjgCoQ0gGgTs95G5xFXcZ3NGhCAwglqoGdmg6165XQZA1Togc3EG/aSAQYEoJIuqK3xCllLSWOLiDwaBEVG0TTRr0BhAjQymhZB1Lq8ft4DdN4Pm6pe+HuzDxZjgcsB1s6KGY6rMnq6p8Dzkbkk3gzkwkDACtxIQDS07MXEi7yFK/rfGpAK8SUWlVVW+VeICYAW3NSimRdTNClsTMq8iOuEgyIHff1dAMzQWJCRyFmUV4Jc0EK/tKsJCehSR8gu8n0nzm7z+++5vffPr9bz78H//jn/7l369XvZIICYH75fJarldIfN02b9aqORIJUfQQzBHB1dRbRLFLSjmvj+eHd+/etVJrrSXWkakFczcRFSiIiqSLQW4mbbNiTYXXp48//PZfvl6f1dL58V9frv/0//3nvC6rptaaKrjbp/fvP/7ut48r+/6KbWPsCBZGAs6YFmQxXh8fH095SUmWsNfumYkJihUAEs5q9fLyGnWNdx+e3n/8IOvp8/PldVNauLhsz9daWnMwVBI27AB0AmOiKLkJxXAUYEqZcN/brfkA6u5oCkQiEjvZcgo+QwhWDut77SszmWt0gbQ19Nl1JxpVXkTMORORV9WxkzNSR3SYHHF0YFLh8QplnBaEmf2AO+3iF1HsrXrdlcfMCP9qMDqPOTUF/rrrfeObp7Zy6pts3D0aLQcPcRfKwLTj30TPR9N2tH3eA3Q9fmMPi4dp62o1QRFAjlBNW2s8JhVS4iVnYiYkRhIkImIiIVklMxOTC1Ja5ZTOqhXME/OS0rIkcti2UrdLJ3xzIDdBZHBGXIVPOa3Lgoi11r2U0iqhLzkHn+ddWc0BzMGcETMLEiQhQN627XK51H3Hsz0tWd1qLde91KoOxJyA+Hr9iohoSoxZltPDIozn8/l5T5Ko1nfLKYtItRb1e2FyIGJpIfYA5k4OOvpL1hq4QkXFtqT0cF4TC9K2nJbTeZVEW6/c3QWGOOIvGN0zv38peNR/b0Jw2AI4RjRjRdYbMbgd8yiihMiUENwJ8UYxQX4QpPlm7yU+BPDjd2tgJZu11tCtqWGKnUigyGQEgKbmB27tNzJ/c3xvqzZ3bYqjPM8fprOHaNQc/mr3SLnjO+M10SXz4PH+lFIMV/NYYhZvEMG/+f1v/st//YfL9vr8utfme80OUErRupftqqpM0MzN7HQ6qVn8z8CBiTmAT8BEDkYtboL/35T9V7PsSpYmiC3h7gAitjjiapE6s7KyqqwVh02akU/9SJvfSCN/AN/5QDMae3qGojkz1dM1JbMyq1PcvOqILQKAu6+1+LDcEYh9bnYPYdeu7RMBIAD3pcW3CE1NxSTFZOATXLRaRalGBGKB+bRUgkcQKXm+GplZQHOe4dXrt3ePD4njOB5KlbmsDJbioCAAgESBOAT2cpQYYAh4mOJhTERgolUKql0fppTS4XAw4pQSM79+/bqu+XGZ53lZVx/2o8iBiGLgF89u5/k0z/OyLJxCYKLAqoUp1lLyshDoYRzGmBCMuwlBBm0aYU9Zq11kdJ/s736/+lcXGS3qfYD7je5D7duew3ceWw5kV1DtD7T/1p+MfZDGzkHa0157GMTWGeWZQ6/GF/GuJCdFEFHoJaZN1xgZCBjCBb4gQ4Pce7Ise0reP8n+hM3i2v9zE+DftRIXTLHXEfjOBAuPNqGhKBCDpzdyzeM4rmvOpYSQbm4OcYwhBIMKQBsGp6kCSFVEtvlULUUVYeZIaBAAgYgVoHa73MxArYpWlbVUouD2rSmWUue8EgaVJec8z/O8FlWlwClKjHHN4vw7GBkKiD4+5sfHgohIwWt/oRc1xBg5EGqMkZd5XtdVpexTumbmqRdE1N4NRYiGwIZGSF2tk4HhGTu3mqGZInz6/e+998lHv/zlL3/71R+ur24Ph8PNNDz/9CNvTPV4H5iBqVQfTuZuAfkPAZi26omNWdrIBAIERJXixSAeR9e2ldBroy4cJKdzu5xt7p9vgbPtFczMyfyCerQVJbrN38hMzrgyHk0L2NwfaBCv5ghjfvoF3W4Jw3eOPV80wkaU3m24ZwREJKQnPYR2zud/BxDoE87qirT90NYOurOOXD8Hol2LR5ck+/hmWwH/Wjr6bVNsvfJz54M6JHSvN+jBl71kZCTdsXHLyG0MvNtL7ZBcuiFqwl4m7syCNiP1ophteyTrdrPufO7Nm0UfS+R2mFsnIqrmvZF8Dm8BADBHoqra3IC+K2qKQEa1UZkn3EWEQu/K7KEzn2pMLt4BCZAa6qkhYuJA0OTvuq4ze1KTTSQyTjytx6uUC6VQxWrl4TDNaznlVcwAWRHQFIqEoJFoYEwMiSCBJgI0WNbTzTAo8VrnIY3Xn7//7BA+fP7F/+NvZ9BTYgA75VVLeVSKhwOlMNQqzDHEKAQKUhRDlMcVzGStGcyiAhWEBTTgcRwXn/W7PK6qsERLKYSwDCcWjbUS8gAxGEI1EAWAcDjkEN/O8zThSShXuB7G0yqRI3AMCNcvPrh+/lLmt6rAqkjADK2Cj1nUcllXQ0KSYgBliJpSTJEhERiDqkkSjVJsmTMzDkNa82NIo5mUUnIV4jilqRi/fZjNrGUVwBAYUBEQTQg4MhOaWvU
ymw1OtpG6toZWUkFitSpazNrEalTTWqxLEBcWZoYqqMahDxRqFqpqtwYale4yLW78XuS7tsRz7fM8taHbOctwiLY79pLCJaKZ4W5iBOIFK22C6Ql//TEBtJ18lqqXv35O1DsmpGOaNevHS7sDM1sXEbYLPG2/5esUQ9zLR+wacYtzq6r1r7x8xnqflS+RtfObzuDQYEhjjFGKOPa3qBKgGnjJMFREIUghUuQRTEthyaupoNUxTMfDwY42zzMajMMUrpCZGcnhgggwIJFpLTWXXHKGNmSZoGoV0SxSFFTJwAxFRUqVUgmQUImYUzTR2eY6r7PCIVqKaQh8SEkMwFABVbVG7HE0vro+MtKynKRmAjyMEx2mNA5AUGs1wpBiETUgZiLFGpWVqmgIASEws6jWXCzXilZAgfDm6nqNK6U0TYOj7+aHE6ionltD7TKHvKefPeU0SjBAq53e9m7bFn0gRLZz8wls5h0AOFg6mJK1ngkjRAoAZfcAm+voxckXUQMfh9SYjAlgG/0sIkrB7ZtuWZorWxHZg5eeCZ53YzY2r5Wot77tmAt2dvwTtjLb5mtd+IT7tX1y7RNfdDtn62ncLGkXF1WWFy9v/uLPfm4mv/yn375+81Ckikgd+HRapGSiYFVKrYyEBjnntixoxIhoKsVr20TdGwIDqFo6lxGYjxBSkCoNQDoQhpLz6/k0372+fx2nZGDFJJPQ6XQapuPLTz4cpsOru3ucF04RCNUCoCEaIxGDo7Qch0BkVpZZVxA1k8AcYzxMQ0rj9dUVAKy5nk4Pv/3Nb+Z5Zo6Hw7GUsswzILJqDCEwx5Tef/l8OZ0eTqe2X4Sn0wnMDtMYmaVULzxh4lJXRjJTMSMEpA3aEJ6gLuOuvOKJBIZmDp5pgAG9l+t8ubuCuM8/fIdDiJcZRT8Z1Y3gS2HODIhtKXdicx/XO5ss/Z8hBGQ25xBQZkamjrn9XRakwH6+WVNk7kN9d7zRNpWx527oprb11Pcm2DfjCt7x/b6Tv1pY8JIFEBGQ2xQhPwiHaXz27NnD/aOuImpsTAwhYIw8JFJEAWhZXLOqhuhJRiyiLIaoBciKqAIzEiQTn4fbBmtpValuowuFGBUFQIEQuKrc389rzuu65lwVLAQTKVnhbp7HmFJKa7Xl7tGrcykMpS7IxK6zhpTGIU3jMI2EQKYhEJiHlJuMCv7uneoEjLQN0vQq4Q0s3VPKjujWGgeIDACZOPD3f/rjl++/d3j27K/++q9fvHjx8ccff/bZZy9+9CMndBB1Wam947RtlolLUZ80LiKJWli2xdUdTMtD2OSQSNwovG3ZRVTaUzIAYFQRzlEwgi0X3cmoE+QWc3G6AwA4h2F30+96RQt1GiNzKCcAz0a2Us9zQOfM4HrxkPsTLoZE744tZb2l4xpH6NNYxp7gbec9+iUNUtFtwvMjXXCW9Y5fM3AI383x2zy4zYY0s32INvhzeFWbXyMiWqvGlsEzz++dd333kh5dQETEhpXfO2g3PxURkQnACM4M788hpqDnIU7qSJJ2hu71GSkqYFDNsIdTN4ewbn1HqlpUyM5pjfYMzDlnEUGf+lwrtCCKIoZNMhOytLEhtEXI2osjS2lIOZ5cFtNSCoXgMGtGSLuSXEb0ijaIwcM5VgU6IBIAILDHp8cY0zDRLIF4GtPVYSQiAyQSjTxOEyKKlNrCGv7iirVyoEQYCEBNpYhaQJkijykYElkpdQ48/vCj559/8Pz3669+859+xwjHIT+/PtpUB6bE44tnN6WUFMc0jWKaS6mmYvX119lxkBFxGCYf6jCOY4pxXfUU8j3z6TRLfQRBYw4HAqmkMAa8TokxCARN4fW6PKxZQCCgqqYwDIShAKSEAKKFmKfjwUwe7u4PLENMgarluixLEVTkU5aHU34QHoZ7ZgoEKTCSBcRpDIdpGAKPQ7g6jCkFME0JieI4DtLCdW3y9WGahqtn98tvWRDAHHHTaw8QkbSmwCmEEIKXTYlIhbMWNDMAMrOWUnmiGk23Gdnk6QIHvwTbchv29ADoYw+23utGKmaOid/O6zUXzhoiYr1TfOOjWqv2jOJZmnyXF4ffFXmBd7TsWVTtRNL+ZPsub3D/VO3h3ZBVEZGcMx7GTSASUVHFXmvX3l3PgF3YfWbadQ/u4Tr8TBHB7oU69kETc9VlBYK3SfhTARIGIwtIKcSZSFW0SlFjIgUkA0aqsCIFASaKISJjTAEkoOQSGJlgjGE4pufXBwROMWqUVoWjaiIitaiZ99uAB+0IAIqLF+2TWovWWqVUBgtIwzCsJYuCiaLa1eEQiB7vHpdleVjmaZrG6ThNQ0ojhQQAO1AW9Q6rNc9SFzAZOeAQgRu2+5AChxCG8bTMYoQUVNA7VRGxEgMyGtSc54f7WgpYwWE4ENk4IOLxeGRmr4gupaQQRXErwThTyDsq84I2NrSVbub1bMDZzu7WYctwOLJj05HwlNi0tQgygtLTn0NodoML5HOrnqqaAQITBqbIzOCuJVJAMqxNcQioCvfn3F5nzx3vfrLRrQM2YmfDFu6BLWq4cZPXpO35a1tVZ7Fz5fNmKCBiKdm+y4DYzLKLYBMisSHa+x88/7M///n17c0fvnr19Tdvvvr61bffvv2qfo1qMQylFKmZwgAApooAMQQi4kjMXGuVkmub1Ume9/DuCcMQPI5DhoiKDErMPIQwBF4zSC6ZTANSSkNEC2GkIKUy2sB0nEYzizFyDCIi0HDLCRRACQ1MVaqsZVVVETMZYrq9vb06TLfXN4g4pkgUar3XKqe390teP/rk+yGEcRxFhJjN7DAOwDQM8fnN9d3NgVAIg5vXd4HrUo/TIQVOKTy7vgkxmYh3hBi4B4ygG+wFEMBGqRsBACCIgjXwOd9wgBYA1j6OzAnaaZGI0Hbp5E1iv5N1QGhztC9orP8EXNI/qsKu2/As3nv0wUvyaHetVw0CtOCjdQgoNx8DcRFHLDyja7itaBe2VgBARzXccfT5D9uppJ3KIKIdrfYxFXCpdzZlt3+pJ2prnzPfbmhmaFBrZSRmjyDX6+vjz372k6+++qqUUoogSa2qNUPiELE0iOnzowpCMPTaOzFgQxVTrGTKxsknv5tHD8ynH5cihlCqEAYMwbIAIYW4nE6n1YdwmLqRiSQEYKoGuYrACgAgFRFT5BgCEoRAQ+QxxeNhPBwO0zSN4wCmIBXJBh0AQMQHs5nXsvlRTUlBe68WGXV/7DzZAjuQGBIjoiIQUxgSpkAffvQv/vX4vZ/95Pr2Jo0jpARE0KoyjREdGWbz3q1rZUQ0VTBBUFX3I6z5e9ZjE462jQimfcQxYq/m66sPfS9k283WWw49kajnqN9+bidTqxnZpg46PYNKg0x3P6X/lvccNsrc/AJqleN7E8jesYjOTApAHd8LdqlCU/XwrV3aTgBgembSjS+63sG9waMdC20v+fu15zO158CsO4ob5z4xnzxxsU+tIWJvFGzRLKLdGFwPG3iuc3vJ/WFm1OeqbU8cNOxzlxQYLgc3tYXort2e51VVci6lOGg3IgKQqooo9ab9ru+7Gj
ZCYDAQkVYuDiDmAAdghAYCqMzBR1awoe8OM4udi3pLqS1nyoEQoAMkcGDVKiLIFEIIKYppVeVt4AzRft3ZGkJpQmTiSOyobaaAgZksMIFgCGmYxsNhqeUxEgaEQMzoZ9GBSLVaKVALAXIkjsn3UurJf9RdNSJlgkDIiPPpgUO4vblF4lNeifTZzfHP//Tjt9/8ri7w7Iq//+kLtps8P46BGSWgjKMcb0gU1koUYgjHH78caq3rWsyMKUqppRRmCCQSsKQxH3FZQinFoQW/JCUKA8vzw/HF9a1WfTuvc10qp8f1vkKOBFjyszhdpymfljuyWkpZZ4tMsq6PZX54e/PimJdHAZU1L2s2jGE4xhBSAlzGnJUAYUwENC+PUtfDkPL69e3N9ZTiOPDts+sppWEgZqNDyaIiJSYOonPJa8mRBwAyUzQDAjPxJjFEG2IYhjgdhsOQAmFV0cZpZG3m9VnborWS4BBCCFxKm67KzK2Gm3ahYj1nq0SkS8Y9xhogtMzYFqF5kotovCpqwRAxcpAYN3eUDNYu6YI/E1fagN46ZyHu4mmXyfZNreJFwOkcUd6SeE8u2STAE3Vetp5h95cBaq0+01xEtni5quLuJ5hZSDejFrpZgNiK+5nZ7BxmOy9yF6AxxpaY7Q/DzBxCK8fV8wA9P2cIESoUqWBiooiBAAMREiEjMioqkHGgMI7B0KSaaEQ0LQwxpUhEALbOp1przcVxn125AiEzDyFyDIa0llxK8Uygr7O3keQhjzEMw4CBH06PbFZEQe14PNwer19T+ParUsRyzrXozKdhOlxfX0/jMaSAiNXUTEJgA3Gc3uNxoopuYSOagLUGCbBpGEXNMAqZiBVQ1cqIFVWtrstpeTyVdWbTUEsJvDBX02kcFRskOoKO05RFyOAdEjoP5dsv8o4CbZvb6Rvk+yLSpiB2OAxNw4j98H1uioZJTcGgOzwkVkB4aKH8s5PWGEeaQ+hqgIgIAxFUlXouxaEm8IErqm6elZk3ujAF6R8/cQRE6o5lFNp4AXjyJNv77iO721ohom/cE9sXd1lBvLSGtxXeGwqucbZ463YHRAyBlvkhxOHjjz+8uX3+2Zv7P3z16re/++KX//DrmsvjwymE9ObNXVkzqK0LBWpPGEKIiRFxdeBlrSGEFELDpG2VO8BGZqboz5BFgAgiQ0A0Bg18NcYPXj77+IPb2+MYIta5gPzy2zdvHu7eTNM0pmhmYpqGZCBmolrB7RPXznUlxBQDDYGIDuP48uWLF8+eX19f5VqGEJk5p0EPMk1TSokInj27maZhGKL3sAxjVNUUCIc4Bl4JgezqONzc3AwBH96ewhBCpNvr4/NnN4BW1iXdXgEIypYZbFVkYDtgie5TbWu+yUzc7EgAE3X7RxzUqu8j7IyfPTfRrjgFLt3DZkQZKGID/8BezqaqKmZKprijCkQEZNrAPJ2XocWXWw8nIiA5vEHEaHCGKwSANIysqqLSMB3MsVWY+Qw6fM7MPz32LL8R82bBI1KMQXdJSOy5vv0n+/WxvuBPvgpd1O9/FADUtFZFJo5sUksp6frZ93/4g7/6q7/y0k0z5hATBkbJeYEweELJkQC91amCwyWipmBmzBzI03BsjoRkpoampmq5Ss05xmHJOUZM4JkDAsRStagaIAbmZg8zBgai4zSt63KaZzAZYxqG6DAkL99/ETnEyCnQFMM0Ta2xC9BM1ZCZhyGqcsMhd7AraMpRXFoiIKKAMZL1cICLxlqFiIzN7WwgIAQLdPP8GZjCcXp5ewUxQikQA9R63hci4AiqgVqnGvrsUARE5I6ib7WnxTrLaJ8Rgf0TtDPYsvfDwC7j574AIaFtWT3byhM3vtiSui7AxSMaPXRCIbT76ZmXccP0UqMQTLWh2HQ7beMvR/FojYieI+xu8FkO+INxoB6w4xC85g1FfH02xj8TuULLLHQBwg205Rza2N7x/KM7/tqr2s2MNOt1SfDU5Wt8nZKZ7L9yk6k1/Hgdl9tt4zguopvw8pJlXykRoTR4CtqfsFZlBqLAbMxVFYgCARMwAplaTKF3zlit1SuCOAbr4Io+dsLMPHQbMQJALm2KV60VMfrDoOcaAQBcHqKIZGi5aateYtdCp2Maaq1qFRFdWJxOp2EYlmUxLz0pxRvhmKOv+DhOtZZlWQAgDee5lAAaUgwhbCzX1i6lksWqGIGqBoMUk9q6JZQvTH/EUooSjAFBpZrGIYU0UlxUSorHZ9dXpaxv7x8UIA7j+jjXdWHTcZhCigrmBvEwDKjGzMMwmCkAMfOaT4cxHqYrZACpHMLz2xtErKo/+/yDX73kX/+DvLie3r+9Ri2PVpm05jxECFRQ5ykMU0rAFDiNkQBSraHWiooAyWxQVVArxQoDTSNzz17Wep2uH1+/pjWPzDlnCjEehzwvGQCpQoVjgPeP6fkwPYvTQ13/kOjV6fHlzeH0+CCPb8Lt9e0xkVYDWU6PWoWQjchMSpHT6cTxWIoWybXWJVAgO0zHaUhDCutWscnMAAEAAElEQVSynE6nq0MCgFOMV4eROUR4OBxvN38+pVRzee/DW63/JMXENJiFJs6RCRo2Qi2VyadyM/OyroEcp75wikOa8tBs7jVnDqGUkmvxMi3yAS+IHnFwy8w/YWbsRQReY+aNZO5AImKzgGptHS+ItWUOz6DnGxvnnGVMRFRKQTVi7vEyhB2OLjMDca7VumvnImQTFlvLk3Z4jya7uym5l0FbtOiJppc+lPnJJ15dxsxMHY0jUIxxnufrw7DZ9y5Ycs4S0dnc163dR5SZc86B0hCTv7tXc0zTdHfnc8yVmYeUwM7VDVsYaxN8oiBgWGvO2YZhTEOKAdUih5wzmgYOgchEQStAVLCqGlFD4jAkcjphXh9mBICG2KkP88NymnPOUnJKaRzHafDcnYtXuj5eGQIFLqVoFUQMRGutQ/SAy+oOpCuFIURo4tFAk5QcwvD85vbFze2bu2/v7u7u7+91hdOyPjw8HA/XV1dXKaWUUhwiAEhVt3/NLE0BgwfjVEzFm0mQWWFes2oxY0aCQAQMajWX+fHh9ddfne4fQKsBpsMEZqY1xCSqITAQer7lzZs3AtM2h/BMGK7hcA/41uiHI1oVM8/H1lozwOC4fKpKBMwcOHoXRJtvgUFEEHleFo6TaXUlRUQxMAcUw6rCHCkw5oJIXUCVWuswDESk6g4SAEDOOVeJMap61Ykb661zHq0CIQaPy14UyIGpiLhy3DYXd6kM7QWcOxM2bIsALeWCqrArD3c9jRtmJOJFrm+zdDdO3AxoAGCmvRralJTXdGwPtp1vJjFy1aLZpjHF918Mw3B9ff389vnjw0MKMaXhn3716/fff8leO2MyDAMzOvxVKcVMx8MYQri+vp6myXyWt7ZM0RTTsq6Apgg5syoEjgwMivez5Lq+//y9P/3pjz//5L33nl+ByuvXbxH07X//Py6PJ5OCZschAVFIPuVZVcVAGMFMtBYKUURSCMfj4fp4NY5jSmmIMRDG6ZBzvr9bOIT54ZHQcqnTNPniiMg4jsysVYiAEU7rc
n08DF65kyKhvXx+GymeTqePP/zg5uooUkFqujpArSDFQ1pnp8wQrA2QtJ6G3VgA9919zRPzwJZsG+oaxIO5W47iiRSF3pOC7zTQ+nwOUDWRhvFIBHsY+u6yEhGFAKrq1Ul9jrzWythCYwHJALx/iWHnYnlNnVqD/iutNlhFCLiqqp5VBqiniZqzF0LwVPiO/Jq16uf3uhjoKpJgZ9Rux6Z09q7dds/NLNbele0/vRnZusN5osApMCKCGoaQiKHkT7/32f/6f/Ov/0//x//zNE3EQURyXqbpuZT1OF0LcS7LcirVIIQQAyEaiCpiFTOrQUwDDUhCVksFVZFiZg4GQURiKDn7fokCsusuGo+HkTIAALVUR1X1pQA2DpggMIYUKKVwfTweDodpTETEaERwGNI0JWYqpQQmZGJgETElZAgURSTbuXag6WxCAKimJqJmXiPqTiwRMbACDMOgbT4qrSXfHt7nOEDNkCKYQlmBGUwNDJFrqQAQEGGdfUO3evVGwQCb1ArkCcwWjQXPFQU2NcMzKG6bhNmCLU3oibgoICJS7aO8TKAn6sEMyUQqAnAMgKi1IoBjlZBnJbHdC9xtcJbZBR08vllzbm1BXbBCk/Z71gTskDNnr6xrwPavnJ3aa615XbcgKXbra3+tqnIflbfpHTh7jBV6RN5l/pPG2j2XnQm+zcpu1p3zUwgMvXnHL8k5+8jTfdthKSX09IVuHTjuAW49hPuAzZMw8Par2juYN6sUEcFMRQBa5SR0KCFkCohuFjvheo0fegc/lvNcUdpQIuK6rkRmwMAuQRrFVL2QyF4OSESGDQvEg892gcnTPXXR/sDsY4sAwEIr/AJU3wkxYbnITrS6vUYJ7mC2ozVeynmPAcAQBAw9SE5BQaqIAMYh0TjEwGYqNWvNUrLPLTUEVEUQ0KziaX90KEwiImBEBjNREDXmCMb+WIokqiwKTIzwybOX3//go3/6m9/ZItfD8fRwx8ZBWa1IqTlXySXGgUIahgGHIFCZ4zhFhsFhxBz4QasQHQxkXdf58SGXjIhp4KHA7fP3NK9a8phiBVseHpUhDXz3m29hhU/fjz/75IMPx+tbHuC9m/9Z8u9o/e3v304RjiQvr2INR9IiPEQQEUFgjBPH6aA8DMPv3iym2ayYAVpMMRwPdDjE0+MyDmNeHh4eX9dyur29PkxYZR4ZAdRMVKXUUusahsH7nonFxMAE1ZAxEXFAs1zKuhIGIgxsBtZHDCmQGJKiK1TmAIhaCnjRRR/x5OyRa40iyS46oDrU4jle60awM7CqiqhssPW1MTB0b9AZJ3EwFgC4zPy1o5QCQLVuw9xEejoQL4QZPJE47/5zf/4TufPuHf7YsTdtbSc09vreX5x3dU37Q1VhhzJ65m5HIeuOKLVCcSWUENht4CapWh63oTh5cvjih8yCy02r+xQwqClZDBhipBiATMUqAnl3ueiyLOtpVpG6ZhMFgEOKUqygmSTP06aUKIRxHGsfd+dF44E4hVhzAeV1XXPOtdaBAqCJqp+fa2XEyByYmTkSh/TBNB2Px+t1XefTuq7r3d3dsiwuqCmSuytxCO5lmRmbRmLgGEzEtKioGZimwGKUiylajKMySKm53FlVKRlqZoOAwCIo1cyQ0Air6mlZlpKJwjgOp3VfUfWUKvZmwbt0cqntWtXSee3VfLqOmVkb3+OnaZ9ED2IAwNI7PLBn3ZHQFDcF5CFONQCQfVU2cws9FNmPAEVtA4sFetkcmZpom0VxafE77ckO0nP/ppuu3C7ZzPr9EullXdyTq3AX98VeQeT/9AyS9pLR7apNmcJlZbhaJQqBsBbVmgHCEMPN8VBfPP/4g/etfnV/94gm18cjAMynNRFFBC/EDYhMEBIH4tvb28MwppTMkACZo9v3UvPIVrVUESZQBmbQUmJMJVAc4ovrw4vb4/Pj4eYwEAADkn3v97///du7B0Yap6GIiWnglCKnNEZGB60Rz+eMo5SKCOM4TsOQIqfIPrFNpNY157ywxJwXEA0EbuWYiUipNSM6Th7P81xrRYIQWzGFObgdweE4Auo0TWNMgAhVoA116yYqQC8K+w55dfYDLxNZ0D2fJxtK5JBFsrcLnbBccrv02tMVItZaUQ2ptTbBZhP7KCO7xNxrOZCL+g5/jF5L1p7cgJoZsxVQgHhOHHszDrQ/QDW7Dmq3BYbeN7QZpiGcR/7suX7PQZe6oPUyPSmH21y7bfWshxH399/u47YydRTfbVNqrX4LBgMkUANDUPvggw8+/fyTP3zx1SHGWo2IHh7uxnFUq4Ct0IOIEEwUxFSrREYIjEgKKAa5atUcLCKiIZuWXAuLqBlHz6YQAiETiCm2rN3hONRaq4pKBdBAQBGGgCEgI6eIiTkNYRrGq6vD4XAwUSIKBIS28T6oguNj+PgiIh/BrQopJexpKzMTU7eBocGyaFEBBRFpRaRIRFRrrWjEGMzSNL18/z0eEjB5d6uqtVBICBvyMyACMGKz1Uspjaigy0lojdjYcF4bKZqnzclaMhCxdW8DgILUcxyB0Lxr7OzwmIBR//XGaec/DHy+LiJqqVty33UMYmsrh05L0Aq5AeA8+MJ1j0dzEGAL6ACcs/FOjRv9bc+DALX7NV6egD1uyH087Bata0S7dRXsjs0g2ZP33vPapE3/P3gFysbs0Hy/FhzpXt+5JEG1umZs3ESGZOjzFvZe38ZOey3ohzuKaGdQrL5SYGbUfXqRrbTAG2Zg42dVBWYy2/KYRKRdFhB5pgW2O1PzvLZxVXuotzOyqrZynYayQB079PzyYGYCgKoV0aAV+l3Af6mAaFFVouZec8AYYxEDtW0gjxPcWRO3yw1UQdSDPttWmZkiKBB4XqhN8EIBy7VWkThOYTmFQISGpJE5BZSsUmYjBquMSFatmCFwCAFoCMyEPreXkMysGgwcZYtdmKFqVQmERmhr/fHnP/jL4Xf3X99jiTfTyxGm+XSPgcQ4l6WcZNYHAAo8hvB4/XyIIaWUmCMTIrKPnLl/nENgIsqlLqVWqSEEJr6O8fp4KDmIDGkcXj8+rCZxGqtIfoDPX8D//p/9+b/88c9exsiPy4D4MdGv3r/9v37931UFmO+uwkfTe88ZseZTXmerYsSAkdNRjR5Oy3h7Wtd1XVczCYzTNF1fHQ+H8fFOUuRl4bs3Uspq+lAKlAIMV2DCSDHGQFJq5YAiJRBKY2ojkEQ8RozRcei3GHwL6MZhKGv2IjHb4vpEzFxUGojVrpIMVN0fQ0TiKFoACJlQkYn3zMLM1NuaPZ24DTJx2RGYa4/xI2LDki5lT6tkrS0bLiEo2q90Lbmf7fPEWNkLoO3xNpbfq/DtR7cz9xdut7JeyUP7HEU/c5ODdIYNOMtEAGBmZvXTiMh2g8vPb91DX9DNX4+wMinsGidUW18Z9sg09MaYJs26WcZIAq0zgaz5AoDoXVLWwUUc6n6AAU1QzETKagxDCnEYhoFbTtI9uBjjMAwcIwB5wYXP77YqUorkAkxgHuUXAAdX0lwK47kUkJEAlIyJ6Ob4bBqPtzfPc86Pj4/3
d3c5Z0S8f3hY8+yLG2McpuTVRFdXV4FDDESBzaiIEFFVmOdHphiQVl3NLFKDbIsIFnAICIFJLRJGBDSTkiGlGNgQlrwua2nlRJegGk+o60Lq7vJUcKksdOu6tJ0YVxOQwE1l+wxU/8rMyFNGhtgaqqnDsZAbHNZHOTlbhXAoVTwMf0H8gOrzxBpBRTMEAxVANmwonYyA1qGktCfSrUMW7en/CXPtF2dzBuAyC7RX6m7kbXSOl42Lewa0nX2wBVm2t9vWdv8M5xt2xD8iNanrcnr7+hUTDjG8qbnk+ubVtx5DSUMKiACKaGNgjkmVAnFCMVnqXNCAQvI0qJYseWUtCIBQMXEaBqKQl4wKCypHvDoMz66mwxjGGAMp4nEax48/eP/Nmzfz6fH6+noYOEtNKaYhDsMQmThYIMcyKBgHR4INIQTi7T9TkVKRYEqDtAx1NbMU2JiYOTIxAoGhqdamf8eYLBg7SHjO67qGQLfH60h0fXM1HQYgA63eKdATFp4bvCjWeLrUuygePBW2ZzZpuykN5OMsOR0R2gyJXAFtoux8K1Gli9JMv9BjI7AzOc5ivAes94J9I8gzodoZL7Fx245bRRyjBRCbDmIiYq61tvg2kRuE8s6777genzzD9nNbFGMLEu3duScPbL1kGnZqa1OmTxa/KcFd2qp5yQYG9uGHH/785z//6stv/NtA/PDwMMQkpfoDeBVPVaOmqkAM2UDR64GpqpjYFu8UBEf88GsB3B9nAAKfvEcUYhQWQEfzRDMC4hA4BNIqhBoTp8DDEMaBmQAlj8PEzETAhB7vQFBkVBAVqWLioGVEBh14ua+YgpFCUWnRLTMjpFqB0OGhzExUKLC26AUWlUMcn798CSkCAJgCoiGJCoFRCPgOqBIA+Ky1ix1vxAdSlZm9iBqpbY+BIDhaTKve3GwVbCaNegQPqYO+qvmMe9tl5PaE5PFZp/atmZBai4xtbLClhQAcVxABAHpwn58wBfb+4Q1Jzc6ey/bT2/NAV3x7Gb4J9u1p+yQqIiKt51s9YZ8nlpj/380Ge3pcTDLbM/jlTQjxIiXWTkPdXM1zD2HnzGYJ0W7XN/WDiK0+01oT6MY8tdYN9cF2LcgeIRCRqgId0eWcezyD0rYqAsAGg+O9upv2ZY5dwrXnJCIkQGRENcKt9r6ass+n6i9sJtoReX3h2nN26GQiqqWWUgGVe0MgGcUYgUwEmHgzCBqQ3FY07P+pbfoAsaE8tQYWMzUrUo2YGQXMEHKtReqYBmxhlIqmIcIwxmpVV0GQwGjoNSpqhJFpiGFK5A/JYDFGqBXAwK1JIEMA06qKokZCQPlheX714oOX9Pqb+8e79eMPP2RJgaKURW0pNZeyrmtZl5xnWURmnd0kMkMyYvZeKco5O/5YChSHwxCufHzqNY5SCxEOx6sKlh8VY0op/ePf/gMt8OPPXvzppx/+6P3bFzFKhAGRn714f4p3f/jtX/7Vb8rDI0g+TtccEDWVPAIoIgMQpwMQ3czj9w4vvNa/lNXMIuMwDCmFenVLjKAH/ez5vDze3d0FhKuJiBEAxhSvDuNSSBa9Ohybh49ITEQQGIdIQ+IUiGhy1Fk/qiqoIZEagqEaqoAoVPEQqiIHAABRxY4TgGhEyKzQIBLNDAiZY63nWTp6zvifmXYTAU+khnacTMdBtu61aq/L2wK38F3HE638nzlhL9z/l9/n/9+rVNXIS5U2lW+b6LB3HFT8LusZO5Qi7Bntj1g8m53BIRF5W2ULW2JH/vBoE7S8YAvqu2NWSylIPMSU4piGIbKyjCEOMUQkEDVDdwiDKvKWBOiCmELO2cwYeRgGESFAEyDIFNhx20XE673NyCx7hC+E4CVtIFo1B249ZsMwXB2Ptzc364tnfqYnGHNdAQC8vTnGEHw0i3tuqgjBwKNQoCJGyBCJBRxF2kD15mpEA328Wg1RlMAOKYUQSimolZmNqaoVqVVCtQwQn2y09bqaTU06vW/yH1SJrS3MDmOt7VuHBzdFQ2sATi0WeZ6wQkSmrf3Ed9gQEdmsIiJhUETY9L6bBLoxjQfTpdaOgitNB0lLRBrxWV8QEXb1Fi7b7LeX2tTB9qEfIfDlC7bGjz2JwoWevujM31+y/+fuJ8K73/rdtj82LQwAMUQzq1VNNYRgavf393/4/e9+80+/e/XNVzWXFOj6MM7LqeT67NkzM0ZkJkwpHqcxsJlUZp4f7zVDrqZVGMOJo5lpUaPeXc+YAo+HiSjMBloVQUGFCSIzE6AVE2MKQvDi+W1K4e3rb29ubt774P0JUMAIsOYVCLSieyFM5CV5nuA4byRAiNFhw6qU07I+PNx5zRGixRhSSuOYkrfaNvBMhxdmM6NARAigXlb64uVzQLy+PqYhAjgCRGseNNuD158HY26b7tIM/niXte7KOjZIDFXlGN1y8H1qVlOtRmePaAvT7/G0cMuzAQKg1vIu/VgzhXEjDOs5lka3fViFeiD7wnwkgt4R+xSNsA01QQ+g9Gm3fucAFyuzPxC31t9zUMO/2kKE+8/3N9m+3f5+cn+/antO6Eb5pgigQ4bsXxJi/MlPfvK3f/P3b9/eRQ5FC3NE5Fqrw6vSeT4ZBiR3aTw7IOC7ZaoaWADAG8scXYagVWkSkBGCebddGAzNTOojMUVyIICgCEQUiZlHRIxMQ0pD4BDZSyVjZCZyUD0C3MK8qioOu2yoYqgKgIDsD4zcLHYjdKmqPnvQy516XSQQStaR2QgDM1FcpXIM0zQBIqgYInJEbC/L1ikYzi4QGtg2N9WlWRsB7u18ETzgCs0pRxNz1BkiVDMEM3FOw1ac5bnH1uzn5Kp4QecNbw+AYmAk3akb7HUdHcy0m1z9hMYguymC/v+wj8Rt4SBsRvyl0urzAO3p4SaKthnLjby3lhzYuhZ3R+e2c6SpswPZLq66fb6n/H5bSinpGUtmE0G4P3/PL7AFHEFg93IXDmFTuj2I25L4O8VTSom9OWQrS9t8RU+SUgwbD3tsA7rSby4fQK0aQsDguA/Q+0m8iOv8DrbThSEE7xR1IbnlQbb1cgPdzGCHab4JBd9jREQCA+1e5UYoLSCkCi46RATJRCSEhOjdiSjao8VVPE9P1sIMfjfqXUywewtfDQGLTIpgQMjRrBTRoDIFQJIqec3zus5aC4ECVgCOTMgRPaWKGCOmAIeBEUMiQ9DICdBMKwCoA0gAeuipqgYhUx3HQau9/+KDX/7yD9988+aDlx+J4jheZQbieIWqqiXXdS3rIjmXEh8QWFVrVVVFVKKAwMfDNaBGwmGM0zTG6B1xZZjtblnSceLEb+7vlTiN0zdfv77/Wj55Bn/22fdfpAnnU6RhGGAKwOv6fEj/6s9/8Y+//M1pARAlYpHivTZgDj7JiWUch0Oi6QiqodZUSvE39YUN1ykSE1GIVEp5+/ZtrfVwOBRm6/mfyJg4HA7jaV1VCnEMHIhgYBxSmBKHwEqtnbWb0SCGpKCubv0/BRErRQyVRxd8pi3kRQYIIDFGzygCkmgXGUySBQByziUFADARQNvKqkMI1nMaG7dbT0r
oLtPehn+r8k7vduo9/62q4kL7HV9xE0nwTiZhf8Mnn+DOlt0/4Z5J97JG9iO8rYHsiQgP0aWK8zJ74X49l5pvPYTWjbC95NoElAuZLU+IAGDnwBjtSpJgM9eY0OHwW9YJXGOmwGZRRESLqapgKTQxgKmACFZK3rPGoBiRGGkIw5ASA5ooATLxOA7bOohYVR+up6WUUioixhCO01UKQworIz3U1Yu0stQ1Z5fgrsVjjNAlpKo60uOyzkREmBh4HIfjFLeqrSJZRID73GFCIqprNvJdI68XsipSZExpyRWUQohRbF7XWtREx2MiwOvDkKqQKgFOwwjcQn1IpIir1CKmgFk09DoHe8cs23lr5xS3mZiKUdgrwbNVcd4spygnWnSBTOpc1iuEe6KGiNw270SP1ud5IzBRICKvk3WreGMokfOYvlpr8ay+iSqESIoIqm7OOFKo22pPJLntzNMnLLbX6084bv/VRp+IDjfQ8fbOF0IrT/BgYxv+3Ax02+VJnnD0nlX7A8BmbJuZiPcmyHx6WOcl51pKmaZBFcqS58fH2yEQwTDEaRqvjgMTmNaAmFjJQKuVNWuRZsMFjMNEIcQhACEwpXGoCis5/DiYiZkYSNtxNARb5tPz588//fjDX/+n397fv3324jkSDSmpKoCBut+kEDhSVIDAuzZLxa0K6f7+fp7nu4fHN3d3Dw+PIdKzZ7dIwoFDJC+CcnDiKuqlaMBkYNbj0YjmvZEh8fF4BGa3NJEaxNB5izea3ULAex8GzzAVGyl42m0rXWjFoN2yBCKo1VwReIFo7/khbw7s15KBg8FsJNd47TIPcEGIBo73vv+2jQrrE+Q2Yvbn2aw493w2FMrNNd20pAu68/nnn77IBDx5tr25tVlluLPftluJiFfE7E/bDtn1TG53xl5p8oQZwWsCtx3x/xOCEah+8tn3fvGLX/z7f///5UQi5ojKpawhkFcqeutQCAEHRgM1MQPLVUSYkMx75FBVPW7b+hgBDCSGoZnSIIbAzJoAkY99TgMREbYx4N5oAGYMxsxDilMaQiQCFL8zuT5VU1HHrtkDIop5YycyVS1ERAro3mAfMeiTXg3Bi3MoMHpEgck/hBDN1BCmaRrHEXykEwYHrOaemNn2txGP7gJkoK2kopsiqhpDMvPaDsAGbwMEKKYu2pDaa3iUtk+kIPCOM3kKWtsaZ86pPIBdVUUg9roO0AYsSZcMwsy4A5+Dxkqdo/tLeY/lJjY3KvWbED6V/OZBFjMcAiCSSOjLst1hW7qNdLuWPFPsu1p1z/iwG/N+eRD1bpotheY3iGFoFk5TDeTJ2loztGTVecqDiATY6S3EBqpBRFtLqhP6BpnI6WyQgdOmqqo6HgAAYDjXyBGRWHknKmyquhl22+GQANCbLFWtK0bCVt7pY6Vd8Pq+2TnXiNxnBDHR+eERrTnKhEYoxUvVFSA4Ory3mNK5grS9cgjnwDB0u1ariAqRUG1JSUSMyJE4IAUkZaZQtaJ6J4b/LgKCNzWZgnEMbCQip2X98DhxjLWKSCl1FREAZcZaK3IM0S0hQ+TAhGBjwJRSCpERUwg+At3MeoaUENswUF9n1Dkm+uzzj/76r//wq1/98oc//P50HJf5bYNrRQNUHmgMY0gwVKXAIQQAUgUVIOKAAcBblZAZkUCkaC0GQt6jEm2Y0mx6d3pUTFr1D7/54orhn//o53/xg5+9d304BgiEPCGAhkWx1vefvziO/OZRSpE0TEjDspwASKSUkk2ryoK6AkCIiZkpMqaw508iSimVUpZlCQYvr25dm+YhrqWggc9pIArTMHz9zVcAEBhTCoEgEgwRUwzMKBRaCTQRYeCAYFUMDVkBvY1QgRQIiJhIDEw66C6BZ57VkDi6nO4cyhQiLOtG8E0GESG0ZkJEDIG1jY5oweC9hbclA0UEqTcf73Rw15JEvXSFiMgAfB7ad0kO2FmN8K4l8V2+4pNzzP4z9774FrthYWYhBGemBvpS5Ynix12v1B5qDy/fN8aIiCK96AJAHe+xP6eq4mV5AiIr1p1QJjKIMRoqNNgMqSLeHx/ZIjERsxEbs1JZcy7l6ni0KpoLICMxGDjQ2yaFVaHPO0EzrbUuy6KqhKFtnCq7YGGOzGANadPVQ0gxolVVby8kI40RANIwMHNgJDTT2jp+wKE3hAmJsJo3zIpHtcnLKT2iLAaioiXGuBYxUUAtpaynpYipAooESoHQIkWLAWFMURiHaaxDpBgLYK6iBpwCvUMbti8VdsJvMrN3uFnz+fcFzPtt7dqnpSVMz14T9f7P85mehDQfbQyGQl2ZgVmvCEYikmqIRK13gsxqv23zGM+GJrCXXImigSKoR7gZzNi0NeD0t97acza6vci3t4Hg7Z9wBvUNfQBUmyHQb2rdHIfLBMjeMthbxtZnOW7rD70ke+Mj2HF3KcVb5gBUqmrV6+PV9z7/bFnys9sXpdjf/s3fffWHL4EwpVhrMUIOGFJIQ4iJGQEBUqBpDIEIxbRozlVK9RQtpYmIwhCAqagAoRXhFB8f7xSMiMTUUXbNGBBBNOfl6ur48ccfv3pzV9b126+/CSkO4wgATBQCMUFGC4RElLvDbCDYpjw3ofJ4OgHAkkutdRyHq5vrFy9eZKuBicCk5ErAPHS4kVVRG2BKN86GYUK0EOjDDz88HEYgBDPsXcWtTmwrf0MwRa11v7zNpmyhhO+WhwDgY6z3BI+1Wmd/ZxvwYifw6QUKWwDuMkiHPfLS/NI+03ujFQe/dwjBjaLQsTgAxNDMPCjmUEpK4CY7IAJfVjCbMTDCeSI4EamCWYkhbCLXzKBBRXZ49x3pmhnAGZzsiX6xXu2yUbjsuiv39O+HdBSZ3c3P1/r5m02MLQHSpj/1VWroO9c3N7/4xZ/97d/+g5Q6z2uMg/d7iwhRt8LJnSg06svkK+ktO9jCmsacOPgAj67ZQymlVlUP6HSX9XqYisiW9SEK4zgOwxCQVKvUCl7GQDDFgZmNUUTQlJkJSQRLcUFoZqZAClY1iwhDCB0gBAAULxDS2/p7ZQohS6OlGMaiIhUiMZjGw/jixYs0TeA9opsfwsHAHOdzW2Szc1mcbZGOLWRP5+IH7eijiIihuWEIAKgb6frznMu1DLTDmhCDiCJuTon/qJmZ1bM85E06S8cgsDbolfr9L2a9QHcvrTuZiKjmsfeNn/YMAb0lXS+pNCB5UNJq9ZDK3utzkm6c2I+utuDJsTNdzt+1IuT+DLDjIOh+43fqDm2lMWrQsKCcEty9wf5Y0H2lsMVpmnWCvdmx47qGEMwS9Xzglv3sVk5fpi31Lwqx8bmqKomHQGrNWx4AERXBl6m7f9oyCWFbFN8K3KK828u7aN0IfSur3WSo9rZ428luaBJkJ4V3N9QODCOi3WwlALAqJm0UQVtxNS9fITQ04N5d1tZazoYp9FlSRGQIwCSl1FrjmAipSM1ztjS4wl7XYckJsZAiZkSsaYzjMLiDvc1rDgDX05RSYCQCQ9omPDa8AfSKn8b/+ji/naarTz/74LPPh6+/ffXXf/tXf/qLP8l1Tg
MqFbEqtagqQlAkI41SIyFRmzRF4OVTFtCYkNBqyVqWUjJ6NyVzMKyas2hVeTzdv331sD7CTz766Kef/+gmHSegMbLqKlgy1OPwcl1LJP3B9374u2//4dWrN5+JXh+OCqJsqCSrrUuFmtfHtyIy1NH76LwwiSgigIkYSCm2LMs8z23xL6OJBJhCpDgMw/Dw8BAJY+QhREJl0sAUyZi8Kh9NtFY1K6hW1aQUR+3zMiQvOvKct2r1qIQYOoyPm8DdqWuRuZ5wVsRzTKFZq4DnuuWteX0LRF1GXrvwOMc797bsxfteOoRi70ia/wXHE2Z59wT7L912k4Ob6MKzp0dEvonn3uhOsexo2tarmKxn/J4IPtxx4iZhPdqqIG2EaU/AYncdnR8dyVSDekVKL0BVFVYRUAPROlceCFixqpUqiIYmpUosgYOxmk+3AWCkwEk0A7gb0WLbpUitdVnW+/v7kiWEQC55FHPO8XYaUhqZ3bMFpsAhDGnQagh1WeZ5NjMyelBV1efPb6dpOhxGVCFTcNjn0OHOAREMRWpeqyo70DMAgAE7rjeEwEZoikOIS5ZlPc2PSykCQODTINkCI4U4IAfmcRwLWmWGkGKMhYOqKgJzIBJ4x+41syaN9Yx1dt5ZYm3jNGXLZe+jmL6Hm07amYzUa/HOOtntD+p19mKQyKu3zES2kl0zI4rQEi1bATYReZzIM9iBORIFdDg61D4gHKEVmbi9sUNa3zHmRngbNTY9srNW3+WpPdm3m+xAYnaK+Tuy8f2nv+Num3jZP0+TKuq9aRQCoQkaXV2lm5tnx8PNsmTiOKTx229fv379Fg5kZsQAjIpaJC9FE3NgY2ZDY8KYIh8CiGo1MkLEorFa5RiArBRdcs1FpNoyrxhCYiKidV3ndTlUHkIQFSJSkMPh8Omnn949PJ5OpyNf3b19CwDMHCMzAXqDCdJjEQ9pW0voMREZyDAMqno4HNI4jIcDMyPTOE5Q12EYXPxCKyQT1coxblSXHUvcyHGhFey9D98bxxFAG9CCyN5q9CV+dzfb8Z3gWOe4GGrrgLoQXyJCm7RUPZukfVDbpsWcDs+/2Ji+29949qn8qq1YFLpr5A2RzWCjcDau9oHFjku5fxFENPVOl9b4pNqX6OKFW/70icq4pFt48rdLAtj1pPktXZs/sfeeKIInlN/W83JiU/sbDEDgYmC4gRkBA+AHH3xwc3Nz9+Y+pZGIpFRV1VILgJlxSBGqmeVaAjE4hpU3NitogEhBTDxZImwIPnuWmRlQRWQtxRSAAyAQMhAye41xi0oQhTGmwzilENWq5GIqKcQxDbE1IpqoqFlgZA4ABsoCqorVVMSdTQK8oD9VRUXoWe0W0yL0QQO2n9dNKLWKQsQRAKZpevbsGThiLRIYWCnobTaAJuogtL7IF+7ZWW77aAuX09jzeWdS8EuIyIGMG4c0c70JTxDVC5HbDG+7EH0KACKtE8e1gLsDtZRGQmYNjxd88rQBXgbdumfukETQ4zvbc+5V1RPe32iMvM8YEQDcocWdw7K3Up4YMHsCfsI7tmvE6BqN9vro8pDNptrfynrLhAuK/bGZkdsAIQA1o7DZZ16JAXR+xO1ezc7cyZ39y2xrtPl7rNE3RlXD2ExAkeJ9NeR1X31p3EZUVfeLcLOPYaMxJz6CNujZU21tmZjDJkDPiwtm9LTjf5Ms2wHdvGZmJHfBF61t3r2B1FpTHNuWnNMX5BFZL81qd+i+n27GajeJN5fbFzmDTpAQsZRyOp3u0sqJp/E4TVOuhWhZclbV43gcpnGaJo5DE+5+w3UeYhjSAFrRxEzQ0aycxbHNwkbvMVUbJj7Nbw9Xt//8X/3Ff/Nv//2//W//ejrw59//iCJ4KFRNUIG80tQwgTEKGoAaKDJSAGBGlaLVxMRMImmK3CZDhlDL/Pj4oDER0evXr1998/aQ6Bd/8vPvf/QZllyg0uFQTWMMYYinB2XmGOnnP//5//A3//DVN69ev3qLAQNxSmlKA9RhmaPmtdZa8vr221ODTwTHm/XspRJRLSIicRwiD3kpRHR1Nd3XWRWYOaU0EQmNKaVlmUNIiUOIBAqM1sqAe9FZrdVHuzKwTwUouYK1dJaq1toS3RBb4LAdFMjnKOde5EmegijYQtG74Igq74w8xI4ytNPf1pniCblup9EOSC0Q1zojmhczb5S35/0nevQMom4X5js+ebBdqnAv2vZffefBO4D+vUNYa7UQtxyO9ZLyrVIUO4ioqpoDDu/YdnuAC2Xvf3uFMFHtpYzWDQsksly2q7QV2fd5GAhEFJglBBNVVALUKsoCUWsuK2NASkyMmJeVB1CN7liCmjGgFmVvYHacap3n+f7+cZ7neV4eHh5McRzHIcbACaD38bk3uI3QIWKfIoOwluLUaNVOj48PDw+vX3/9/Nmzly9fHoaEiDHwNE0xoNsuyGRAIlW1EkDkUIpWlVoLAFAMcUgxxoF4LaLGVdd6mksp7g7lnEMIQ2QJUZMNyIlDTAEAllrBjGOIrWq9L/wf2XcR2WC7NwIgImKyy+3Ti6Kjs7iEXgu9nbAnTmwkupXxnAly293dbc/mqXbAJwAghMfH+XrRLHULQaKJGQak1jWIjEpKBnoerLLxxUaKW9B3zykA4GVg+M46bU+1fymALdlzJuxN2z6h+c4y5x/1V3j357bziSjGweNZgWNKLOTNO+H589tvv307DocPP/zw/ffff3yc1/W+iiGRIYjIugpC1YBjijJEAkMEDjTGlDihEZoh8N1jhdqi+fNpvT89rrnmXAVsGIaRQ0hJHDZdAAKoaoy8njKAffTRB+Pd4xdffnl9fY3dl45MSAaiiBaIdV69MxABmHEchxgjkk3TtOYcY1zyiktmZjGttYRAKYUQQq0O1Aml1HVd03jQ7rmt6/rw8ABG0zRNYxKRm5sbDGEzB9Ursftieo7QLRAOLWIN3Q/xpfZSLrs0OQCAQ0IDz5vhrghCVbGD4EufI+LwqbYrJ+mB+x2CaE+DwGWq7WybASAAp3TpZxo1A49aenCHiuHQWf3q9kN+glRhRD4D+zUSLaXAWVWdlZTsAiiws+l1l8HeyQEwO5fVuVPndqDs0FBtl0X8Tp7a/ng3INWR9mB3OQKgqdWSmXmaprev72KMtTSMq1qr9wB6jDLnXEoRq8QAxG2IDAEqVETvBCUz1erZVwzInGqtuZacMxgyUgjBJ7+j1cAhhAgtF8rjOI5pICICEmJTIUBQnee5rDkcUs7ZQEzHHppou2CKqrJheSCyQBuWYITcAs5tFzwZZdYai/wS9WEDVTFwjNEQhmEYhgFqhcDAAKpFKiAmSEAEovCdFYtuSPYHA2gDY80M8Cw5/YdbLILI0Sk3VjJQMEjDwWdztd101xSEOnbR7ucIAJhDI11R2AnG5s44jzh5eJtJCBsNYQfItX477OM9d3L+3AKzpzQO4cySOyLcEzN2dFC3c5iZmGEnNBAbus93HmfV1iXGk0Aq7ETNZlPtuQzAW1jJMxWbau6GkD9Aa771zwPIOgQoUBHUtCpQCOHRlBEwJQxcp
ORlVSnMnJiqrUBEAWsVZHLzpohVFeRYda05hyFNh4Oq5pwjHSRD0WpCEQdSQGVCsmqmoIBqKIpzEVKCqClIqRUBOKCPLkSyNJJIRmmIWg5Iy8CMNMssJoAWiREDtmniiIasQZGkQq21VAiBQ0jZBDggoxEpFEPJddZTTkNUkxQQwqhVSilkYQzpNC+HwxFAcykB4zTxsixLXqfD0RCUraCVvAjjEJBVGQM5wjCYU5xYrVoYaVnmIUQGW+Z8c5iIx4T57748/a8+fPlYTscxcaX79YTl4WocwxQxjRQSEJgImySywEjTpDVDPEfRiFlMiZgCAYBIERFmJQZUiBlAkNf1T3/0/Wfj4d/9t/+v/+d/8x9//Y+/+cEPf3x7e3v7/CWnCFJKXZUER/zm1BqDicg7n9EMpA7DpFVCmBzkcAhxXVcAeCUv4+EzK6vVZb5bvvmn1wPC//a/+vlPf3AQ+22aUhxHTXEarhEJRCMV1oUl/+jjZ/+7f/6T/8u//Ydvf/N3n7z3L1UlpVDXPKUUxskQrm5vTw93j5lLKWIWIhnAYz6JlM2FiEPkUNVmDkBEuTzWx3U8Hg0EJlvW9fD82R9OX50gf3D9YuRh4gGtiGSwaoFwCnUJc57neXX+UdUsVQVyzdu4ms0Tg7kOqUUuqNSrONA4LesKBitAjKmarvlEUNGqyTJGmDWQgVbUaqyQonfkV0QEJgIUqaKgxMpcRRGVI6lKKWu1owAacUgDERSpRSWGCBVqLcGoaOVQ1dSTAEbsU1zdmwdUADNAFQQhMmIgTkF7OQCIbgao7GZnbTLGDOo2CWP31RPTdqdrgWsmDopYPaTDEABOtU5DKlXHmMaYvPgGUCtKKeswTJG46KqmhJgCKyEamMmyLBqYabAqwjWmMIyAVInsdLp78fxKpKBUHIfTwyOIgkoKzMxg1bQEjknSFKY1Wy6S1QJBBRsYGa3WUuuqIqTCKGZaRDgdK/LjSkUBI+OqlTUFTjGKQSnCWJWAAAMSp1gecxwSmC2n9f40353WV68fXr9+++bt22EYrm+OFEceooHWWhXL8FCmdJ0oishqOqsaKY5D1ozDWO4flmqHwzURP5S6wvLN737/7es3r97evffi5fX1bUpprmualQiGYWBCnWspJRKbWT1lLGswBDUFI1OFWletBhSTiIhWAhsnN5sKs5EEyQBMQgBDjIfririonAqP07Ov73K8uUYaGbjM8xDHtUjdXKBekgCA3MPztQ2LcD2uRSoDxhhZAaVsPpUXe49jADQzQSQgqDWHw5UScgiqmjA+LhmJRSSDACAGMgIpRdUQOREVyGYcLFAciZMoV0GDoKpAVCQDT5wiZEEIa4ZphGGYfv/bN598+PFHnxw0KpQc46EsZDASF4xqsBgBYESeQB6xWVbNsvR3j2HY6/4WO1Wj2Gqecde7qKpADYV8N3B5C3ZsdjYCWK3ZzLylonPb2RTQ6rFsJAJGZsewadSF1DLe1bzii7ysODqYX3ULHFG1liLvv/+8VkiRallurqfIlOdFyxJguBoHkVLXTBqUwnoqxyGhMSqThUAxhCS5rOv6dn5Va53necmrJ5GSWiS7uQqnh/sDxagnyDPjFVEIPGGk+fGNLvOz8cpMxyu4wvdyXq6urjgMimSASKGoVLUQwnhDzW9HY2ZiMFPVuuZcJM/rqahbV60ayuhYC4MxUhSRZZYQ+ebZCzMTU2Y+nU7r+uht8sVAyY4f3BTWlFBr6R1MhEagAETeo4vW5lxB7WVpbl8SgfdlVSFvqNh540AEKohn7xK0uqBkBDABVUDg4H6GSs2E3AMZwmZA1Edjozuge68MCUHZs4WErUZOVM0Ua2/V2zyiVuLTSjXayV5KQGTNv1XTM4qMmaUQpK61Vi2K6mPGKyAhsSMCUmAgVDMFx+e8CIiYgYgB1Cf+4S6ocQ64bAzl5TlbNezGI6pW2zQBUG3pAddftVYA9FlQWzIGAKh1rXUv2lUiqILGabBaP/r0w3/8p19WrUaGo9EKH338wXg4/P4PX5gZqL159c3NzU3OWasa4hhSSikCoQAK1IDMAZiMEUKIgVTlUWYEWHVZNSPyAKyAqKCqPEUiMsbEMYSgIrIuD3n1NsIQghiccqmPrbCFlwfPk9yd7hgppUQEqsrMAYEj5lqLVQ4MhDkvVtUtN8/81H4oglUzhBRiSiOHsOWuU2Qkeni8O9xcc+LD7REiGQiAAlGKXjgqAIJkQXvaGRHZwQ0NzFSk11YAAJsZtla7amK1evUTElGIkZjBGOxcjh8oNGcMuvfVSRTMAJk9ndj9OsQW1SVDyFV2NUGMhClxCEQ7iGlEjMHMIDCUjtdAiIBe2ge1Ngbo+Xw/wkieXi8l674Gm9OWXdybQzEkJ1oP9XiaAVrcxEw9zS5dniNi810R0SdIdZ1SzuPtOsujV863T5SIIidsSJ+lf+71IAG3WSAIIeIG6Inoo2ra4iA5gJejn1jwVivr8RV3M5i55qWUIpK8X9YvZmBVBmuDIkSMyAueW2bAYyrM58kNPQOgpthHNhB6U6O3YbdAV+fslsc02/Iqu4pebRWx1ZvEPD9r1oZsEpIPAFQzh/Rg5Ba622VdmuBzIsY2QLzWauIFzbhFlAFSjNHxrD3PmZeTiLRyL2h8QQ5c4eTvh2QF7+VpJ4TAhP3VQFQVCRRhXddvXn37wbODmR2Px7I8iBkQV9WBeDyMHJOUrDmzVSKItDV54iYutUcmGtmZMbPj8D0+PqaURGSe508//fi//q//D//4j//pl7/6p//wl/8DcXz53vPv//AHn3zy0YsXz47H4/E4EZZXr159++23p4c5L8XMxnG8uroqa5ZqCGaKTARAPg7kMIz3d2/evPr69Tdfvv36y8OAf/qTH/3sxz8jguAA+oYqJqIBW1Zqmqa7ZeZIf/7nf/6Pv//9r3717YsPfvf9H/5gWWep62G6YRtmWcRsvLpOcPR5kmZaVXzedIy8yzYPQ/SMq+ScwbKIvXz2/NdffjUmToH+8R9/m5iuDmPEYaBAwNXYKBtCra1lq5SiagKmqrU4vRHuQBGdqhBhLVIVuIgqmACAgmogCiGhqZiqmOPie2rCLO7SIGbW5s40+rZ9uNRAzb36TV+euWCX6GjcS8yMwYIoGiDWc6vVVpje1O+5Fu5p/mQz0LfDLqsXzt7g7tjpe/tjf+wPatUOkjuMyrY+iONmH2x3UFW3g2DXkC07HLnzqxGhNSAv7XVZwEQN1NTMgjemu2EkIqVAKcRoABo5mOhSCiLEyAA9Nt8FhaoKmNJFZa8p+bi8dV2RaV3XZS2Pc17W8vj4+ObNm6+/+dbMpmm6Ot68fPliGFJe54eHu6VmnwXDCBQTM4mWUoyZI7GYpBAiIwNOw/D+i5cB6fVXv3n16tXpdMprfbGWw9QOFzUhBNEiUhgQ0NOexExAVESKCmY0D1CR1Kq9sbahB6tqFcs5o9rV1dWUhpLL69Mpg6XbF3FIFZhjYGZFMPV6lYuE8962g53Bt1FI4ACiqgpitKEpkDmU3ybSiZCI3fdrUh+UzqEJ
EoJ1XUphLtMbGNUURu/nB6PJ/PqhpFJ1Ng5mVZbu/uhhawqq7ruq5ra207r4zIuXBmdMg5LzfHzKSqJtXdHYLJUCGGB0Xv3+htw14zi2Z3iB7xSXmiqjEuxd1Fxd1L6eRM+5zuNIh+vcfPmtHeQ7pSog/23f0P33r8od8+sarf+vlUM5wkyBhGdhQMwdX7rAUnhJhiTBgWPKCBhEE5D7NeDRfSOWhtE6mFytyt+/rEeroY8FilHQXU8J7nK3GA6DVlBHFLlG7ubp89u2Py7fFUtw3QC0I6LM2U3YhQmiGhI5np2iptKWVChEJMTDln5pKJPbIVAxdwTKUXkUSiaMgIh8PBwXLOpSRmbFq3upoJIt4eb8IPRk5VhFwiptq2rda6tabqjjPZb9IkyoPMTMzRs8rMx9vbSuksNTK7RITAp9N6XA5Eoy9hUGAjYhPJo28+xM/hsv57s7yXkCcWG3rxFhHgwibk7u6tSVz21JqI5FxUfdu2SInOXSkmVgAEM5xHxnRmRkZ5JMBENp3XIMcLVylnYs6JWZ32MjsdMgJCwDmx0LwHhPsBJxBsh2Oc5n5nmnvltPkzJN77H0/0aNw/zf1uv4Z7J2Ouavyz9J6ry9WmVH+7x9AZyxAxGmXBRwnF1QAsEdnotk1EdnjRWju1RpRKPtTz+i8/+/U//fyX/+u//Xe/+s1vv3799pAowdkVXhyXkrhVr7USQc7sBKrGiIl5WXLFaORtKk21RTidnM1AIpsJ0Dt+xQB6PhPROKWc83I8HJZDBllyujkeUikRExqMAnKzFkgT1YhzAUDcEnZ/hlO3nEh+PB6ZuZ3WzN1SEqKqnis1j7ZABdNzFTypu6qImRDB7fF4d3f3/PnzTz/+5OOPP37+6vDy5ctPPvr45asXr54/o+WoYtIsM2ZK5KYmoIIAiVKmZN26OIA79CFuAAB9bAkCRnoqfvMt9nnqkY+ga58xnAHkpHO8+DZji5yfuHv0su5TNrG/2LUEXgTyA5mK3wYXIF5XPPaR6pRq2nG6TIndXedqXM188ForjcLF/lb358ztD3YO3vhV/J8jMDq5AqG7ubpH7nApB4lJz1o7oRlnJKziAgTIp62+eXv/eFod0MDO5zMABJBKpJ1Op8x2PCQ3paD+AFcH66ww/XkNA9VHSAiIBoHt7PV8ZghMwnDTL7hZGG5zYmdjZoQgQcYxAs6MqANxVc0dUgpfZeuvD41HtiiWz8cCmtkldxRgSPJDPtzc3CxLTilxTsxo1h4ez89uD4eSmikBKIC1+EsmdGQC6Cw4jAQIwE4ISAoBZ3TbLnIUQ54gYCiACMjuDoTee83M0SNxdiX8mIAIdkmB/ggGRIQj0Ya7zr3QiwhMIC59zV8w5XaKFl1XyKeORJl6ytg8Z1Ya5/lxA63pXlznFqA72sv9g8DgPeo3M/Yc29WQn9QJn+ywcajqEA2cSwEA0fjgu1DQR83vWl9GQLhfo7HYCE6lpNZaM+wwU70sirtP7qm5ZYqIem9+mF/mHv2rXbgD3dN5NQqaqUGkzdABmF3lqlNzn3nah9f7Vzhxj/tjrtZ+R4eLVQXo+6iAuRKISMnZ3WWMh6axTYoDdluDHX/DREoKiNipfs1FwaPVbF+KBIDwOsLQy1Zba4Foaq3lnA+HgxlkTowoZglQR5kofNqI2zgzgYvp+9OjvszkEJ20bmgAIG6gasbuTu7NVTWN7F0MEAs4WDjfjshsS2ZEjLn2AU4FQDCYGaaZ6kDscRQOQFRMCAxrfuDco/3xmoL4m5m1NtemYkQp54W5uTtiuKqTYCoEVJw855xzYubJ7DorYHGEy6uq7io6rGeEFl4U1N1X1XOTx/O2qamjgjOzOnGmeHspJUrJwEyDVVWjUzz4knGURB4fz8OZnj1CsWtkAgQiRDYXRyBKOaWcs1Qm7Li7yOpcAEt42ct9oE/JO2FAOOSAuB8pOxVhii4NUhndtzi3XfZ0Zy+m1Xhitv4Ab9TTwz8gJPjwhP/+FRAxmHjin/mwLMviJpdhWRGWRXgWkSJEggwAYigKG3OQwiAz58Q5OygRZCBO3e6bGcZobCZkwg7eiSkRwMFZmVJY3f4W/kCyzQGWZVlbLUt68eJFOab7t2/u799hrYawEj1blnAAAHxrLSUWEzMVS5xkrVJKIc75UBCxcDoshQBdNRxiREw5p5SyuZktaSFEIliWBcxT5rwkIkiNMWHXr1QAwMCbSK1trSLN3NktQAkYfkdIs5m36q01jCgup0NJzKxNELmU8oJfPdZTKQUAD4dD285lKWbRZd/JmdUUB/Yh9t6+xcq38AfuXzd8W6DIzG4APucZX9W7YJeGM3PT+ASv06xgGPTnQ66uU3uxG1k3WUSYUkrbo6SUCt/krCKyrjWazGlHtx3PGB0mKScHdjAYjKmJUweCzLjug8ZaH045jjhwcp/MR8PORHW1MtNX0CuCu8uCzNvbf93+2/cb+T7ju9vmHQC0V6EcQKPHhBEIU20bAeacSkqq6irapAGegHK+ywzv37//p5//4h//6Wf/6W/+7h9/9stv3tw/nhvmgxO+e9zQKWGW4gVJRNbVmJEOhQBSIkqYokAHRqitNR+7+dbCsmEwk3HJAYw2M06YmcLmLstyPJRSygG0LEtMIQpmvypybhLbUKRFmloVa2oeQ61yTiWXUqJZK8wkE+ScRVZr1TxXaSWpmr/e1lLKsuSUqXBKiSJxhKSyra22x7p+/W7z37xm/mUpJR3zzc3hu5999qPvf++7n336nY8++vTV85cvnr24Pd4uvKTivjVTNsOEyIzgkeGEnhSJlC9IyPpOKuBqQPcfPKaKDRt/cVLxulgxTf1e3Yg6GOSJ/4bX179sH3Sla0PqesAzqYmICABVbZ+1hIsXhFNE4dpKyADsPZFzvBBQfUsO9Fv3Jt/lUOZSkAO6MxEipV1K0d1jdpQbRv0pvqUJGhQT/ObNm9ff3J9rZySKjSkkSrZTay0NkA5RtGlFOUwHnWo8EZkJmhsggIFDUyd3jMHd4AYGOwOyh62FXjdtRBfGckRUVYl+MMZpmiIZqn4pbT2xtPFhXH/iYwmwNT1vm5mVnJdlwT5mloERID2s7WGtzEfnKIuQyImDb8tjhqTFtIvIzSEgoIIbgIQdz3ycN7Cfk5lSCo7WKR79vRcC95j8ECpCFGgLvph7APCYyYRTLi9y1ZHqO4akoLkYybspadN4uvvxeJwffusJT4Qz6me+y63s0zR74Rwv6IrJ7InEzu3A3aO/xq9mwOgFuT3ynvsL7t/y/pP9beyNAwDUWnkQ8I70BLt7Gg88Alnp3DXBkTgUFbHHNhQeeTg5PnzQiGGiR8uvUd2hhfNGB+Ijxb48/hxEvNa6bZVyCqCogis4IDATQQ8Ir21XBHgy3tbOh6bd+g7tmvkDZoLkwV4wWxGJOh/UND19g8+lQyJ9fHX4roiABsAAkQHv9i6EfpAlXl6MSvSid9rYCJzMjIFLStFCD9FtYOZ4mXoM7sFNYK7rtsnmVEpmTimZgplsWzVty1IAyId9k9He6QPp2jmKY
bItH5MjI5oJQUeBi/ewNl7KbgFJB23msiwiArBFZHJuJxrl5dZaACBzzmJR/jIVZyZnR1cRWTJWV9RYltAlc9CgKWMm5IHCBkDyQz72hkOtaiBi8X4zp5g/BUMDxVRVV8Ct2WkTUXMEA2ciJyxlAaApGNpk21qTbRNxQuaclnjRiVNy4uXmSGM0orSYLofIKScCNGR2REcGZGRyQiA0iFrDtOZidjUnc3w7KDiI9gbUoRc5ZwCS7Ty0nab5btDmfuw7oqrIFe63arjebmG3wU9jBteH75pX9+c/+fz/xyOC6l5NCmcaYsq0ihuEpbYLQ2wzE4+ijOnuy3uMAGNsnJmCOhozEhJBMMj1B4r/UWAacCz1TJJFAmhHkx2JOIhEFABEyRANMIlUd80539wcDG1d1/VcD+Rbs60oUgICVUQHFQVEAnZEdd+a5q2lVFPixIimKWi4EiLEjEhDwNYaIgYK+rAU7KgBzSlF4z2AiVQz50Q5ZzJUcFVrzWqV1tQM1MGc1AOD5GZNRETUzFRQteUUnVY55s6fTie/v7/76OPnz5/jmtb1VOsW3KV9tcMyxRhxU3dfUkqcQ90ihxUuPeFlX+zLO/75ZMucW+M+C4HDz+ugEpFw3LdtU/Xj4dYuriMSEXWj6KHg0W/Zk2FxDidz9kEo2ktDnImqiKu16CAmSq4wJexqmx9ZPHcPLi5wIuqzTFRtumIwyCouz7tLPM+ddZZELjvjbnOBXXIHRmVjbyX2e9DEm8w/FJn8T+GCXPKkhkAE00/qxZ1uxns9iqJLDz1xVIqgijFQyse4/nmj9+v21Ve///u//8f//Nd/8y8//8XrN/ebkhoZFkBYWyXzkuS4qRvmJep1kgkZfck5gP2myAkJMjMsWlxGQLjaBERQSqXklAkRExIRJMaU0pLzcsillMLpJntKKY9ahEFnfkboVJ/hOoqomgNATjM+GblaVanbtq05MzioNCrZAZoGowg1MQfFJitjEIk7WElZPIk7AElcqhrU6ufmX7//zRdv//a//eyQUoByZQABAABJREFU08tnNz/9yY9/+uMffv87n3zv00+++9knz44HyonMxAWqAWOwTQJIFPIHajTeCAU6YWTC8DpEvAqopvzYDtI2aZndr2KkIdpP0tM4Q469RxuSsf9S3yUEcXeMe75UG6D3TV0KAPsdZ96nX1e85xeZOSFO4onLJkLk0Z+zy4+Y+76y8STsfHogAkDi1CPjIL7SrrAqiokTsRMpOCKrmYga3Trh4+P563cP96c1CgSOdDjk88OZmZdD0YqBNNpaLbGR9eeKlXTsBBkBV0F1jTSTqlLKjgSIFsmxKBWYizTf5Y8QEZCAsG51mgLOaQICor0WEQ2MiDJlc/Rm+WoB+kuzkV4X11EuI0Q0xE1aa80R8mE53ByZ2REcQdQz5nWTh0fJyZaSuBydANgiuFUXcidOiQwtxks4gHGQskBPf4i28crcO68J8252F3YPeTBpqbiZ6iULj73i15jziHUIohpGFMSTUwDMLLoooxLeg5QdZDQafz6U6tnDuf+QiObn82bmb22XfJ8iOtVqXqrL/L7kuSNSmRqBF1YI3WuQX74DAeDiHF8fEWDv9StuyVzmGj6JIcfK+5U9mQ/zRFef7F6jRafv9DGV10dAGPt69Ht0b5UHBrVf52lKOAy1urmBGah6tGktOcHOfvWlIATwfAFLXITpA2t1ufknz2JmuJtcScHSg5QSjS2/G7P9Btyhs9CRVEDo6hLsBlE4Tl0IghvQpf+hD4BokE8ASKdmjv436s1pnHJKEfICOgQpawfjEjq5xWxrQgAXdxFj1mSJkAFdvbMv5JwdPbJWjoDUkydi1onaR/U1XqjBacmFmQfYWtnA1QrN8uzecLMOI4KIpZTDQcOn2c7r3GZqrbUpswGiGTBnRVACcwNTAGqiNiLS2M0djBlTSpwSMvXhqlEYRDMz9AieA0Xp8QMSL8ui7uhuBmIxm7uqal1SVdiaqicHbWqEYO4E6uCuHQVd69q2KiKtRWTCIuauKXVVDyWutZ7P5zAiIY3LcowOI8QeYahqc7vIIPZlp84Rd5lpgxgLq6oIqDlAu+NlE9HYQi6FiC6N1DfUi6MzPNr41q6jOyWfGvTEcCA+9RuefLLXfR+Qng+PD6/85LcROExNrLXqIVPvs71U7E0VEMEx8DbxxR7VYHB3N3d1URczE7cCMRCQCBCpV5ijUX/elaGlGC8EZr00oe59Pqf7bMC6PEX8kxBrW1MpNzeHtKRt22I/QM66be6YUkG0CCBVq1mCjhrHpnraKgAQATrgklaCXJEs6OyDiN+cyVSAKBmiO6CZVlNAKAF26GLfc0O+NXV3dVPx2nyrum51FT3XttZWN6nSRoOkuXuinFKaNo2Zzez+4f7L+/uPq34E+OzFM9Wma6vnLefU/xaAUzQng9qlNO3u4FM4lYjcrra6KSf7ldyb7mlyzUJbr0hW9llVGH5qb+SmKwHbx1cAYODkgBjb3mhkwITY//zVq48f7h8fTzVkOGdqFtm6SzjX526hJSQRMZdOpgM+V+BbxX5/w1MNu139YDPdr8P+Vzjc7g91No6ZkIVd3Dhr6AMa0+tfqnLxVmY0CIAg/V+Bth2hAqes4lUEwJnJDE+n0+Pj4+eP6z/9wz/+9V//f371q988nk+iUI3XprW5h51XOTI0JzXEXFpb3R3Rt21zUDgcODknBzAihJI4gWt3ksjheOjRso7BSylTjJ3goDdLlHMumQPqfJORAmQB3lTVTdXcfZNWaw3Wr+mYIXZ+qGFy1Yxdm0hKKX306sXvXr89CzCzRFmGsbZmY4hiygSQcmagpADA2RVrrWZGlCDA58SIWQAfz/Lm7cPvfvf6V7/54j/99d33Pv3oR9/77k9+/MMffvez73zy8Xc++eju5pYLyfqeiCjAgWF/QHy0lozwLxwkBAhc3j4Ve8kw7veFaT+ffLI/pu2dGweMsGgvmfFDaFs4RwAQ8+sRYQcu7N+5V1UAmF2s8S06yL32gj2bAJ9IOxHhB8Yk/ql65RbPJ1IbDu6uW/iJWu3XjTsKx/DCEOkA4EjJkBJbr2shIjma80FM3z6sb+7PTUwNxCAvxZQU9Hg8Hu9u3739stZ6WA7emhGKKccQoskj0isQECnpmERjbgZITn3iOoAnDGCZKUjrVpGIwIMgHN0jgI2CrM8CoJkgZkfAYI/CC8lc4uG+Xhazr16Y+m54B1yuShPTnA/Pnj27u7srpZM/NW2p5HOz+8czpZyrChYAWIgjg0OQCS0hKqh6JXQGDTI2cKNQREStDdhSn+oTsFJipk6OhYizTQgBEVRkF1zlmDcITj6S4PN53C+QKNg5JNEo2QlstJOETpGwHaH6zNzNM2EX9e0lasr8Xq2mTZ57ZUTa8+S93kWP0n7jmJe95Dt2aUrqg0l20GsG6O2F7qM/a+6zsdHvFWHe5H5x5joESY+NxtR5nTSVPPYg3EXGoZL9KmMt+i5lV1pqk9Cpk2QQ5971FNNC9meqgohQnxXkkbYk8t5WhDBdZJyd
r6FdhOE+B9ja7JK7jYoKReGcelyxP2zwmm7bBpAjTYJBIzMSikTE/a3IED6aF4DrQ7UFQSEhpUzuHQccT2FjWlxChhyEaTklaq2d64OZLceSCwMaoTMSzxc29zZG6il7a00pIyMBogcRh6qjR+c0c8bgp0EwZnYuiUIEwQGRHILvLUy/oTsDtFUBKDmAaWa27OSubrU279ybQ5KJcEy3D7WZmgAANzc3EYkBAOeUofO5OTFyRiYTUW1NTcVrrWhr1ASIwMyQIOfDsiylZE44PDx3d4zQFFw1Em+JwcxAVMF9a01ExNzMtqa1qagDUDVshgLkSIYURDLmra4KyNF/IrW11n19MyTCaJQlorg3RDR1HNPVfETRAHBaz+6+5OKqiEAN0kqJMXc/HMx6nTA8N9+xwJmZkqkIC9BC88BLjsMvidvh5uOAymD3ES8BYXgqNOkriPZiut8X+yfj8ye77P5nv07W+re5Gk8u+y3H+CNHiKb+lPOyLNRZIgFdMXLliE7RNxiZjC5XKXoD2CID2teUyF3dIcqKI93kZjYcGgfoRtnQwNHQAIK3BhwxiDedUM0JLv6HAxBgoPBv747Pnt0WTic9afdBo6JNzMzIouIAYpDA13XFGI0DjAgpmZjH7BNVbU0TOKGjdkBkZicQVxcXyUiAYAJgJkZEHBPZmVQVHFS1bgaE5ljVtibntb5/3E61ba2et7quq4wdNJ6lLEVFUiLEzowU40N/8bvfvTvXh7r9j3/1lzkvRPT2m9PhEJw3BgM0goiuCUhERFP37bgfgMBNn4w4373s3T63P4eok2r4MP4i5C6llFLK+eEh8lnMYRD6vigiQKICFqSx9EHaAgFjLIQjOSHydKbN7NPPPr093j2eDq29c/d1XcFuQrkuab6ge0nAzFXsQhLW26s76uTirQ6/wQbvGu3axaee2shtT0yOu7v3Xg7cHbG2e42bdlV2A+ht7KHuznyMbDr0ipBrb1HbqzOGb+/u6RJXBJs6qKM7VnVENF4QWZHuH+5//vPPf/mLX//7v/38q6+++ubNa63NkZvapqaIgo7ECMREyC6mq7YX6blsD7GYEVGjOydn9CWnKFASsyHGeDTEC/dMMJCVJS25IGJCSCnllJgx4ZjXmhi80/SLqVYxdVU3x/N5e1zX8/m8qTmSqkMUglNKKUZ3YmysAUn9+NWL7376nV998dX67uTuTZqAcXYQ74ED4uy3iWREsNQG9bRBDEJQUCPAhs6IJR3Sgm721Tfvv/jdV3//T794cXv76uXzH//wB3/2Z3/20z/+ycuXL3/4yS0QARqxAZBjMwuODAMAjiDQo3eXIGCM11Y3REJ2YELYuZvrehonXjxRAEgp4WhO239OoxdmlgpnIuNDrxHgEhC6+yztTocqOBWn0zmd4ynhc6uCXfi6PwHwol++c753X7pjrECsbYPeLuF+Ib17ahwQg5UMRdvc1BAxEce+rGIG4ecoIHNmZyJMm/FprW/enx4eN9EooFEpx7pt4HRzd3tzc9Naezydbm5zpGDdvaowZSIiJndwtN79BBelJiJArFudDnMIOX4wRG4ugu+SAm44k+mcyoBNwCijgoiKSFl64EF4vbMTQh+qZ0Tk1GtWm6g5UkrL8ZCXhWO4NICIwcJN/XETeqzMLFjN7PaAi6dDyZwwERq5mqhjZjAQBgAUBHPCRABEbNKHl030r5qIEqKbIwJT3IhDRO/X+YUwuSqeUgqSANj1wsEIa/fGM3gZ93m6aJuK2KSUMk+eghe43/n5XmX4ml9zeqfz23FkXee7myfMawKA20UR9gIP0Fkk9k2J2KkxLqMKrneNi5BMrdnf1TQOe2Xc7zLzz+cJc0NMqp5SKqUEgCduutZacuf9j0S9NK21Hg4HiH5NpAgaI6ASkWVZ9mukqoGCW9tmYwiEqLZaU2JbGEfLXSQSeBCI9ceKBNB4/vni0ziBiOIr4sFqrctyBIBAbNpYmtYaMx+Px0Rca3VXpOD/UDPLTAHZ2rYtp0QxNOupZQnCPY4gc8llWZa6tXQ4BPLDRaPvDpDqdjZHETmfz94nsxMApVRErG211sqcS7n0kkUmY11bLiitcSmllCoCoIjkKnMIT0pJRMxS8I6kRKUsTSqir1LdLKUEgNG1nHNOnebdkMkJpDUPxAVRU02JxToEszU1hcPhkJibnltr5/N525q7l1Jub28PS2615tSHzqPDoSyHsqhq7anEzim8wubGOedzDSgCBt2zmIs3Az+WotpUm7sT4/Pnz29ubnptBAC6I4UDCdNqaw6UUjazda1NBJHEhR2YMyKIbO6QcgFM27ad11bFiZfH03mr4kgPpxMQI6at1q3V0EoTtd7FwTBonUISIGaoDLMw1SyIsA/EzOxqta48/tDMatWbu1s3qWdFREpclsXUWmtcMomyASIxc0lH8xbCSZSZuUprW3PrLZpTd7obPjQ27M7UBRFh5nAFw1lBJByKHUw801xO42jwtLt6by/21gT+u8fe1ux/ADUZY3biWww8IZrZ1uoxpVJKr3KnJE0o87xgKpmMHh4eECAgYf2eo27s5u6cuKp4HyoAqlpr1UNidgAAJkwMDoE19c6Oa4MMQLZtyzmrIgDGUoedVjDiPE/7zne+Iy6IkHOOnbdkOtwcVTXnlHM2acsht9YMEM1B6GxhPyUlSoRSmx8yIxCWY06cyQ21yfn8eHM4llLAfT0/ZOqcQ63qzc2NqazSlmVRtU1aKcWBRXRrstb2eK6P53Z/Op+3bd1aVVE1Dc5Rs2VZDocDMyOYSDOzV69eIeKS+mjyb7755iQtl8Of/NmfbPW0LMfb27u3r18vyyEvx3Or6pASqSonQsDz+Zzzcj6d3rx50/dX6gPWrw7sYqNzPu8wboidOZiIwvVjvrh3UdWPa7TW3DEnVlHAzMwpRcHHCYmYxDSlNJlIB3AjshvAzAi+bdu6rvGwv/jFL37wvR++fPny7dvT27dvCUsUx3LOAKuqllLCDxARNKdEgIQEjJmwACR1c7dws7pLh7j33nDXjDp9BRgkAdMbnjt3uGIppRBaRFyWJZR9RpI+4lW4TtxM/dLO8x7OtAEYDhsVCBTpDTPdB8ooMcJH3UzdITkhYGpmRIsjPTxuv/78t//17/7b3/7t3/7617+9X2OHZSVorTW1pl4NFQHcMrOD17aRmbhVrR/f3QWGYkmMCLWt6wpL5gZ+OC6HcgAACObAgAw06RU8InfPaCXBoZTMveO917eJAExFU87uXkW31qpIUxfTTe3d/f35fG5NmwpRSkwGDAAlZXdF55xz5uSgrrosC4H/9Kd/9M3DafvHX75b5Xjz/M37kyMtyCoSYSciilRVjNdUvUaKnCmDqkZ2VaojEjMghvEgQKCFMm2Grx+237/73T/+4nf/77/9l5/+9I9+/OMf/y9/9qNXr1784IefpYRgyonMcSmLSD2UBQBUPXNyx1YVQDld3NDpvYUHpbsiyfxh53Re2WEz8zEmnka32zTasbnEBUPeOi3bNYDZB8+ZOxJZ5HDDnIoIDAqcuQVA0Pn
ufFMYbvq81QhEQ1ZVVVrvFtn/iV+yn7PNh+dT+67ugQQ4ainDG6S5RaqqWWVm4j6IS0Fjrmk6Zje0oGNAMgcTb2aWy9v3b94/rKLQ3AGzk53W1QAez6dD4u9+7/tf/f6333z9u9vz8uzuNoZeIqLzRFChg8eQNwMH8JQy7kZtISLn/shV2lyunHMpS6ywamSrLRHTaMlxdzVRVUBUa+EkpNSZ/Mb7Gq1V1O1qSp0ssLUmopFtibcvIpyPmeD4/NXN3XNEbq0hMGybu79/fEiEBlwNlpwezgIAW3M++ZIbM+ZEpfDtcTncHEVrZgdyMFGrjqBgoJ5T9yVMZl4AmbhPgwBzBSRiYlOV2jDFOeniEGLC5ABgqjBUINhiwgvCAakdb9yfJOZoBzax7id3soxQhCl7T+QQsbd4wDW2fxrbkOFeDmFOKUXVZMaNOqbI5KVMyQeAyQSRM3uvk1mks+KfonECzjuc2uoTnrl7KOtMCjuAW2RASjYztT4dJMJyJgw/Bzw2MTToFYiEl0Dzot6zdjG3sWVZTPvAEyJCmuwy/fzWmvkFAI2jTY6IJMCBEc6lFJQhPcfjFgHhfLa9p9ErJOTowTOkgIh2AXMyc7jNYe+AKKhXaYAY+5YcBbMwLpyYUTu8Pl6+e698dsxoJHz7agDjCEyxOzgOAKfTiYhSXswMVHyMeQjuo5SS94glhhaE57l7f3h5qXF1JOqJ5ii1m7kjYaJAJ/ZkWDgBFxmYptMQkREQ3dyBzDGgXyklCN4FIDWFgV1HJHU3UQLPnKK5HxDDsN7d3d3d0VAnghEOxT9Vo0pjZsaAS8oJCRHFjQDFpFZPiAIGFg3VzIjHpeAB2/mBmdNABN3eHnPOItVcCNLwpbpOEiYkcDMzEPWI4QjBnczAQFvM6TQHp9r0vNYHd2m61nbetIrFUEg3b9qaqIqphw89MOsWtI1pqlDfkAhgpxFT00pZmNmgEZGprOta6womN4eje86EaTlkQiS2kcKcHnAXaXRwJUp66Q9mYtYBt96bpA+PvfEKgzVrRIQ4iSW3ptO6TUlz9z1JwB/6iv+/jv0uDgCMyN77GgF66SLsRmsp8Cg86L+siSC21geNJmIgzin54AMQ6E6MmZmJepSHHTER8R6GF8+pwQ4IjpQIGUfHGyJecAeIhCRqSM4JmTEjEaK411oB/NWrV8SomzJiTMkDaaIeWSSYiUM0AKubeVALAKhDFTydN0a34yFUNTyoQ2JQkdqQoEkF9O70YnSmgaqez4/RzmF2Z2bbup7PZ4EDAKnbucnDut2fTo+n7SyybbXFSFx0oJQ5hNnVA7ZKGL6XCLC6e9vEUtP7x1/84heH28O/+uEP6MVH7+7f4MiettZAzb1gsB9v1cwQuZQoIAAEAwSkvRBOSbUduOW/L71zd3C/WHJ3MAMi0tH05t5Tgp2Ab/eNO3lzD35JdMfLDG4ielxPX331lUNz327vihsAWASBgBJQQ+i6mRjJvI6Efqht/5bIKA9zRDBqejiOfdYm9sF5e3NNEDH8tukZzx+myj/5q71TPl1hIjKNzy2kK/rt4z4NvDUxs5yXcOJFBOsDIxFncAcExQTAzSktd28fzj//xS//03/5u7/+L3/z699+LuZLOQZ9nImKVBETNXEw4Cj1iwu7I6ihIwbXGJWSVJEC0OdWaz2d8PnzOyKKKCsgo2HYeVDqEYCZEuCSOSc8lDJeH/QGrBgTDejmYtJU1VHd100f1vPjum5VgmUxdD86imHoOwESGAAyOpjXbTuU4x//5I9+//r9+8+/fHx/z1gS9nY4AAQw9CgyIwCIVEQG6Ds7ImUiIBJvAFGVMUVHxPBgDBgs4i6stb79/Tdfn9rf//J3v/r7//zq1as///M/+eM/+lc/+OFnL58fRUxWOS4HJw6i16aurbphKWXm+ueDTN2ZPsm06lMah7pctMPMAtWEI4XhA5a2v8j8YWZzpi53Bw/RrE/sHNYsai9PCR2e/HNvDZ78sP8V7hDmsNt/54755JGRdtXF8T3gF5fdLoMxEACi21/jIuAA0VpBYKCGBuhIBiTiTczMz+v5/cP6cK6bGlGiBGjqqLXWm2d3ZMqSPv70s3V9fDivy7Jsrsy8XPMMB+KTEkInZazBqBk+9XxwGkWz+YImcHEuS7jQ8wSH0QJ6zY2/W5O+Ygw4vKkeOMUrVvTepeeu7s0Ac1mON3k5cE4pZ0hpeDvaFIgkF0/ABgyArUpZCCwRwCZGTU/1nDMfl1QylYQITJSIenMU6eZOxNCHvru6ocMFKex97PwIgHFU5GzyUoePlCZeozOPY+hpw+sUho255T5b+/yynnuzvD908Bfi7vhWSR7fK9M3noI6gXX7fLrthqjPVzx3kAg456W88+lo6s2CPqV6FxBejnl7tKuUDg2Nb7xGSl8jYJ9oHwCkJ1bGR4iHPaODYQhKKa2mQNntVReGezT/3LqsD2xrYsQ+ezQGuUPP1BZVBNMJau+vqmRgAiZ3G9mvC3TYzOYIpvGeLvvrkDA39xm9uLvpGA0H1slduxZFv+mwNXgh5hrL1HEYw1KOiN91SRmYSilqFnjoKNaoKgTIDZCIzFHVPdof3N0x8JsKnRbUzBw0ws14sM61BzDuhnywHahq5pKZAo0OvgNbUzD0BUs72eDrQSYgMgRxE1NxTIBhwiwYWZA45ZSyE5l7FBlKKYErC0+xVU3MhGkaXIAeOt4uh7BotbXaNkY4rWutTVRFwww5ACGnQGPeHEuI07IsnNPhUFJKxMbMy7LkHO6m9v4xc0C2jp+02iHwJG4gog41oiEgADhv+nhuZ4BW9Vy3KqoGFpzD4CKjm8zM9WJDB2RlOvoJBmexg4VDEmcO+6LhQiCBawx/E2uCDtJqKWlJ2dFlq1gdAJphlDWcwhIbosewcgST6G5JkRlSVZ8Kcq2Sl01ial8cqVNosl43h0wjNf+8//baV58W5Mle/uGx13e49lTmdUICffc1fYmJohJXVRgw8n80EPzxCucTMbPWxkyKiENVA8XtQbENgZMlBHLDIHp1a+NMM0dNQAhIaeab+uqNRj3AK5+DmAkQzAH11UcvCFCbMPOhLNuybLWigXn0lxuiAzkBuGMAmwHADBu6V3vv3uoKAFKpbVXawdX9WMjBANjtvK1NZcklEYoIj4KAmQd37iOuZhZbxfu2EZE6PKzru8fzu4fHh/PaxNYWrU1UmHKihIRgqkqMzETAElivaPlzr7UaPMK6PaxbzvmTjz56+fLZw/v3ik1VKTkAhZHk3asBgMPhkHqIzmZC+FQy9wI1l3oWLsJE74Wkv+ILF8MUbDMz6IGFmRmNv/Jus30v2PFLxEs7A5K5a/xFznw6nZo83t5yzrlVtw529egZm/trUJqKCKAgASBTTNwGddfj8TCdYNg5Ft11Q7zezi/E31OzxiNcsjNzw7Zdoveyge56g/eaRZeRzcGUYO5q43UQUYCSVN1QDRIROibiRQ03ETEHLsT5/bl+c396f/r9f/vHf/n3/+E///PPfvW4VuDsQOtJcgJVa9K0iYopuENMSU
WP1k006uxfEJCulBIzmlSLWFLkdNLjcXE/IKdEGJoJAEQpKjZLzsyEDomAmRNxTtOBMzMEgFB43Sg+UkADr03fPT7ePz7cv3+s0kTEEd2VPOYWsbsnQgIkBwJ0VzAHVXMD0Z/+0U9+++U3X9+f1rcPuWQHiMWM2UIRY3dnDtFdoc9bIkQL6uhEfdc2tWCIg6gilgIAgMwAOR/WWr9+f359f7r//LRk/tmvP//TP/mjP/+LP/mTn/7ko1fPXr18WXVzIAIidCZyRxedTs5MClwU59uS5nDl8D0NCCMHNs+cBvbJvjCE6mmPenyYcq8mXew5Yu9cfXI4AWCs537T2d/z/Ir5SeRT5o3N/075n2eOn7XH+7DD8tl8FriOjsDpgkBx6O8LsJiDmAkQYKpi57XVKgb4uN6/e386r6Ia7wcB1Aya2LO7u3pegdInn3334fHtN69/v6ll1YWSDVfT3WOyIYAjEqEpmJkjemRPEo2lGFRb0SsO7g7Wu2M6WhICvwZDfRwvlR+IFgVz4IvJRURX8ZCcC07BprmbSzFFXRre3B4PN89LXhKXVBag5O6cUDZXUUfgrTgXTgBA27ZlsZyVmZGAGQ+QM+DapGTMmRJjSbzklDIjYn18w9D7wh2iFcsd0SlxcINbRLmMCMzwpDfLRy/k3kUZcnKdAJqw52vPxMwMeplqVsWnRD1NH+DVVrXXmpkLhpFk2WvWNN3hws2Ctu+Ao7gjkt2/x8u7c7cAUyDOsjldZ/CJyEcr6RMVG1ewJzvIE5cyPp9lzL2GwgwII9EZYUx3qnSwj6iouLODk5mF98nMAJesUn8eG5lmgIwlHglGYAnQ80yIIALMN+6GboB9AEvc4t7S9Ooh9bxs3Pa8desFgb4dBjGJiUJvqLtKG4z1Cm28ymHHY7bWYkrq9avqeh6bvEaOAxwdRJVjOzE31dYHo4sYBv97r9kHBamj6cUXDxmN99G0RsmZ3AFN3KL4ljiHByYqTsBozgkdcuFcOLIMAIA+XeleazYAjK+LxLooRZUGKOrCBp7MdFlihgszOqEOx+Xu2V0UcFIqITdExGSJGbzzXsaWEN96CE458G1ba60isq6n07rFY0rTTZqqE6YcYyUIPML1TACQEy1LLksKtnhmNgRVQlWRTUQapCYOaCpuHuMZTdRVpYmuVcyAUgaHs7SztNW41lariZoTAXb/EgADFRMaERpLmBJfHFxCTAgEDirh6XtvGgHulWJQacDm7mg+w0giUjfdpIlIEUR0USRPKYkBIkcDp2Fv5kaKNloX6irKTGZgpjnnSMFME+M7LxN2NYT459RHUge8FMRGemEaka4CAR54YsV8x1b3rcfeaO4V5EODEmxEw5ZRwAQZnKIBoDdHmYtTzL/CS2ODqmZicthEDiUDeif8gQtpR4yymI5FyDsROSAYAnKMObr4E+FqjA604ZcAjvKiiBggIyZCBzowv3z+Ivr6llxKSdx3334lJEAzB3MQB83E6jFXzQCwqZltrQEzl0Q3JZt2I7aUglSqrKrKouqQic0VB7Y8pwJEYnB+WJsIcy4lb6JmbW3t3ePp3cPj+8ft3HqFm4gT55QoePuD04tydE1g+FpzX1HV9eGx3D43ks8///xXv/rVs2d/8cknn3zz+neTGBndmBlMpwRG0oc5m9kexvOHhGTvinWpsDmSPrbPD7fAsPyu6uBCC9sTqoABykBEx6uJFO7OFG3ol1EV7h48sVWbmQEQMyq5KgBaKYv51QYJI4kJFyCGd1dk54k+kfO+Ptcp29gf927KEw9j74tM1Zu+zl6n5oeXdeiKHKZfI284mpT88XxaluX29taJVbA1qZuaWW26LAcDOG3t3cObr755+7Off/7L337x81/99s39+fXb97UZpZL4IA5mIrqZgRmqgwI6RAWMENnMgheRmVOizImZpa45ZwbaoiGTohyh5/P5UJabGyWK5nNXcFTPgO6ojgxIhJMHq8mesi/KBe7uW8wlcnQEdTit2/3D6f3jel6rBThsGLqUUi4MPV8MhI5zPpxJSqWu55vD4Y9+/JMvvny3Gq0VqkaKsbsxMbAm7EwaQzi7MIwcV+KRiaeg70AAdaLtfHZHoJRSQk55OSCLqjrau3X7519+8cXX3/zDz3/5P/zpH//ln//pv/7zP727Xe4OicnAtCQKjI+D+hCJ+PZp5XCH0t8LiersNb0YvW5+8RIQTkcwIPRTvy6/5astZn51mKb41bjIJVt0UQcngNhZrpgz5n8/tA9xNYM+YjYyLggQc1KCtQ87/LI7gYigJghgpog257PNxCMgAnQhdUAHb6MXF5EcCZ3RyAyRyqp1a64ma7XTuTUx5vT+4fG8VTN3ZHOXJlW0qTiCqFUVBzre3r38+JPH82M1bA6sniLcw7ihC6eRe0zCiWw/GTrSoOD0ID0y986n4nAhLIiKotokHwKASP7EmphI2AdDJwIkQEJAQqkyG6mGebma34aRkO6JUmyOudwshxviRdwzIiCaWUI2IvXamp/PZ3Xi5UCY2tbWtYNQuPCyLE0hZ7+7OTQnbW61kVvJVkpOzFlSgsTARECQAnGXCE0bBMECqZmiO5ITkci6N5sISIOFAfBiePcyNk8ekdhFPrvZHHPun/z5lEMfrKT7386kzL7cNxUBrhnRdoLt+8vOM/caNy/lA3oKO34XIiqlxPsZJzvARSXx+piqNN3ReRCR01XPwjQFe/bU/dEpPYkuLXwdFJtyYJRbzy90Wou5Ov1udklfJ1dVFNm/pwF8J48ewtZS6gjPbpgsHLxBCysy20/NzMH522qdcwN+QgU3jVc0aUDKMPIliOgOQBS5SQAgMECPjrXxqq5MKgAEZSAhIRO5R9UuEW+6ESCokTsRJOYcLRzxdVGRV4329C46Iyid4mKmaDZz3TNixNnMauZqhJQol5RLKXFiLHxUO3BXHDdHdzAPXg0HwCbC0TqYGD27iyko2rZtzLykTJwBSMSMjBCByQnDTY9FCH8+MYdr229vbj9qOSciysx2XBBR9UWtKzN3XVcfETISUa1buE1AGPDjwLjbqK/q8MlEbF2rErQmQO4eBTZwt9oUkbamVVQM2NnU1q2tm1ZTETcHjJHEyGyKYmgqiOBOQBhK6ISIhS/8Tsxc8iG2zMBYu3dzvCxLzpmIIuYSkYphNQQAgAmCLdiM1SNNjoilkymDX1G2IiKqCvElkwQjSZFzDl674SleYs5Rt7kyZ4Nnz8beQ11tte6tzzSFc1rxXsj3Cvstf3Ktfb5LPu1PG9cEcphFGwhMX7SMM+ecxHq11kckfLmKGoQdGp3l5IA2yvRDwd0j5GOAPnWGKJkrUrSZFYtaBBC6E03kidrAOQPYHIXAzDPZ5+DPnj0/lqWZJWLOacnFOzElMDiCoSmAASqDG8aGjRj7cuS8zMH87fvToWQVNwUDN8dnt7xkruKqTgTO3lhdFABS4EzQmFnM1/NmBinBWfGhbtta7x9P70/nh3VbawdpmDkn5pwoEVifADSZXS3gOgzMbACtxuCW/sjv3z9+/tvf/asf/ujTT
z+5u7t7++ZeVfeE1tIMTKOlpOjYHRmIBhfB7ghBuApghghNeAzAHntxsS0zD3hdRUSI3rjdPzE0IbLC9PQe4k+jeBuOxcP6CMpmKhIF/0TYEem7YZc94ASAnDOgOJg5IQwsL6BcHMpo9O+rEY3cNGQSPqjn7x0FAAjIzNyn9wo4NP2y9ez/EIZy9b+iWMxePQ0EGiKmspj6w3l1R3BGYCBmTl+/O9vp9O7t+5/98lc/+/kvf/P577/86utv7h9P50rpgKlwXppobWvQJG7mgZsBTwjmEDswBpsuAhFTwj6eYb5Pv85Sqfv5vB0O681a+2JibFJelltmBgIxbK4E1lr0OkQsQO7ezFVV1N1d2QHAAMxgq/L+9Pj+YT1v1QycAq+LjEgEmXFJWZFyosKJqAt0AMfJYNN6enj8wXe/99lnv3v7uP3m998AohGYoYW1MUAEAmUEGF03NObF9yOxgzsDmGFvNHUwCxZu86rWiGOnACIQWpTgZHj/+v2XX99//vnrX/zi83/+l1//P/6nv/xXP/r+J6/u3L2KJAZ0E3cezEZzMf2DYvLe9k5ZGsXMoQzu06ecMgYAYTbHRXykxYJ27iLD7pfhKLYjoZm1mv024Xbxg/epnHlM/+SJsuxNxAxa5j/3fYPzoWZyM9IT84u0M0pcbWSquvUgnwkjymJVcCMDOK/4uGkT35rXRuqcvLx/eFM3MSB3ENGt1dbEHZj58XwyUS4ZzO+ev3z26v6br766YdjUoIojL5kjYWlmweAg4EFuhMSqaq1F+hIRVS24r3vd5QIlmAwYAGCFZ08mDrtlUVTEOUIMvdMjDx8gxm/GWzNTILzs+9dZ3bLcLjfPyuEWOMWwO8I+fogRPCV1EBGvtXDKJWVOtda1bu5OW1rP9Z7J3W9vj8shLzkTQ0I4LHCwVAptmrMxMxMjgTM6MzI4ADI4O6CbubgLmqNHLyYCXDIL4TtjD/WfxjBMV/MAffBE+uDhhAG164s4NAJ3Scm5Gu5Prz9/hddBZu95uU7fzIvYjrk0vkiGPdnvEftU4H4jcNcmV/Hb+IoLE+lUrr027fUunk4GKdT8w2trcNHQOP8ymJ47KajPmyBCZrYRJQIAU4bRAucfOI5ElHOOWtNkT4q5CH2th6WIxvruCvSmv96UKRD7/hDf2JkRg0KD+Kr6QYDM/aW21gw6R3pEtqmHBKimw6i1VK4yYZGNFKallDgdANwnOKfzpqaUomPJsFvNkjIgEaDFROcA1AD3+ScGHd4YmygRMwfRHgAjdkNsgx0REJDcLXJheGGkHgmDJedSckmJ3MAk8oJhSmg2oCOHFskATqRUDNaw+7zHTiCd11oy5wGAqWDZmXLq60xkBDxCU0I01dgfc85RkIglCnwVE1Ei90wExlRSlweEHvyr9npyTujuOWck2rZN3Zixd29YDwgphjEwAaGYNhUXNwBpZuBqsLaaczFAiYEl0kRs3VTUiVMqnAA451QyETWVKnbeViapOS4fkMWEiMfc80mxoxyPx1D1ABGt67ptGyIeDof4lQOIyLqup/PDdrbW+o5lY2+28EwB3BQUywh4Qmd9gEWZBjBvOL6RIsIyDBZdtka/pkZ84hxcNt1oNx/4kP3+Ov+L6WJHYLc982A7nGc+MTSXK1xDLPYfQo+vAiID7q5ubCZua6thRtHcXcIAahPmQ5QHMzGAMfNxObgamOL49rBlqqomlJY9vtQdY3QqAgAyovRtIFYvEgmgE8jkcMWinolzzq7WzMyBmF48v0NENMjMzOn29hbdiADbnHwghMbokJCcDSg8IodOhhnUvqd1iy9sLfjxZWt2XIrqhog5JUVJSA4G5kSeUt62UwhnDDjcqm3v399vj+e1vn98eFi3KmCITgyIDujESAyOkZtHSIgW+huRL2jflvqAUF5ExM0d7f7+/vXr17e3N8+fP39//9hU1VxMmTkRAkCtNafSmgTNUpjKJ298SODV3hbVjKlTxGOX9YvLqKpjzuuExvU9aVfiflqODp+gv3eEDid2c1MDmaFqpHRVNXWPVs1kKM4FJzNFqNeXcSYOwkqbxYTj6/LdE+HH6zQzDNKXqS94qewZfKBQOy/wDzoiT7TPNTgwbRYi4r+HZTmfVhEhLMxUmz28e3h8OP/Hv/vZ119//atf//aLL37//vEsok1sa3r37KN1a9vWECwvC2FqKrVKU4Mg6AV0JDCInAw4RFjIRDmlwkiUwILECqFnRnyW7Vtr61pPpxWAcs6EKcAamzpFVwUAmEWNMI38IyIruIrLcF8gAQCoQVV5PK3vHx+2rYkZcibqyVwCIwImJATiVJhyiulz3uuErirNwd9+8/rTH/7xp5989KvffkmAOeWq6Kg+YmyEQLjz2iQhMQNzDogSkINDcwTAWBzFmXjtDk9/eJcedIGdhA3wQAsmbK29fnO6f/in3/729198/vv/5X/+q3/zV3/x2ScvUmFCM9scdNetc5WJ28dLMPRrGnDYV7SH2wB+5QXCt0VrU7rErgzjLv946UXfO06XkNUJ8CLAe+fYr2M5282CHnWCq37a/cOO/rer+NPMOF3uAcBhDCA1M0TuHXqGYyS28XILQUaACMCi3sRF4by1tfnj2WozteSYHFJt8Pj4KCJNVcRazOhzcCQFq7XmnHMqrdrx5u75i49ff/11MzFVbIjcEH1him7anGnsNgiUAHpSmKJ2jQAEwOijEhPAOkIaTWiRgSImtoEzpIGnNAROSERMhND51iJFf+jwrhT1RlV19H0r1LQY8d+buxc3t88PN7cpFSICIALgPvcbUmZSt5FBYOYDcUJICAqITAZUpdVav/7668PhcHNzOBzLoZTj8VibM7egniiZklPArNghMxCkRAE5UGI2zGAKDtkbIoW1UWvuTmRENLGUgNP8hiR37/dSYQLcOzN7sfdrAOd0nGDH6rxXsSFXFyjmE4dn6tfugrvyzG7HVNPJQ7M3+7W2fRH+ohp+ofDY35Kqul9iuf0zDnzY04TRfOlTxfC6sDev5sGlMdsbRkdGB9TNDXtaBCJK14PmfR8lX49amnshDugzM/sg7GZmdyPwCRruXzF6zn0XEA79B2Ymh9Fj0kH8NGEVFlUBd/dJghJaYUNcaq0BXDYzMHU1QN9zfM+b6T+IuqhDZ0XzwRatiDZasftTq5krLPnyqsbMKyJKqWhfJWcKoNMAfuC05uDeR9r5YIvJTMflwIQuej5th+NtFK8k6HaoAwKZg24X3dFMRSwe35HdMfZyBCJnVEREaSKCmzQiUkUEoAQRqE8Z0h1/OrireCgeEoV1wy4BjgSJOOa8IRgjOYRfaIhESEwmGMSwqbWG6DmRQ1JVBwIzYHIwd1JTVQ3njplBI+ozM6siIW+tiQNXVQumEdNWXVUdqSxHREyZUimcEwA0ldQUAEoGMw/WdQBIqeScD6kz1tYqRHQ8HmNCC45+7liKUsrNzc3hcCCCWmvJnBjPnAJuR53TrKlqHRQvichHv6vtTHkYDGK4WJmEnDgoflvr1UXsrbnTgaaZ0LoIp3vOpbVIjkoH
iFyyuU/LfU+MxRTOvY17ogJPzv/Waz75qxBk2CWnHXeIgBHc0oX7LtFgX2SilFKmyJ44I1lM53RXExGBfERTYAqab3cXkW2tS+GoRqqa96pEBE5PeXqm4a5jFFLcNqd0e3d3c3ODYCklE0fEVy9e5Jx7/Apqpu5EaEhOhO4EwUmE6NCJ+sCJmJGSmJ+3urmta1qbnM5bSZnY+9gVdACIZnszW8rxfD4TJeYk6ohsCu9Pj6f6oKpVLfjBDYmJmPMcZebuCMyMQcKhKiXRGCN2sfUiiuTbtmHK5ZDv7+8///zz73znOy9fvKRwHonF1MyQC6KLyLPb56pns25JxB12rsUTMcAPEq7jnNFMPrYw62RjMp1CxCjCmKoGdC12jV0tHM1s7/ECeG+gpv59ZgI42wM15wx9zBWFLmsLTDsPXq6rm7cOA+5WeDrYe2d0X+astRIRXrVKfku0PNff7HLaPMF3HvDcNXDnfz8ppwDELIw40wHNxjXl4cHUU0qE6eH9+ee//M0//P0/ff755//HP3/1/v370+Oac765uYG8qGxN7Otv3hIl4gwIbd0ANgQuiJuhOxGYe8BTY7wpOUTSlkpKS8aSnTkhIiHFWPHoZQZX1YaIprCu6z3et6bH43FZFgI0s1XjcoYADI7kjFgRpbaYiiGmKi7eqxmI0sxFpImstZ3WLYQklQxojAEQpzllLtyHEAhzI4pQ1pzcwV9/+dVy9/F3P/3O8+efv3x5riLr6j3FC5AAERyBOyUbuIEbKDkiRnsSaQf7xV8gEjAQADSpREFa3gCAIuBV9fzMKAslB4DESFjb9vXX7//3f/cf3775+uuvvvyrf/1/+elPfnB3W5iMU6egGBvKhcVq5009SUBMD+3yufcE36UqMgXSRykAdkHm/rJPpW7gBnA02E8RRexj9L5VBfbbwfzn3t7OG8PRADb1YoaIeyWaarj/rRvOPWVeGXbqY7y4QTBcikprdt60CZzPKoZbhXNzd0TOTe183mptrdVNNrEWG7o4BEkh5URE6o6cyvF4c3f36pNP9asvHEgBVFzQCCA58uhn6y44WGCJj6NraR/M4wh6cccyRfMwnLH2lAQc1EdmvWUags6Neo9At2Y7PyFaQnyAogfEAO7unh8Pt0s58uEYeLr+V+YeQWkQf6S+WRNiYaIlO5ATO1AMKQhP8LxKa1oX2bb6xl1Ebu4+Wg755ubmuJScKDMwQSJHkAyQGJacEjsyIIOZgJ3iJt3dDX1MuTfrDsVoX+3W2PfNBWMtZlaii5lf7DDilcA8kc+9TZ4SO8+c7xTGIPsP7XlEdjsdnBvWt5QZ45MnJwwViFd6mXsBPSCUvYL48CH3t7G3A5guW9IMO2EgN/dnxjm9Qmi7mgMN2mtp2lqD6yxmn+NhfV/yEZ2nlMws0uEAkEqOO+CcTFxjG0Aa9AlWSkEEFzDwaAWOWpxzCO6I+SzmtUOMWmZmtG4otQk6+IWzlIjIesDQRyzQYEbmzgnLD6czM8bn1LccCD1EBManPk1oJTMXTgoOQPHIlJOZccqIGA8bIKIv7x86vfDVO2YimByi4eHEF0lP4IGaWfDrR7UvInPiGHDnptu2tVpvbj4tJbW2iZuP9BgiMmcH9T6eys3MDRG4q0cQtHaEU38RPro6GQkBFElEojoqIjEPPJ4rJ2Jmaf1IFMVjTikFvhHRl2VBInclZyQ4nU5ukVlWREQDNEVTPpTaVJtGcBQ6Yzb1cDh0gx8EOENH4qmIOIDGObY1czE0MxUQNyDOmSzasXLmnCld5tsiYs4JkZp6axom8nA4LGxEFMDLSUoy90vVFvRcEa6E3E6DHh+GnMckw9Pp1NopIjSgRCkF+eFQn6GP2IN2RB/9Y7k1BYAY6siDiT5u33YjN6fJ9msoUdgOZ07g2UH9ghG/bPZDi59Yvb2C7//74fGHPr+cIDpSit0GkQOhx5bfWrMWqOrgl0/Nfdqc7qCYmxknRofZrA8Q7EARUnaSMqIEsIlIrbXkwwyGHQgYOCMSiV4Mt+/NIifbtrgfUQXAZVlevnxeUgaAwqlKQ/fb29tSynmkbAHgMt0eAGk6WxfeXQQ07PTrKq2pCImryVYRkTLc3dyUUkzEzDDgdk0QWZouywJUHh/PZoBE21YNV0yMlIgZnJBTSolyJloSISGAWawejt6/wAzjJDthzjmbAqg2heNySKm8ffv2q6++mpJQSnFOYlfVrcPhsG0tfptSkvYtAjC3xH3CAsY+d+WiAeCgvFPV2bgVR8gGuJa84PXGFrRP7j7Hjrk7Ihg49IETHFiiMFytba01M5VqtZ0PhwWGodt/42WPd0NAzuzA5gBOCIyYDBzg4goAgO0GQ3Wo23XaFS+cW5dv2R/7TXZe1oen+6F/AHtHJzQFY4AHQG88cgc1x4x4Pp++/Prtb3/zxT//yy/++Z9/8cXnvz+d6uPN99VTPt4CwJv357ZuzHxzc1drReBEvZNCREQ2VQVeEM0NvfcdEBEDcqC+M6eccymUE2QiouS6qhox5JyJ0MbYNxetm5ieWusWnTC5WVmO4GAIaK6mJNbQ0KG1ZuAqXnV0GCQmTObSWqtNzaypNLXgnu5ER2Dh3vS0gwp2iCMgOJgCRRoayrI0sTdv7o/ffPP9P/rTly9ffnKWL7/+2kafBbqL9YiwF5oQNOJBj3wcI4ICetCUOQAYEzljQnJI6Kam5sKAjBjA8pMaOaqBqKFByujACuZN/+6//sPr3/32q9/9+vT//Dc//fEPXr66eV7utLa98OvgwuVd+9Dek9uVBa5QpkSk3xbvTVW1gWrrEu5PHeueWDmk6cXCE8UE2Ne9u7ju4Nz7PWW4WDivEDfWZNzANWR0GpB97DT1brbJed+mw5+ctC4XO38WV9VWtTVtTc6brpvWZuYZqTTF1iiaER9O29u397dosaGoi5moqQGai5mXUrx5bVtiIObj8faz73zv89dfhoPohKoqYJwTEUutl5t0J/SImuLiIp0JEwBUJWAdY4uPxekeYGew6/8vJgc4ALfWSFWprw8zMpdSCgUCRyRGWmNkKs3CP5+dO4Q9VinLkcsSXqYP59zM+hxgcySMYpyqmlclVHBTNzABRSbivKQCTqf1vK4nFcw5cyoIAEC//uL3x+PxxYv27O7m9rjkBIkooUo9J/YDsx7LzaGkRAAGmKzPxO2PiwEx7e1sweMVEtVlTIaC4AjVyLt1jcPMcITB/AGpzJTS657hp5k7uLhu/WfZdcbtlWIWJ3FsDaFHcxDrFO84ytKRujNMjUNqQ7xSN79OoNCO+G2swGxe3W0ralOzZoly/+w2+Evju9LimhAcaTWsniglw6UJ1PXh5pA/en7b6nosZGbv1vPxeOxjJxC1SWutu0cKAtpUzEyJzL0R51xElbbN0YygohqTOCTGZzeHdWsqTVUcEDBjocho5IzEiATkRMRR0mFAFaxuIjAYJjIlopQQjd3CPQAzdyTEQy6cCmNCJyZSJVNRM3fIxwMTIzkqiYiDJ2bfkxk7oAM7uBoaSHEsSRHOUpkxGslySU5B8Mi
Hw+H58+e3t7ei9fHx8ZPPvzidt9qsOd0/1jfvH1prJaUmXoWqMVIiD/h4Zi5wyHo6U6HWjJhMhUpp3iABsJ237bMf/vir3/6WRF4cD7fH48P5/nl+frg9yINYa8SMWgvzJhIoGVU3DkdFxTddHRE1IYY8QWYCIjrrtqTcBETWQy5EJFqbg7/dXrx48eKQTfWsm6kuVhbQQy6GzUHRSNQBPJGD8aEQooK5NAmjLSYK7uTMbAatqqoSMRADIHLOyzEmXJmJuWPivCyAuIkiU0bWta7nFYCW5fnp3Ts2JlyADXFpItq24A6zqlttDVLlsqbD2WkzvHN1EXbNrlj7rLORau2zemK+R3bIDtpIwxtzV5H39V1YH6IU8yTP2+ruuAE+wFrP6X0H2YpIGIXUp03C4XBY19XdYyYnM3/66adgEtdpD49qjgBOhMiAiYiJ2JyrqKMoirMw5JQULBovYUahALAsS0rk7sQD2EEIp5bAM6c1qRNxToqE4iAbAjCgEyKYoDqBeYDnIDuSUgpHz92CoT0nAY8ZL6JVVZeUs3OQ6EYSoLMHY6/SuzvgZFV2AJAE7uTxlJQInLyByiEvmfKSsiHW2twUvbqtxsVA63ktjqSG4AnZ1ZsZJsRlMVBtNRMQESMlPJzkUdspZT4c6eEETmpsVbaY4EVEQJhJ0c6mfsADAt2Wm0d6JErpcBQiIHw8n8rNoazL9u7t4VDev6+s23dfPXv28tXD+XS7IAFmTozw8atXj+/entaabg645E0bMZGyS8ucpDpTSpzFLVGU2MAxaGZAkQxIAcz9sYqqHqi8e3hMaQtAOxCauRlt25mZqa3uj5c6amawG1d3cUTLhIksuZBIYnZRQCcEB26x95Mfl09EtZ4fGQ2ypKzvz+8SQypUOW3qrkaASvBYz1+/+eqz73368Wff/eXPf/Xy+fOXy916erStpUwfv3jhrumQH9d180DjGyBYR+H28GzWzCJXfUlkiLg7EyoAJmbomTZEp8SUORNQ4vARzex0OiNiShyjWJkXxBiKbsgFIRFR8EESMwGjWjjtW7tBMoQGUBdmgjurR1BmqpBqL5X7AaFycswb4gIMTTcnJzYxKeRusG0bUiMiQgBDdfMoWkKNvXe6FxPMhohAFBDxWBJERNuImRIzs2MycDPbTBw9cA6EUDjFoGI324RjZGgfG6tORInZTBicgEBVTRmsE3KWc61NnQGKWmY8EhzODd+83f63//1v/9N//tsvv3779v79Wjegj539fIogP+oMhUsys/vHGiZFEBEYEIxRHA05QRA3KyICobqaCRC7++3tLbkdFkwgtp2Px7tFV2sPKRVw2NZt55klXLKKn1ZZq52rHc6tlMLMB8CYi0ME2iCmxJCjKhhCk3beTFSBCQ1Vz7WeuztIiMCZw9OCNFqH3F0cmFPKyVNC2MBJlRU5caaciRggIOuIdv7q83/5oz/5oz/70x+9e3hz3rIIrGttVRCJUgnitFUklUUiWGJgc1FBVE6ELoSYmRCpE4CrNdeexWNOmABAuqeFS1pFpYknSqkkN3eETZwgb86n1+ubf/8Pv3xT/+//87/5y7/819+Fm09vvlxtZQcCB1BAS4yFUwDApQIAqyN4tEYUqUoMkQkhRmb3SJ82CO4QJ3RKDuSOgJCQAF2sqaqaxiBoQ0dCGBhsBHQE76PdnQiZsTXQkVxjZpEGuxa+4aACgUYEEn4oIjJnZq51A2RAUJPhMZO5iefINCEhgKMHNrK7vwTkAZ/rSfmDi1o14EQpGbhIU89cSIItODCP5Buomajqu/aDAdpv3RmwhtFf13TbttYaJ0yNyLaFH7fNxdgtuaIbkzuYuDurESkxobO7E/DN8UXi4/qdX3/55ZdN6dmz26pmTgnL/Xm7ORwjxkZwxPDRG5JzOSaHJmpqbp5SSsRIvCyL9dke3d2PFHNKjIzMvU0Q3MA6bZi5kAEviZkBkbJzAWlsZgYOxCkvRJBA3BqSFoamhrzk5ebcoBk/f/lq+fgzvL2xUiAxuGmrOGjgkBMlCvAzOJIpgcvtnfXEazeIYM293t3duQnDUUTOj6dEfHd3Z2yf3LHIqd6vp3pTx0CplIqqllJubw93cLviDQu7oareHJpI1SaMlBOUhExGJokdXRCEQRGdQCLWOchtxEOdEpYC22HNFJApF3AwA3UUADQ0e4eEAKxOVRwcEy5EvG3S3SowcHEIpgAnKnt4c8j/zF/vw6r4fF1PO7TzTMFA7lOylKhzzMSfuOXIaIRGuIOIuSsNyMw+7EREgM46uY943Xu6wd1VRWR3Y00psgjmtbUZVaaUYiQBQ8RZPZmeqIMzY3e30dIaXwDSZNu2cHkj8dzfKDEmNzNzCPse4Jk2brG1tm2bqh4zMmd1MTMdQ1Ri55BIgGHv/lbV1hqn3APcnhPpA/tGybuX3GEgXQE8aEgMfNZWA+/iPqj3zCRo7Ny3tlnOKSXwwIsCRFJ4HDOIBwB3XVKutYrq8Xh89dFHh8PhfH68v78HgHpe11ZFZFnK7e1t1IvumD/79DvN4HdffiNyWpYF0+KqiXkzsVaJSExqXb2UkhCF0C1ldncg7k2QTmLCnF+9erWua7zp82k7LofeDkSEYwRWvBp2UPHIZeac0UADghX22mHCDiOYAUcFBzV0qCRBnMyqZ9kiWUJgIkroyERE70+PLu4KTDnAqUAIhE0cydENTTtDLvYmUg9XCRWIoptLzazWsMLR8w6IUdhpIkFqh0jEnHOuomut9+8eFNyIhUDMm4irgncCsW1tj7quzi3fSMqelnVtAMBsUZSutYYcwnVbLQDUKutab25uAUAjkdMhASmwMbGhdoUEBidTEBNVjQ1Gd03Dy7JEjj/oZ3AM/0W6DKpum3jkou0CcdnnaVTVjIJ9aKQP+2BTvLikvdwx0opJ1GJp1QGYp1sGEC0xPVSLg5EIgBAJCH2AmoEcoLV2bhXIj8dj4eRAhLSd1lTyHDc8OmsNeaSvZtrMYfzzaXP2NF5TxXgcsMtGu5oiTUTfXBYzUzF3R4foK+ghyXgwJmLmhGTo4QeYWcKU0yX9NmN4TopApZRa69pqFLRKgZso3GEMeL3k6g6Hw7Nnz6x93WGuBq21XMKGXCakE1LnkrUgvAgzAnt44swsznG6MMZSzWLbTPlbBx4nxN6plToLERD2lDmBBcBhNFR07+wyDRaRmUspbta0YirRAUuI54fT69ffHI/HuJO11iXnnHNr21o3ZLK6AVD48UjepXEA+J+84v1bHsbT3QdyeERN7gEE+pZkZ3SDjF0tSHESEZm7qeKYT9X3xcE5rr0P7XIbYQ/r1kdvBRI7aqjLkrGjWi6NzQaGhjBKl4gEFvtzIFPSdAXsuhF/r8I7wc4A4IbqbijRk6/g0RzZ403rjHmMdHNz8Jglb4KD5FpdwF37INQQDmxqYvpQT4flBrk8nlqtnkq+f3z3m99+9b/+23//D//y899/+Y04aWBFyFpTgbhVDmM7No45XaDH9TYOAJuFdOsyiQCwLMuhLAk0c0qmNB58NuoPMe512pzzfJ
tB+h1JpcfHx4CWBGm+mYEBIrbWmNkx4F6k6qqbiOgoOXZtZIo/j+Rj7PMAwGnYW4j5YGzeezo6+ZeBA5ZS3r1//+7N21evXizLcnt306RG0SaAuO5X5IH9dcOuKGcXJDbsfMFp5Oc67MXbfZe8dwcER8yUmOjhfPqbv/mbr79+/fr167/8y3/9f/sfbp49e7YcFtcmbUMgBxSjlFK0KjsyAamBWJOq4OQOhJ4yQwchUczbNHdAB0Pz8RQ7Epe9DsJAKj7R5b1S7/cg7/7YRR8vl3II694TRwCOqO6OF4RLrGuUwHLvsbcQNPIOMZgZF4BOMGVmAIrcgNzAqoo5ijkQq3OrhpQop8TZEUG1tdpaO62n8EXD+MdGA6YPDw/x3n2QTVgTM8Po0EYwcMdwYMDckSnQk8F6GTMGyfi73/vBurX37+5rlcxEjmaWibfziuiDKWBgeRBYa0jLfBEhIbVWvH4v3OedXrbCWexy95yzgkwZi0rptm1sBExMKZqnVI3QEKhukkv4MC5ijul4e/fsxcu0lChy7I0YDsqPuIGdIE9yjSHh3Zvo2h1av21tuqk8ZrTu9SUkYl3Xda1v374tpZRSEhciOh3OOdGyHDknjmZpN0d5XB8KlZIWJzNTdyUwJIdyjAWKnRTMmjUREXUiBe6hhCNEC8nYIgiCfcZ4mAsJEFBg8acq6GTsQ8QduPfJDrjf+OY2Md8mANhgXZrnQ49u+igOgA74GI3re/LOXjEG8LkTzSvM9zKXeqJM3T3iTxyQt/kss8IZ72iGVynCVnftrD4aBcTEnMAknKjWailLyIyqJuKIK4nBNa6IyGjG6obo4D6GV1iMi/HIGdOFRln6nAZyQLXWJEoiF2QCjj0HEcfsQXB3AaPdII5JSgkdNQKX2Lnbo740AerKueScU2LwJL6hdySqmbmTAU2HzN0NvFYBIGYSsfu379qxMvPt8Q6ZzCSdzw8PD+vjVs8NEVX1TPr8ZUNO63oybYzkrtt6url9ZgsnSmbmivmQc04Mig6MxEjWB0k5GZsZMpVSXrx48fb1a1VFg7NszFnUI7TjIKpGVOi7lA3sNOWUDa2JSJu7tarHhmHmtW4YxDCqrubuhsQJCTCXLObb1kJ+MmNtitDHCTIjYHdzVFwMbnIC6C1jowiJiNgcgnhGHZCSA5mZgrXzudbaVBAxlh4QmkhVQyADEJWqZti5WzZp5qikArip1FodNCExUjNXdxVvYJs3oMyIt8+fwQUh7Fwz5RS7XewHcy8nImI+revYyzgGOh8Oh1Rya41rAafoRy/Hw7IsRLTwZfb0zBgBwLIsqnpzcxMedqglES05h7qWfDifTj0YdvGrY+ciX+2yl81gJGAnlnUMc2dGbD3pgQnNJtkQAOBu7uDexenk7NArPO5WVfhQbsvR3QlJ2hbgpcPhCAAqrTOO5CW+Ha9oI6+Oue372FX29zzM0AA07qnt1IAEEQlj2J/bmFc7jAlM4F/n33cAG02JgDErzMxAgYIqVsE0kiOm4KKeVJnQHcIbQAYRQcbb29tlWSLOD0+0Kage8iHfPbut54duPVOCqojITG2rzJ2uGju6DlQdB4kpDqJCIiIHJyTsXc2xDSDGuIQOqQ0NnTprZoyh7ImCPBu944YI5mYdwhiSYyo8GjhFhFpz///y9adNkiRHliD4mFlE1Q4/4siMTGQCKKAaNV3V3dUz3dND0/thaWlp//IS7dcZoqHdJerumbpRVQAKCSDvuN3dzFRFhJn3A4uqmUdixygR8PCwS0VF+Hz8HnJKIjLft+F6LKXO8O0mHR5Or1++BnB1dbW72rdSAaRhJOHkMtdqteTtLueRguKd2b1F4vTDm+7n8BHrf9GYih2wJIRhpWPuLF4IdxCJu5sij8PqOHuY4qxLVr3+U9j/IOVZH6vDw+OJfF/cbGQmC8xMu7Nn6iBVkg5kXXCq6OQKj7SA43EZQl0eLpHk4aeiEoH+ZhxCWBx66w5zYSaCoSkpyIgCd5aiG6muDip2VjukLjZ5/f5YDZVlPGn78te//5u//ae/+ft//uqbV+/vT1WRtzuWxCGs12ZtldY85wys6oFd/A+PEsIIIwQcEYGDwIxxHMcxZ8oiTk4pcWKWxF4ejdbgEr4loIXjapqmSPnmaEEQB081L0aPmdWNIOqu2qppqbXWOuQz1JCZRXgYhpTW/LybkeDYiAvp9BgXwo9L8IDdZvvNq3dfff3lv/sff/Lkyc1xKnR7o7WVUpqat0AjGXOuy2zeo43ngaT9cBbu8rFGzMtuPO8QVY0BfmEh+NxaFheR4zT95osvSinfvfy+PvzZn/zkxz/+yWfbYXSjTFCz6i2ptVbcScSZk4Kc3F05aobMFLWwqgTr7i9wrU4MhnOEBmaPRgloqaqs10hrpPX4Wvx8YH3xeo+y3/jTgKimgQL7b512QQTgJSI/J8ziXeEm6qQrktzcF8KK0CtiIiEipYlYAK4Kp0TDIGkHSmO+KtWmYqfJatOqXObUGiYptdZ5LqVOa07oaofDqZTSWlnpIqEmnIkCiZmY10t+hOXr5XIBEeWcn7747O798XSYatFhF76+JSbzTsInIsHNHIYlZk0lcUbMmyDYwbQqdXO1EghTSqKqAgp6DyEGdbM2jKkZXd7EMGkACyRYHqzLmrsw3JyMmcQomQnn7f76ye2Tj9o4rtJEl+G3utEC1qXzg9cKtXe5kP6EFfUKoJQpKuYhak1EMRzEffqD3T2lVKtGos7MSYYYTHjrU855GFoWYnjKtBllm9Pt9Sdp4DwIo5UyaatGnlmqT6raWnXvrNoikjNQK2ASSQG7u0dW1SxcrkezDeSOZmbMRkRRjD/7NydJRNbtJxC4xZ6A0bL+AJbnUFTgHh+TR7+59JWXy/uBH7l84eN/Ijy2S/EOMbq1rPD5Ez+Aia7ecM0A8ch3e1o9QXhp666hmQVpO6fUkc0ElDKxIzGn1IFzqsoMVUoU0IPg3qOU0piSErdWwBJMJwvhi6vq3GpONAxDU6ulltLcvXN5L+GsLk1CiYPSzRQWCq+1CN5lXOyi2yByUTIXZuXVPbu7mVvTWpVcAROGsayUAsA5zJiPU7QOSpnv3t7d3T1cXV3t9/s61XEcb6+3m/HqdDoFHWUt7aCHw2nO4yaNu+dPnj5M9Vg0MT/cv2dmb22eJiHfbkdGmw6n/X4Da7CmqiBigMiJkId8c3NdazmdTqia3ZkkySDixIk4iSjO0o4aNzgE9wJqdFk5ICI3b231/WAOj+UxQBUdRIIMWWoxeAFADCChuuocQS2IXM1q1DAiJ90QEbFzcyCmqpwIaIuoTm+ihH3lWmvV1gUniCAMN20159FBpdTQ3Tbz0zS/v39wp6KhthzFCgXgQodp0ubGIuMgxmSkBqjNvnQIzaNmFkdlHEdJFD0HLHWUlFLERuEqyjxNZT6e5vjGc6un06nWKpKbo5RGRE+uRlUNaHvU3aN/nhK7q0hy93mezcw9qVbVIaUhReRzEUSqamWQumjv6YXzIH3EzIZo2y+HObzLagtE5DFr5nkD00W6d
/HvFLlKPwVrGQzOSYiottbqnFkGZs7DOAynUhAqfIkJ7FqdkHIv/9JaP+ykj5E2XFjA5cucI84L6dn4d1qaJ8YXTF9uFPBVkEWAssyVMrN3zbCodNS4Ul8GBgC4utVWiYJzqLnFiHx4BiOoGTNvt9uU7uZZh026vr5OKdVycmjTYgju+5Jz3u/3x80mZDZ329HPRSiIsJk1Uwp1CqaBpOqSsVDPBNLC05CQA13WBUv6OnR4FTpVgDEzKPALwVO8TCWBiTw4Y9jXfL63MNxjySxsYmAujIWZr/dX3715b1rNvBW3nKqpGe4OD5998tnTp0/fv30/l1mIUuI8bOd6d6oz0si505evHTz84EF9mIGWqtwj37PsBHiHa5AZVN0U2rxqixlyN3JyWSqdkYw16/I5j97KHYRAuC3OtFN9nNO8RZw2jqdqa9XmGQAvte/YnkSRlogs8iYCEmcGLIQ3Hh2upbqxut5Lx+/uuo7lo7/92khhkBtAxLBFOYPmcuTI9UFETuzRNks5m6FaDFsupQAjSzeAmvrX37/73//bX/+X//1vf/+H7+4f5nF7LePeQerUmgFWWyu1JjnftUv3379ktPzt/K8KB0VzAWrqDkkYUh6SCIfEyeTWhpREaEyi9VwIv3wACIJEIlomtCOOhJmpKYAUITAA52EYXFW9qmo1xZISrPhcXDSjsMzMRLCwZoPr7SCSHoEbIi8VkaY2JslJvvrDl//mvy+f/eiTb775br/fx9zpu7v7UibOgyA3bxSZVM9VaN3MPwzd1v1/GfatX1VVgwbByd2IuvmCE0qtqri63lwNT+dy+PbVq7vj8fTu1S9+8Yv//i/nn//sx09ur3gUg8JLc9eQqnNAW3yCiAQ/qcPBDmJ2B5glWXhjBCWXuBMbhePv+3RZSTNTt8sFjCgpMM1pYXFcD3V3GaoXpb/z70Oq0pa0ec0kWVJXE1hqQ/G3tNDiRzYYL3cjicEhUEy9oWuHonjLiZzYwaCchh3xTk1ORebCp5OWmZsNahwsZu/u37l7NAlj9gGAQ5fsbhjHsfMCcCMi1N7s5QuCBqDQYid6/ADiGE/FcPXk2e7u7nh3p+rODjV0PUpKiYMlq8+jGkFrrPw6aeILED2ijrVsijXcX/wpC13u9oQu0GpmxEwQYktEzDBXj9ITU9Sg8jAQs5MQDzRsN1e326tnMuyxktD0fdtNxLqZew19mZZfoKIefFZEUQjtxnA1udrhr12pKB7L9QbJYor9qxrGYKGdo526lAbvmF3sRhlGe3t33Iyy345DZuZtlqthSJazDAeYkbVWi7ZJtRFUzEUSwdmNHA5lD0Cyr2l3bAOzthLVkpODQLZUGTlu0+pNVp8uIoHxWQKebgqYGT/I6PrtlrM8xuUKf+BH1psOO5sXXPBR0wXr5+URu3RDtNir9Saux/DSNNkFFer68hRVwOhTMavD0Qm1HOjbdP1yrTUBzLKZORRq3qrBjdhFXC3gXomQlmpBLZ0OaK3XLmuH9duISEqoWpu2eW5xG7CASIko5koRAqZmTkB0eikZhfAzKUAh7GKmburGbgbvXkfCVsJBgbSKCMZJmBNTWlfEKJRQQwR5QbUZkQx5ZDM7PEwP9ycR2e10d7XPeSMybLdXsb6Ht1+WpnkY91c3anSq94lchiElq7U2a6NAkqSwnAJGlKyVAYq2SWIlbLbD9e3N77/4XazD0HkiaBhC4D0TEbfmXvpFLx6iRbwDpiRZuJym1V6bWWtY+4QATGFmClVA1Ij4Xa1DCk6WPG6yMDc1VbTWMjcRcY2JOzC7wKf7QwBXiT0IYCOAT9yHa0Ugji6K00wDeBqoLLdk5sbNlSSBuKqXqlOt2vz+eLp7OJClZkqCtB3zQjADTnM9tuoKURZzzFVLm6jZYTpg1VBZsJ2+YKwvOwY553EcB0nxtFJK0bYcGIkzP81zlL7GcRQRd9y9q5fbeBiG7Xa7yZsYNYwPPU6nWmvOOaU0zzUGxsi8tdZrsZKsVTc4UyRJABZ5knMkfXmSbS3i8jk0YebAm3XLEkGlXXjuNQRcwvjl/9n7oDqcoNH8FUBNCIMgm3udy3QqDk40DtvNOMytTdMEkrwZVZW6Xxc8wkX27382ixcgQz8H4+vxFwBCnCWpW4TGKSWtU5TlUkorIsCbIVlKfRQnkG9rsxQaEg5pSKF8SNosRDWD3oqIDN7csnvOqbmh1daMBE+e3NzeXjMjJXFPxJwSmMncx3HA1W663s+Hh9PpdL3JImI6u0sAXVprXj0Ef4iInfIynX9Z/4vQjLyDiDq2cPER3KvOaVkWk87bxJlZlto6M3JKOWfVimiFUaS63QFkJkmJAG/uFFIRnjk9f3L7xVfvdSpgBkiLpk2+vrotpUD46uqq1ja9muY6j5ucwMNmW8xBpKpFm6oyr8iWPwIJPoO8Hu+EpYYnAd/3HqaH0qmbRU6o8FCeQK0V4JhUbK1VpSA1WXfQ4kFjZm+Nv+PjIk/oIexFIsHCGUlz7htyDVD6A10kllmIEizpkmLz0u+6vLoP0o/1PAIoRcN3EBMQrXhzdxg1a7EnhCDM2qsbmoQIrG6dSgtJ1asrAAMzJ5KOt1fVU6OH+/Kb337x//3//Jf/46/+7u5hTnlraXNqkDTUZqd5MjOPRaCQTDjH68uN+yCxOVedzZkcShZNShIeUt5sh5xzdBHaXBK3cdgmYSbPm01rtuLf1vAipUSgS7Wu5aZ0qAjMZ3de8oG5hvhql6rnJCJJRJh0jcsvb0HO2ax3idcKsrvDFuhm10B1I3LXnHMw+j+9ffLlyzdvXr3+6Wef/fVf/a1Zu95vS7k9TnO5PzCbs6oqc3JYhzrROca62PB0sZ7nFtllwZGZTQtR7/B45Es9RPFogE2znnwyNyIvD6e/+fuvvv1+evnm9O9fH//kp589vd5uRhkzxo0k4ZwzJ/KmzZSZJSWzpm7uGgsXXLAAa/Po1REJQdyNzKGrpiv38tHFNujHCgB6c8ndmfPlcT5bM6KVxubRwe/Pt34sOZgDHlmMNeEBXNzW6BskHc8lIpLNYQZ3MgMRAtntaYO8Ic4gMs9Vx1ZRik/zXFuqTWpjVZrmdniYpml6N38/DAMz11pbKxyEc5JdjDkBGIYUvh69M7aornumBfUKpMtI+nI9SqX91dNnz091mst0EmdJUNLMHCyz7K4EMKkTEHVkAB75JAKHr0ZMzIg+dnfoQEx4ghBxNROJdFvUq9LcO3vBzi0iO9GqDrcGZ07MPe2UYTADsUjeDle3108+2V8/azRK76sjaA6jDRvB45JsGEE4hjvP1p3d1brEzpnNKMoiw7CZ51P4ZQLHOy83vfOsRFqYcx7yZq3Im5nxYGCRHDpvZnao7VBbmR6SUEo8SLxKhmFISXbPdL/Zbnc7GQ1t9jrBKrzVMknE/wAjCEqCfuayY2ZATDyFlh4RQpb2vLHX8qJfEB1hycQuXcCysR918PwiQ+sL9xhaAlohUhbAGmJiItduqCOGs2WUL60ic8v72wJZ77ICF+juuH2+4o0vBjSC7cJ/0KVMDnUY
sUviZMkWdI6IkGqo/KlVsyzCMOfeFjAswugAzBtjoax1dSezFm0I5sAqIEmOykHMuW3GoczTNE2cJOfNVlJpVo8nYnNfux6Psm301nkPQI1wttQs3WxYF0SoqiS5jwT5YuPgOW+6yp24K8ybNi9oeTMayBzkFNuEwrPwthrmaV55ZmPrXl9fH+Y2tQcREc7jOIYswenh5fVuzOPWwafTBOf9fj9ud/eH4zRNrm2zHQU0TUcm3+02dT5Za2aAobkVM6/tVPH5z348buR4etimwRMx9Uab5LS25jkGbPqeYxImBixyLRJmgqjU+M7x5WutsWSmhlB+cTYnVwupP8Bq06HlIXTZSMUkVpC6nwA00GAN2qxySinlcMnu6HxKQj25Skm4UlSMzCxLFDO5uZk2J6hTMz+VB05DM2/utdk81+NpPk0l0p+cBuFsZNbhdjRu9lXnqbTi7eSY5laJoWAlAOwgitFEVXMA8/G0xg2xGsMwjLV5qz0QWQnZWIi5tmbLNE1QR0TH7Xg8yoUoS5cACdhD6N1f1D6GYRDJQr2ERqZc2d2ZKGq06/b2AA7aI9vh3kePsKQ9/We3UAg4nw5yZnbiJdhdaKb8IgPrQfPypM5VCCfAcZqnq7zbjmNzI9VMtN1u9+PY9uPd3d3dw7HUiSXnnCLExWL5ACxaBB308+jYekDOPhhRwOpVL8ILD5YcERkSzxUrfhLKuMiNmZncolxFhEABiYiZE6cUfEpuTuZEkgZuxklJWOEKV7MEcE42TfM8q+PmZvf8+fNxt51bJWF2AbkE0bY1EUlDfvL82dtaTqfTYZB9TmzkRsN2IBDA7hzao+rOhLB76nDgzEMdUbi5CAFpuct9Xi7iWiIyC3h2l5MaQwKcmTgwVNJnpUxjczDCgyx3E0rOQmQcg8MaT3p2c/vkSt4flcwgXuZ5t93u97cs2cyGzfb61u/v75spcVIzGWTcbkDcbAFxPebyXkP8xc+d2bEX492ng9ALB3G953qqO2m0Cg2hwOqLJMxyCvpYFIu0ZrYoDS4TRx2Pt65tSsM60sa976Zm3lpzo4iNVBVhOyldJhLmRkutdumSuH/QzT5v4D/eJiWipqEHRkxMnZk2VkDJ3VxhTheMdt2LR4+UUpIslJU0j3tV12anpvNhvj88vH9/fzwev303/frXv/7lL3/19bff1eKb7TU4TcfirCO5OtSJOLEwQj25TpeXsAZw6x0kWuSe47SaODoxMoRSknEctuNmGBJMyVW1caLNZkgE1Zp+MH4fP6iqXKRG55gJHMUDI3M3xfI1XCJGlNQFaXop56KULiIpra2UheMB0Xvs5vSy8eLu0QXorTV3c7u+3vPL16++//bHP/v5i4+e/f6rL4dxvLnaHQ771oLiLgYZbf3alTzzoznJy/Tm8vdryLiateDPcHciRvC9OMjcrKWUnLzURkwpDQG+mTX9/ut37+9/+e237z7/7OOnt1fX18P1frjaj1fX2+dPb65v9rFLxnFk4dJCcygFyRcRkdNczd0FEnVbdmeHmdMyUPPBluBO+XQGuPISNF/mcnTx+OA3P1gNXnzBilRqOK9enAhEbBkVnIWDXZzEQU1hTgqCC0tmyQgQy3gLTuZQ0Fy1zD7XVhvVgmmux1OZZi1Fj1M5HE7TNI03BuGRgnU5iUggipuXZcy1mx33hbilA1wDo9BpBSJqXa/V3eHCRMoYtvnp008O9w9vT1+VUkYazBoNg8O9VWNRAkhCEzMHALZfftwDeKLWWpRuQJYWb+nee4DuvZ5oZgH6ZWZmoa5vEQGtiAh5JTdVVxAjkXAohxBng6e8zbvbzf7p7vajvLsu1fOZvXaJW0SAlTxZqPMS9U0edb9wBetZcHfmKGU2Zh6GodYYANYsfSNp90VROSIiSynFqMR6dphTDemgqslTrDcLu8v2+ra1UlTnolQnIgKO7JD7sh03290w5pTZN8n3m7wbNpSE0NjUoa4KtNArtsToM+e+AnAgbt6IiNF1fQEO8uqm05L4nff8ErydyTyjAkLEqufi0XpwzKzWgkfRzpo3dmd6Gd4wR3m0L+9FAQXrb+JVH7zh5T8tp/vD3iNdwPvjhWubBEBagEY9tlvunJqZICC51Npi1kkFwXvb3z2lxLIGfB2vZWYIiTi1nPNcm5lJIoT+o5/rZ8vZUHcCbFXVU63hv6OnXLQtbZmIRH1hjXEl75zT4B6aSuej85jLJ5AjwkNzKqXGJ5MLiVPrAdl2u4WzgcjhECcnEYK1Ju5etB2Op3mu6IJL+f1xii1uyzRtqC093B+2WxsbzPg0FSPejttxu/vZz/8MZOM4bsfBXOt0Amwcx5KG1trDw9GJiuthmpVwbE3hr16/GTbSSk1CDE4pXV1dMWvAjWJBoqNNRO4UMZczQ715KLTYOI4xvwHpIxYBlmutDchm0UgPEJcBmnOGc6luaI5iHgKDNqRsiZJHoxlA0Gc28UEd6kxEDiX3wLCfygRYFlFNgLfSKVhsICwpjVdUB8BN7XCaJVcDT3M7HKfDNB9Pc1UfxwT3mHqf2zxNEzOPmzFTPs6Yj20yrZBm1AhshoBsLbtqbfl1zHQcg5QIQExj5DFSu3Dbaq7WsIAxPEy5kVu3ImkcKES3o/RgOreKwldXV3kzmpmbDttNgBIhfajawSCIZErKToSgdI9peVzqVfoyuyIgB+ky73R57OM36hEiU4BKA+HmALHLQiodjuvSmpB3Pg49p21wgmqdywRtPOuTq+2ffPLJn/zo808/ev7l8e7169e/+/Kr71++UZ03497Ap2mmlOlcMDsHSb60a86fSB1W0q1P+J7VAjiRuXmLpWMSZuScZUZETsGejaVC2nEO8fckRFC31praalpJDc0UwjnlKqSMSAVjmShJGodmSkLb/fbTT58/e/Zsf3MdhXtmclczT2kwgqoKsbvv9/tys79/PT8cD+P11Sis8KotWBCYObkgQggCAymlRmQr1GSBnZhexlJAL+s0IKbijTvINBw8pdwZ1BKn4OuLuVtduo5rl4t6mFIdKaIrw0r/5Pvd5tOPns1fvzwqklOpGuSoRFRqTUPe4eo4lVev315d7VhotCHlrCBtJiI556bRhfZetowu7eNq5TlEsD+SMi0PdqeOTVCrzbSTHfIKQjMz84UchxPAHjWOR3WEpS3o7Gi0HBAzq1VPx3nIZBEmG7mTaqtlreH2c2f98BuJw13VOqkMcZ8wCC+zAKWwgHw+aBKuxzOizH7SQIB1Sgl3BjEnRkDhwCAicXNF6KOyUDaX42l+OM55sPtjeXt3//bNu1dv3r589erly9d3d3dfvb578/pdrbrZ7jPjNDd1E861VRarQaeUkmvHAcplGtPDmovEfkF0P0oSCHAwcxbOOW+GYRiyENQ0Znu343Bzvec2da7KJYYWkbVwXmulXrLpvAW2IJSYmXJKvha5eF1V6uAgrqaBdJDljCwJYVoYZaKgH1MuZ+r5HjS7BgRmORr9c918SHx7vX//7rXX8vOf/vgPf/g93DZjfv701t3vHg61VRAB5M5w1ZhvPNe
0+Lx4F/twQXDAzCOjJCKzDgFxd2YKpHfEDACrm4FIJA1JUlKtpc1JdmU6ffvm4f70xe+//uZ6v7na59122O/k+mr77NnN86dPdrvNdrPZ7Ta73e5mn25ubna7XezDIbiL3dytmZMpmwXmiwW0al/12HFxK9FSXcorfQiwi/s82huXoaf7WTzz/OjaAAbrCnpBeNZfjqWS2P+6YMKZyNmZYQISIzaHusTqUN4gDyIC57mlVq2UNpc2TXqcmjY48fFUT8f5/ngsVd3dmTAgCwknJok2UEqcc9emjkQ42LyxOF8zA5PkRMJsoqqOCndiDjQKUXDKQFXhzkygwaztr5+++OQzPR7L4b26edUxeyQbMdOv4fOcfAGSxBryheh3LGaiLvq3bKrF7JBHoQPq7o0kEzkjqN96K7611rx1hFCw0RoJC1gaMicZd7fD7obTjtJWhh1ceSk5h7IJybmYQsE+xSyS1xQlvsuS0sez7KLtdk42loSw822aGRDHaKnBKRQXAFQwEcZtFKZqBHQdJiC51uop4Lc5TEGsYfF0nO3hdIIVho6CzSCbzB8/uxanTDxwCiCRUTGoocERAymAExvQlTWWwZeLGUIsM84XiICzj3P3BbGydgtVbU2xzob3Qj9mPVDdgPAqGb1iwRzgxF1HFAhgToRVZ1F7Wh6xndYeYK8vL58ucs4Y12+lqitN4OoLeodjSSgBkCImylBrrVUgLtIJBpnhxDAlORf7F9f4YRs0IEy8JH5eqqrKMk4Q096BxNtsNs281HoqNbLH1kpn8+dzc7ZPrC4lSGInuIHCtMZwmoM4IBIQkpggDUEgMeoKf+7uBmEJuGAKURdT8+atGbEh9FCacyI3IvE8ROCCYSSQGYr7aZ7nqepyX5bVfA2nVh6YMYzDdrsjyQCPR72a2ut3Dz/5yU9e7K+mpuZtGDdmdnc4zDYNm3Gz2YzbbSPdzKUJXVlLm/H13Zur6+2rb96nzESaEt8+uSZ6f7kvVzONhUCk2yw1M2utXO/2p9OplCJDjpJn7BhVbRFEEhhsMHWgmZMCyiBuaAYDD8mI6Hg6DElyzpFNM3NzK9owtbVk69AIhSVRnScAniVsR6s1ajBehZmbo6k7GYOMXA1TU/LqhuOp3h2Op+NcWnMiR3IEyUeby6lZvd7ePnny5N3D3ECzejFWYTDQI9QzzsMUbuHVmMBmupjdGHQWNwqduiXL7awt7n3IgolNLKA3McjTJlue70ScZBDOTClUH1tttSgzt2rTNDGlu3Z090FSoHy7ayQxmw3ExrGFmBnIKVWaZ15kc5ZySZ8xwA+wagAenzwjSkzU6QmXRxhurHR7F5Uq751zjLstmZHbJy+e/Ptf/Nmf/uizwWw+Hq52w7Mnf/Ls6e0//PNvvv7m2zpPoKyqKT2CEi0fR5cfuv51tUc9cQE5hWpLKq2by9YaVJkcSIsgU+fgWg75wtUWx42JmZ1hBjUUbWgW3P61tmot0yAs5lDz2rRE3YQ55WHYjA8PD8M43j55AuGbm5tpmjTEVFGjAeUcpUIXdidi4d3u6nh3N5cp8ONa5tNcEzqXmiSC8VrLC8Jud5cOH0U4j7ZwIwN9fYg9hYz1Ih0Z73b5EAqSZ2FmXvhZFvardTYHtObbMXToDo5p1zS4P31y8/LN23JoTA5TIlJHaXWudUhZvX7z7fe//eI3T58+3WyG5x8/39/swdKMOElKSc2Y3RluHOH1egrQq/6X4xDr5oh2PT2uS0RlKkagtakBzCB3gqqB57nUWnNmM4KrmXISgjjUz/g9EBGLRFzry0x7KWWaplev7q72krOMw1ZEzGAGDbj7BS+Ox8Sd2pDWMUyDBUoQxA57tKV96ZnY45mxZcP3qMnMzBtgQswCZjIzifAaHvIzcSbMU+Bm1bgSSqvfv3zz3cu3v/r1b+8O89v3d3cPp9NU5nk+TqWUcphaQ5JxKA3uNGz2cH44HqJ+Sq0JkSRSNTQlWExD9CTdbaFiWvSyLhLs8z1zJjKJgkGWlDgLtdZcKxiJsR03+/3OJi/WElJUxwMfsSJB1sWhxx1CbUHIFImiYFmWmO8yYjenRUJNRLhrkXYy7bUuHBGwg1Sjow5iF2Etnck5uhycJLEQHGRZEoCm7Wq/a9PpdHj/oxfPUmaGScq3N1ellPvDQ62VUg52y4CMLrYVDgixP/a8a1jGS7Plcj2D45qIU+p2DQy3PqgGmKo+LJorkobTcQ5zUtQfTk2pzNreH467Tc5vHr75/u1u803OaUh5uxs3m83Tm/Hzzz//+OOPAzhwc3W934wAhpzJm7uyqbNmlgBGOfcZ1yj7h1ECcdw1dwcsUujLoM4fP5bL7Bd+eQqYYqKbGEu5oYeyfc8/NlzsnIMB0omcBS7G4sQsA4NB2Vg0Dwapxmb25q2cTmWa51p1nss0VyKSIXPKsvGBmqgGzrfWGbXW0gfzAOScgz882pVE5AydHUDOKSUxb97qGjSjV5UdQAAgqPelLZ5AROpS5nk3Ds8/enF4//adFtOpuTezhcWp+74YmtTWxxd9MSJRbguQCzt8geUvtyNCfxcSZhZyY5glPu86ULCkmpXSxuQQSZwJuXkyypAskpunMW3z9joPOyVpCsNg3ICybuNO6bpAEMORpJREEtFSO7OlbaULtPWCyIQ59aFg79OGGty2MOtycb2B35p1oBCxuRk637HW9ymlYcjM7N7Ma6uGeu7AE5GB1PrlS9oAEDahLUFN54fDdN8OD/fzKLQd8363uRq3OY0keyPLcgTMvJlV88K9Q0zgFeFAAMMv8VaPXAAtDc+l0POIgHrVBl9NQTxC5me1imsfT9va6zqfIzPjlNe3XYtlAKayxBsLSc+aCl5+3GqFIr0Ezrh6M7/kKL0wVgZYEpHSVNVAnFKSZsTsTGqWGMfjMazGcZqJqJkLmbmYO1Ow7ECbq5aY7IpAIbQeRLJqKaWZRabXiGi72zCcJThMImmpCzwPgDFjiTO81hbGlIMWmXngJGeeYicSh6c0SEKdpqlWgoBFXCDszNH1ig3aQsEXxKAkw5PbZ7dPrmF2d3d3d/9O3dxgVVWEWLzT0DXLGUCjNCnuDtPpdAKYkgQPvrtfgqAADOOGmE3yhCQu7nS8P7x693A6HX77h6+GIbm21op0NZLWlCTR/eHw5Nnt7vrqX7763X/6z//z//X/8X//9vXL2yc7UMsDcgY1e/Zk/+TJNfM9I0bki7sPY3Lo4eGUc27mbsQ5gz0ZQv97Ph1FaLMZzMysiQhdsGCrAeaKylHHMtO5AxJSZpvLNJWc8zAkAK1aaprSolkahsN9muZe92Vn0DDkYUy1avCeBaNp3C8ATKK1OkEkGWEupTZrhtK0nOZ5rnP12oLbWxR2LGW3HQwGq5vNMFgAvdI038/F8rA7nerDcYpOSs7ZtIeeRATv43nM5AROQs5h0ogJvPRUCQZXU2ZOLARprYlQNV3m01IUaTheZR5U1HAvraJQM1W3uZaYeg0+66DPgZGItJy5lijGCbOA3EgJvAC7HWzWOknsQgIep2adz/YF1BoRcP
gAaD81xOHmSzNq6sxcSsksKSVVdwYJ3L2XiMN8eIwdGqBCAPk8OayR17u3r1KrVuqbe5WcmoGtEnzI0hzcELTbSw24o4AEZLEx+nUEqWYt2mS/2W63IuSmtdZWF/1uprnU1pokpgXwdTqVqkVSYs6KQkQsTES16iYJhAceKyxvxs2Gaw1KeXEhba05lNh5KEblVN7PDzlnHvIwjqfTaZrna6ZS67Pnz3POzeOY13HMqmrQqD0Ng2h0+8iM+Pr6us6nYbf97PMfv/rmq3f3d5mfuPl2GHSeu11294WMzt1hnojR6cywdkgWq92jBIkAiT2ouUzVrBGMXKLfQfCcOkyUmTJz9BnGQd1j4vnS1lutM5DdXT2aqL1lsB3lejt+9slH9ZvvilpK6eOPPyaieZ6HYTgd55TH+8PDl199c3d/f3V79fXLb4fN+K/+9Beb/dXDw4MDx+Pxan89N21aI2IQplZdtRJRzuNS6nZ3X7VDzCzw20SCpVvt7kHkq7YlopQGU5TSmFPKSc1SSpvNximLGnFq5sLZqPaWBQErQs88GL/MrNY6nYob1VpLQRsVsFqrqteqpj6OKUnmLAQpJWS+0pjGcpoIwiLuMU5DwzAA4TfP/HsrJrznNo+LvnFzT9Nhux3HIbWmrbmjqaKUmnOu/TkxBBLNK0/pqk6TOjmlVy/f/t0//uqX//ir716+/f0fvqE0StoYUak6hTlRbb2u0cu9tVY3SizMVOfJVAmwMieRYchMqLWt8YFczJ+oBgSu5wa+ND8BELurt6bbMd1cXW+2AwCFXV3tjvf3ifzp01tv+v7tu82YSi05j2GygoBnHEdcdInXgDh01aJXsBBOpBjtVtUs49Jc7OjQ/lUJKcXTMM9zqT4MaRxHomTe4puLCC/V4pQlWnAp5XiT1hrBq9arq50I74Zc3F+9P/zmn//xf/hP/+mTjz/6+uuvtyLMcnNz9fb9PXM6TbOTEQmzOJGqlVLiDK6+fg254uEr/cZSxY6nMXMkIbTQH3BHhlvr+8uiQGnm7prHjTVt2gBrs81a5pq2m0GNk7RpbveHJkyJJU42++lv//6319fX19fX+/3+yc3VixcfPX/25PMf/SiRZ6GUEsGqFW3GzOZDTEzEy0Okl6MGlZJqbc3cPWiFzGxqXbFpTZNsoam7HBteA1xfiN9S6ny5iAHjmKcFmBjprF5gaTtPlZK4E0seN3sH5WHjlFrV2lCrlYfpVGY3EpH373ZmVo1V3WmTtwMRgay5GplkBquHTggDQpk3tdY46UR0dE/MItIpTVXBtM5fsbPn8OBlifhp8bmGHq82XLCkkqdxtycopc3zjz+1cnr35js4V+dMGIbR4HOdnExyMrPtJhOREJIIDSnesJQiQuwXdRDm1lzVhmEwi/n0MKzRamYi4swE8bBcKZlC2adylDGxZJaRkIUS5Q3LwDk/++gTR/riD9+8fv/w0YvP/+wvNi8+++z05uuUUvOWhXfbfWtlmqYIPDgNmRlgVY0yQc6ZqTua5Uaf+/8XGh603+9D2Gm765rMAOCrZMLKknBZhyIAiV3rZG0OMQxd5pMhMfzZGZLXRy4yDNmdmlvOkmRrOVHaBXPw3aHImzIM02673e1222FzuzfV6iDJaaDRvZrPpgqQMEESALQwiUZESR7ldWtNaq3R22V2xQxfXO6iGdahE/A1D8QZF7po2OIsTrja4dVorCkfgJTyBwYHS8Z4LsqsaCzmlZX9cubIlwZjvDMv3LlmljSGCMEGmJMt1WbrLTioqoNySk7MralbBphTUN41r5H3kywTGk5OuiArHs0VxA/9AOSITy/UbwIUZAZ3xoBz2ckBxGQwEalbrTWYbVtrN0+f1VpLqUUbKBh+xR3Tw0E4x70RokVnzgTe2mme5/cPh+GbRObTfJymU5YUyAohxDBxQGfv25vFr2gp1TrtS559QkABF9sAgCAnrd6MqHGxaAdbp7tUpTmV2lqBWWwGMxMHJZrn9tW3r4d3rz/76Y/+7V/++e2zK974l1//7rOffHJ4/y/E9uyj26fPrmt98NzMmzkhNOWYTdKQxQHXpg6tDpBqFINJrTqMBcypm3k3bVh2MPXBAoCcmVgD3QZHiFK4N/XabBySMRReQ75i2Xy52FoSjiparbWUFC0ONm+wpWre3F1L3HEGEzPHQKKZldpOcz2d6lyaqgXZT6JUm9dmpk1Yx8zDkPM4cpJh2MjYhCn5kCEhBa6qhPN8jl809yMEuXws56o7hsgEmBKzE/VsMOYVo46LHt0uTcg+dojg9HMvQa4QhY/oSLRmIuIENbh7c5dgfkvBDWNm1ro2VbcXQbS7nF5bw6OuotGJClThzY3dxoU54/KiuJMifQjb8/Mo18XDnAnamrB9+uLq/u3DH37725tf/Ol8eLjabL0e54LDab6/ewsLAhnfbDa2ZN1na+j4AJ6KCzxhp/wm5j6rEJSLYU9XrUXrAlCrfVsmlMxs5UJsrRmTE8xhjmKYVZGSe2vArFpbq6YWul0s7+7eB8fPp589+fjjj69ub5h5v9+ZWVKtdVY3wDl16rDLdTMnd1I3kqw25TFfPXn6/vX3d8fj1X5XHTnaGRdewRdC/77gMZO/vO0a6RKR0NpENSYGM8E6VL43kBbKO+ZEocEmAMipQz6it9bTUBiZhtZxElcO91K1eaubq+v9brjaDrsx6dQ+ef7ixz/9yYtPP+WU1b2a1jLPVeeqD6fJhIYh3Z9O/tt/GcZtrX59fV2KHY/HNA6AuXctXQeInUnMPly6eDCvPJvWJe8AMKkuIkMk5h7eh4BqCpA2V3VDUzViNHNIlHKWaqiv79R7OOM4ClBrPRwOp9Nps4E75tlbs5QGES9zVfXWmrAv54tWx+ne1j4I9YDe1FQWUo31mbgo8a47fE0I3es8WykUUNAQnk5pFJFaazNnYBg2RDRN5Xg8jcOOaHM4Tl/84bd/89d//3f/+Kvvvn9TqnHawFJt0f32WqkqmzFTW5LS5WNhIA/miWD2oj7V6lgyvf7d7PyF+w+9VKd+rnwTC3Li7bi52m2HMUngLd3KNJNrljSO4/uHd+GI83LHI6q4/Di66JitgFJWZcfSeY2PPpfYl2BXyDxA8TyuwQ2FiwmfXmt1aB/yJFPtzZwhUOXE7hLvTHAmbHebxGLwanWT6GozsNfM/tOffP71V39oZZY8bDfjpy8++uq77+8fGiRgsx0Qvqz5H5nnudzzq9NZH5ENAoCHSqp133ERGPVVInbAUSFOQaMFam6lqc/NwSMl9gHRUlY0IyE8PByBA9G78BG73eb509unT67+u1/8/EeffvSTH318ezUKA0zuUKtJRjo/euLNKQaAe5djnQkCHkXDlw96nPeuB4S8hbsnV3JxizHRJUiFLARQAUn1kw00brf7PZE4MfFgTSuP9w/HqbRatamVpnOr2pyZ1WL0w53MQ4PaHW4xQOHu4C6qxJxEHMFjaT1KCWc65hyw3vjlmsGSo7kpXEHOAWJ3AoM8FAnQTT06xCNSHQe505A2+6v97bO5TMd7l2FTyqkej
ykF8JWZ4G5aO1BlTS16FcZWqZtHKHTAEgtYAON+g/pqM8jZqXNdkAhvNpssY3M0Y8WQ846HLaeRJLVqv/niy3dv716+fff2/vC7r1/dTe1fT+XFbSTuFGF5oIXD864bYM0f4OemWY/igBhuIKIIXXq4vcTPPe7qfbPOKwP4OI5mWJlImSTkNjqtMlyhRrF7ACZxFkjm0d2NIhogAqEZpV4hJE/uZmruGMabVYCuGd1P9DBXovZs/36zGa72GyJULVorSxpyJjd1bcXcnRxEwsLifKkQFvs8HNCqsthDMjPrclwcoc6lcQinslqPSxNB/EjtY83lpkUObX1V3yrc+S/1zNvsvshIfHAY/fFjvZv+eORhPQi11lS0BZZd3at1Thk1MAsJw6p2QfsEgjVHcx9BwjAyL+Yk1GNWZmbJMFOHuyu8mefUK90LYKAHzZ7Y451DA1p0GWXvDfrV9QYCyju+1lqLaTQ2s3me3759N9UyT9VAIgJOAJkHC1Ptx6bbYHV3pmnt9gLGfYFURNA1Is/sT6p6WqBui/mLds5cStCRL5iAuAHUxGJzNDciassBMFWfilJrqs2tcYeL2G5I5VAVMII2/PwXP7/96Mk333/1+U8+52zPnl9vdxiA3T5td6x27L7lXJtE6NW4wz2RupMbSBKNlN29qAsF1wY5UTO1Ro627I+oUoSLAovwBdqnhqxIbcyltSGwoESEhWeCyDdG0cVi5tSYiCp0KnPOmQjUZfUsskF3KyFotiAu0pABtGaH01yrTnNtTeFMKeAQSRKZNbUa9c5xzGkzJJaqbsox7QkS9RgCtJAb6Rtdm2kNK7MwYi3rBkTNki80eS9P7Aeu3btoNYHIO8k3HK4GqJsrqzf1tfjqYIs7wUROZKYdEk7MLJ65637AFjWFwGBYc2u61O/7gW+tdV5ZZiOofxB59Dml5c9+pj7w5etL3D1GxYFOJ0EOZhEoqWrDPB/qfPA6FXLmBrC14qpC0FbmhnE/am2X6Z+vM9cXJunyr9EfyCRJwnuJnucQYtLLUzd/rd89MoYAUHjQWDkJM5s7IJREJBuLw6p6MW+q07wUikzVowZPtdbtdvzs889/9KMfbfeb3spgqrUxI4+DzbNIpFi6ujcHGZw8uAE5ZeYkKQ1PPsr39+/fHQ/b/c5Us3B3Tb7AS+KSFlPN3MWjlo0n7u5QYE3OjTpBjwcwnkNzIvwFQ7AKscbp86at3zsiBLm29RSUBWCHRPzBZqatNrctdLdJV/t8vd8c6v3zj26fP3+22W23+11Vm0sjEk7ihFIVU1F4a+Wb774lyHZ38/TpU5H8+tW743xytzA7RCGHuEJ5g4k+xtY6ZlgC2hkWGAiWVVroy1dTE4yR6LqF1kkOiSymfxaONV+GwYIjPH7TmpoZp8RdkNOY0zBIupAMhdMyu0JLibpDqeNQtNYYimW4Fx+47W4z/vjevjwF+6vd4lyiyVKLNoJ4LeO4HYdcSnuYKhGzjJvddir5zZs3v/yHf/qv/8df/erXXxxPZdhcba/2p7mqUS1WzZupG6mLw7siO1ZEGdwjCXTuK/xBsH7+5uvz10tYEXFYAh0WYXikFle7TWJymMNSlvk0MdHt7c1ut3v59e+3AjUM6dGyrLGRu/syxXf5oWfWH4QmR9xkk5ywaL4L3PvE1DlEBigcUEo9AiMK9x4izhZbyNG7FhFsADF/S8JQK8xiqkPKQ6aHu9cP79785PMf/TehVmdmHofh6dPb9/eH+/tDWToTxBf9PXe6RFgs67Z+zyVQPi81EXeJEcKFvbTEojGtuZJV9HfrED4WuEPNjsWkVnVv5uYyGIe6GpMTUR6uSinzNNd6cveUHl69fb/bbn79xe9+9uPP/t2//cW//sXPXjy/GXMwBbg8zld5SdptISQEWWxdkIFMePAupL6kCmeY3GOir7gfXdOo3/TIBtWQ8miKOU49hFLKOSfJOT1lTsNuN1edTrWq3R8m4jrPtaqVZuGd3EcXV6Bh7hB55qgcmoZedFUN4iliHpidxJlGt/6dwwFF3sJCArJFjHQJX882ljmYZs4xNxHBxdGW+05Ln9CDzJmd0mZ79ez5cZ5Oc5nci1pycCA03MldyAHp6ayte0YSsZGtJyXsEjkLJXcnJhECmLy3rYg95JEpJeGM2EtEwzA0UwfLOI6b27y9Ns6nqR4P5fXr1y9fvn737l11lGrvD9+7pNLqf/4Pf55zTmloWiLMSCkZdXqD5RvGRAOB3MyZOYqzl5nMevwjssUSMtWiF5FJGOGI5GMwrcJjeKQbKJZMJu6uttSaOQsJGZMNUHGzqCWqkzsn8j6SQElA7qzOBmoWdNMEgkWBXs3MvE38MI/vT9td3o4p5TG5Qd21EMBOvCD7TV29JTpztFxa/hUYsu78WJDQ2lnBI+vTVr6mS/sAgHAWmmem8IBEqFUj2cSi9rnOQ12+/LKXuBofunhcTr/jsVP74Pfxc3KQU1DhxoFQEBmBU8pZrIG4AiBhNVT1TGjmVU2oC/WsfC2BQXZid1WPrOECBdtlSjW8Aq2biGhhvPW1JbI8eheLQi10MUNEtN/vr66uSinfvHpT5jaVagpnhtdm6k5D3sR5XgKJDknVNvV6urvDpLe801x0HccUgTCbwYxoMzrQbO10m9NljTDYVpf9TgTKzmQQgy0ezgGilI3YI+PmBCZ3VqqG1KwNGxl3WTZ0++TJcTrcne4+/cnzzZaN5tvnI8+NfHKfd5u9EPHSPWB3wIU8LVSuFNmgM8ONgnA1AlBGBNaLmVNvTIldATa4W9/ZofkbJzaK6M0NijpNmSVYZx2BjXFm9tY748yuwtSJsIymRhwTz9HjXSAiknMWd5/n5u65OYCpzKfTDGd3MCWXbpFVlfII87ghDMpJcsrMfDqdjtN8VJma1qpLT+kMbrnss18ekvXn+EHAzt3DxaX1cS9f5+8fc3MvaLXloMYQcJ+TYJZYtHPbrLtQBN+dE6WlTW8mBr0oTwDOtU6qusjAdE7UeZ7HcSSioNXtqWWYg3ON1qI8wUy+0OU/6tg5u6uTM4LiwqOUF9eWE6PR8eH0s58++cWPP7veDpubfZ1O12kIBl0ze3sob+5P96cyaPWLfqC7s/PFuPyHE4z9+5lp0MZJOuerRCSMyL+YFOZmYMKZfGaF5BGYwCm+MjM7EUtm0eZ4/3CopUzTVIs26sNSBhLC048/+uSTTz755JPrm73V1tpRRE6n0zzP2+0YlGjxZVprQ87L/iCm5DFnT6REw25Ppttxe/302TcPD++naZB0NQxRs3VGTPqZGbTB+3h4sJj09yNaKiMuIO+z7D0UTNTZNViCdNFjYbGUKrqjMmutpb7TIEQOXwXM46CpmxM7yANe6Cjl6KZD5ie32/t5un16s9lvwL7b7e7u7t6/u7++vt1d36RxU+uc4HMpImJOtVbHcbPZP3/+fLvdv3zzXSktRL3UzC0q3CYSJyWiX+qSAES6DEYuOVjsBwOw4h4jSVtPQReYhQTBHHeV9GDfsfMR5r4agKtqKS1LSynt9/vr6+v37+bN
ZhjHMeexzDXULJIMsvC+0zoKErVbN1clcuaL+IZ4heWsRuOHxgQXIJx5ngGkxCFuVkooTSR1zNXUqkMobee5fvf1999++/3vvnr36uWbP/zhy2++e1mK5vGK0lgaHNJU56a1xVp1378OHy3L5QAUXavx8nv2bSabCxOgjw+m+uPil7vnxHDNwjmxCMENrkJCRBUuQi8+/YSES6mb3Ya7aNIZN4WLN+wmbVmZcLubFHoYcF85lUlYck6ui/BGsMuwcMqO6n2IuheSYpNzZI1LXZJoiURb40RB4t8Jx4lFpJRZyPNul8xZOMFevf7+26+/+PRf/fsnNzcvX79m2qgqkTx5enM4Te/e32lPkrt3UDd3Hyh/sBlWU/xHoy51gz7yOIs16yS8K6N6DLYxrBdOeuNUzUxJMJfWrFUbhiFLkqWtRPtBSVyyg82sEtnkx3IYH/Du/f394cHdN5u/+OjZNbElGSIsPn9m0MoZVKu725mFzdBbe4/u6dqR+OBi16w4kO0aryQiFzg5eG7kTqBsxMzCnKqzWiq+sWqnh/Zwfzid5mZ+Os4QGfKmKlSdKGTHOk9a0/fUD3+oW8PczDuNPgVaLTImIghabiTMzD39IJfEzNxU+ZzDBHFXZ+m7cFgrFPYcSHxw/GubicjNVBuzyPZqc32bT8eH+7fwNGRJmxE6a51YOGUWFr8ohFFH1vSeChFFjhqhY3TC40MZFuqOLOgOFCJDHoedOkppBjAnli1JItnwsJuUDw+nN2/v7+/v3759O8+zg4V5s+EEmg4Pv/n1r370bOvuL168iDlGZuaUyLs68FqtiL+Ymfra5esbKDLtc0AChPC3iKz59pK38FIU4ePxaGZhAmzBkLvTKCOTW9/4xszCKXEmYaJkRq15rWstDGmQJORLtwYAhFPABGCBDwAzC+eciAh6d5rnu9Ocj/NuO1xth+0mbUYSFiZlUrgbGgI0EaRSwIqOXtM/kbwUFGLXSWizLw4RWGmuYs8sO2c9O9QhoGd8xLqkQMdByELTtT5nTSB/uBXXffvD3/zwsZ7i9aLiQ5MzccpRBhdZWklgA5kTWEgysYMTuYIFBAOpqkU+yuLLQALABgp6KyEzgyHiiaj6BgbahJBzBgyPk+8l6HnkV9YLZmYJcUIzh3GSnHMasrx/4OpJ0Njh3CwidmorMevCPSDERDT5LBcY3wjfQx4MHN+KwOIcip5Offink9MRszxmCurSF+e0gYkQsEPQ+Q6dbwAkKOYNxpSnuaVxQ+IufPP0dnO93V5vnn12q2g/+8VPvvv2D88/ujm9eY9WicvVTfaTkiuHoNGCZU+J57maGdwJ7ARxNpiripAZLcRBS5WSl5Zxv0pv1jx8KjUmcqPgWe5O3ajVpqzUiJeBsSillBokfk4S0m1sZrXFsCKcLKTqz8m56OAMYK5mZslIVUspqkYE4UxMCRJwc8A9EdwHSYO0yNHGLC4yz/NxnqaW5ubFWtAppi7+1TeQYCFKu0zgFjsX4bm6myqsgyEAmLfonrFDu7dr3PXWJU6l+8JThVBWjx5QIna4mro2NwWBGlnE9NFaJoeZqyrnDDbY2pZksKVlOjye3iHBdq6SLDiTD496P0eE4JWhdRj64nx1c0NnDpKIPQLq6VU3Y/7044/+w1/8d599/JTLJNp03j5oGzfbK6Nh3JyUvvr+TfntH5r7ZUJ4/gJn0/eh9bmMhHzxM0QEIeHkTNF8tyjzApwZRBZD2sKmfcjYe+2J3NDMKQnlYVZ7fzi2Wstcg180RogN/unHL37yk8+f3j4xs+PxGBt1u9nY0dFmZ3J2hau2hnaqpxwifrEqAcwBGeCm2+1uerg34qcff/z67Zu7w3G73VZhDvoKWhqaWO42IQaDEIycQTy4tKMJ1Jk1FgRgWCphcAwfLku6/ulqGk1jNU4ZAAMx16RQAInZOS2qXdE/JIYQvGlprYLa9c3uI8fTZzf76932apdz/v71l3fvH4bt7ubmJg/DaZrcaa41o48QTNMEyDjsb29vd9fD6TS/e/fu7u7udDopPOQfqKNGhUSpy0WAmVqLpIMCa8VO0beM/dyn0KHqaFHCao6lKNNPK1M0A43giCGu7hQcrqrh6WstZJZSGscxJoIitqPOdNI1IZnZz+MM60wdi4gTBRql8/a6Pa5s9DuyNlXWX37wr+5uhlrVzGpVJqE8kDrL4E3uHk4vX33z2999+ct/+uff/+7Ll++OrVmzsB/bqTlqATdwqtqqNjV1IoFEUBKWXC/z50iNvReloozqvanK+Qff8/Kvq5M9H2FXZhpz2g55yAmm0cpopimlLPTs2bO71y+bajPdjhvhEAA+Rzmrv/algr4mS0SUSGqv1TtBzY2JGV0PGbA+SYWe/OmFPblc84VCSEHGjE4+yqlDlxmA6SL65w6zJkKbIbk7BJuBydvD3Vty/clPP//+1UsiIrfS7OnN7fu7h8PxZE2DSImWlncEr2uj7DIMuNwDj08uL1bx8jkrtWC3hOsPnDj8+EouAmGHm7VJa63z0Ib9djsEybO1+0O1PmcDg7h5gXlR2m9O98d//M0fnBic/v2/+fNnT6/deetzfLRZjHsbUZ8NWS/qkmAoqr2rtJKvlamV1WlZjQVowxYexgFKLmLOblQqUt7kcZfzAHCpOs+11nonD7WomR0OJzMf8kY9seemZMrB1EBUnXum1GwmIqYENIBNI20Dg4gT9Yz0UnqkiohsWRJL6ixfItJas+DbeuRYyd2J+2w8Uxa4wRddN4dL56Q4H6TGLC6oBnXHMA7XT/e1zU11frBEvrAQEYwXOYf1pAgeCQv3PWNnGxety3WPBXNSzhJzp5ISC9w4peQk4zjS/nmp+jC196/v3z5M94fpNJdWzZw5ZXFMpRicJZc6zfP8y1/+EsDV1dXV9U5VS2vDY2yRe9fH6LuaundLKbVmUbUvpQgn6i2h8xPWbRSXcmklWmt0kVUuqhkCT7EEas2dKG4rJ2GBk6oFCOsMuBWnDrYy1WC7p5S4BswomNgd7uQwgmjeCW/I1Fq5O06H0/F6N95cbZ/d7piaeXErQbyVE6ckOlX8scflSb8sBtVa16TRLzK3lQvt8rUUARCMCUzMFJpkQdVJTAwneGCA3AE3J3qEFF2tYuB1L38TT7gce8aSicQ3uYQ2nO3zPFV1VKMGqYZmcDR3O05JKxKZuyZIMwVxHoYQsFMHwcxhQWZBrA6Cuy0TY05Oi+HuhtQia+eFp9TdzYLb6vERc11HttZraaoSREzMqh7D0AC0uTucOUGc2KsRJ2bRttSje3WDgqY6pcScliZGh7M6sXekeLSJyJGWiFq7WqUD7kydVFUXIAG7IyiwI3nQupBY9hW42AfBXBn8tl38pzFxzofpYTeOL3706Sc/+vRHn3+atqna8adXn//D32zaKZ9eF5aUROGzt6pJVDmJRCgJBiWeTyohselGIAgSVuZnQxDXRvtB2CmtJPwsFHLYREzCbhZUalEgjM1tSwVU1WoHVRsqAKTWTzvgzZ3ZDdyURTptH9Tc+0S2u2MupSrHOKp
rberupiaS4Mts3lKXZZKiyvCUU04kZIkpZ1FGrVrmOqnOytWUKDEcEuzMkYCqWyN0aKh0Wv7lFAHRson0LJwcR2mAiRZeMnbYwh1ORECvg3j4iT7p4ebmpqGfrnBzKxHJMbF7cxNHxJowV3do6wQVsTEWCsqUUsxzm1mtNTon4RVSSjFXzQt6eW2BEiP2LRGI2Tso71Fi1j/L2QEjrEGxGBzODtd2++zZf/4f/6cn15s63++E3r9/vx8HVJ2nk0K2m/Fqc8V58/rdw9ev3rp3tWVcRDOPPu4Hv6E14L5oXUYHq7+JsBtUVTL3OezldfEENe8dKKKirannYWMsRnQqRaPKHW0xIkCcsN/vnz59vt2Od3d3Zp6HTq49DENrAxZRymiaRlcHUQwF9TrWEtM5wQjFfH9z+/Tjj7/98stZ7XA4BMwmsRBRixZ5rIwDOEtRE1GUHOPCmfh8eeTswfrY8VoR00bhs0/kmwesiNY+CQXTam9Tk4MZMgyty/r1BQRMCGNOR29ZoMDz57fX1/vdfthux+b2/v37w8NJ1be7/ThuwPeRw5VSirZx2Ko6l2LKRCJZrq52m83w5MnNPNfT6XQ8TtM0aXNdmg8hARrEZg51OIVvg63jodS11lu1IFvBGlYCUNVqKiF4ZabaZSzcvbmx9TTYXM1oCT56K6k1m6YppWTqpc4EFpFhYFWPCKY7ooudKSKSsiGtAlNmMFNiW4cCVq/pZ4js+U1Wx7/d7yJ6riUmq1POm5Q2Zno6+XcvX/7dL3/913/zj1/87suH40SSlFOx0loLE+pGlHjI6XScm2kUrDworaAARZ8qNHW9MxAGHYNHV8ScCCF1Y06Pas++KIX6ksysNR3qSBao6nbc7vab3XYzJDGHxihHNRG5vt6P4/jq1avStFXTTR/5w2PSgj96/JfMISbpNEDgvUosmE8nZmZKsrzYaqukaRzW7g26BUdP8cwcSuhNnuUo9W2G3vc1Z7ZQOBSYNWKF85D55nrf5qm18qd/8rNf/sM/wdqw3VupOcs4juNmaKfSdOHeZBYIEdns64pSb+XFAezIkXPYEe51sVfuWKqiC1iaHhEFx3YS6iygMI8x+wVfQNqKtuqoLAaMkhhwZ2ESIpRqqj1HIx7uj2XMm/vj/Hf/8C9A2m1v/nL3F7vdDnO9qMs9mgD0c7vArdeaQThHkHTR5FTVS1IZLC5pbiAiC9JGJkJ2Sko07HcOmU30BHVr1auJKVej2tyMaiN3iHBTg2rALM08FCYDckBqgABi3vnDo7Dghpxz5IGLf2zWk4TGnJk5XbB8mzXVKtKLLL04TlFJe7RjifNy2NUuHuvZZzGRZCA2aQ4iGvfX10Qk6f7tS53vpnnOwCanMQWfBC34BmZiWRLX1loHBBr1TAER6HOML5AxwJJoSJJSIuEYuY0VyDmnYTMMw9uK09Te3k2v3j28u59OpfPPseRgRBARuDUtbpJS+vbbb/f7/ccff7zd/lRE2hJv6DJiAya60MQj5lXBNaUUbW1tlkYSEaZzuhulbScHU3QPlqyGCE7McUZMlT2BLUlOQy5zA2CmUdlhEWJ3aG3FO7enLp8e9W8oXL1VqwoNrKE7qRWE1hcJoHA1FQOKOxFnkTRuOI2u7TjVUo/utM3YbmVMG0oMq26llJIf4XgvEryLrB5L3cTMony2NgnXgDNEgGjJEtcqf7AQX7oS702IHt1dmmsA5yGIZZfG47KLeBkifkCEtv65Xg49xvmnw+nY1KuTcVaSoJ00tAe3E2kmD1YWKVlSSmmgkFMThpmhmRlJnwsiMCQK4eIEOBNEUgrQS+zL8Em11pQfFQyYKaWUTINkY12L3tcBzGye5x4bLSNJAEqpoTtMDDNrpkRMEGd1Z4qGYeQcEaww+4XfYk7xk8G68XY2d9M+u0jW2y0KNTdoJ0Jzg6+CTjhfCAGAhTqZm1EfmWWLHmPCytIeOIE8Dg44ydX19e2zZ9U05dysXN1evX39/e56PN2lYWRual6n+RBbBv2+9pocEUkic4hTNQNc4BAZgLnFMEavMkWo6m4pc6sLrQKfyZSBqC9fkIUwx1VEohKk5EQSTf1ismgrkwJCZKAG1ha90xDzouAoNtfkrAbu6isMMiGWJIljPxCWXlCkryklhudMm8yZdTOkLKktI8tuXYxBRISQJLTg5fLcxkbqOoTL6RKRiOPhHPg3AMyLpHWS4PSjSucz1pWIZDl4uhR6OaiLzXoCEW8f/dooFFTTRCzEQVKq5hbJuvOKl8tZOOdSCl147CAzXbfr4nqXhDDS+LiHIUzPDMgaZ7ivBPe9QrUYBQHQ+Sei+c3Y5EESHx7u7t++vBrlcPf2QGBOc1XnVGmQYq0hpC+ZUqcHWPiGAx+LC8ioXySKEegvfG6ibWmww92hSxIV1lLAnERVbam7rKRBrTVw8uaz1VOZnSnlobWm6tEfV7A63BxiDsy1NFOwSE6DiDBqnUsptTVwIOyRMkeJfBiGpGxg718GBpCTOsYxl6Zp3IBJhvzxJy8Oh0OZp4f3d+Pouw1zktBC7PIJsabLFnRD0M0HdSU7lDXgw8SR+i2T+NFU6UEgtWYiCRZKIguULrN5I3NfVKT6LCg7ETu5wp2JEAM9nWl3zNKsetNnH318c3u13e8k893dw/39vSmXGLAZh5QywE6kilprTqOqNbNmRea5zadxHIdhuBluANSqh8PheJxKKdOp1Np8UVozV++FNCAABcZ0lggNzYnWrcoiTkV8DjqX6N/iHCwe4XyKfcnNzEyYGZH1hYDVOUsJ/7IOEXWPaBe+2SwR4Tyae7bn527kY4eK/z+P+/vDelpFMpEcp1Len96+f/jHX33xN3/7j//yu2/evT9O1UUGoWG2qg4Ng50TZy5zvX94WIgcQYnQia9AROa8pB8r7qGfOEOAqqlXWEiis7R+t6VEe169JaXsDXsAY8777eZqu0kpORqZs5uCWMCUXrx4YWbv7u5qKbjeOyFijjUAWiNmWYa5Vy8fa2K1mZtE+ziU2t3dvdYqIpndmMmZOs8Z1s0Qbw6FO5jhUKJoDHY+y/4EMjdyBNn0GeIVJ6q2AoNSTSldX+8Pp4e7d+8//8mPX3zy0avX71gwjuNc6/X+6nCaqj50UbvF4dJFwnC5By63x+X1AtAwgW68/GsEXhzg/4v3jKPsDa4RWBiDfSkrxYcoazM7TQ9qZQiRW085jQCpt1NVgERyIlalzTgw8XE+/f7Ll//06z+8+OTHf/rzZ2avVmUU9MmChfu3I4qNQjOi13HO35CWFnQEA8CjNQkfNDs4hObBBBFKBlHilDZzscOpnCat6kRZggZWhD24lkttytUWX8w9JDADe2IBnAiqG+8cXkFYs/SkHETKYWWtuapqMesVqPN9ok6hEfdCyck8GF2CnSXmR8x6+B4j30QU+nLrxa53eRB3aAC/yAhgysOw218TgDa9bz6rt9oTmNbpBOOtVo3QNXB37c2y9bP6MYpWoUCEEouIcBLmRJDmzkgimUXc/c3bu8Npfn8/3R1LNSfJ7lBHK7NWJfAwJI
fNwf7vrVb79ttvv/zyy2fPnu1v9utH9xIM9/DQ++j1WWXBO5hXzGy1nGYWMK61cFm7utnlqem7KGZQ6TFJR6dNp+qsxMxJSNxQmrZgFSY3SQMLB+mrITNpL5B5W9SC+lQwEVEMW5IAxo5iMNPaeDsO47iBljof5jJ9//L9buTbq3R9nfcbYbFiXkvpurKPT7cvshzUOahXC8Ep9QtZD4tdoKzP3md5w9iKdLET+l/XqfnlxK0vWW9QPOKvK2p3XfzL7Xppsta31YUx+LLGkVqzpt6cnN2TE7Ez1Pw4nUhbJuQU6sDzZrPZbiVJFNsC3L+kDZLYQUlEATBRowU8NQyDeXUmAYmIuWottdbdkFZTz8SJWL1lz80fOa24cAcNwxABRGydy3qkBagIbBrcdwZOqmHROnsRea88Gs4a38zJO/QkWJVShBfhoyii5o414kXBk4NryS5N4YW5DE2GHudGVBiQ6gC3GEN61NubyM7N7Pr6+tmz57XW3/72Nz/++cfXz/a73fYf/+EbEdrvt6erXb0/qNZaZ1k0QGNYCGpLqEBk8OA+EWfvIjjcp+MAsK+jtWYpBWdui+Czr6QZhb65h5rto36OGYgac7Q2yV1U21Q1CDXcESLLRGjGWhUcq+pOPaQz8MgppteZmd3gTJKGLGYmQRC6avUREdFms4HROPKYLVGLFpnVFVQpmSQLbzYDow2Z99sdM6tqYilSLg/w5b4KKvOcc8rj8Xicpmk5w2cSalVlrtp8PX5EpKD1GF8e1/U8Y2GSNFsnYwOaAxJinE8+L5WE5TT2PUbnfDW31pppBEyM3szsCeHFpzNHlk7Reb4cHbyMTrC0B53g5iu8M0YWVfVX//TPz5/s0GYvuh3HN69f7jfbuSnJeD+fpvd3SptaK9GZEg3nn4gWHrHLT++ud6FgTikR9aKMu5vbYtoN0YWDhvist2ZLaI4FOj+dCmcz42OZjqejEjkwdYYnGNjJ1dwggEM4EvvlXpCamiFnDkLReOtAEJVSmHk7XqlTYHI76JIJwDhuD/d31/sdkzv4+vbpk2fv3r5+U2LWzhamWLgvAeLqCSKoigu06BA6wohF44PICaRWrfXugSxmvZmKSLQfsagJiQjMYu5XRNwDV0giMteiDiUmTvG+7i7k03wUuCQC+ZMnt0+fPrm5uaY0fvPlN8fjUdJmmqbIl4iouQWHRFxIa82yIbh2BbXWeZ7dPUkex/Hm5ub6+toMh4fT6XSqtc3zfDz2FZ5rIyKCC3hh0bHoTIc9Xyx5bwHlnA0B6zKAetxBWCNyv0gSwohHQy5lWsA2vN1uHx5OzJwkM3MAOJdRZ7ZegO/WsqGFRVYLzgZfiycivXLxwVHyhUh99eWXf3ZSIqda6/t373735ZfffP3y17/9w1dfv/7m5VtV4WGfhIta0VpoHsetJArMS8dNhfUITWbAbA3Wadlnj7KROCAAems5+KIIHbn3g8cHgcL5ycButwvKeGtqbV7AL5RzZkrPnz8363ctQtjWmkheQ8PFn+owDGvXixeidiJqNseEs6TUqYNMl5bIojjlKpHYsERoDiAqCNZ1j+FQEc5DiEGIe9f72Qjpco1MzI4Q/IvsCeaSxNxZZDvyq1ev3rx586/+7BcfffTR6zfvYC4i8+FwfX1zdzwep3qaSrPVPsPdmYQePy73xroNfEm5W9eiOO+THtstGgOASZ++gVsHRro7Q2K8T+HLjC6IyLTNrbU22TDknLwlZk55DBdgUT0Hu9v9YRoYAnrz+u7v/+FXz59/fHPz5OOkREEsfi7WXBrqXplaGL/iy/TBtov2xfryNUJd7qATB8MRd+JQSur09u6+Nq4NpUGNzc21EblKBZhBrVlrLUlOMqSU5tNkydzdajNvrbOht+o7d6ja0lfpLX1VTTl05wEybSW00ChdrYc9uA9ify6/VHeD8XmjprPG92pt1ksONw2s/+oOM63mgSnrXP3O4iLb/VXyWu61Hk5mRi4sREtLY93wYQB3ux16D/+DI6MAiDn0RSVCCDJGsPIM1cxcmJM2O80zyb626f54Oh6rcyZJ5jCtrZoww73UednMVo7TZrN5+/btN99889Of/nTYDiml+D4RaFHvB9C52bFEL2aPQos1sXEsuXRs9YWkk87lYyKilJaRReaQLIo3MV4UNtgoMSViMbMmKcrGyolkIM5wr00rINmjCRnexRxwcBBVAA4rIAFpWPSUt8ychRmYSvHaMuf9Lh/uX9/Xw3xqp2N6+mSz30tOiZm8PmKnX0/xqtOwhi5husNdxgZbu3OXee+l9eYFXnHxTy3SFlCMUca6cXTb475c+po15lz354d2ZikXrvfKe1HaFhnS+PRustJ9lZwzEzVTqOZEIFTFMO5aazO8ROm0uZws1dPHYsPtbRrGqqQ19AONrI5jhlpmYavGUJ1tpqvtdqYKsnKqzMKU0IgpeaU+MiYyJrfmp9Ps00xFVbbuEv+RaIQ/qhpJR86ZSFQ1SapWHx4eimmPcMlTRhBOEsqYU2tN1ZSIcwJETc1slN6gg2Od52GCEAHNzQWIuNx9aQFGLdY9sPocY8ALzbpfjK0BCHZRsKtreD4nVTcXB0gR3PgxqQI4nB54I3Nmvwa2+OTjj3767MX/9v/+X9LN8OJfff6w8X/5L//l/S1fydZLy7XWqlcfXzGlYkYpVdLW2iYPTMxNRS2x1FpbOZHIOAwp7YJW0cwyswhzMpRG6kmyMzWDqjMSkZCTYwsXJhBKzA5EcNRaI6Y0iKraUnqF8NBpryj4duc2M3POuXkhogCkEsiAxDGO35YuvBKTEBN5KxVA1BqM1LwBGIINWN4mJq+SxuvtcMMYVcd5bqAkaVNn45SSMNo8bunmKkNIhFm9KqRbn46iJkirtZbGInCB55x2rLobNpkkWhVElIYsIja7ERiu1MzdxFMiEUlG7s6JoOQVqmrBguZO3mK+FNrQGpu21sRlyRiraiVGlpQHgp8cxkQ5JcmZqRLAItuBvYoVd60e2qPaSilTLdskKQ0pDYkmtoRKNjt2xoxg6QElGZPCqjYSVoIxkjDMyJSJmEBWATCcmNSjruHsLknevn6zH+j5fmtFi8/zfRMej6fcKr+7v1NKN88/trx9//b3o2xbhKeEiC16lkNIDocnJiN3jw4DCYQsZd6zJ6skfcLYhyFtjjrPpxgZnFs19ZxHgMtUF5bCxFlArkSnQQ4baa3WqaqqrcKsTJF+BOklu3MAjRrN1QzycDwRMRs3VeI0zQ2QnAYeevnSlAOKcjITEWaJhnIiA5qZeTneXm1b1erOMpDLePv5FldHG79/++Yw+YthEEcyT+BW2pBHIzaGE4qWWiYWBLAnOtLOzJDmfYL8OM/MAHEnBBJB1zMsOXPKkWkkODdTEm7qxMKJlWFGKkGRUg3mQsLOVIhImBqkuSDtjkanWcdn1zR+kvLNT178+PXrt+/f3bvRkDILbp7cjOOYhuE0F+bc1JmvptmYx1o1JZ/qQyxsMDeoldMUhUnebrdXN/n6ZrTQA5ymeZ5ba69eu9amqkaViMDs4
gXY5O2kaLRVlrk1YlfyPKYJJzPzZFoq80CStDmYyTqvTKLE0lsmiZKlIrwRuiFoa1NtD+BDyg7P1lJOiUXBJ0ncqgzp9kRTym4oksx02mYS1mZ2KMe0BadcKpuCnYQGK40aInwoWswbGMJUCUW9tZbSkDi3ZuzMlGqze39qBlPc359+89uv/u5v//E3//K7u/fHwzSzJMmfWfKipXhzciII7bQRESXZmtl0qoEVF0nkBA1UIvsiAuQckc25b4bFhQEgioT23M2otSxPFzdikug1mzkoJXIib1prKznLZhyvr4ZxAEiNmosJAglA7LjaDH/y8Ytf/sPfltP9ZhTKThvKvlE3R5MkIsmqt1KZXL1EB8SXAdcoh41XKVSj6jwzc5IkLKY4lolSdpKmjSnxkJ3kVFv2pi7kTSSLCIHd0LSlzIlT9iwW9OBN1BhkMsDVrLm3JJRZyL1V5O2WKcdJz5S9SZvnZ+Pt63evTqfTX/7lv/3tF1+A6jBmkVrmt9dX+fDgB1YmJlAtDc55HJ3bkkj0AHHhufG1BrQGZMzsXi9DSSxIipR6gRuduHRRIUu5E5TBGCAIAe6mDQwwJeLBYKp2nCi1lHMGfFADKDFqrWU+hEkprVXi7bhRtV9/+c3Tf/rio89+/uRnW21zHsRwOpb7/Wa72WyOd8ectixXpGhqxJ4TAU2tMklr1YlJWK3XRs1BHdHT9VJlAbWkdONInPYsV5PK4eRT8QaSISM7slmea5lNa/COtBMPg7h7SmxG5hXMsxZLFmC1hkbc2dpqRc4hUt7Mu9iVd0p5atWL1pQSgVUZ2LCwm5OTkIBgrrXVWrW16MgxUee4zOMI5mBmaQ5nAZG51maqDlDsTDOwsKNzveScyYdWW2uNSdK4ycxsc211O+zvp0LjFanNcz3YDB6Z3HHMOSdmM3VXchhra81bS4k5kVJFlD3IjGxrFMKeBhcGgTLTbjtqPQ1uzM4yVhoqjSXn2oAhW6lXdNve3bXWRMr7+wMz02BO5MamuTRtLTqLm7f3h9vb29999c3ul//0f3vx6bjZHk+njz56Nk0TO7xVbbbZbIgcpMMoxOOSUUfly1oz1UacEKPvdhbkdPdEA0KoQJjBwcIYQ/KhnCwiiYOc1t2Nm7fW4D7mIVPmxqYOynBnT4OAg2NMhYBBtjD1qsw8cLD1kbtXtZQyM6lq1caMlMTcT2V6smFiqVNt6sM40Haodb4rJV09cdX76Xi8b5Vvbn07DmlMeY/f1VpBNgwD4LXOAPIgHrk/LOQxALiQMUYTTsmYo2NkC/HMdrv1Tv3gzNSHV0nUukDFZY8hcrzQE3H36ClFdidkHUQds/G1qwhGayclAdgWkp7LKttKWRSnZk1Wl5qyd7nOzXaIClzwVAaJTRaBmRBxH3AFAG+t1HqgFqm8iBAnJmeGMymIe53Vo6BhjnmeMfYB0yVPjeUQIjJ3axphTBQCc+b5B2jasEG90s9O1AKVtxbt1vT3osoFtbNq8GqR158/yPj/Tx7rOyxFMP8/ecllgr5SGsRvVomzeKyt+e3++qTzyefdbqeq2/3+u9cvq9V/92/+/Tsc37x9ezydUkokqK2V2sBbs6hoOdzZkTjgWOQu5kgJQLbqtTVVzZtdfLdejCcOfAKYA69IZxSWAUg5ZgV7Azb6h6uf64t2AcEYx9H9TCyfXUI7ruNIOWYnnBcHmczXfhr5urzR62whtC0iaZFIcjXJQ1rUBTmzC891btbSIAOJElGSMbGI1qrX+w0RBQApeCJEsrunlLR5NNZpmdUmonG7iaO4IrnNjKT3/VeJRVqQ4rK0vv3igbUp5EYUMk4LkFWkfxwxRc4NMrPNMLgH5MpaqVU4pZSIy7KwfPGgZeY6pTRIWkfhg9eHOQkHmXt8XBqE1UosIAV7r0Xr1RAvWTf/Upw307lUVb26uvryi2/fvPxmtx1+8vlnPGy/fP3l6zfv02a/ub4Fyel0MhbOY0hwXDTxeogEnH91ee7cXdXPJ6hDctiIVbWqpehQL+2pgHwRJ2JSr+porc0nW6uqWIJhZlqHiAA3iz4biHA4HN6+ffvjH//4eHxwcZg9eXJTSnnz+mjWcs5Juu5Ip76+QG4AUQjylW4uSh+06DiLyH53VQ6n0/H+1Zs3tzk/u9oNOVmnIrAWCgqwlFKMUQRNAi/S22q1TU5EOWcivuywxteIOl9TpQ4+D2SpjXkAGSdZfifLkzUICtFJcZyF3Hjcbu9ev5uLPfnoxX6/b60dj1MQbI7jdhzHIW+IWxqG+Jx1b1/eLCIqJeyYrjuTSIjs4eFBRJLkmIPNOV9dXQF49vxHx+Px4f798Xic59kuBtlX02oe1Xxl5jSItgUQ7toHipeWS8DYJGrdHaG0HEE6b7bwxAQ4lFRba6Yh9Ky+spkHUZlIksRm2ihsgDnIs0RxwWizG1trtRYnij5M89a0jeOGBQRxMJMDPJX2/v3db7778rvvvvv9777+5pvvvvv+3f39CZTyuHWDkbk3i37u6hqCdRNQs6bN3JyJOYE5YHGrhYnKIz/G713+eblz/Adu1N3g3DyIgnDxJh0xkXMax3FMWUSE2EWcjEN9xc2Mnj9/bmaHw4Gs4+5U9VJ3CQuZBMuH2Nqw9gYPaxymwJfWkzZfTKX4UrSmZRRnefMQG/SUWIYclA/M3Gl1NeYtHS3GKKlT2CZJ3Bmbl1WiQFN3o8r5/fv3L3706c3Nzd3hwd2HYWjqA/NmsxmG01S0i3FdQNougwq/mPC5XPO1q/bBDYpnhmfxi3ZQ37fLuWPyBbQC78PqK7LprMI1TdNqvlYUjGq9/FB3mk7lyy+//uu//ts/ffIXt7dXMhBMr/I1SI/HY1ybo7oFVKdD5ACs42RECwXKRQuCmWNzlBWQlTZztXmqzQ9Fh7mRIYO51ig/NnMl4pD3NH0E9QJWBPGF5bnYz0QUznq1xkQUyEM+Qxkj5mYsUB1a4vI1pFknjz44MvG0pdnVf7OarCCwcHciW9+h1przsNls3GhuGhMf47B9+f3XXuftkIfrqzI/1MkAZmHxnCglsMEcQmQOScKbYQS5N7XG7sog5gxCSs4pufeiLZOHzNtmdwVhUNaYj2UhpRgriJBM5BB4uhiMTymbWfD3rA0rIhqGIczy999//8UXX/zrf/2vr6+vj8fJXaP5jkVDmIiSnJlLcOEovfepbN3e6+H1BZ24Pn+9L+vxWTWi/aK9drENzjfoMpboh25JpS6/ki3CD+vWWm/9irRq2rwSND7d4+BnIa9TafX+3ibhcZNTbillZoTUk0gOTtmIeQFklta/YpgiN7O6tpp6EMgaoc9FaMHMBD0H0z94rFbl7ALco60V67CeEV/JPpZu4cVtonURLs/R6iXXd+veENbHNHMfumjuxoQVExJ3nJnNvNY6mdX7h1Op2+020BpCQiwk7CKmVi108BjCDfBaVzK31po1TQvft8Nc1ayZekz5EwXjbL8qXSaD4xHEUEQh4WKLwT0Pv12cb77c9LQgPJkfPfmHZvqD+4HHpj/Q8Y+M0YJl+uH70JKdX94A
XgUbl3BK1YUZimEYqtVXb17+xb/92X/8v/yn14d3//Wf/urvfvur7dXu9vbp93/363J/+vT2o3pq89QMYgyGBvcLmiVGgxG7kBCJulW1WhtnXdfBzBQao8kdnNCnv3tKFlij1XquvufSZPcLiXE4d6HUjUIo/LAqoqTcpQUUvpQomJNwjXYZzIyJmMEUMnNsZtZ0TXtiAROlMW8SnMiJlQRK7VCOr+7f+uZWNhutXtrMICZq6vf390RUaz0cTq21lFLO/Y5EQli1cRIxa62VMlljVS2lTKV0mdQmXM8lE+1aq0uhBYEH88s7u3ro5dSd/UdKzMxJJCVOxA51NdfqkSQ7YcHOQY0G+mDCeD3wstBqrTaRYBHoRCzFnMwJHl5HmE2oS3StzOYABLTQ+jOiAk3m7sKZs202ux999uNvv/zdPEOotGa3N6OZEcOsmet2HDdDUhmKmses6Rl31IMz4Dw6RhfohWZaFe6ERdrLmUACphgNdzCnHOP7SInMPCRtCGAydTdMdVpN27pQ/IMZ7vWvb9++/e677/78z/9chFS1zlPO4zzPzKyq48gppbXc21oLGPzZZHd1h+buvk4Zu4ejHYZh++IjSfzwRuzhfmp1bircarOU4UxuKLWxICVhQQRqCLmhEFZeEGIXsYsTnd3YmCl6bsKcc2YBBa5fADC5wXqP1s8yYnAnjpFiYgKB6N3de0np+ZMnT559dPvk6fXTZ5zT4c3kTgERZGZ2WctY1vkPPrSTl1GadThfd/nuvTwQxaDYrnkYc5b9blNKmed5mo5Wm7tL6rPsgGMxjDlnv4gqLu8jEQJp3iNRePd65+13oRBHJCKrsO3ihru9JXLVFX1tDvMARZibO7kQO3ccvN0fD7E3NuPoTHMttbJjU4rU4qW0h4fD/d3h4eH49ddff/HF77/47vv74+l0nM3IkYIFLwZcoe5oZtYQsQWHool3LpwWkU2Psy+CGyyH6IMQgRYazw/W6vKHxYAggidaU+fzcxxASmmz2Wy329XOMBM4sYOaOZA4/fSnP22tvXv3TkRyTswswroUOq3jRRszJ+pFFiLykBFFYNSxXuOS4AWJBS+/fCSgSkRRI5R+B9WdUxpTSilL7LL45PVKzR1MiVmoqxnH2BWCd9fZDAyLMctIML7++uuf/OxPPv3003e/+nUpdRxHn6s7j+M4juNUjmZtqZJoJxJbAoBLg7MGwZf3bi0zXd6US/O1WvgfnLXLZ55DQGAhunR391rVDKo+jmNKTEvtn5nJGdaaaWIxtW+++47+9m8/3Z7+w3/8y8+Gp7Upszqaljrk0YM9wh1OASojIjfiQbgxo4tdrfGAaTTZxNzUoNYxunPhufhUtFlTSPNkDMCKNutT98bM0XiIyoItA3vrul1aG7lgBBCRUopfJISXL4nftNaE0+oWsUSerTXz3iRY7w6RLw1efHBHeh3sfEOxAgLNaaUkEJGg+wrGtzggp4f7f/nNr8jr05vdx8+ut7vrIXGrcznN10RQCgwWAQJiAeAJyU3dev8/cYrvyaKE5NQnZoUoMVISYVG4uzJnjnKzkXubprAt0uNMWkGgpq2nW5fbjzm11ojs++9f/f3f//LJkye/+MUvaq1ETBQFmkcByUVj47zJvZOBPVIcvQyNfuhBLm+6XihvrYNt/Znk7mfKJaIot58nMmTRObs8SmtOGM6IlkyJmedaM6GZNjNoNfVmygybTYLIjQVOpfX8IqfTzc3NkHJTg7bNZmChUqYg4Y8ydPZo25l7V75dl9d78TfVWv3xASciAjVtH6yVX6CyV+cVLzGLEm73vGtJFgvZ+OoX1reKmFbk3D9b745fqNuvMW16/vSpu4Y/iATa1Zh53AxRaTg9HKZpCodVijBkmqZpnkHk2GT0qSgBE7i5Ta3CbOBBODLMiTmqOAvzl0b0kLhLi7CSRitZoR1/GdWa1lzNxXp2ulhIax4RiNbeGF22LAPGQam5eNZuvs/hzR95rDvmh491TjJITsI4qxqz/NHxDJb1hWd2HF5EZtJCj73evDa3/ZP96Ti/evXqT/7s8xl1uNm+vX/1//xf/l8H1Aq1eRoSX90+a8d33759/5Rv07GS5DwmOAFKcArt4l48EKAPobFkV5OUOOc4D6paI+W2JWQnTyGy4PCLas2aeJiZ2znY6pcZC9urIn04B0LELEZGaK0hlO5BFV07gXpZJSg9o4ezVhz7hzJTFiZGBN/jMAySGG1IJMJO2kBN2kO1tJk5D+o2n2ZTEozjZowD2XM5XahrIhCxs0ynurmBtR0fJgC6OhXqxd3lWy1ZvS6+4WIXXR77cIeJhZlp7Vu4p0RJJOecswyS4KqlNqVWChMlQhISkUQJSylutQXrB5mZN/WmWNy2W+t1EwTJYB/7hipI4H0oeZmBiC6+gCyGkagfWwaC9YtqrZlxOBzMjCTfPNnvhnw4HJ/c0rgdbnB9KtZaYxgRYI083gEC0jO7gGCVmz831QlCQTTSrA/jOsHABjeAJXMeQNwCmA1q5szS3ObamgYLNHfaz97R9BUET73QBfTeI8W4fr93qnd3d+5+c3Nzd3dnIjFrFDTN0dRyPwb9TWst5xHAqqm49ihU1ZNHdBIvH4Zhu90KfHdz49om1XY83E1HYDduhkbESUiBaPss+yqFfB5MtVMSBct5MGatsWag1s0siwR9SMiTCTFxbJhgM+0cWMHlRvBlftRCHcbhSuxOc9XNfn99c0PCs7U9cXOUqkS03++b+jzPp3muVWutIEGkCn1culvC1YsvdiAITtTdc86BRVxLvLENJDURGcbx6urKrAWU1LUum3whKiDKiYNVwR1L4UwMZBz2p5M5mTfve898IaF5bLGXjoH1CTpfWCjCAi2HBaquauxqcMliRIAyc4IIWL252Xa/adWcqbmXyR4OpRnlnN7fHb/99rvf/+7LL7/89s3rd/M8Hw7HN2/e2EiqDuKcx8RjJZTS5jJLTkGCESloVMKIqOn54UuOtwa4l0bgA7Pj5w6JrX9e2u1l667dHvLOahvqwQ09jVdipCSbzWa33zBzHDN2SKj4CDnR9dX1ixcvfvfFb9++fcvM45hTT7Ta+olxaoIsYE2WwhfDfGXCwuOoSIQpUasWX5Wi4AE1N19aPUYQYSZKvQRGHKVEDrpeJk6RQZWuXRa0tv1TDE7mQIhNetBvBS9PrfXNmzf394c//dM//cPX39wdDvvrbVOvRaPXLaBiFnxjfiE6soaztpBtrC5yvWuXTmT9pS/FpvUmrkbezCQHNZeRn6EoZnEf1+jw/La8aNW6e0QXka6bNWKuSl4aEjFomsp3L1//t7+anr148dGLj1LetaLMvtvkYRim4xz85AQmOIyNzJ1i5fp39qDddqhZbdWdjR3cnI2EIA30/kGbmnl2zuDMlJqh1la1BsgT6Oo8ocYUO3+NkdbEYN3J65LGla4B91KNwgdrq1GRfryqnSKOz3KRS4aAFatFj/P56HfRUuloTdeEMCqqThrZkZmbqimm2l69evX73//+5bffnaZ71Ho8jK71+ZN9SmP
ilIfNprWIS6XfRGcQCTUtZEJkLKP0kpC7O3h2SW4soKDOS+TMFPV0F4aQUQixllJ0lS3c7XZEFC1BVZ3nWVvnzI6tF0AMAEmGYcillC+//PKf//nX19e3t7e3K4wwbHvOycxqVfMV3fahLbq0TuuSCj8qiKxnZzVxl0bbHhfZQX7GFdKZFW+pPZmZyYU+04fR6fL7NWsVEbLOTBBfblE/MCGeSiFVIYzbUZhaLa02+ExSr5AcTJSNgqiS4R5qBL3S5g4zwMoCdlivLvakLvp0l/aZwB0v/ljq4/ISLg1mRFOXdp4vRtmXledL01RKXVLH3iS73O3r09bPSp9++iIO5JgHFjKzRJyztNZ225GIDps8T8f1dM2VHh4eTqcTM6ec0YcPzUrHXDmxuVd1WlQiiCiMNUmMlHk1FRFJIA9UXiCzHWDoo4TkcgU/2HDLVlw3BK8WkxZ6MT4TsC4dyD+G+fzA717a9NWr0cUNji9gi8O5fHmfeIx9v2D7OlUREGgqAKWUMOJirM2zpNPp9PNf/Pw//k//bsb8v/7X/+3d9JCf7Ma8nY7zw3QamDEO94c6q0nxrUvOo3SZQCNv6kreyGHeY8ecx1Gk1ClgHcJAlpSStFbUYvRymb8Hen8BIUidmBufT2k3vpe1jfMadkZmIqGFFcbdzRqpQwSAkFPXDDMBuIsURZsLiLiSkJhJaFg468iRcx7TSAA78iAyknEzprQfrj5OM1OhGZwGToNyTuOQd/lKANSqKR0XTZjuZmixcbTUVIZhiOgniJY0CrHCIjLVMvBAzNxaBGtE58xtrazE2TGz3W43SBrzICJkHvUlM2Oy6Hlu8pgSE0yJU6PiC6GC8RpCwbvCeLcJi8PA4ji74fNzAUmbtWYWzRAjAqIjREQcZDEORvCAQ0DV+ilAdHgRLe6AALT37+5LacMwXF/f3ux3tUzb7Xh9fW3+wAOu97uAaBmMqTcbzKn7tvNpcJzvLYxcOi8+cZC1CsNjEDwZu4yUN1tjmVozV/fuaJvpXFXVHByFFtNHyMAfntxLxxA/77ZX8zx//fXXn3766el0inrpdrsNAXFgQa4CIWaT8xjne0Vvxmf1emEeOrcKeBhSzvnrr7+eDg/l/o5qzUxTU6lz3mynaR4JoSqoaqqdJjTEhS/d1Wq10kpNRimAye7kAXujcwWdiHKwOiHIJyhKyu7EzM1rfAxczE2JzLy6S07NkDbbm2fPr5884zzcH053x2PO42azubu/v7u7f/Pu7eFwaM3ykNZrB6I9Yw1go5h4WuemVvuAC7ZbYcGi9p6ZzazUybwxSET2+y3znoHWqqqKSNMyz7O6HY8nZo45SQDOzhDm6P2B/n98/VmzLMmRJojpYmbuEXGWu+SGTABZBaBQ1Wj0kM2RaU6PjAgfKEKKzF/lEx9ahBQZTld3s5tV3VN7AYWtcs+733u2iPDFTFX5oOYWfk6iKgRIOTeOnwh3MzVdP/3UULVkASozhAA1ckYH0zUBaLlhVRVx4IGK1GQQUQQKht5RW0d9otlyajyWYBAAUXEoMwBDmDK+fnvz+RffPHvxdhzKcZifP3/98tWbw+E4jRkRU4yxfyxhRLK56DCJ6kGUTBGISCtWohL6A4gZmPlkLRd4N5fNfVltwckSfdd4rS/DlYr+vZetlEulv1OThLzp+92233Q9q0fNLQsgiEBEH374QYzx9evX+/1+t6l0LyKyS2mxswpAqCc4tJmpFTAkQMUq2zGxKS7PWFSBCBADsSuQWidsTxFC7TZwUmhEZEZgQrKTASE0BYetQEP4Y/sYwBYW15wCMBDUIUxRFV68ePHTP/7jjz/++Pa3v12yt5pS6vuUUig+GhXVFHmpeDTvojqsK55VWPLRD7TTd7dsvbn+CeSTWqxmttqmE6Hdyxr4DTC0QplankvJwimm4GaXEVWLCJixWc7H4/GbV/Nf/s0vH7/3+I9+/H0Ms2YFojzNkbiWgrypwVWIedbJPAh0Bk8UNTUCdASvEgpGAxYFETzMgJCQEmJCTgBsMpdS1DxuklKKlhkMGZgoCObmbq21ui0hn79pSxTt89Ob2kHElj9qf/hAFWhD+7dmpVX5pcEZ/JWztLi0bbT/+HA3jcBIVVLqzeztu+vPv/r6q6++ur66yWVOTIHDnPX125thOG438eJse3lxTmNZ6IP9YZ1OF7vNmRa/VWn89YgYoqM2BMliCIwZQRiw6xMiCnIGnEopxdSACC4vH7up2u12tzf7Fy9eTNNUisiJ3IWYCIzcM8k5d11XizfFPvvHL/pu+/N/9bNHjx41zsUG8TNTwBPacy3A7Z8tWlt+u5DN+Lnwc4h1JqR740QISFbUVD2ADCFwoHXFbJEEMznh1UUEQBuU16/0eKwC2peSY7MIjAYi6l0fjGLqHWths/GeQ1PJWRQh52yqaHQ3ZOS02/QhWJHiVSsnv2gOcpOWVchzL1r+bv5CVU9zG5tMLZXPVjdaB8lm5vD15jm0Ou1qBWxREafVaLporXBwocZpesbXYVNqYcKpvbhLMaV0d3udMxFaROCuY+YYAoAeMvd9P45jKbMukOtcLOdc53EFEjQDdfIDOk2fBET12V61NUtQTcdpLnNGj7/BPJfzACCHp25IlwxA/2giq3BTXMMrAI2QcBmKYmYG2rjXmln9Z/T1aeG8knJ/HZnZCNeF7PZfF9NWEvStTSktGakFuini2ReC7ubd9dkH548ePQohbM53f/pn/+XtdLN9//xqPExz2W43Sbrp9rqIbfvN3bUIzpusZxARi5tFUSM0Js9WqBmZOr8qpxCX6re4NSVKVIoFLAqliBQsYEamS9HJFzwAqWk5SXUrNz0siJ/0uBTHgAAAgQKwSSEiRk+pGIIxnnIhhECEDGhEBAiskTiEAGqqyowxcoCAIMyUInKkzIiJonXv//DD18dxHmYw6vvUlxQlmlHOAgCtPFi5pGsFgzwILyXbgiVw+Z/nOcvilmGlGG17mlc9P7PMUPHMFennr5RSH9Om65kZRMtSolSZAYAWyJZ5qiIExq3kOU+jy4uq1pHyi6vhPzBzCLa0IwMjRA4ckAFRhcxbuuvUhsDewMPZIHGIxKrZTwEZOD4HCcAMgT0bX+evG3Bg95AuLx598MFHv3v3tnTKHIkhRh7HY9qeXT46nxU5YDSanTYNvPDQJn1jm/DmEuGR4anoEwMSAJKgWSCOoaeIiLGLSphzEc1m6gVeZEIgRXJgtohlKYHjMlkUCBkXJUvUMkSntKKZjTiZ2RdffNHS52bmeKIQgpv5FDs/lcxRlgYGcPC91qNdshYuqorAAGJm81xub/e/+vxzmcZY8vsX508eP7F5PO7309s3KfWFKFhQM0UoKowAALPMzIyBUdFUPerwkqMBoZPU46k4GTh4c2lT3KiiPsxVnWsCEdEWtj0C9HxY7aE0UMACME6571gMvn3+/HKSH/7oEonvjocYI1MQ0WE4vHj+6nAYtB5tpwUystpYrg5mlcrzvMqsE7r5XPVGNl+5yOwKwRtaGGGBUnMIXEqhTccBj8djdRYFmp
X1FmcARY/f0DupRBVFcq1Q14nm98rp6wo/IhIGR+FW+RTv7PRdprpilTOwfo4sCZ1hnPu+z0av3lz/9d/8w1/99S+/+fbVOKtpOBxH5rDbXvQ7cyoYMNjf3VDgwDHEYOZjhEGBiqkhAHknGHlLr5m5V40rFElTrQ/st78qPf2igZvu5dUA4tVBAFwmWwLQAr5xR8EFyAi567rNZtN1XQghAJgUt6eqCqrMHEP8wQ9+MAzH6+vrxBR8TJGoiVL03IG6lpM1hA/E5yeufQ63xX6ntVTCzBxr5Uc93FpVoQlFCykABFygP2YCHJYkugf8Jx6Lmp8gYkJmDByI28RGD/0BEYGYzLrUEdHz589/+sf/4oc//OFnX345jqMohBBihE3Xb7dbEZsnVfFRg+nBCj/4odrKlbv8wM1oN7nevhYNKsiD65dPILD6sK1KCQAmQJVn1UopWUuZVGbq+55S6IiNCxFpkSxilq8P9l//6hex7/o+ffjeOVssRVBKnzpXekC4zKsEgIrsaLdBBqZGBhQCAIuxWhSI2XAqkEWBtwCkEMzIihn6VBn1qZOeO865qCCDEpng5Ad/HdG19VGfX7Ja7RbRNZfGU3rrckeFKDu8s1EhAHA4FSE9qMBV1Qaqe+aZixrRnLQuoqNImpvub6bUl1Kur28/+/zz3/72H1+9eetOxTDnTYpmwkw73mLcjBnL7fGDzYXrSR/eE5bWd2bO85inKecsSwtDjHEXOKVkIGglMZgiysSEXfLGQlTDFFgohL6DkC63n5RSPN1pis+ePfPQyUmMHOVhSsywpFeiJ0kd8HJzc/frX/+667of/ehHT58+3Ww2TNAmZ4bQle90D62FfP3PtR5bW4Smr2Tp6jwd9mV/uaLB7xVmPHh2z9kLKqoKoHV+2CoXg4gtdbIy6Is9wkAUbCGJcJ9wnmfuExFLlv1xJFBGSqkz6w6jIZfY9RSDFgGBQEAhGGT0RIkzriL4rsGKfbRKJtcpprgKvQQMTCMvaZfFeC2iZT7loIFomldzknNPzi9FSFthTRsULoTYFI6tAp95ntc72IQ8jMUAGAhU6+RLIxaRi0eXWkTK7LhlBFUtIgJoqSPAMA1lmjKYEJqo5JynaXCkIgColhACEZhJDF2NlYqZGaEFw2HKMTGqs0WLj8NRRz9pG1pRn0RqT6rXsqhh6FUd+larBmbLOJjvCCXictlJvL6rcwFWNri91pF9VQ0LTqmtY11TrLTy5L31RM01CSG4BLsi8wQGEYWQRNWKlTn/42efvR5e/M1nf/Xkk0dXwy2kQMjHaWIM5++9dyzx1dcvvzefFYCzac65gAihBQaU0uo9iBhCEICsKmUmFjRD82gYFiwCew7Slv5xIDMkxUpr2zRyQAIkWckNuIb1HTUDU0BDAu+ORTNCMMBlfkulWo3oHDbGPkJ9Af9F59M3ISJUZOYU6wCxyJwiE8AmdSFKjGQkGIIFyCibp2cXF50ex+F2KGM2C2CkBQ63twAwz/M4jmaWUu8WJYRgQMM0znkmIlIR06LCSKo1AvG6BM8MTGamrbdnKduZmdMoA4DzQ3KFg0ZeWEaY2RagGgBIUTAs6PNbndLFENRWScpSiuSiUU0D3i9w4RIYMBIjAHOIFGlB9lfZYwAKSEYckMwMRcgUDVgBEcgADMjbFrRWxBAroI4QwSDPM0FtHN3tzsdxvjyHTb87Ho9Pnjx59vzl8Xic57FAjEzjcUYmMPAYxpzpHdGMyPlrEMGRaVWNAQZCJkVQAy1ZvUuFQgidqhSDUkoutT27mM0izMTM3qpogIKmcHKF1wdwrenW7wPAMAwppVev37qRMzPkmPOcUmIkAxERjgEA5jyllFQMuW0BNg5dRPXENHFUhZzz9fXtb37zm9dv3nYpnqfAm83u8SXm7bXIdBwi06xSig9eATFANCJcpsUtgAUmwhAAmGPVM7XGUhv6kQEMBY0MmJkdcGrihTsz87aT08BH8sowATIiI6BPUihgMfWvr67/85//Nwz9/+3//r/8/F//a0MyUTEtRYdxfvfuehxH8Gl+y2xqRXM0+aIMPWjx/atL7dbdFryoh9a+HVM++gVdjABa1MS0lDIzdiGaWYwxdR14n17fHw+jN3iUPFoVcZ+OLeCtfeitqiQ+PEWdfsN91dM5bf/1qAPRTQOqIQGa+vwGVPW2DKUAqKbgnapFhcyAKOwuHu/vjp9/+eXf/t0v//Zvf/vsxVvA1G8up0mASREPWUopqoUZEYBTj4himGeVarfIUIgCAvo4IEQTU6cHIAi4IjFfC/MDO/XAZj245p/5b9umBZXhHgma1GJv3/d9nyKxSeGQlMjACNQZ5FNKm65/8uTJN19+td/fnp2dIRTP6y0ztxQAeJWFMbMQiQR1mXPYKgZ1SRYmG0QkCkR18LihwTIgql5vBcAR5u7lSAiBOCGiohFam4FWtztEVAcVmGM9mJkYGumf6z0jJKQQQkp9znl/d3z27Nn3P/nBxcXF67dXIXYq6t+16fppyjkPJtoi6gf70vyt9TbZKk/fNgtP0cXa04Wm6uU0juLeqz6hAjEhUFtsAAJFvYfrtmIlRlNVdoJFJPPhEWpDgSGPf/er311env+bf/0n3//wcYrGIaEUUzUU508kA1WsToU5aR3SYnEIUYEQAlAqGsYCQ4FJrCgCBqIghrmI2QRENRpEcUydt+2BgRYoORec3Ph2Xdd8j7akLgMtNmhuSaulu3hjTectOD2rjrW7WO0TFlVw8tl8+vn6sPg7bYub29NqLMwRgHw8QAih5Pnly5eff/7lV19/e3t7619RSgHAYlBy6bru8tF773/wdB6H/f72xkhEyGALfMYdh/pdh7tDmad5nr2jgRE3G9rF/hwDU+IACCWRgQCR9JEigKgCwSZSpBSxzxiB+3l3RkTDcRrHEQBENITAFN2miEgpKkv4gcgp+RgDbNnSd++u/+Effn04DB9//PGHH76/3W77vq9ruwTJquoBUBNUESG611i0lvzvFs2awNuqERHAe1XvhYiL3si+yx7utoAwcrVG7UyJGSxb73vRnKhSipEkY0RTVfcSPRs7z3MgNpCSC6ohWCQgNrAgUtRyiDMiRgxAonXk8un2zMzZGdaPU3+FsPhpC6aAEBXZzHu+1iqlaYOF2QgfGAVbajDLm3UZV72dtF7kdj+ymoRhdmKCbevm/w2v3l05txi6u65Q5llzjiHMw1Elb/q02+0CRRExKGUYiAikzNMwDgNWqi4b8jgMuRToOvAZd13XBaYQCAMzUVGyImYGjEicRYMRhRjSBqiAkRmL5pAiCoUQHJJmZmogasxsIIBs6H1TWsRyKcQJACrwi8xZod1lATVdgU8qn8/Kqj4wsb/3td6JKrVMhCc0SP0tVgK4GCunZd/3ALCeFwn3oYbLfeLTp++/unn+8uX88U8/evbqxYcff++Ix+2ji/10FNXNbhe1m27LUbNs4zCAgc1m2RRVO4JABEhFxOevUYzEAQHKOJdcTAVWDdaIKObmFcHjbkRgWhARgYhLKbNkrfz7pKpkEImXy
ewPFMEM6qwfwZaTXI+iiKqhGlMFC5uPBFhyRd5RTQBmlJgExElrPfhhJmYkob7vu1SYy4yFOBjCIU8aYPfoAnI323Q43mHWRD2F2MdeljEbIgYwu9I5jkOrEPpDDcNxmkYTQ0Sgk7aapSBijNFBCaW1OXlJnYOtmnGbskspnY5frchKO4euvAgwEBABE43HI3logGqGnqm61xnsZE6LHCLWdXP0oEerImXTn6vO7KkScDSplDmbeX1XUI3BgMBp98rCWdy20EwYIYQYkVX1eDzGGM/Pz3/4w08fP3509fb5T//4XxyH6bMvv5Jcuk1/eb67O8zg8wPNDAgNjAjsxIYCAIBa9SMYAhihIqihaAZRBQNGJZzF2yZ9PUtIkQhmNQiROCp5HhEQgYhjPE3IXY6Pr4Q3l5sZIKITujqtdaK02WyOx+OXX33z8ccfq6azs7NJCmICgHnK86wxsoJJWcwkeBB3ckfsNMeyiv005devX3/22ReaYur6kEJRyQa7bX/53ntQZJ5nySWrBgQOwTSLnZwbMXDPL3KIMVrqrSzUarRos8Vl8bbeFJiIYgygBQFDYFeNBmROkVFLhazg7VWRiMUIFEDJWDH2geM068vn3/zNL3959ujpXHIX2LKnqHQ/HPN8gtksxxwVjc07OlalvzY+B9GtgNXmWxYxRG1FDz8UcylBoX4C4TRk7RSkAPQ55yJydn5OzKnbzHMuRaYsy6grQzIVQarMFl75NFAv43oFuu7NIoEOr1MTlVZPQCmWUo80hpD4PpmEx4KO6kQzFUMIzPjy7f4ff/f5X/zl3/z6t1/e7SeK2yJ0dXsgTILEzAIqpBQI0MZpImIENAMFNQwOR3X0hSfjkAhQQWp+Oiw0Y2bmWgtPiNyHsQesePMeGPL1ZWsVbUuBrspyy6DnYqZIwIydtzejlSKAAR3YRR7qY9fF7XYbQnj9+vUwDH3fz9PBQMjhSIDO/OC0nvUOUYlCu6NFksUMHYdWjam6a5LdmamzRKhWFR0v4fo2hMCM3p/MAVNKZgoOBDLvcK9UK5FYGzCtFR+c3aYt2mpNYozjPAPAy5evfvon/+LR5ZM3765T6vfDrVT2I4ocCBDUaEkEn7T0d7appY/XL1vFeLjqiHvgHyMuhF+rssmyegqry/zbVGpX80Lrhug93QDDPHktMRBjMLFqqu6G6fHFxevXd3/253+1TfGs/3m47IJpAHDmIyLPu/u0vVp2ptqRW916AhxnAyalkA3HgsOMs6IhiQkzIWJphQ5Uw4JoBsWgMAHFGBAmEdEiUHGwjV7hvvSuH9n8XPt5aUkof78sPPteIbQlmet0Weszggtlt5mVYiehBUBE5xtr7zS11kBDXMkXKz7o5Yu3L168ePbs2fX1NQB2XefpQkRj4WnOpdy8ub49f/QodtuLEL98/nqeZ1Xt+77rOgAt09zyaOApETMi2gmcQfjgDEqCAIEZAwlhZJY+YqQiYEYMKZaQJOOU8zxlTbrd9iGIiIzj5KW/BRznd25zziIncfWkDzOLKCKJyMuXL6+vr9+9e3d9/f2PPvroww8/9A7weZ43Z5vF87lHpCcizPpd4W8VEVi50HqfRqg5S7C45w/8csQlgFHTXGTOMmdx7IlEKVb4hC4uWQFq66NzT9pCIV5K2ZxHt8IKmmdxjqIFLmTTlKWUxAGQxpynUjYBS4FccoxzioE3FDECmGpmPAknETkjQ3P4XUqBTgy3p2hwMZcIpEXuP+m9ovRaCfifi95rL2zL6O1RzHzqkEJExJxrr7jex6zW/r5FFZ/ef/HmXYyxCxHR+q67PDuTMr958yowas5g8vTR4xA77BMQ9buddwZHTrvNUwCY59nneneRu8g5Zyffe/v25nY4nu82KW299GJmznnbhYgcKUTiaKC5aM5GiKoyTkUAQ0gOmzaTaZoRzRkgNpudiOQsMXSqOsps5tVahKUxIzBnMxFtdRVc3P0qfwv+fK1z2x7o0iAOLV8L94vXvI7OaygIAEzkKRZceKsauQIv1L0tY+Eawb+CQzjcHs6252PeH/b7tI1X+2t+EscycgzMAbMVzRQQdz3OudxRCfz2cLi83KU+EtGu727HkQBDCKhQzEwUXWNmVS0EFZegCKbkUWvJ6rwtRMoKDBiQJJoUc1xhjSEJzRgguaeKiGpWnBlhIdV1XTnPI/hkHg66DEIJTERkanOeluqxqWoK5JNJJReOfLbZlFJiqC4tkMbEXYjMRJwCAoHN0xB3qZi9fPlyBEyXZzc4Q6c/+tkP/urtX0tJSHC4G0IPMXaPHvXb7Zkz3QMABSZvB0f0kI+IKDAg8gI1rEX8RR+VpdJLp8HxoKpGy4PjCQDsWb1R1ERTSng/SxRC6LuOCMkpxaRIKSEERpjHCdG22y0zmllKabfb3dzempljIGtMagawpaUJntCymORMgMyYLE5xosEkz8rMHPvEOUPHpEXRLLInjYTJu04NzMAW34UQVcs0YcCY6Or6HapcXFz8wR/8wXA4OK35D37wA+AQ+v6bZ68O+7s+YVYroCBgUJBDpKAIIuKFQzdqgMIcA1EEujg7z6UgWc55KhOQKYKojsX1OAGDCQxTNhAiQqKx1P4lQy0i7r1JPjHurA9mC87XOhoRQWEYphDIzL74/Kt/+fN/EWM8HiEyE6XdbjcO9RnFNIQ4DnsXVC+lNz9js9n6Lg/jfH5+fnV184u//2UMAbtUSoEuQggUIgQueIxdTCGO4xgBtczFNBCXedLAbCilDgJyYfNwFwN3ISLiPM8mJ79HFYpKF1MtuYtyYFrmFxs4ntZjbkRACV0pRQtyJINQAIphAXz8/kcffvLJN89fXe8PFOI4jq/fvkmptxSD2G57/vLVu9ub/X5/SH03l6LqozJcwouokhEHIGq37WDXddhc5xPiwmetqhRCQ+8gIpJF50ZA2O/3McYyHGXOAHB9fR1CAOC+7z/55JP3PpD9fn9zu7+7O0zT2HU9euNALkSYQkBAEQmEAJpSD2Vwwti1+0hEiKCqUpSZt9utx42+lYlDjB0IAJBIcX7sru+1JIQQiW9u3/2//v2f//pXv3327KVZKsrTXNTYiIsJAIhlMEWSYgqgmCwPwhQNQcRUhSwwE4XAbtHJSim5TCKCRCFF0hoKrs1QWYjUcUmu2ao3ci3na+/hnwhLWsSirXPbLyilIFgXY9/3MUZEQDRCQ8Tbu33fd0QwSz4Y/B/+1f9RJF/fvAshgGYmBHHE+D3fvR46MlACrU/BC0UzVTgxuvlDREMVKWC0cNu2e6ZmWQzB3TU0H0YfYgwAZiAElXEI65AnVi15mgGA2AnMEBUVFQGYWQXQLMZo3lgqGZHB8Ozs/Ouvv45dd3V19S9/9rOvvn02TZMViTGKWInx7Gw7juM4joFTLqW5j7YQ161/hnu4r8qf0fyN5raKnGA3zQCZWYidZw+dn0xXM8SsNjKL3UtSZ0REPmlCLwfNeRyLEFEK0SwmjhSDmfWb82Ge99OQ5+lv/v53F9vtn/z4B+8/2hQopnq22R6nkZA3u7N8HMkgECtl
MwNDAZWiAqiGGPusPGY9FhoLFgjGbEAu9gCgUEt1hkW1MJlBBjBEAhJADoGYu0nyMAwu/81RdkfC/YrWYbs+IMuO1wyUB3K4ZFLyXNoHVjS+Hw2pmHaPomOMFfW28O01YV6/iMg/IefcYF8xxmmaXr58+Ytf/MPLly+vrq4AGRHHcTRDbxEysxg7M3n9+m2M8enTxymls4snX3755X6/9wwyAATmUmbndKxRBAITD7Pl2+HNPLxPvN1uExvp1EXrCAPOViyFYETFLBBtt30+ymE8XL15s91uHz9+nHO5vr4mIqCw3W6nKbch3tsldBnHcbs59yExXqddFk1Ehq+++ur6+vrNmzd3d3c/+MEPHj165PhYV55dt/HC2jiO7USs9dham/mGtiJkUxcN9+vy7/UrX2T/FlUNgSpTaC7jNM3zLJqdWbuUImC0+N7tcCFiSqmdFD9HDoUIgVVhnov63aqB2XA4MrOzChcfTVEbaMWKp1v1MMybPmz7LcWIJpInUHHq4lNOEQCW9gpZMQOrQt/3VttD1+CCE6zggcJvPmd7qHVs4uvWllpVu65b6kwPI0mPjdc5x/YVuAKnAAAzB8B4HPJeB0a6uMR+I6XoMBbTAiocMBsIhmwkcx7zcNalhrsFwq6LzLvNpnv06FFKyQPuEMJ2uz0ej5vNRj1BpOojnkRtmovhEBJnMSLk2HnSX0QMOeeiJl4bzEWLCjMbUkiROKIj44jmOQNRiJ2TZLRjrGKBAizgt2ogxU6IyvsJpwev9VL6Cvr4hLp/C//ESaZ9ABxzy+n6jaxNgl/sB+a7hqHWT0QAbH+zv3h8aYnnkjGRqlgWnQDECBi2FGk7XA2Hcd/tnoxasvFZSOM4M6ecJ2YAJlDzagsTdJGzcoOTMSCST5qAwG4xgSAUdCiIkkKx3LRt1aGAIYR1ZjouiUNFWPWMLV+jYmL+CT7VV1BxwcoHOhG6EpgRMCCgMoFH7kQYEAMzM4XADBHARIt325RSjof5qkz95c4Y+vNus0mf/vSTb//2xd14e7l5TzVP0+QnZ7fb9X0/z/NcasHTzMACrGRAEXWZDOZHgpipoj6cqfyehLRzWKml1pAhqp4xITSyZ6lUTJm8fxq9DaNIzkKkFbRrZmQLAmGZ5NaFFF0nOsNvSSlGrmhDAyIK5NztyggxEIrPORQAEJQAyibOrdpSRCGEE7ywQmEV0bpNsjJfnO20yDbx2dnZF198cfX27ZyPv/rVr/aHQZEuuv6Tjz8e5vzFl9/kSULAGIMiqiGQkaGzhrAn+QmRAjNHThEJicxw9kYcBFEwp0dlUkc+gG8/IgQkp60H79azFcjeOzx1NRySvtMwvT6h0zCklBD5eDy+u77a7/ePHz+OMZZ5csPjEBQ3xiEEHzTUEpa17GZ2PB7NkNmKmCrc3d1VK2CIiJvN5vz8vNts+o4dESR1soGJI/CQISgEslkcdSla9xGW3iAiqj20odI/ISKGuDQ/1jhw4cdiq2QlpgY1l2E4ZmDuOEYMCTklDpEihC5tz774+tnf//I3qd88vXz0+Ml7OWeO3TjOjx5t3169+/rrr8dx6rcbIirLNCMoXmcFM2Ux1aWEUXmJTgpzHZC3dSEiMVBAWDCtKjBbCapEdWIBQCimDM3BFdeWbkSQQt/3pej19bWqqAr4ZJVWmSTVAv5/W6hZ9cTSqd70jShFZJom1QRLuGrm5pkQoYiFkCBGNc4F+m7z9t31n/7pf/nbv/38xavXw1BiSgZYAAwJzEqFoCiSmBQEBe8XxF7BHJIKQEAOkUePbRwoDkZE5mTx4AbiPtFFE2bzvpka/C+Qz/sVpHva7P7cLau9KABAnrqsH26gWlbBuSF4WwFO08TsHm3p+k4Vu6579OTy5etXwzBUsvUYuy50XUzh5IK5dy6avfDjABlVdT+JDJgocCimMQZHX+OSLcVlnq3eg6Kp+WweQNBaCCfyoSPKTo2xDDJspz4w44KnqE+3zMJxeWFEIII6d5k2m40ibLfbt2+uvvjiq/c//OBid/bNsxcxJnOouCkhRg6B0LxZ//cVZpsVWJ66/nbtt7V0/ml/VwF8PTurDV1fqY7gxOpgNHySp2DAtM6DbdlqYGADgyxFVSVIH1NKqRARdEplGPM/fvZ1x4QGm5/95NGjnebjfhxNJfSpNLZ6cLCp+Bw2ZFZjUQRIBbkgF4NZcFQVIEAyyI4sdBMJ5kAdyZpFsiqgERgbGjGpOtlQBU/JqvW3rOZ0NwHzF69eOWcXea9x1YOmsF7b1Qad1hNXdUJcJVPad60vEJEQks+v8nNdSn758vVvf/vbV69e7fd7KcYREDFx8hClWRA12B/Hl6/fHsc5RNr0lzKLFfMnd4FGCyYkAibGDF2IbIiKMssBla6PyPT0fLtNQa0WEmOMRCjgU6M1q8y5lKKH+YCIAPj27VtVPT+7NELm2HWbyg2j6OOPmRmRzs/P/RmH8eCRudtTD6hubm6GYbi5ubm6uvr000/ff//9fucVn+SwJvt9A7GaFgI49Za3NXdR58XLathIWtqsmurzrahxjjRMqZGiWe2LwNX8vbU9qgwOSw2NGlxF0dAAQMW71U5TGVQV60HjhZJAhpwDIQBNs45THucSyUAzY0VrAgACLFQplH006wnadZIogXWr+0Oo3QOJXeuNtqqqSljRajV/AbXAxctoxybJ/gMRr+3C+otO7stK/kPqNkUkz5pN+Dh2XYfqJQlvN0ABKADBcCwiIjhPOWcnhWPmEJxk7JSaHecZiPrt1pCLAgbyc0YBEqe692zEXTFFBUM2gpy1GBmFohkZzR8DyZCRI8cOEZfZW96aIuNc1EAd8le1ZK3MrKWzvvSkQdeb0fTyA+MKrWYIoqbF1Ns3HSzq6SUDcOITbyGrGVxCAfM/MSfVQDBTJlQ1ARPwgVh1HjEwUSCxzIA3727Pvn/ebdNgM4KZFFCLiBQACbrU9492+dl8dXfYzOn8GLZMFylNx2mbWIoyBgzknD2ubSKTEi6yjs7NQOZrhu6mESObIaMiKxlNSozE3M4qEUVmH6VlZrgAycy7+6We87bmZuZmu8m3H10/6NUDMAAVRIzEHJAAgZGJiSqxeIjeKk+BO8IZDYghceACwzDeHsen55faDdfz1d27d5/84feGl+Pxm3w73GyZGjzGXXwKHEqtvRyGIU+TSwgFRsTAtW9BnF1apIFaFWGZC71O6jQg1En7+Ouk6az2RptZUdFsC2TU2GcuqyBo9E0gygveXcHGcczzLGIhuOKq0DJfWo+4IrNxJlAzy+MAAAjKBKqCRmTGhh2RJSbskWs2CACIY8sPea2bCBIHDihlIpUffvyRSBbVy/Ndnqbz8/PjgIfDIGJpEw+HA1D4wcefGNDNfh+6nmOnhsVU1EexAxGY2WxSFJztlhAZWMb5OI3mbrRZtuzcJ42FyFM2S14NKDTkQ2WctmXawD+jRtuxhcUtazbGDO/u7r7+6tu+7y8vz6VoShRj5JCIBiJCjJ4m1Irx8JFQdXOHYWCOCPWurq9usCLyIRBfXlw8ffp0d77
rQ2WyJcUgoEUMgVTMdJ6pFOuBiAIgSimgpYV5uHJTyIFeiOCJVcIFfQBeYIcFmOBwyaIg3viIuB9ls9vs0jmlJIACLIgq+OVnX3759bev37z9gz/40e7s4vz8PGfpVedc9vv91199883X347j2PXbIk5ZFqoitBqGOA+KA6qr2V6UGAIWmYmIVg6B/7NtIhDV4ySQ0YKJqKFo7f8nJLViollCCAoUQug2feq7czk3s82mOxwOd3d3niGeRfuYui5Kmd0ThwWM9CBCaE6Cs8iqkkEWyadiixtghRi6SRSAjbtvnr/7i7/4i//tP/zZiyNOU6YQ1GguKgYK2QFTAAqgIAZYwMoydGELgGKV371VkBRMpSxJXCAMnv2wZcWav/tAjG1ppqpiv1jx9ZXwndfpFIDVpINb/bou3qTtuScm8BYsRDLIZgB9irP4bEx98uTR5eXlb371W8e0B8IAFAMwOitvPMU5qIgYiC1W92t52wAb+1/1XTzyX0pqRRQdQbpeNGtlQ1RECoiBuHJWg5mhiWoRXeYWgIGzgDJAIPIeQ7d/IZI3NiASYTAkFVBV6vqcc+o2N7evX7x48Ud//NMf/vCHL1+/5RjVQTc4A1jqgue7m3PxQPk0R+LBm7Lq1Wn2sWWr17rLf6v/BOOdKvCS/GqeHJxSMCdl2O6t7ouox4SIaIQcAiHG0GnGl29uyvy7lNJ22//LP/nDvuvm44wEvRfWiBmQAHwJsgphCqEDxaw4z1owZqWsWAyKgAIimZqTeJNHqur63aSUYqCgAj6GFZ0qlkgr+FNE3IdcxEPXT9HWzaWIVkgQWyYGLQulCxy0Mso0ZBDpElqv3L/mNFbWq+Xbm0FX1TVvDRHN8/zy5cvf/va3v/3tb3XKIoJMRBxCoBiYGdjrjQoAgIZoOcvhcIgx4sg0a2eM2RAhRgyKpBjFd1xi4hQ5cjRVEdmDTOVQAGOMKfbMqiZGCFgMsCgWhUHKXsaru+ndfriZKYSYc7m5uUHg3W5TTBH5/Px8oT5hBZvnzBxS6pzLEAAOx8jMwzC4xHoZ30zneR6G4e7u7u7u7uOPP/7w4/ffe++93W4HUIkemzw3n6dJLyKG0Ma0gJmUIiGEOpg5eC9fUa26FPEUnDdXQVWt1EY7JAvEhazVdfE+LTOvWGRlGabte9qAe6SEiDmXPM81MAYwn3LnhWKjSlZnNGWjFAx0LnmYcs65JAqe6wGjkyyBISOiqdCCdLBTioGzPoSGmtUWq+/qEF3BO/8p9d5SaQ/ebziL5v+s8SarwhV+VwWpaiCClBIubU6u4re73TyNWkzMprnM88zMTq8wzs6/IWYmxoYWAiPglCXbMBzHeZ43xVJKgCQiZfZZzwAQDAyA/TZuD8ecJ5FsZqXoNGUVZOYpz5vNxhtsDofjPE8pJQPabDbiXVUUcsnHYRrGmRcHd32wAQCtBh4PFPRpJ74Tnbc1av/0v3JyBV+4lsYDAK4cTdz4YV1AK///ijfc/9bvrVWxm9ArGAOaGBDeXt2+N7zfPd5MIPM8QbFouEkdGBUBi0h9SB+c2+Hty8NVT/q464bcRUAxZtpgdfGLKxvPPzPUMRhWt0HUPXcgBe9vaTzCXgwkMwNRNSXTUuvg6K19AKDLoHl0jtCyuM2LmlYBRR8iVemGqSoEZHDGCzNTM47ElCAQEwFhjQZ9YnVkCoGYCZdxPRGZkSxrOdp0LOfnj/7t//X/8mp4/ld/8WcwF+ph0iHQpuI6AgOAw0hil/q+V1UKAZlDCHPJJ11Gplbnc/mOiMgsJcbYhpg9sM1VTu7T1aqqgDhkggzyPHuDeEuUgBqgMSATMFiMUXwCoNlcMhEl7H1+Y1OyLnLeat91XeTgJAcXFxelT33f96nruo1HknmXS5Y66h2AY5pyNkOOgZmLmtXp54QVB6hFs5kFIg4YCLuAj87PxuPdtC9dDNN47FNXcqXVGed8c3crhmcXlzHQ+0+eYIxAPKtkqXQvRUVVpagoFBXRLMXADBUj0DxNpkrMwJbF1WGdTGxmdSyDgQ9WZGZAXGip62w0qxUk81zKUgZRW5gYEBFOtSY1s+126/RCXdeJ5G+efdv18ac//amfU/IC1JxzmdzfTSl54lDEAISgzYuHELwMYqWUm5sbAGCOSByIdv3m4uy862JiKqUrYlMpgoSJmZjAUGcZx6IazQLUfIoaukSaFHdzkJnAEM3qVAILlhC9ARcWkQPzJlJEQSwCPvETmIHpbPeEiAaD4Wa4PQ53x+E4TEOWl6/eKMKTx++9/8FHPhSTiMZxZOAvv/jqiy++mKZMFKZpUgQ/L95q3Er7ZqJooFDHNMBajbl2OVVIoDlV3JlqHQnkuVTQZZsEAIqoFDWuM6+goAqITWbmousRwAfbD8bDeHZ2djwex2GYpqlebwrA6DN+Ft1ORIhGhE5ktRx2ai2gqpVtXKt2wpQ6Drw/HkOMh9H+43/+3//jf/yPt7ejhvMQGQPnYlOeFYiZxYQMAL2tS6F2v3h/Wv2OlgUHALXiMOB2qBHRc40Op1z5QCer9F3vChF1Ueb/1MW4epkPXF3a59cvMyPiGGNkt18ChgjQd3HKMwD0KZVpVtRPP/0UAN6+fe1D55l93CwSoOQCjmRGNVNQQwPvVsg5g6O5kDDUIylL5gtrpOSph9NpBQBAICbCCppycj1UIDRiJgb0PisQAGx9rUTES4s1MTEBMwckdHghceSgjNGIiIECABUxERm1vvq+3+/381Q++uijzWYz5cJIXqkuRZ0q3NX7OuPc1M7ac2j/XLtc6y1ul7XNXVSZYQrr65dryaxCoZY/IbdalWXXbF2xB/D18SwSlVJUdBiGaZq2psEHv/YblHgc5XefPyOC2IU/+aMfUr9FmYspqHZdlFxCQlIARkA2YqUwKx1z3g+TBTYKBbgNRvTvVTUErxoFRDYrKqjFfDfMAEFdu+NSF/KjUbyTYqEpXq9tez3ofSIiopbyWyohjOsFb2srWhNGLXX7YGts6a3C1fgEAPI6ld/PPM8vX7z61T/8+osvvhyHqYsxxdqW1qXNZrNxez2O45zHUoqRD1ap0buO04ZDhxWhGpG96JEo+AVd6vrU1wIDyoxlFsFj2e5z33chdkQEMIvNZpZNR8GD6M1Urvbz9d00aDgej0TktP9d1wXAzWbT99sa6iDOpajWEuj5+XmIZGbV74pxmqZSyjiOALbIf7m9vf3qq6/2+/313bu7u7vvfe97Z2cXbc3XYfP6CBCRVz/8/bKacd0C/pV+Xu0X1iKvHw31HCUIVV5Er6ggIemCTdD7iIP1GWw3Q0R5FmAFgDxPZS6IRsQBSUQMRMCLisGIVCCr5FljhKKoUqbJSqmPqaII6lPCEOtwMENqBqgdXkQEJpMC9/M1UGFR6t5KO78GYAatdn3SK4hYQ+b6OADgBq5l/NcawF8uz3J/1GdLSOmqda4GkEjGBJRCikxoIYQucNj0KQYts0iOMS5gCzYik0iRIydwZrxQI87H753lWYDHTtUjTBIxM1EgYjOUgqV4Gd0Q7eb2aprG4iT+At
NUTDGEJCAUUkiganPRMWdF5JyB2VtjmaEUmUWLASEFHz5xf+/r463CvbY9631q6nWtvr+jixtBkBPA1/V1Z8V3pUg+yYHe0/jtW1r7xwNTMZUZgEUKoB7vjnfv9vG9TeBYNKNSQGAFsVJMsxU0e/Lx48t8eP2bb67Hw7HMxzm/tz1Dkb6PSiamhBAYkRUNQDQQARQAE1hS0aoizjGK6iMBrVYCADREcnwhoBJDNARQESeYQapYPtVayTlN1yUiJxFxh2/Jw9UVaFABp+kEADIghsiBGQOzmXo06CifhkPIIkDCiIykxY774bgf51GuXt98/PH342xffXN5eLl//+Mnd9/ONmgepE1DdXFnVQDIUgcNb892ru9c8alUHbTYEkLEdL8N/Z4VvweOOJXdx3EsSHmavUJoqh4QqhmS1nFnC+smkutBJQRDhKLWdf7cl5eX8zznUvn9mNkj8EAcAjNB3/eRn6iWFGJMzMDMbIiaooi41+DkDDF4nyBlsVLmXHJ2spEY4yZt+hTjtuviZtOlLuRxfHx+RiiWtzEwqQyH/TiO212PiMdxYOanT9/fj4MCpq4riFlszPM8TUVMwEShlJKziOmsmkXEwBQBgI2EsFaYXSiYgJCYXf1VkfDlWFizqn1A9xtrpXvxPR76yt/1tGgBzZdSAKnrAJGHYXjz+t3jx68fPbpwprIQQkgxl6mp1JYwNjNU56mrWs5TZuMwTdNUisbIpEaqWsSKmLAwCqCAjfM0lDlwZAc4GnM3gxY93GVUAPCBQvV5OaYUwBpC0BTVId2lFHYH6r6NETNAMjDBYIgUOwzRAEbh/d3h3fXt7f5wHMbjOE+5zKUA8Wazefz0PQBKqUfELobD4TBP8vnnX757dx26lGcB0BCCiBWpw+jIQd0AHoSfykEATmmoKqoaQvASe/utL2PaJDHFAnUSHSojIbKYOLWxmSoYKqqYqjJwMbU6o1WYGYjAOUh3/cXlmYjc3ezfvn17e3ub5xwYAY2JmcxNJCKGEEYrRGzCIpXG0MxViiHYMivVT70CcUq9onLsbw/zX/7Fb/9///UvX76+3W63RUEBsGgpxfNEyACqVvE/hoZmjE2LYotIkQMSnfo9bNVXtrbH61ChKZe1PK9/tVSEfo85eyD/65/xfmkdl86ovotd1wUPCgkQMXKc57GUcr7dXV3dXlxc/OhHP7q+vrq5uVFVVDMCYgqIMQYE5YDOBmze2aNtrCKYGRkInUZliEjoUgsIWzHHXU9HhlcBQ1UrohKoA1DHM5DDDZGRHNpCZkbRgz43vhgIY4xdjIieLFAnpEHk4IymHIiCIUcFEZmAN30ye0MUbm7uvv7660dPHvd9fxxvjCwSp5RklsTcRZ4I5rVXsdqa7+6F/9e7LVrJYnV27kWSS1iqsFzTLMsDYYAKTrN2ypqePJkl1Lbg7gUKqJSSZ7G9dGljwVLqu3RGmt/ejr/4zRfdJl0+Of/+x09QcJ6HiBSIxzx0Gs0qFUIxnMUOU7kd8n4yUgsRlQgIjRTURKVo9jFvBJECmoAJmVCN0oBUnXj8RH7rIUer2Ptz8Wo+RBP79WG5vybgbXv+CcbQCrMtxjAzNWmVJW8LhBVYFxbo4LpAbWY+vdD37vb29ttvnn322WfeB+gehf83xf7s7Ozs7MwJL0Tz3d3d4bBX9ZjFQqBARAyp61R1wVIiIhBT10emmFLyD2zHPEMHJhOUt4e566ZNf75JURSIopR5FpkUJw3ZgiXiTeqFj8djzrlk3W63m81OwC4uLnIWwlAd45wRsQ6b6WOLwD0fejgcxnEgupymad1geTwezeTueHNzc3N3d/fxx9+/uLjw/LXjw1v1ab1ZrR2jFXVbWLJ2tk9aEcRVa1N35rkOdCwGMjOICtTIiJhNF852VRXxMDKEUKF4sMwhWPwH56XLs+ScG7KvqWJVrWzfqpolF81zIRWyMoUyTVPOxrFqAUQU5/Jz1A6eOlnap6nTvN93Jm0h6l/L8/oP1/FCEwZfirZc7fgDnAT+gUppx0T13r21i9sR868IH773dBgGXwUGjDEi2DgaaIEUJINI3t/dHfb7aRpVFUpe7jIgjlYf21TVia1ijIShlJIdeWrCFAGo5NoBzOxVzeJxFHMABo6sCgY1z+ergchMEYxU4Ob6zmGWTmhesrgvpYpqAGCKNTl70hcrSuq2E+vYfa3KHxjgky7WCpJ2rILbNofh+c+yvJA5LAMo27c0mX5gOfDEFgDQcBEK12+vtsfLtOm2cYMcQskoxaQoqSArgu0uzj96/O75s3HKt8PxEXZPNucmGmM086FBBuCYzCKlWFxwaGa6THB27LUpBCABrJlZAzBg9lFyTg+NALW/08nxDAxQkYzUFtiad6I5jYisn5Sosg7Qgg63iskERETyRfDYD80IyYi4krWQMRqCUmAijhwZVIvOhzwfchF7+fLNX//1X//jm1+8vn52xv2nP/n+68/2V18OZxC9cTbEGEPnEeswDLBkHJ14tu/7tOkRcTjODoGe51lEvLZMJ0rr++HHwo7gR9ruN+0ooDjeFYkXeImKgBGwD84EMCMwYqbIJtlFCdBCCLFLyHS+uShZy2rIocthzhMiImgg5JRAGdDRcbn56GYGZgXMR3tnqVixaZ6Px3EqGRF328uSx+EIRaXIvCBb7Mnjy0++934fw9lmg0JS5pgYNAyzdV0Cm0wxbVInZZicHyhALlNWVSmSxaCo8+8HtYrfAyfRMUJgcdQHOgkhuQstpmxLEr0qzWUx3Q9A9OSZyygaYKB6UfurWi9EqKh+Wv4BADCPk7fvj+NIRAB2c3f72Wef/exnf2JdZyfWny0RdV03HWemyFwW96IOUPVwHakmd/q+B7glIs25gO7vbq6vkun5psTj8TjNhVPHYoCci5gJmxVAIiYKIqZaAICJiimJ1AxfTaa4C2OBkDmYqPK9BgDXG6pqhAAEasgBQ1SkLPb5i5f7/fEwDKpQBIesc1bDsNvszs7PLy4fpa7bbDbzPOdpLnP+7LOvXrx6mXOmkEopIcUQ4+FwwAXvZ2YqzgZORIbOCnwP/oArawrOL7pYZxFRkVrqqY4CA5EtGt49V4Slvkc+m8QdaFMi8Bk/c6bd9nzTdcx8drZFxM1mk8fp9uZtFu8BzqZSCrhz6WfBfHxlzrq0mhsoOjyJaurKBS9nyUWGqfzd3//u3/2///Srb1+fXz69u7srkcwM2MfLe2FGRLM3jaBzMxmCsoslJiOGRkfZrLJ/Oze17zUfANWyNj1rG/QgGFj7AQ8M1gO71uydqi5zpB+++pi6rnOqwxSICLz45i8V8fjwo48+evz48W9+8xu37Llkb+IzE0SjpRkpEFOIDW6Xc+77DtVaMOzI0tZIrytMFKy8f799f4J6gQotycTqI6IxcvPjl/4IRGRgiMjRrwfz0mggjiHmnL2GzC7KgE67nFLquo6ZSynzVJ4/f/7pH/7Bp59+erv/ZSkFzFKgkriUmFIKIWBZmvRWQSAu0NC2Fy0UcSehXdleurDsNEfZX2XZd7V26ldTB1fj0
ZvbV99xaUE1AFMARBEppRBBJCYiCAGJtEwYk4iMw2xKMQQTuRvL3//mdx9/8v7Z+c8f7aIYMIFqQTWTLLmoqgFlg8MwXx/yfjDFhGIFFUnFWLViW8TmPAsAE0QwVgXLfj8JwQCXIjkCBLSFwXuN51r/0M5OW/AWHzZhc6ypK/nljIOteBRPaRdVXaG0PO+HCxf/P+UKerTgUdbnn33xi1/84tWrV6rq6doUe6eCOz8/Pz8/3263jCSau6673d3c3t4O0zHn7AlHEQnMlBiNNJz8ihBS2m5dFJvAuBSpMKFRGUcZrkc5H6RLsaeO2XKBSfKoNEEnMUWK297ODF6+fHnYD4joLClZnXdHqXbKVbHcbDa77bkXBtuShhB8TLKZOVskAJhJSpUpcLwd3V+apvzhhx96P2Hf996o/2AN5T6coW2HLnVaWcZxN3luEUuLXYjIkQBL8/xp+Dsi4lLyaqqmKRZZWGGbMUJExA7NtGiZRYoSoBbJauijdoxET+IhC2n2DBJBSrFpmkohiMSMhGSGwROjhgJohpHajdcHF1WRe6ye/pBm1YVe+8xNbzxYySYSJqdAGpZkOi6lUV+59Tr78jYzRAsnzVrg17FiePns22maAI3AmzkEzXLOu03PzCpZVe9ub6xIJWyMcdmtasJ9cmkIwQlgkgaz2R9EAMpxIJrMWMWIQkoEwGoyTpNqMRPETBRM2Z+NGe72x/1hIF78QtEsVYC0dqwFL4aIOfpxWR1yLNaJhms1evCkiNf6dP1+M7frA8mITnNE1GyQUeUCEP+66saBiLMMtOsW5U4r1qCaCFxSMgoRyQJEQOgs3by9eXx7TI97NIoYmESKARgzUAAhuZnvtmfhg+9/NH315uawH9LusB+2IRUQRVMAs9pnAqKgxfPisEQ1xRQxKAL6qAkllQpbUhNTNDqdutPJRD+Q9c5xYQNT1Rg6QVnnGMyMiEG1zaD3wKZuk/PmEuECZPJtYDQiJmeM4FaUAKdeIAYWFNE85TyKIY7H6dtvnr+brkKgaRxGG0Ki2FMnXe1+9ECUSa0qYlUtenJQYt/FGAN3XjAcx9FjQqscyqcJEFWdecwuC3nGd6rKzZ9yXENVSUwOVmuLgwTBR7dpIWJvMS2mzvoVt1vCQIC2sIxWOfTtcCfHxAWJDLoU1tuk9W5pKlaRKhRC4BiCVhsZAEBrhSQVJ+owIdPhsLcuyTTM4zGPw7briXCYQ9d1x2FQVQGb8swxAuGsJqbHOc/TOOQi4JQSBibiTVpmldraiIAJgAgJqDi0DFlJtQj5CFRVAjJnKNVTTz+CmSxCBWigS2YNllNf/1fxSgDuZzfr4sngnLMUxYgAtt/vS54uL88/+ugjfPrEjJg57LZhJkTMVGKMgEpOdLYaQalOvEkEAJeXj1+8eIWIzrB+e3UdQAlK7vub/V022J1dIBMCGWlxqJKBAabNdh6Oqp5PBREpBiFCKebVs0Xw1ZmVdDkuRERop1QkoA8IBAJDKgbTnI/DOJWQlTBuAtF0GO6O8zjPfd+/tzt//PT9vt/2fQKAPM3v3twMw/DNN9+IKQA5thkAnBjMM9cOBRTVJVGyat+nNYSeVxnKe0nfk3e1MNyqainLaVDy+Sou2iKWUU5/bkjkjT9eSRg8wmCk7bbfbrc5Z9N5fzfPQ1EpyNWB83Ygr05DTUiRFmedFWOldRoOEICmeR5t/rtf/vrf/+mff/b5V0hnAuk4iU9JYO89J5+d4EQpzu5EbNGVJnvphkvrY1EvP4J5q2DDUzgLIwIQoPyz2ckHRmol9qfffvea5lWcLnav4vRPjakPja2IvZnRGKDMGRFjDCKy2Wx+/JM/9I4pZo5IMhEzR2Y09ajL++ddEtyhLKWIFFVNHHiZDaBL91cxbX4bLUinGOMwDM0pAQBEt48EWYmIAwUfwLeADN3/UDEysGpMAyJ2FBxTioSBAjM2BkJmDiEBkiqoAQMaYdd1+/1+mqZhGDDwy5cvSyl//Md//I+ff3ZzcydFEDmEEKNWNGN54JA9jNubq1r3ejUjZ701a4fsvgCcGDjWH05EoKf849rpXP66Nne460dEBmAmqihEEWvwiVBANUs2VQUuCikaBnxzdfu///XfvPfe2b/6kx9FIjObpilGNiuiWQSMoggch7wf81iIOjZFy4UQANSJwkxk6ZvQjg0IvH4OZiAFABWcxlwBNaCogIq0ytIDH2ntJbdzsX5wXLBXurCar8+CW5C2Yma2Gt7Y/mp2A9FSEi2Z5ZeVUsZxHsfxxYsX+7vD559//uLFCwDwTMpmUylWLnZn5+fnfd977zdSAoDz8/OU0nHYH4/7wwHGeVItpWf20NVCSkmKGUK/2fTbbYw16eB364IUyhmbokxcYob5dsi7ntO2G2UqEDQwUmDYoCYoBEJluJ7GzJVRMxARY+WsRuBG2brZbM52F5vNBrD29JY6BNgQz5xRZrvdFu/QNvPILeecNbtlvLq66rru8ePHfd+37Xsg/6radR0shriFKG1TRGSeZz/R31Vf3nIObmuQtYirYl354uuqcvtYWkgBcUnAtftBE11YplWVjFENTE3VIgIIeD67zrDSEDpVf2pRhXkep4m2fXQzTYxgaEiOctX72pvI7xs9HQktk7pSCOunbvcJS45MV0MIl4jmXuPf+owsb56sHtT5kPc0Ult5F4+WnvMbCONwJKK+6xFtPB5FLaVwtt3M88zkk+osIEEkr2XPdTbUgqlDIG0PZsxBAYoIMYcQQKTb7cwwZxGyEFLXbTmgSI4aRSBnK0UQhQMTsyky4zSN3rUYUx3Q1HiHVbWWChbN0E6yB15QaaAWbMbiTNh9Lqm2B+0HX5SmZRaRo4jRz/xaE9nS9uD/ZGYjhKpHqG1G28hmDFomAxuHlRoAMNTU4N2NHQ7DbpY8jxatB0A1QgyBJ7ZiZT8dtrvHH3zy4avX+7wvYHQYh+1lD+AZUgRUUwU1YmYL2e71+5ohERqbgdtFZKwj0UTEFG3hn20Shsvg6UmKe1o+26ehBZzQaW0UGTmLwn2hN3PmvVYevDeyxldpkd36F77jxUqBAlI0YylaigrSu7fX79696550/RlP7/Y6yWbT3fLstIjAhnwAAQAASURBVKIOBzWzKDF0HGOcc3bV71gRAZvneZ7nFDdNe87Lq2VQWprAzJY5b8tz3fcDWoZYVQ1JKyl/8Rn3xQxFAAzUEFAXnsPiGYVSmAdUm6aJZokxxi6t1x8RQyBCJPKJFwEDBkqISHjKuNco3W0qKjvPfGAWjjGGEFPXDcNUtWT0OAN8vksXeTjswZMvuZjZ1dU7VT0MQVXFCjNPJRtC6vu7wx4iE0dBmIvMJUttijPzOcR+UD0Zjw4qssABADRnQ4+ueSHeUEZGRORVwOEqErEUqfp5Zc6XY3g6g+s3m+DhwgGQc3b01zzPIjNR9+WXX+92u2b7U0oq2bsNEZEpCi2DARfPgMl8MJeqbrdbX/aOWa1Mw7gn2O86b7dQYqQwFu26wByRlaTSOfRkOWf0ITSM6hMmAqkakvMa+o6YgQIar4Zr
e1jh50JEDFCQVEHINOtxGG/2h1nOxlmyFEO6OxwPw9T3/dP3P/jo40+ePn40lwknnIbRVF+8eHH97t08z5vdmYiAWdd145zHcdru+mmeERFQ3blHsBgjAOFCgUNQx9kQEWKcpmHJ+5w8uRCCrY/2qRFUQwgORXNqKlU1rIMKataJiQGranWq3mkSzCLW9/02BKIgIo8fPzbdaz6olRCwAa6Y0U8kIsYYCUmgelqO8eGlKuONKGhoan/3d3//N3/36+32scL23bvrlLaTZNFsEIBMpNYTQggesyCwk2OgOAEHIJ9GzIk0NM2J+UNV13PwHliitTDjKoRov1qdj3vXNP1DdM8AiRUwMjC6Ryrj7mbVWLUGpWZoUgoT9X1f5mm73X7yySc3Nzc3Nzc5Zw5RRDCGGCOadV0XAl1enPkkp4aU8QM1jiP3G6/bNPcLyWK3acx+sLQVLWMnHgY51RYjVsj9AsclqkVkrTES0QJC2aTei66BMHUxRvbBrSklDMwcRW2eS156gZj5xYsXNzc3x2nssX/+/Pnz588/+cGn235ze7svpcTIVHnt3Qe9FxDqfUDj2or5r/6pwGZdMWgvAB9egIhoeGrvqe9ozZXQGhICbRNP+X7RCs9u3vDpexmLzPM8M21CMueDEJWLc/rHzz//6psf/vTH3+/6ZHme83yxOzN0chEEBDWcpeSiBsGAik/5RAjESxlzgbTVpyYARDBEFslIoODjWIzUqQutOaNr+X/gBz9YtwenYOU2LFcu79tSGqqxyuKpuj/zwF7gkrxuEANEzDnf3d3d3Nx8/vnnb9+8e/36NbMTtGhK/QcffOCew7bru65DRBMlhpS6nPPZ2dnF5dk0ne33t3d3d8NwyDnflDGwf1HgvnOXaHO+7fqeQwoxtvDYXzFv0QobRyXId8d5HOZo59ucs1EMXei5V+2PE5VSJtHXz57t98dHjx6dnZ357icOISQA8NlvvuCbzWa73Xrqs8G2RYiZu67zNkhcpuCqnhDIh/HgLrcfN0cZhBByzg7Q42Vok9amvlMvaFNKsMT8bZ1b+aGVFlTVq2Eu+w880iV2kjz7RzlODSusEGsigDkwVeQCIiJQKWomeZZShOt4m2gmaoZaU4hN9mjBIJjOHErONo4yjjRvLCQCVEe3LxEBqDeLLz7zgn1bKIBdxd1HCuD9V1u3lkt9IKIn+1hX6YRQWC47yc9aHckyOROWSGR9ZNplYZc2AGBZDawLPaZ6msxQjUPsYkJfp6Iyilodp0e40Fc2jUxmlg0RO0ygYJORobICQuwDiZgpBnH+rs327Hg8RowXmw0zz/OsVmKM8zyGhZIsxuhcVQToZCGbDZWszerM07HvNmoqIGTA5qlKUG11GzBVh9cDIRiQiZk5XITIFKoli5tunudcSjUtMYSAFHgXlvzHfWe0GeC60+ADLUgIp2kkor7vnXzVUyBYo0GrRS9EMyxFZ3l3dv64DGITnMXLPLy7/ctXH6VHT75/fltu8pM0zJMJPtqcxf38RPD1oxt7cvHim7vw448++5tvhqvrH5190F9wmO8eU4hcZh2yCoSUrZuMyBKoAjnL04xSAC0wqqoQAEAxAEITUCQFkDIj1HjGj7cTq4paRymGICL5OIM3QHOCfAxmaOJDWRxQiwDEwdRMClWWfUFniE0VVBYIEmNkTqGqrSqahmBkBECMROnQD9P+7P1eO3033ORkYduXq7G/A3ylHzx5/PbqG+riMQn//OLbu6/xOUMyZLUspZScS0ddt9l2u36cp8HHIXIgMI+0c3HqS4iJu34LsC25Fusc6mZmDiglCF3fz5B5SaqvjU09hJU4DSeTokUZOfaas4gUI2ZkNgMUgIBB2dQkAKUumNIwTABws3/d930HPXPMKm6vAaDvOkQMSImpC0yIIEVElN2RIllI293f5JBEJLu+APJGRBFpsOpFZ50y6F4mFRHXYvOsOecCGxEBCMgM0IsVK8j8mJBzLsX5QrDLKrUUYEtheXE4EdEAiqrH0g65Cs5+YzBkcRYIrFkbYnQFV/I8VXtATsaIZgbLgL4HDoFZTfquNR0iBqeaSAERpWjgyMzTXIZx/vqbZ2cX5x9//LESX93tiShuz0u+EZEsZZynEGib+sPh4LnJac4qWQWMQjR7vDs/Ho950yF1mXDQ+Pq2nKlyf4Gqh8NRsg43d4Q+fAkdPDlvRowAmDBLr7QJfVCVY+5TAhQlmcmAASPnQAXzo3gBACI5IDIZopqKAacuXg1DoV7S7s2hvLy5HYoZb2U/mIhM8/54PBwO20DvP3300dPHuy5Mw0GxTo69ur5+e313dxhDvz0Wx/rqWCTGuNlspGiKUURKMVMCQ2JGCKbUpYVRXRUEkRAMTO1sc+55ehEg4rT4BEOelqnHYmDEpqpzKUUzESkzETg1kao6aY2qV7/VjFsaIIQwHQeN0ac57493vr8fvv9BICaar6/v7gZ7H3fHMeWZGAvBkPyolT7GDiNbukp8awAAZ4WevD7iE80Xl+HdVb66lv/Pv//rP/uLW4ufHFUnPeZu2Ou0gwvmAAYg2FFnZlBgiYoNUdSKmBEvc2iXOKdNYEJsLlEDEZ2Y1vF+FNTMf5PttTfgiYGTg4sesmtZ1dwIEAFNDdXALGJQq5BdH+LCaIEIMW9Sv93ETUrEQM6MXaQQhRAU07u7/c8//aP3P/zxf/nP/2l/N4cQONhmG1DGFMOu3yam7aZTFSKLqAyFtXA0rNO5WBGLGSFuOo5obKqSh5m6GN3jVFXxNUHrUy0nTtMEZqFLNUokYwYOlhLFyNH5ldRS7OZ5VsmEwGCkJWLou9htC4APPsAQKCVmJFLtgiAqqMbQh7S7Ps5DsZi2N/sMvH13fZjLjBQQ7bPPfveHP/rBz/7lT589/6rrg8isKrPOFGl3sb0rE2JpTptH2SLmWIHF0EOtbhi0fqSmqWjxlWgpRsEKZYpIJiAq7uqZGZHWSpdni9TyirMA+IQldkvgLp4ulQEzyFlKCw5tq1qIVKkM0zV4NI7IQ7dJF3/7y7dn58/+x//hv3t0ttnfXSNvdpYpBpmmLDwOZRwhhjO0UIS1gKgCqGBBE0QMhJC7+Th502gue0SMXVBVAz/f1oUoInmYso1EhAHH0edSOq1mNmPXJF568qV2J83x9swMYOM4egd4q+811BIiciAyLKWkGFVbjMeBoZSS5+JMNggJgaQQUyAMIlJGU0W0YKrDdHz9+vVXX3314sWLu7u7nHOfYghh06XHjx/XkmAI3vhHFchTScv7bfUbY8KLy7jdXR4Oh8PhIDfXAUOFaqfEzISMiH3sAQAFyep4cVcFm+QVK9RCFneFz99SN47brnufl0HzRXQcDjcv3r58+fJYynb7COMuW0ycjN2RJTMrOotZ7FKMse/7rnddWrG+m8226/ppmrx1cAlIYkr3ytFn51sXtr7vHz16dHG2jYzTcQghhJoxAVQxFRXxIpwHIbIMiCcKbgpFRtdqplRyzaN1HEr2Cd7KREigWkouBObdM+KthIEDJhGDMpkB0IkuvmgWK2a
WUkIGsVJyrqpRwcCIacz743jYbDazhnmcYoxE3Zi1+UUq4L3xlG8wi2WceTvRZur7qWwOGbnLanc4D4EwYojag0VQom1xsEPNXORCRAyg5qT1CnUoChAYkelCiW9L3aueZdFKzk8nNlFTY4qES2YToBVWm4YxkwWUCoiAqGuW0VK01Q/8AUu51+cViAJA+6xK72NmreLcEjVsrKzYNR4RQCSAU81x/bkejKmagqM16oxRXNCG3tKTUjo733rmvs0n9VDQ4dQVm15WYTdFr12LCBJ5MSeEQEuctr6NdQjnKjoGVlWxhcqfa5pwGAZw6gLHQ4elb2HRy+0DT7H+6rUsEnpCf9ndWp1bHTB48GlEpAhEbKQAwBRLLi+evfyXf/gz6/VuuAPEgOF4PPbcMdIjfvrjD3+cu/GNPONPHl/95vVr5bMQfvjk8UwKKDMUBUVzog/gqC4iXgg3RYPFtvhtq4F5j64Rw3Z7LiLzlF1TtC1zQW3y2hI5y4IjrAZiImIbctp0dLVP3mVHHEJw2kxm8iaHtix1fJSjIgFSFwFkHOcs0jim7u7urq+vL6bAMSqJKq5bsSseTVXViAh57kMgcjoN7y4vDEhLedlfy2NiCOHJkye6kJK5cDoHiYtlSskxqNM0jePY5rHCKou/TswgIi4NnEgWcKFlN08E+1BBVwRlymW2IXChwBBCDB0ATKVEBmIUwLEYWCEDACoiVMtKIOZlWxCwnqwoiKgY+lhOERExtwHOJaRACCzGKgYiCoQcEUjawsVYhqKqhhiZkYmNjRCZSm0Q8863e3DitTu7PiNNZtrPrnPsfqGvnYsH+qR9pr/WP7d1fnDEbJnDYwAA0hoLgVDUnr96aYQK8L3vfW97fgYAm80mHw6O21HzalUhopDYh1uaGQA6EtjJA7DqVlvkBJFOtGlo4GMhPbBBxP1+v9tst9sNzgJzySoBQ7cJOc9ISl6HZguBKSARackUPS4CVa/jgCnIXDgkC92xiGvOXGwcpnI3IuIwTcfjEQAuHz967733zs/PnUO1mB4Oh3fv3l1dX3u7v4tuWzddXj7fyB+ZlmYnRBSrFgw8HrJFs/rczgpnrfA1cwu9EOdafYa6Qb5lRLSaWeOfb2JKWj+qqin/65IVIegJYTEMAzNfXFwA6C28EajoRMmTpwIJAJgMQUVzzog8zzLPhSl2m42Yvbu+ffV6/7/+r3/+y1999fbt2wKMDAKChCEkkROLyFq6quguZH0cIy/zlBHXde6T8MN34r210ljL//pXzej4BcXcpzD0ajGYgvc3kgIYeNFmQccgNhJKozoLNjCGUJvhVTXnzIaRl7YIKwoqkneb/ic/+YmqPn/+vFIuW0ZkDKBAxJxS5JAQBMlAzDmQiCiGoAqmdTwJmREaE7AZAkau86mbAVJVQGhFCVhAN+AZ6wr1pBgDM1dIHvoAJWRmhgoidfRH17GqSlFEjjGmFIOhSgGAGAJxFOCpktsxBp720/Pnz2/v7jbbXlWnPL59+/bm+u6HP/xh3/fHYSJKRLbdbm/mu1JO0tu2aa2R1r6Bv+nlyuYetFdjXFzLgDXTdb9s1fJc7RsfyIl9h6q+3c/6SvOK8WJkAcDPGmYEo6urq6++gk+//+H1H32aIouoAQGxSPEWqTGXacrU9czxeMxZlvlViARKRI4NMTO3mB7wPFgrV+y8EJnA0vrUVsZd95b1O3lWzEQnppn2XLqCetL92mn7zFoYAVo2RZeUdwUN4cJyNA6j8wDd3d09f/HtN9988+rVq7u7O/cMN5tH2+12t9ttt9vmIXtE+mAfaXFUeOl1rD5tl07TvGLsUu8H3M+ja1qskZ74X6kIIodNyvMo8+T6LYQACtM03d7u3727vrq+vb3dD8MQdxV66oW+1pFYKe5UmdnLg34BMzeEl646Nk9VtdULANQqwiildHZ25osgbLBCJegKguhjC5pvhqfePGxO1wItARHRVQ+OqnqXqK3okcxsMfX1S9fCgFhTM00Drx08WJBcnq1r0theTUs3KRrH0QzRTtKYVUoBRULwP1dz4n0VNHBkbasAtVPpCYhm8nBxnolxfXvtJj3kaVvQrin3uWHWf/vgzteqY61/2rcvZ83appdSwtKMa/UDoflVuqKxMUJywwcsIuLk1+zDslDV1BlHnCbeP0tNDVDEgwEGcLYSgFX/QC7Tfm+bzcZlYj0pkoiIgtesc86SSyklxb5C/kTMLMZY5tLkCVaQlUYfDACAoAt8tyzjj+rCeRuPat/3zBy75FqsLBPoaIX6aBKm93vi72k0qnN15nkGqBDqJrKONLynzRGLSGDmmAhwkzZ73b/69vX2H79870ePjSxuUmSaj4OxTfPwePPppxd/YHF//dsvPvjeJQ/5l3/9zXAsBvz0PDw9Q05R8wiSUamnUGgGM1AToXqCkBnZgNhUrAAIgCJpWCi8SylIwIFEsE1N8JwNwMlNXzvxyxPhgiU+IW/bk/qmi2HzLwFARWbVYvNut2uXLZkIKaUYlb7vFA7jPBUFMQYLRDyN5d3bmw/Gi67vMs7GdLZL211anRM0M1PLORv6QxMHNpMipWQxH/S7QK6ljTGFRpaFzO5kdJvN5ng8Ho9HIFMrc1YpFbQAoN7UB+jPjk1YEFGKVJY2JAQfkFCZ6NGUAZEUkYix7/oupmm2Kc/jYSx6RA4cAlEwwoAUY0xgk9cktAQkZi6ojdV6LlrZ2gwmpSILuxqcTODdON8T1+WlVv3Isrz8mvE4+VzHGGPoEjMjAxmJaa4oBQQ4KR37ff4uLJYAVla8GcsH2rCZ8LWma79dS91a1681HdxPBv3eVyklhPT69WsfP/D9H3ziGsBdVVk64qBNznSlrJWjLISwOz+/3e+bQi7qc7TMA8Jq3bE1D5OLvWYAIKZIKYiozGXCAoaAxmDkU0mIAwI666oUIMOAAGyEiAkJAfDuMIZtj9xNx+NxmtUMgHKepnl2RZpz3mw2nskGQk+6H8bh+vr63bt3h+MRnd8vl2Zgml4tMnuCEwAak96ytmUNamr2Zt2rgKsYxtn/mmyYIpLFGF3AFrNdid1Vleshqlw+D4yfFADI/l2u4YcypL57tHmcUlCbQ0hqQDHkEVQBTIlrjVpMZykGxEGK4OE4G0YK/eeff/sf/sN/+/P/+su7O8kaKYVScoFCEfkhMA8eCJXvafMI3eTDfUd8Lbe/99O+K/y/9/jUJb3vPay169o5aMdB0RQdJGtO/sgcQqg1ihgjoWL1aRbEF1Eu5ezs7Cc//sPj4e7582fMmCjOU3Eu6VwkixoHAQyAKODnAtXIANGIIIRATETICAEDo7EZE2QAIv+fm8plc7FCv0JI1cIiuJVgrvxt5MxlRMTYJksx+DTF2PUxdSGFUIoAFUJOIXYxMYLNNTsIqGKiCkjEyCJyczd+8dWX+/1+d7YlIhF5/frtF19/9W/+zb/54MPvffnl14goYDEEWygZ2t7Z/djvwb78U1v5e/f9pNDo5Je3T2g+dLOqa2n5rkyub2YtVEuOB51MGwjBW+nUbm5umOzd9c1f/t
XfXJxv/s///b++OOvnUiRyFsxmRW0Y5zlL1zMgzCUXJR/WyoACwkjG6BwK4zju9/uLiwtX+41q9fcuhS7tFZ6eiDE2XEC7oD1Ig4E0teMqyxGba/2Pi7vfghPjyoLbbkZWHOPN6h2Px5ubm+fPn3/zzTdv3r6a5xkRd7vd2dnZ48ePz8/P/Lw3Y8ErFjpaoSLbMWyRYYyx320Ph8MwDAgUQ/LbVtXFDz8lSvyvQgizZACLMcVAhSkGDiG8evV6PA53d3dXVzdX17fH42iGMcZNOL1o8eB1Ic1m5r7vt9vtdrv1NqjmJzdt/yBgaI9Q5QoSLuPWuq5rYd4DU778OWod7tPyv9aQ8+vj04RcTzh5WJIpvlPLrFEAgHssKVSp7219274CuIpy/a58l2uf/IrvVFevtXyqKlEIKTKjmY7juN9nRrt8dO486T5hDUEACYzWPsmDWBRWSn6V765Y3PU6mBnovRBu9Tn3FM5adeB3rNUDLdTKNk0qdOnSNLPKxXg4Hpe/Vk/O+jrwgtkVEVBz6CYAFD6N9CWi1ih/XxVy+0qSLnM14apqVjeemUXzdJyOx+M8z7vdRlVLmf1DnL9edW5tCb61XrDyHfUUCxCuvve0dm29kAkBVNuKr6wvk4e5EfHi4gKgEoc0ilQiIg7fEeh/ztFcy7q3ySFijFEqquc79ROmYRp77PrQgSJDCJbGcf72s2cXH11evvd4xsEki8gwHwj44njxuFxONv78Jz9+9tXXmx9/7+3rw+cv3hxm+cOPnv6ku3y660BF5iko9SEJZPB+V2MzJAqETBhUtSpJhoRWzMTEzIbD0Rc5hoAJTMUMmCjXB1czh0tXMDQxLaLmboFr8JN3uG6UAFDGWh5MHFxI15y8S7LIcwruQ0MIlJ2/kNKcSy6GEM307vrueDv0T3qCbETb7eb8on9w8KySWJR5nkOKyNWiE9WtaD0JsrwIDRHneV5UKnp20JVgUfE+w2lJ4bSEX1Og6+MA6lhZ/2cdDWsApUBEsMpxqAEppbDpO4iYj4d8LPvjMJcjEBqyD3iPzgEGoEVAlBgYqdHcy3KypJIzcalzHrwT+BQ+nWTvdPDr4mMtGsxa65aAxi782bRDSCk5YW32zEtlBjopKZGHVr85MWudLksb7Upb2VqnPzAtcF/TPdCwsJCJPXhfVZci072suZmSc4pO04sXL7wF4sOPPvjwww+BuEgeplGLxcQAEYBKKcgBqAbAItp13aNHj+7u7o7zSI4cA8cPE+HJHSFmEKxGxcWJdqJ0nOaEHFNHIUAuU8l9WOY0E7BPzjZFMWIObIgmpqaEjGYkGDBRhjjM+W6YDuM0FZyBfJqwi0HXdZePH52fnyPi8Xg0s9vD/vr6er/f56UlO+fc5kS11WsGElat1ADgnX5ezWsZ0PXGtSOgq6Ddp7c1/amqaNSiTVjB762iTtZUNARAbsMaTtKMiE/5uFFLSDGluN2dX4yPYlAAMCBxngADH/HSZCAbIgcISTDe3Ay/+s3nf/kXf/uf/vN/LSUCdV3XK9BcJkCfJ3GvPLg2rr4srq78kRueAhsLyH13f62XmkDqd9Jqa0H9PWL/nbgR76dCHh6ZJQepAAxg4FOXyX2mPkYkAxUkIzO1whhiCvkwvff+k/Pz81/84u+naUopdJsOAJAZAI/TTESp3xrAQvoDNXRDMAtmSATEkZmZAKEQGCEjYIepBvMrIID/ec4Z6OTroHurUntUGNDAEfDEyGpKRIxGRCFw18U+dTFEUCNA5zNDDxeJMMacp1KKzKVYmDCE0CuEwzi/fv3mzZt3MbIBIoWzs4up5C+//PrnP//vfvzjHz979mIqwszePN/3PVHG1astuN7PGjeBWXl+9c21x/lAMOw+H2BbiuaBrHMuD76rfeN699vatgNeTJGQgJzfsH4+2HGaH5/vispnX3zdpYCI/+KPf/LpD78/EQlygXicy5QVQyAOhymvbZyoqhWn1iDunCLI2/jjwkEI940OrtxZXTovVs5eHVX/4DK8Ny2mBsYPrmzP7sdznmt7CwAAVZFr1meoA4SKJ+5FZB7GV69effnlly9fvhyGIcRweXm52WzOzs68Nth1XVNTS93iHl9uk94He+FijxxUDOzUYrrEVDWgaorOP3waj6oagqtWLgD7/f7u9vr169c3Nzc3N3fjOKoAInfdxtku1lCpFg2CE+nF6LXNFr81K6xL0sEfZA1fvB8Q3uPbr06Fom+0rurY/mrg8JWyCut9we+gSOq6eaFJHXRSI5Em/L6JIkIUXb21Q+Z/7iDetTJsPzt+eL0CD05oSzqAs0qDgxVQROZZxxFj0JJ3GDFQQACQNgzXKtiLquUCMERuK70MRiK/KWY2y2t5btLCK3rL+9bhXrNMk+0Hy74WSFuaidY6oT21raJlMwvDMNS3QbyWQODmA5gRvXIiq53jkxNgK+PUtBwiIpYmjiSoqvNUvL87xujwKlUNIWw2G9XiA7adzINTVFWG00nznQshWIEW9/vFpZQQYhNlWo2CAAAncFuaPg3cFmLFuBpCIKxNsUsa2+4H8cwsRVpyBX4fD/JaMwIAL+mKGCMA+rl6wKF0b8dCHPcjMvW8MQPNGLnrgxyvhrdfv9uefcIxCGoX+36bfvLjP/yfNv/zR4+fPHs9fO8nP3v2xdfXw92/+B9+9l//099+9e71HIolm987exRSAAVCpILsgRqqCUBAcOp09LbHSBERyIy0ZPWwObTH99U2sxC4FWb9rmtrZ5054TMMHSHm7P+g6tQhVSIIjdAIiUNMKfUxhRDITxAAoqlIa2ZTVV0ASJwAyErREDco5XA8jLMasCnfXO1vrw9Pck89AxqSxoQeF4GdxjLZwpEF5PNCgJmDY43QdJVzXRsbvxMzKyW7e01Em80m9Z3DRMucZSFNXmzSw6wEVAwDIhqhAaCaEigaESygaxFFE8loQEQYYog9pcmmOWcrWQEVmHPJANn78sjA+TmwEozXOWNm/theGVAfWmoLUHJRK/eAne0gMyMujJpaqfsAFBIBp9hQKBj8FBQRUXDS/cbk9BBH3b7iwX/Xr6a1H7gvzcF6YFlXiYZ7FZK1RlpvJZ5ynM27MqeBHsd5nqeU0vX19d/93d9dX//g4vzSzFLq+z77TIJhGCq9q6LpAkwViap93+92O+NaIFWTosIFI0ZiPPFgiXu5S4BqOM5lNuhj2HWpTx3HiBKsFCBDQiIL3hZlwAApMcVAhGomBmYkRpNZd/bozd3wbr9/c3O8uTsOs2azYZqxgDuvFxcXT548SX03ztPhcLg97A+Hw93d3ZxzO9eqyhyb91AXnIw8Bl5NZ24VvK6LzRNam4D1bjbvAarxu+fvInrRiohrmsAVrPsKDz7QzNx4VY7reqsoICqgqgGDH+1AtNltIzqlExigAIJaMS2qztbDzAUgT7Ni5LT74puXv/nNb/7xsy+LgBGZT5RUNUPmyGyleHblntC6+LnDsaiI0hyaB+Z5HSesH+3+A9775CbesIKWtIvxfjzZfrs+O+tTYJUKq4qgwyzjMk0eA
BDNUH0qLBozsqkQ4Z/80R+Z6a9+9SteQE1ASJRAy1QmnMo2C4cOjIiRkNkIAquKh0yKBHU4BIABmzAKYeC0aY8jIro023klGRacVIu3A0ErxfjDel4sZ0mhKvQUYpe8IwtJhYkDBjNDlTJnCkxoBEhgipRFZlHi3ohLmV69fjvNc4i7w2HgFJ8+fZLzdHV9+823z3/84z/6b3/5N4c373a78/3dMfWdTb8/GoQFirZ26ayWXB4iiR5IwvpNM/suL3r7KLgf8tkqfdMu+723t/7z6uSAomTzehQRAYXE45QDG5l9/c3z//T//bO7u8PZ7pKfBoF+zPu7Y86KqdsZ0vF4mLNJJfLzoboqaGRo5H1i5I33zj/n7lC7T1xqFESEhLJwesNScmeODdSHS47JFhiCrlBztgAUW7WAloTjOgG6bM09fFPXdSK23+/9HRG5vb39+osvX79+/fr161LKdrt99OjR5aPzzWbT8CMtOOKKom+5mHqrrjYfQC5djxFx6zrxsCRncQZEovX4KL/ezCznabfb9X2SPN/ubw+3N1dXb/d3NzHG/X4/zzMYhUAhpH6z8WDP13ytVfyOU0qOFHXlvygrC4EBfBwfrfTVCfUKAIiwUDI3wt7Qyol4mqJ+Ejw3tXQfk+kKp5kJd/lUtJFL+W46UHHJgyxpC1VvfPDFd6RriAFOsPj2UCf20XV6xW+yBYSOPXThCavRI+7X+Ylm0xiNMFBEAyymaqxI++Ow6yV0XtJwtWwI98DMa93echC2pIRcfkQIzDw1492kPtrNmTtMTc0W/w3MgOjeqW9rvg4x4DtmhSis/wQqGacBnHrx/CaDnAC5AuDDiD2OUkMfG20EHkD4pL+EiF4XUj2pNl5i33YAVNXIRMxBWQAAENvhZ2YOGEJQLV3XpRSIIOd+LoDkY9m6yOyGFgCk1JSPW2Iz8xxD3ULTyvq9CKCjjxWMvQ5jpggBccpTCCH13WkG6NL7YQgOC66rs7D5rRVQC+W/mxGsq89kKoBIgUU0SwEAQ/DmUVtVw+qfExpCEZulkNRhjCn0x2H85nfPN+fb93/wqNumOWOf4icf/+D7sM37/fvbp1d37372J//92eXXX339/NOff/Kbv//s5f5q/Md5f/vhjz949HTTh6SSRAQr+TMQmFfMROvEZ0M0Qh/zQsFAkfo+5YyllJyn9oyqSuSI33tSiIhSC2VVdSOjQy4da+pCX8fNMxNRdBRmjITovfMIjUa4TqFUVUBFjw9NCuQiYsjHedofZzVCCARyPEx37/Z5erzdbjIUAEnB9iIqgqpkRMiI4hi/cRzFVMGYOVDN1aiAwslsNG0ORk2GtXKxaONWZebdbkc7NDPHxiDi8Xj0+Qhr90VVU6ymjpkAIDL4DVT6OzCzoqIiUmQuEomTa/c4CxeYywxIQJy2m5UoEmKFqIMUwDqj4sQrQeRdXIZetq1iBw5VPQHEDXywA4Ax6+JSAGtzOLLmkLq03fi0LjPLOc9zdmR/jd6xpgbN1uWde7qpGYy14/LAv9FV7nydU7RVGq/d2IPP59WYkHYNIpr+nkO6iPQCOxymnCcASCm9//jxe0+e7Ha7GOM0DT59mAMXmfp+OwyDK8xxmojCZrc1tromefK9JCEA9tSKc+ND1R6qCqAGBoo4i8o4joQd04aJAzIAECAjE3YBAxEjAimj12oZY4K4ESMpcjvZ65v9q6vb/Tgf55wL6Ar7kFLanZ/FLh2Px/1+vx+O19fXqqp2omtzUfGpFqclxVqIaCAWM8m5jYkwj5Obv742om0rmxtnNa17Yri8F0la7SOiBdnFzGU+DWKweyOSyD2J9UabYuiCgs3zTH3X9dvIRoExJsMIWBQKCORSCIpgAMRiZNztx/LFNy+fPfvqs8++vLmxs7M+z1S8FYKQqBJplaKMp16O9dM1C9qQZu4kNWXSHqFJ+z0zcT9B2z68/dX6+n/q5/UX6apvB+7FCWigSIiAhB4YETD1287zO1JyyTMDhEiI1vfd4XA4P9v95I9+tL+9/uqrL/xhiwJxRCQxU6JJbJglbUjEGBERkBjRGADZVDXEpN60SIZGqMaAgSkbtKprtQ5mZsZEzbKrKqk3khPhyTElcO2KPiuNiBiJCGIM3jBFiCRIFKByA4pJERDvZAkhxNDNQ56nGeaizIdpfnt9BRzGPJerm6K6OztLXZqm/M03zz790Y+fPH767u1V6jvbHwFoHMe13V9vov6+LhLX0A9cN1zKHScddT8+XKvE9s4/Ixu4CkHb0VvXau4JJACCjx1CtKU6ZAZgaoYGInC9H/K3z0PsHj9573/+H/8VEt8d8+1hFIhEnOcyTtNhyJ4xjjHSSsU2OLdP8vAhdSLyXYfVlvbR9iBtAU/+AHN73x9Nl3Ji+yes8iCy4g6tFaTaPQimWnmOHbFjwDGEFIHweDjuj4f9fv/mzZuvP/vCzLbbrU+Zd2R110fPofhdLZnrGgeuN2UtEk3Hti3w+3TEqYg1Fjcz8+A5pR4REUvLvPd92m57NXn79u2b1y/H8Wglm+Hd3R0AdV2nAqqATN7b0pZCVyVlP2t93/d935p1/UF04SeH+7DDlvBqT1eP4cIs3Tw6RPTB43CyEQ8zYvfFXhHRvanlbtUlJMao86wLBVHzHFSVGFxemmDQKvm1PjhrGXvwT1V1lke/Jc/mu2S2gqGjydo1MVbqriKCoIAwFe0mvL7ZgzFTCGhFNIIQEYASUyuf0gIVbqHsg6O61tjrI0BEtLT5PHg6bFXZ++nFtlYPHlmkhv1wX8OoPjRSVbbrbZEhMqCC+khf2Wx2RMSw0JUuZrvg7iQci1PqEfbqvmsVTlUPelf9oAoErcJHRIjgx3aaJk8DxBinPJ9udFGgYOSjnIsKzDMsxDOyMB9Qnbpj6lHLSjQQERCMkP0kQ991XYNQF3NMeSFiXYF+VUHUFBUdV4roLOc+UYQo1LYxAKiDdOvNysIfU0qRZfTCyorc09FmVlQpBiiYi3YUYuxQcxbYxLPhMLz75urR44sUOR+z5vLrf/iH7eHbDz789P2P/wDy9l/9yf/06U/e/T//3f+DOv1h/uD5765fPjvIeF2O4XuP6elleXQGKJ4uQhNAtACmJkUKhQ7MANBADIwAFQORufiuUMFgZjlPHlY1UYbFZQ9UOYvB1lhzppp4qHDzxhvBiyq1IqXMtij0s7Ozelq0+Bw//wsnrcsqYy7vrg/7YeZ0RmqROhgOV2/uDlfD5qJDImLY7tIBswsMETEwGpipqeZSKFSyaVAvfxOo2TKJBVpQ63S5C08MLdww0zSFEObhSN4OHhMzbzY9EfZ9F2NwEwQAC4usgVjkmpthxhQC4ikiJTNSESJV8VKpq6ckstDtMCnPaiBmJKqQ1RhIPVenpKZmhICoXqEFUENTFFQv0S45CGmJG3QQxArY6S3sCoAGS+YCl+QOEVAIyCxmJWdXbU7GC6RaG93WHtLvSY/BgpnBhf2laQ//lrVvtJaxBzrL7veTrN8PIbb9Wn9UmauLoHqP28BMY4yIaZoGEQkhHo/j3//9
tc2DmQBZPNZt2CIF59tSkTjznYueMtQ0k52Hz++NxE6qpnrk0/XaQbA4KhmNOm0PUODxfo9ULTuQ/exjwIxwCDM6V20++7FnbFr92OACnY99MyIiIaRzX6zWlvNvtPrx3j4heeumlnHPTtKAeQgR0RnRPo/xBCJQYlsXq9AgQKUseGTKGBLmB+PYHD5t2lT798t2bZ8123csoQt3mZBSi2BKG9Y3TzfZGFroaGJp108Z3P7j/vR+/8c57H1zvRiBCimOWRqNnpLotCvnVwz2SQ6iCZhOpaZrt9uT27dvjOJ4/fhqoSN7CojU0/uHDEBLR9ttU3z7lholISmpBDoSNtnCsu0dEwLWls0dwv738fJA4BHj7/vDQVdilml+9Xq9V0aK2q3aZyfCnbYEWusEnkLgFfbOFmP5jaVHVCj95gbwutOAUVCpOjVdX16oQ7nY7bflANbO667qTk5PNZtN1K6sio0rFMAzqUcw5awkZFT7RBdtrWRrT3Iy8QLXEebENnAnSjJ6TVjAnZZ5PSXG9Ji3HSq57IRzQUgDIuYSA1TwUZIac1ayJIcS27ZTPAmDOjFSeNQoslemjC22dYNYzozpbT2Y9UDFzZucRdXRbLXRUZBsE0LKIkzwjkHO1CSoAME9lF8gsI3PWZnBlu+cVQkTcrE/0Bi2hqfMZhsE0dBGRPNndcvWXiqsaqrvn+KkqY0dYko6P2mgxAdfl55odmpFSRETKWVLinCQDImchRTd9RYgAXBsAepTxCGtn5xFcI+88u9TPo2s/5lUejYxLKWlConUjNMFMB4Sa3BhzzpylT+N+6HPOggAkKKjEMwOTGrElqA4lSuoLzdeBJg9syhkAuq49PT1tmkbtRp6nogthMrDzKw+1cwjXcsMGCrYGGy2EYAZO42pSFUUAMCO033HbdI8zJrzazXqPF8Tt1wXyHLyCDP18zBI4+mj4ljNX5iGeqCr+VypQ5oJV4J7NhBClAk0gEM1d1SnRmPabVZv23J/3Z6szQX72zuV3h+9tbmzaF7rQxN0wdF3XQZf3qWmaIbcSAjMAp4a46SBhvu6HHvKT8elX/snv/PWzp//xe9/97c98kbjbdut+fKwEjoiYp6rBiMgISTgGUg1E10VSwoxEcMzCRL2E65F/fu/Bd3785nsPn8Hm7HrIQ8pd1/WpD3EdQEgkCGRgBAiAtaAbC0Fs29Wma1eNRMjElIiZBUVQYhObTdycbnYPeyCRLCzMwBIIS3lPNlpsl8mv5FK0CVBd2Ol5FRTyLO7XaH3OGUucntT/dH9C3ScIwWyNAq6lp8GVI1VL1ouImk+43W4RUQ2o8BxRAFSem1tYDNr9+LYPMKdH9tJDVlEBG2wQ8rEoshy/zA2PT1Wd/AY2PHf4eGw6JNn27+J+j8g+vNaLIOYRXYzgxzncq6O7d/j2Bf0BRxBsYjivRM8HR+Nne7h8mGkFS03p6IoW3xxe7CIaQpj5ghZk7XBPDDzUEOO/sXh+fwrMM3ruh2IXV1wnE8gV5pU5Q/U2e6nsX9vJiFOB9Ik5VTd1YjbmEnTdlx4pjgLk4vqEQ1ks/BMuPzG/LThXJD5hMosds88ZJKX0/vvvj+P4mc98xkCuqPUlRMyqp82QseIRF2MHLM5RmzRwFmRhZGkwSpDry90b7300CjTrz5/c7CRBEl7HNudEsYvdarU5azenF5f7PffdanP++Mn3f/TTH/74pxe7IQNEaig0zKMCFQCqjxehmEUQUbVUm0lQAS6npmkIY2xos9m89NJL19fXDx8+DCFgdbMDAGB43lF+8hVD27ajqh8m9jRNM0Iy+wMRWZcv85AsUCDnDA5V/ckuTvkouPqrqH8UFIPOzs7UbYXVipdzEbRSThruZLqQhfbhPAbShG97tX5jTqcQAhyoKE3TKAsGAF+6k12Uo4oWIqL+Qw0TFRHt6xhjJApaC0cbylusvjr6QoD9fm9PaRk2LWwBFeapuuM088IvylGJZYKWlJI80YihrtdOEI6Rd64FV4dhqIES07M+7gNqXL1ttamO6ByJXnODGliHiBbzic5cS/MeZiLC87ZPJgxYBo0/fajcJxwzOgCL1qMCXwMPGElTb2rH5nlSADOTTFZzgw3bN50SuShTcykTEVKxrRtk6tHHGLHUlJKRy5kGqytdiZVRS8+a9U8FwPpTWabGjoJw05REUFSBVIr9iwVzkkCQk/Tj0LYxICJgSkl7Xut4RWrHSSFcIKk/C54HlNlxG0CWja0QK67ijo2Tc9VdcaoZwaU7K+kYIhJHzoKgRXhHzsVspLQSWFgsJlkYNbDBT1r/HwA05ImImiauVisr03LIgfTSaB+ahy/DnFNO855rw3pDpUeOA80tl1R7AvovF58NDvw8DT5gUiynX73lSeaSZQgBhbFiqS2E5i7+xbnabX6jjKTiXPhmJ+Q5OBYAENQkWwQoNWZFJOfUtltIefds31H70s2XY//o3vvn//Xf//VXvvnrL37xhUADUeDEY86xazo5CRGEhzHvBcZV0zQdNZB6gD0M7z3+8Le++ftvwrf//PVv/++/+vuP+l3mbKG/Y06QBAA4lTzMnHMIGFz72jZ2FxdXTbPe3ohPrvcS4Rriux8+/Ovv//Td86s+dMJ0PewxhC4GyAlRsEoWJEAAuYomiTMLUwDqIjTIBBnzhloCIQIJQAEixpObm0u4RkRBEMbqmEFAZJgsr0UeRVS7B7OIVVAsbdULcsLBhTjFThs5AyGEALCMVrKz8wBgQK4hpp42GbB52DD6nnPW7CztNrPZbPQ4EKeVejDjGtiTa66sh3aPAuVfmb4Rp5ixixRdYI0f0Pho5lkAp6F8mFvtF5vj9MyaolYfxANh148sTjBFJ6f6LYU50h092cOdsaf8vwCzU/O/+sdtf6Zf62EevhGrpgIOTvywtgO2mTJv5Gj/Hj5+uFK7YXF/LUNn5GtpYPYjg+NSUnj2BBjkXL4iYkZZcNCF7vIvMoezQQJW+RUqmHng8eP41TnYI8CpiM5ztoOtk+3i8kDo4erobc/7c7F1i0kenxFOt9m/Cyj1VOXg7cuj9zAptQmhiDRNo1Hor7zyynq91nYgIAKABMCFCM9Sy0zc9CMvpkeYCUlVy8wIEJAQEOO2Ob+6TO99RLEBjLdvbEPEfYZmcwLtptlsOXQX+5SwwTY+vd7/7Xe+/+OfvXVxuW/Wa2DJWUB9I1oED6CYqhHBFbmxKzg81ahFQGqa5vbt20+ePLm8vDyEnLp8pRJHnI2HGwtVflBR3hMH1CjzOawWvoMAc3Lk4eGQXLCL1bR7PAwsEDaEMI4jUNCJafBkTXAgjZHTEdLIEGYjS01dMZEdXfKSldr25nW9p2kaTklcSQVlWONwRbXYo0V7qW6mapu6Ppj5+vp6v9+PY1LHoCVUhxCbplE1r6pYxQA0DMM47tWXqAXY1uv1oo6xOR48jfLEaoHjXmP3d04c6phRzAaxVC5rX2Fx734+UrMETSE01doTSbufayh+5iUNNzXVo6cJlmrcWQCbzdwED3swBjq8TUQ0d2lhs1Oj37SrMt
Wg0rbsqMVmcNooi1Zd2P7spXqbJVhdX19ruK9V2dHLK/OxkVzzxtGlUZhtgpm1mJmCLcAkrrPKcQiIk/8WkEVIhIrVSYJ6bogIMTAIADJIGllEQgwqrTJqha0SdU5Agsh5Soy0g1sgO86Jla3RH3SoF7iuVACgPob6UwEVQ/Da9MveFUq60ZDTbtilNJQXTzWHS7CESGkn5zgKITLM6U7TNtv1Rg1OOWeBHEJcgI4RysOgNUVmQzNwOSp+U8RVEWDOPp1jAcd4yGuPaFNom+h3X++3xtn2jX7w9M4fYSCySus6ebJi8W6j7Aq4DOVCJ796xbL+NFl0DKDBBFwSWx0iAkJoumfX1+u4Obt9a+wHvuzXq+2LW/7o7ac/Dm+swupTv/ypy+FikNSs6fL62TbexQgZJElIzDlgDNTC+qwLPdODj5+88rmXfuUbvzH89L03nr37pTuvrK8i4GS8LGqGsOZehhBEcggBMuecAVl40+97CQ21Gxno6T69c//x3//k3Z9++HhHXQ+hHxliG4gkpS5GphABgyQSyiCkihfW6lLCFIhC4CBAzKRGoEQBOAAIY+TN6RbpgQAQSknaVyQHBIA0JNNtihUqKcjVc2fJkD3HXUCjiowhzGLxCWeuLXiOS2Fx9FJD+Iz0eI2IXZU2e1YfUcegFvXt+/7p06dqKD0cX6rkZ1zQG2VwLkn4efp/ofpOvRm1vmRWSAAq79GEusPLXgcONaDGNXkGU3bjmPi+2EyP2ofr0u/5wIO3OJ3FkhfHdzjzxSB0EOLrVzeRoIOh7KV11QIH71qs1H/2Hm9/dnbngsGIprcjigCW8pHOVlWCgIzsHIlMW+CF/VToUqg1wOaWhTBvTG+xZG3bublNu+ojR2z5KoP6wzKkM3zMrha88Q4RccX3sxYSru8VgCW0HD33BX4twHLx1CEy/v/3miDKgdniQsSFQoiIptuUDbTQ3CZeX18/evz45ZdfPjk5efjwERQGOtnLlZ/a4x7eDnGq/pEAI0DQUg0glCQAhNhSTunxxe4n73yIsfvql7/00p07nNPZzZORM8QuCV0PeXtyky93P3nzre/96MePzy9i17Xtqr++GscMADklCg2AHq2TcU1JBEYMlkBY0geU+5OcnZ2dnJy88cYbqnJwibw66rAt6LA4goUwIILDMKQmCQNRNDqWUoLK9hHDYvznkaOjl0yCU4H8wxv8496kAgCadKfIYkqdJyBUUQYc5VSBEorFeQoiZddgUNFZDcGoTtHq3lnsWKrN/aAW0zYvkN6jv+52u77vx8SWThZC6LquabrVaqWBr17nUary5MkTotJXQ4upQs3og0p8LM3MrGl4XPQqDlKqNSn0y+A6hHnufIgLUmtYYNV7La0aa0G+7OoP+fwpG8EkTxvcA4lxZKjMXZUB/xQ4QhGo8YzS2GJ2RQ1zTWQNIVjIov17CL3G/hAxpbEsUKaoGQ0WFRESmMzuJIhggR2WVagvMq+DUXXdyXSebt682bRhvV7X/UyIWtGjnAYRQWTOpaq2PmguaF1107TOjqF2VQwhJGaArJIOupYYKSVARtQI0unoRZCzsGDO0vd9SrhuGoSMIRJgKUNjuglidvmchl9esPf7rFvrj2nC04MCeHU0MJsmTmKJHnYBg6p4owjHzIwxpDSqC15IGBkBMyfQAkRlCwAAQZaUEasjIuccG+q6brXq1K6TeUREwKXipCdhjTUM4OqeztDSQ7a9EZw+ltKoQwEAIrlZLS1kXmIwYuQXAvOMFDnwKiwwk2rLHXF2nSa2BmT+2Kim9CxwUrsPyVzIsMnYQoxgMc/uLx8QWWMc9ZRENNkTEUNsL/dXGMdN1/QpB+BOujC2p9w+eXP/I/kpjfHFL97BVq7hsj1DeDIABWkRuiYx9xrHyNitNk/PLz73S1949OzpWYQ7X3z5/Y8e3l3fONkH5kZBjUIgRNUARViYVe8oK4UMAlfXA8b28nqQQXK7unf//t++/ubr7z7I7dkAq6eXQxbZbDaS+zwOJ5v1HokASYhEVF4NoUEKIoLCRBAa0t72QggNRA4jjwUXJCdJq01DTUhXKeTAwoycmQE4Q9amNIYqnpaZIWBhPLMD9TBmI5SIWSGiUqxojilLiQHnioqGQHtIMIw1288CDimEGBqpbSfU8grzy95S10emBBrz8FXFuOZLKDgZw/C7cYgR9V2zXz1TXKiOhsgLjgLVjG0MwO+GqVGOkE1EzV63ONbDKfnle6RTBczPc7GTfvmLOS8O2rZrQbv9zSwHr3MD+706HOToNOx73St7kOeuYPtXaxTV7VJiMqvYuVi+WRMXy7HBTYiprz5C9ERkHMcp4rAaPtFdC/oMLsHGNsRGO9wBdp5hsJKPrjeGf2pBfqGoOkc6Zx5enwwnbt+OjOP37XAyn3wZMbH1gsPHwxXVzzPsE2fhNToQYmxiTGnY7Xb379//0he/cOvWrQcPHpbXIYDGGM1n69dyeHYmOBAz8MgoIihAjMiCmWHcp7bZhNBc7PsPPj7/9Gf7Vz99Y7vu4grHq6tBAmAIXccUP3p4/r3v//B6yBgjUuzTmJMQIAEGJMAgklVaIR/3e3guVS1UOhBD+/LLL7dtd+/evTyVgNd9Pu4lni2w8uX6CirrLqFVpbRvjJHyFOyAiCX4RCtMqeGDjuhgiGjodxRgPC48j3zppXqaxvBrmU1EbNvWMNfuNBQzrISqBJZBqtgGVUr2JNePE2NMw2AkN9TifyCiyXtm+zMdBpzCptpgSmm/H6zezGq12m63Mbbb7Xa9XtsMU8p932uTembebrenp6fWV1BLs1jKolQ1z9eFPoRhcdYonNddMznNHAD2jcOyQoFti1RJCyG0bbtarbRKoj/KqoAtC5DqBFI1uFsJ9KJI1DRUR4FdzfzJzFHDd6kAWE2xnigVVm3QwlCJtFf5UhtEAQjTtujyNdcsUSYia66LiGoyTikF37MGS3xfDIGIYgiz0KPakU+3l3NO4zj0vYj0uV+tVl3XNW1YrVamsdOBz1z31ReUrrp9I3OpQErlkZlFgAGCfkOotK9w2DpHvbKgZMmZxwH2ex6HhtcRM1BtMOFTExfw5hWfBQLKFNs8UXgvXfjDtUEQMUZFUtV+a/WjgDlnwBlkiqCIxJSHQN04jn3f5zxCmZMwAhluMxTZgrQmjqEKgttxtTZpWSQiJc1HOml4FmgYBVOEIfkFew63IHZSvb3u4KcGFR6T/RvNBOW2+Iipw56ieSSS/1Vn4mXrxcwXU8Vjkhw6lc+mhPMygD4xb/EUFscggOZ26uuwZHsiImds1ptB0n73LAZq2k3KnHZyo7nV8dXHP3n63fz932y+vP2lLoW+OQn4KKUhpwDckEA7jJkzBAm76/Hu7Zc48+0Xbr96duP+j19/6XO3PswPXpOXAEDLRIUQCFGPqGliTknbsitC6dLGPYNgJrzqh3v3H33/jXfevv/g6QAYw56hxwgIiZGANKA+ICIDiVa1Q6oVDwGABSBQ08TQRCFgZKDQUBhEEIEIJEvOabVatV2XLjKJFQ5hkVJGs
Kl9z1gEtDHrPA4KnPrRUGAn4CJilTSmGDZj/0qgQ5y47GSAqr4vm4/R4glpXWicB/4FmfAEJddyZ13X5ZwXsnh9fGpjIHOOZR88omVZkhhbrzjhz77XtgTPw/fFN1g8VLMR7KLq5z9E6sVs/T6YbVIvL+V43X4xpkdDm4z/cvHe6Y1h6VP1k18ckMyb+foHfSiIzdDPx28gHFwVeGYJId644Ef2C5zzmKma9CLswj213EmYaRczVY1qOW/Fo8X+SzVp6yMq2eQ8C4jy93sjKNUiE/brInTfcoEWp4a43FVFCpnLf/4pnEuHC5Sx7cUDIfLogItn7cHn3fAJl4efT4BVO2s4riostW4jPjnnd95554+/+Uevvfbaz372ZlkpLEFxsWpxjBUACsE02IOkfU0A1GMWVCFMDES4ateS5PGz6/fvPfr0p/uT05v7/hlDyIlD08Wme/+jB9/9wY/eu/cxUmi7NTMPfUJECgDITcCEiBhFMqDaryfytdi98k2N74gxvvDCC8ysJVWJiNEtsCwN540XGQ5CcBdX27aIo+o8FUQtWm9ClpKMo65XJ5saGoYQ+ABsFpu/gMnD9eqX4ziu1+s8JiI6OTnRV5jrzDMjrB4Mk7ANbXny9sz4iKHzgmppUVAdp21b7QnBzJzTMAzmcrQIW9XZTBXUeFGpSpf6BtXpF0KzWq0sl4+Zd7vdxcWFqqw3btzQvvNSvXNGpqrAWevDV9LhN3NB2/VOTW6H6kZb/OrRzV4n87Qu3QdNfbRseR8pauqxf9xETSOD7GIrylnAZIM24Ta7JoQLeDA+yLUngX5vA9r5Fr9lDSeyQYhIc+Q891GiqmsUEY80ExRVCGGZHLZtbAxsbFZqFLDy2lzzMEUECHe7nW6Odk/Z7XbOguOin0giRk2K9ZvgIUclQ4AlJ63XXA0BLBafOk9ERAIW1moafT8OqRVphRkJQDSLdmrMa+h/NBDSDsijueGdVH5qrlQb085d1doKKQ5PWcuMmC1++jfmnDEoVI1ZWK22pSO9KYQiqk0SET+HCBLRarVarVYowsxtF0XCMAzKDmwocR2iyHnADPT9ar1aZVYKu9THqu7BGlA05eaZtI2OESIiudA+j3IeXj0oGF31FLYM5SLZoAbRQZ5JXQbczMzHjOUeSfxb7E9xghQiauoLOPqOiKrx605pXXAA0A5yYy/r7Xo/XqdxpDZcD9c00q0bt4gx7oIQn39w+e1vfefLJ1+89aWTjy8/ejV+fodpBJZISAgYEKCBJmC3XZ88OH/8yqde+hf/6k//LF19+KMfnty9y+c8mx5qw0+iEinNhmzlZDHsrvend25d7nff+9HrP3zr4RAwrFcPL65xdRrWG0mpH/tNE1YN5ZwliFY+hPo/2zHUE4wBo6qNApKJCLLuSdDgz9jFbtWMgSNERi0FByW0HWOEMJEDDYKv/VIXJx4AkRB4EjQ95EwC67w/r4XweUwW18rJSB7OA8Rlzoq8lc7wXykjYQlfMZhZrVYXFxeeuLihJtZlhMksDgtaiTWF7PDXQ+a3oGW2LTpVCpPAOse458mp5S0e/okIZEYfF9fhOD7UsMzkwA7nZyWyHMeTWj9y+RIKTT8kI4uZL+ChkBeXR2Rj+jdaTY5PXq+nSOBU2cV8FtQMETWkvyYETMswwmt7Aq696oKlYRWkbA4228lJPm/oulqttLHhAvYsymNxmehmVg8Ffu2SZ+dLNBX0wgOntNRI0QVT+AT9y5/OAmAW+7/Y8AX2LVRof7McKFTwnOMu+3gwzoJW+PceYvTh9/5q27bvewSIMbKkDz74YBzHV155xY5VuKRdqHSE4Qhw2ltKgwo3Pao1ohGJKaAQiOZT0H5IKIIMw+7q/Xv3Prh3f7PZpHy52p7kLASYGN5+650f//inwzAARiJKXI0peeScqAbsKB1eqm/18hCmSQ06ye12O47j06dPi/SMYFUP4Qjh+oRrZmXWeH5VaYZhSCkmIiYO5NQG/ccBKjubte4bHaMzHrw9EPovjS8oqGu2DqdMNHUjMIQy1xkiIgSV04JLERRn1cJq0LfLkNG8iDrgbrcb9nvtBW9I6lPpVC5X1lk7xJTW86YWIiJSJCLtYaiOtRCmPvK6b9pNu2mas7OzzaazMC6LKVX900dCmlRtxNyTQ9tVqu5No+deFj8qT4Kjmezak1DNPDTGpFeozS2qYADeN+CnIVXFreVzglE8g23z4mo7H6r1L6a35GkJNiWsNVo8EJbPTiGsECWA5Nu5ISIJaBc46hoNFpUpSyWraWCarUw5ewZaUqUmwwWTtGeqAZGWAtpsNifbE203wgzazANx1nmCMI5ptIBEX8ncBsdqpJbaZXRieaWAMEDx8ANAiRe1SbYRpDQokJGzkX0NmjX7OU8OgFk4YRWdax2Kir92Cuph9uiv6LDb7aRG6FA15+WcRVQuVZwldYdLtTQhosXpFA/hOWPLcr4fno6Z4jqGMIwjiDQYMAuwpocRIwAiA0Sm4hkPSBQEMmBElO123TUrYGEl+iwpj0TEmQ12oTLynLN2VGNOFmKOSMxiSaWWRKTr1KJMJviaAB1CRCS1RlhGg5FR5lTZrQIcpjEBQBMNDnKuRTjKs1DlPAAE8DmEXrDwH4wsImLf720C5EzaIlO4rdT5N7VlnF/sIUEn1wccSVTpBydlShYC4sSscKNCFQMAbAPm3b4V7GglCSQwRH5Cj+OqucTLIY0Bm/078ef/7uLWb33hT37jn/9vzf+0bjab7la6xryndbdpEYf+Yt1e4ZNHp5cXf/KlP/hcu36hfeFh89n7z9q/eO3jzQ5e3DWfwRs3pZUBriXJJl7DyBE2q03e9bIfmONuHDGEk3WD63bc3vzBew+/f86P1meXI+778WyzDZKofwqSgZCJeiQIgVKfRUZmAAyAQWKTtVwAX8n1HvY3X3h1/eJpL/2W1mk/XuFV0zQAKxlkTHmEuP3U+vLsevfk+izfaIeW9txBgy2kIH0eWuyslUQIQUoLu1GPDRFLeR4EDnrKweFnAFaY1bOjEILaqpUARQx9ykb74EAOo5qNoPV4TassEkktxs01J8oIohGLjrRQcHlLGkcQWXWdMO/2g1ofDX602ZHWrRbRhGNARAyUcxYQNSIIgiCwlvHJM6nXQNQmY1hZ8JFARNipxFTNBIoLmTPCLKELsQQalBeV1xXthasSi4hEmNLkf1jgo9F0+6ni/vHmBLrboVaq1Am3sUkpJZfoT0RBfZXm1QTIzFHZf23+7tEcESVzpBCpnJpkJrVG5dkkjVIZJRGayvojIpUQIHDpWrMPWrWFudgLgJCFBQTIFYtnDiGwcDFSIpByM2HEnDkDABKCqP5fN4SBSMq5sbAkzhypMBJbr4lBRudt87XKn8kNIlb3K4ypV1+6VZ8r8xfMollqNLUkERGRnCQECtSklNLIANDEbugHACCMaEmDSQGGhLXKViktpkIEADZxVXcbAMaKdACzdgWA2ACA6adSCYKtDg4CNfH5HgZ/+S89nT+85/BXKRKJoaQCuRkQl/5nouC+ARFe
DLjwoQWRCNI1xCmHGB4++PjiydNPf+rVLsaUchYhCgAEAdPIiJRL32Cpnq1y0D4yyNQpEMmhqXjSQx4JMQRCwpHHtm0yj4Go3a7ff/DoL7/7w9RsXn1524V2t7vGDj6698F3f/T9y/6CIiUZWQvcIbAAECBihozpCm2DMJTMJCiCjEpFJVZKzzLvCVEI2y7cvn37cnf98MkTatt+HBkBQFsNMWQVtjCE6MC1pIGICGAhJgAgkrgKcE0+J4J9Or1/sbno0w6D4K6TkXPEHIkCBxEagFOLECjuM4t2oFWCL5I16e4APLjUmYymUAAgUemfPo7J5lOxDwAwRiCCnMcsqWtXq00X2yYzMwi1XYhRQqQWc87jkAGgaUJKiTmLTDGW19fXm80JaMtVCsJJOCPEGDoZR2zbtm3PTra7q0sUbgJdXFzkPO73V9vtqmlIJDHTMOxyzsO4LyYekEBEGJiZBVfr7dXVVT+kMfG+H/f9KIIxtIh4dnaiTr+Tk5Ptdkul8AzHBna7/bNnz8Zx3J5ob4mQ04DAGCMh5pxTFgAIRDmnJhIiCKdQW6YBSLNaqT4Jzh7ntxERgQACICAhIaGw5sdw5owysTZEpEDaSLaYvRDiuGJJeRwg524dM4/9fhBuJUPbroVDKZaDgSEhhlFGjDEnbuL6arc7PT1NeT+m8bq/ZEnI0kCH2BARZySKOe2tgx8DCgVlRsDCkoFKOyKuDVfbbisiOScRJIpWRHq1Wu33+xACkvR9P6axbdsQIkIrkDVwVNMNkqTK9QQRLUmyaRohpGFK2KugizG24zgiQgAEAcQmNDUmSw2steRSypKyZIaUJcRWFZoxMQuyIGcGSIRxpCyMbbO6efPmjRs3tCfhMKRh2COiSE6peBf3/Z6lRwy73fVqtWrbZr/vh5Sk1OEURBDgZB4ahoCQxgEjrU9vdl3HGSlEEMjVjIqIIUaMUQhkaFfdRsYhSW6F89VJuHkToZdxSDywZEQJocEQswAzUxBtL6jbnpmTZJDSyt6UAiIKFBAYQ9T6zJkFsQsxUmwEkaKKl8wA6hHCEGIUATJbUs6ZMzIyIGTASKShuFB6/ErOKeoR9mlkZuUNfCyrx66iiUVL55AYY9OEs7Oztm2ZNaVYdCUUlm4WrLqv1EpQzFNnDHb+ayN8tinGkv1PRhy9aQRqvQE+CHjwqwDHm01e98P61y0uz/hNGPICgZ+P3WBkxXZjMQ2TtLypyQQRxOL+W8wT5l1T/TyFi7TNzALAkJEAgHa7XdM063U3pPHJs2f7dz843Ww33eqlr3/q0fkzHMftya29pFHSyWZ7sr6zjbh/ev6bv/WV3/iVr55//Oj22c1/+c//2bvvffjzn31wGje0Wr1/79FHT+69eveVF1544SrtxiRqowXCHAkZkTm24ep6l2L77ltvvv6TNy4uLnoOYyZVVABEI0UxYCIU7QwJQqUjJ5EAcM0CaorqEGMkKnYRNUAEoBgjNIjEAIi8LGYrkkvBp+p585vvt/FQCsR58bE66HM8SFg/OFB53p+LZ+3VVPux2nvBSaIAmGV69aGpaTGBo69mZwg38Z2ZWTjgDM79JP39i5n7G+wzOt3MDIEzS6G7bBULr+liRTb4QjQ3TPdZl4cltE1VAAEAAElEQVR4t9gNP3N/6B5/7e3iiI8N5Q2c/JxIV3s1uRgen8Qvzt5smtLzFg6OpIiIKtvsLLJUc3KMRMi8HtWCIoGLdzVDpql8TWzEEWR/Xv6AFiPXPQGoDk8iYrEdcKcv0zINFP0Z2UH7nxY7LC7Wzo6SrO4/8OKnCj8yn22hq/7Po6/zoy0ef96dh7ctYPuQmNg3ixehyz/xL/Xf+GefNyu9lCOnMbNkFNjv9xcXF6enp5vN5uLiUo8wS0IOnzza0TciIhzEsRu0F7IjLEI554uLiw8//PAzr/6qMKSU3nvvvbfe/vn5+bnaBWBe7G2xav0IasCvnuc6k1rHEwBq728iunnz5tnZ2cOHDy8vL9WoYeRNwcLgxBvFDCYNVRebiSm1bazYN8kGx0oaT9u1GB+ef46OEYB/cPGnfWORKSEESyBUu2TTNE3ThCAANI4jSGJmztaXb6L2ITRHBxfXZM+cjZrI9/Tp0/1+v16vNbxTf8o5D2PvxTOdCQhdXV1dXV1dX1/rOEZ/zs7ONpvVZrPRpEejRcx8dX1xdXUlItvtdrPZaA90yVNoq8nWthDyvrJ6GR2ze+g5kRcGIuaW8XBoI/jw+xAjI0UMlChGkLGUolBQ1N0OIQCyJs5RQARSzy5L6oddGnqWnHNm0diKEUQggzARMeJUAM/iL3ItQ2pbAY66Lii5La3C2MTmcs4xNP4ee5CZVSG0cyycN2XDl+L7oiAigjkihWrACgftAETEqg0tqglQrVovIsM4YlVbmqZZrVYaJHx9fc28U9hDRAM5HU0Px4r6iLsW++DZjReo5EAiEhER2vVXiG3baO6dDMNwvd9HHFYdFgaMghQRUUsfp3FngxiT8iMbppc9RP1SdHyWNAwMFomjJRcLk0UASePk4TePMQDwmDSNyzM4RIwadNv3vX6bjxXn9ERcREKkiooJANo2ag80riEHut2KR5mnhdnriUiqHxmcq90L7oaTHkthTuzM+asTNle+wq4lmJrfXB0mBnBohhyyFS1luEPGrB/sRSZaPY8x67u8HwAcvh3ipH72RASdxFarwC93Y8F3bVgDMpaqoggyc9s2mTklRqGuaYZhfO/n719f7r72pS98/vbLj64vnj293t482++vz6+fffmzX/iXf/hPXj2702bIuxGu5He++ps379xBya///VWI+ebZyc3XXkzt5Rhpn8dh3wcUtdpypMQIjKISb7MaWX725s/ffuf9nmHPMjI1TSDUup+ono0mUA7aQqPkXRAgsgihBsljdTqFpqEYYszap95+LSw8gCCu1+tn8ow5iTR+34jIQu6N+kvJfUoLWbwwnnn0l6dih9fCruEP2hgGVEIARbHJWKNZDsmTMSdx3kKBSTK26FMjmh7abQRcKL0wgYrNSkRYOIbo5w9zCnW48MWm2SqO4pRHnMNxbDLi1AC7wSZj1+EgMtfWbAds+YbyRovsOLyFiOf9pvx+ilOujOMuZrIAEvvTM7/sKrnbbSEEl4U7Y5aLLw83xId+KSQYPysmlfqTP1xxApM4Q9uCfB2u6ygK6OZ5RdTADKo7y2DAPaIvwurvEhEWYCkFyliAS4t0p8Itdv5wVvrelBMetP+u8LMU+MD5lj/5OgqER2F+8fkQovy/RwHej2NferHGz2cxSfuXDxi9Psg4FUxGwP1+f35+/rnPfe7mzZsWha7HCgiMSzvO4QIPvqmIY741mVGSimV0eXn585///DOvnt26uU8pvfPOOz/5yU+vrq40r9zOa/EWQrPI6FvK5S01uqkeDUXk7t27Z2dnP/7xj58+fQqFSgDAbLcRayfG+c57mmzgbXgN0C4wVwhRUFi4ZDBMR3D0fD3qHVCD5YbbnYcIKyLMJQurbdtbt25tNht0UlkIgQhVl85JAEqxHAuCrYQ
uHn2FVAbkAw7HcRTJ+/1eq8IYS1KBfhzHrutUKqvbjpxBFUJND9MNDyHE2KzX681mvd1u1YtgPf1yztfX1zlnLSGjSn6MMUOyo7EjMxJnZjKTJL04h5UhkgswERfgd4i2dpv/1WNihhwChtAAMqKG/wBiEBEeEwiFiBREQMa0H8c+CYfQCIdMSCB5TOM4CvAwJJGMpFUuRQRBkEAilmwsWzXUFOsFjVUe53MFdZs1FGVamsz4oDgJfQFaUNPLxRVZTP0Ax8iRFZU0juB9Njqf7C6PC7bVOefMmavSi4hN02y325s3b26325RYK2VaWqZpgEoMuGbr1GnPDs7TWyKYgYqUJqtzTBRmDpFFxszMQonzKBgbOt2ehJAIAFRJyjLmpC20NIbI4ESK2CVtO0OxcsAAmauCLeohoZKBiYbgWWTq74HQck3QKzQwl6hJs4+ggGQWEUKMGEOfxmoSmzL6MmcAp5NIMatRmM4VEa3W09X1RdM0zJmImiaKSEpJGC0K37NMEYF5z0CbnAaCS7XOejO5Rz97ysBoMT6XgIqZYOeR1p+lNxoBLJkuOLXNvjRBc4Fmiw/+RA+xaDGyn7yhitEvmJjl9KAdmccZvz9qLQgRgYOIFK0KpG1WV9cXY05d190+vdWncb/ff/DevfF/ffb73/yD9mSThuuMEDaN7IYxp/46Pdld8JPdy7fvfuVzv/rx+cfvvf3magV/+if/4o2f/OzeR0+G9uzmnbMnT4f89Pz2+sQsH4lgCIAirAizPXly/8G7H328G4G6FY4SYyOM7aqjxMJZhJNwAGpCxEAZSs59CX2GoIkiSbIQCEDTBiKiGAAYCGOMgiAiwzAKY2ACgO12C8gZhDkhzDVttbjMjwUPDGlGNVIN6DfxHREDTaqX33+Zi3HLX502YqB7yGnAVQnz45Q/hdEV0zMC13UdHfTJlZqe4UdD1LTfskCqaQYAEOi5Gs4Cwu02PEAZA29vK7HX+RnaIHXnjuDLoWaywBFDHHAY7eeMvh6sOwtwUqMNaPkbi5tlLrotsPKQwhwlLIenSccKadoO+/30Aq6fM7p98BPQfAkiWsyH67UAtkNTpV5Wx8VmiNUY56nT4el4lb5sIJiqP1leEWeyl99PD0v+ZI9ei+1a7P/hmeo0j87f7//hs4uVenQ7+i4PhzAHmMN5epL+PEqyeJeHh8VQh/NZgLFeEzcEHIbh/v37X/va1+7cufPuu+/CBDa6IfkfKqqyvKb5CFl/e1CYV2qDALX225MnT15//fVPv/aZGzduXFxcPHv2DJFEYBgGoIhTSbvZyADaVYhErBjMgvXXJHtELaYiALdv344xvv322+M4brfblBLMd6bs/8HB+T/RhWZUKTyISBrzOGYfRmtjKszbnPyvh8blY2uZcBAcfhn1WJyvBq4zc9uEm2c32tigQKRAgKgymQARCQUNH+SsLXaLdJFdk4kFcfDoKZVFVocMeaajhCLloipYjRZV4VJK+32vzQMt+yvG2HVd2660II16/1QV1MzMYRhiE87OzrSLhmkUC4Og+UkOthH8IRrVonnoilQGvRC3DvHRdMgFAjJmBFW6BTAwIINwhhibmi2FzINIL9yndN0PQ9N0wiR5jGGFkoseM2ShjIxCQpQQA4VICCAkpe85EkW1noSAOYuW+NXCf1gyHTDlSSGsWwQiUhUnIiKEoABQz44PqRwiQo2TUjBY3IPzKB7V2A9plFU7l8qRtfSOp+T6rx5uE5oEZT77/f7x48fqOrbmhFabFJ18ZdqdvjQUB8DEL9w5gtTi7RpPm3kEgBIphtPC9akYkYIgZiBU910/DNs1rbouNhq5jMysUV2ClC4vqhG/LC2FlFIKVKELIXPKzMKCRIDJViEQRbJOu+s6AGDJIoE5SalNwF3bMDOVkQQAchUw8pgAgJoGK3cmosjMal9BLMniiMhVNvA6oV6W+0sEhpyK0kSkQRF6WhrkZpdUIaYi6oRL1pMU5lkZZp5ZoOUhSbVT9JTRP2h/2q82MQNiG9yTUZsGzNHech09R2dmqxLhX2GfwVH5Q07sL4efUwURgJIRjwe8il1/IayGf2YWGUKM1v8aCYE5JU79GDHGtm1ig4KNoBA3Mb737Se0+7uv/NFvvfrFzz7NTwYZmwhvvfP2v33/yRdvfOqbv/l7L9188enjx48f3D+90X3q058/Db/6hc/98p/9p//y7s9+nrfxzo3V9ZP9CWfJWQAySAoiEAgR+iQiPcQ337v30eMLaiNQAJTtdquV4jSiHGtJKmYmsl6mIgg4lVFF5sQAGSQ0EQJhDBkYdLtQEIUzA4AAsaTVtiuZJAhB44igmCQMeAhwcejMTAdKBVeOuOCFRw/RMwyZC2qmkKCTXBfKm6diBmAe1AtI48xeYCqlAc+CKR6dqgczrCL+J0h7Ng3/r/KzQ+C0+z/hJ3AqEDOHMKlGfusWdOB5WOwX65H3cBv9UKFWfPFjLtZ7dN8WE/Pnbj/5HXjeKSwmmV05ATtEG8fTPSOVxtSwGv7VMlqzmyZP5uIEF9Mg5yv2r2YXRGrr8lYMmIfCYqW6iGQVPitxsyVnIgtCs/+g5hwKTClqy89Ht9Gfl98lj1OLO+k5nkAPAP4bf8Qwh7TFaR5+ft4r4BdQdBdj2s0m43ricJRM+Qkf/smq6rEAAAMz87sfvI+IL730Eqv0U/xyIsrTn2Njfd7a7UVTgugEBlpLM1cYg3EcP/744/VqQ0TDMCi25cQppaZr5ICeYBVdcP4uRATU5AO/ISUFehzHdr168cUXh2H48MMPFUHGcdTd0rlNzzjPqrhLVZpDiqTnMgzDrt+nlBADCGRmxCAOSgGoljmYzTy4mmRHz9G42OFW+H9t4VpkQmuurFYrqeEPTpLR9zZNI0SU82hRJ1KiECVGz9dmE7aZqBY3DEPOoxGiXHsYCJToLRXfLaQNEff7vfoGfQ0b9fys19vtdtt1DdUSOBZPSEQnJydnZ2fm9RKRcRw1KBHnaefGVmxpE0Fz/sAFMNvJel6gn3nufvRg5qkQIkJA1gZcRIEohpYwUizNohAhc5/ybt3h2enJMFIWAYiXF7vLiytqEbjlWiUlQAgUSiJteS+Y7E71gur7hWrwtRStnLMyqDnMBKJJdq3QXEsYIFsFMr8blXRPyK5KZhBA9dsL2EaJCDUtARKSZrajKPhP7ZEN/n0CJ1ROl1Jq23YYhj7vhr74/cZxvLi4UGvvjRs3FGxUu/GvxlqK0tZm2G1rsfqUWHONfbylQYXhnE24H66wa9o2NA1Blqvd9ccff/zkCQDvu67ZrFdN18bQxBhD28UYb29ODcg13TvnnPPY971IUTEEpro4SYaqLk195xEp5bHORK1OehzIYxKR5OQQZgYNE5jzLAIggFgKIguHEMpBE4KZz1F9o0G3xlVQyERN13VtGxFxTD0iilihp1J4RmsH2yYabjCzt2yZibpI4c7fbRu90Am9H2CBnIaiJgPZjh+A/hEZzmYlRdCZXlSBANE1tj5KiGUegOfxx0tj6Pi3URk8CFGwwZkZEfwq/K
+e+tt7c84qk5XBhVAKD4kUkQgZNGm+wVZIboHc++nD1elbX7nz1fXdk931XgLdunXjt7/4m3/05d/75Vufeeetnzx4+OGnv/QpXMHb77119dHT3/8n3xwxfvT4fzm/Hs5OznA/3r94erNtGKQVoRAbkoAIYYSMT/bjz+8/uh6Btt04CAAGaoggpRRAmmpJFM3uSwmwxB/H4ubHnAVyllKhm2PTAAkSZchYPX0xxiAjUgxEMOB6vQ4RQWNggqAQiwgLhKm3gQBo6yo7Ox3KpwcoSVjIxwAAvBQo3dEUUAcQjfyWEpkGRGp/YkQI1fHufIFT2KeP9/OvICKoZfoXoOg9YB56DTvY9Y1g4UVZI2OZXrg/RBYDdZh43hHPlRyUHsEqNBiggkM923/HbKZQT7vZv8hsnIu5ieNbtmS/M4u3y0EwAs543oRfOoKXMzxiehHB748J7h7G/AxtbrYiPx+/usX3WA0N+qVJA0oEtM+V14TFyYLirHX+WM1MYM6NMFUnmxmDDd5MBCkLL3P2FooKDwGELUlS1JeyILaHe+hBzmbiD8j2Hxwa2r8UCOd2DoMfkSOUHOZoZa9Y4MUCruzBxXnZCB7qnvcumAPz0Wn4Fx3CxuHnw/ks5q8xY9r8STgj4v379y+ur1557dUYI7OwZMSgmdnmJznKiZ4zf1t1pXig5icEAISAWKr/hYA55zTCxcXFOOary12McdcPiNS2rQAsPLoGCYjos1IdFM2iZA1fcs63bt165ZVXHj9+fH5+3jRNyRdAOhT4PGYZsmB1GS2gUUQII4AkljRmLeLFCJIRCQUn/LUwDw/D/sPC3+KO+/i5T3G/c2lBilQj2sndy9/MnJOGnCFAKSull9r6RTAl9k5CG9nWbvSnInu2ApjqexhTSRpUCVu7/0EV/VNK2kJQm0wowVytVicnJ6enp+v1dr1eA5Q6oqmWA9Takut1BwDWnEBExnGksIQEcQYsc4yYQS0fy2JQYdIzIE8b/ebbVljMHTgRVESAAosgo4hkxAyCGIjCmJiZATOPQ9vCq6+9+Mqn7mYeJXPK8uEHD9746buRkFECEkkIITZNDAEzqD4MWn/NJuDpP8xD36l6sJUyg6Nm5jojKtqjmg9EJCdABIxydNVa+21BbAGAxGPlbIvsskOxkT0WeOgyq0HTNMVpnGPfjMMwqOoxDMNut3vy5EnTNCJoQZ7G+0wGMEbGVgFvbkAs4kTmEELTFP3RakeJk688niJlxCC6vSBBZD+mfb8PJPsh7XY9YtDXEUUiutF2AIAk1amm0JJPTk4AOQRsmgYAhUEEoLajIyIN2yYqlhdN+hMRAEX8mnVpKVS2RkYRjLFF1ArAgMzCkkRykjikMXEmIsKYJAmV2qakBfTKWrkKzJDzqEamtm01W3ocx2FU65cVHiQDJo+KBqkhBKiJTx7NuIQPBX0iZ0lJgydLHrMOoFCkBcSZpzKsMtchxXXwMyB7niRh06tQUsZR1+0hmC4SfjxYyDHhwAARXTgW12hmtXmb4RwRREwiVOLFUHqezIY9nJvHVRGhGvU3o305BGpyzjnVtj+hzTmP49CMMY7hze+9McThi9/4UnfWnZ2sf+VLv/onv/+nK4D/+J0/e+v11//4j/7xS7de/n99699/62/+66+/8sv99W7Vbbr19tHTB/f3FyeUzzaxISSWQBAAIxKC9ElyP7z99MHHzy4kBobIkGPTar8MKXZcICLt4kkCWSTGgAdsmSETEQZGpKYNGEJsOCFjiJAm9hYCA0WMuDlZk3a9IqXFk2nTiGOFLlT3+iRQzvV5L8QvthqcvaAA2Fxe8T+ZS9z/65U0D65mLiHXDGp6BCb1yd6Vc+Zq5pMDIfuQptsaD1HDT9tPzN98CJB+1f42U9v8hhze7CfsB/dCmN/2xXLsHq/h2AfPYBaLtRsW++n3fDFzdH3V/Z+LRTkS5+XvJTlCpwyz6z3l10Uu5NU0UqkBXYvNwdriz4AhH/ToW5gb/LkvwIBoXhS0Tt5v74woFVMlu8edtlm1AiIt2MiaYkCksayTFlq9hSAyqf1msPPXYu0LyAfXh92fo+3xIVYuIGoJZAegtYCcw/thDtWH73oeMPt1LV6xsKzLnAcdfvZw4l9KmsodkFgSc4zh8ePHjx49eu2117qu2+97zeNmhOYTfap+2+dUpYRr1nMoShdRw8nyVnR1QYRPTk5E5OHDh7HpTk9Pn11eCUjbdkMq4m8Z7fk76dEHEQNKqQEGjFDo8N27d+/evfvWW28/ffq0VjjTmc+W5qkWOq6KtejXgpggIudMYYbRMiXLoDBAKX2GUL+fcOeAvh07x+nsFtdi4fXLDEBN05yenmqYma8qoZiNyCKqwRbl0EbwlsQFptgN9tIQCrWB0o8RWaZARNUGfehgSklTB62kBRF1XXd6enrjxo31eq09crTUiIUCqm7Qtq3W/NYH9VjN2jXfsWXIq98oqXd6RPP7rwTwkHov9sFC9MUVd0FEdUcxBBbOCYYhjZkJ4ziOwomCZB5XTbhz9+Zrr760769SSjF0nOSD9z4CDgJNgyiMvOcYY4yBIKsrSe0LDTXg9LoFYVyQdABgFrXFeLiq6vrUlnBB1harhjmyGM8SEXa2ck+T7fLCMDMLFagux4SlfERSnqWdYACZOYMIYcQYNs16vda+GtfX1yLS9/3V1VXbrmxu4DIGF6yBmXM50AkYTDsNpX1GcQ/mUgUzACJMLW2nDWnbNraNSN6PQxAObZMZh15Otx0gpFwToAQBMiJfP73W3db2ebUMJz84fyoisaGua6GYOXIIgeIojCGEtm0BMOccQqMGEUREZEQtWarMl0Pu/BkVMBDQhhzqY2dkjIFYmDlyBhAK1IQQkjD9Q2n0uTZvaZqg4aNj6tWJqfiPiMyFPio+2xaji/+sSvsse4p5ovIyTxFUD8YCHBcw6hG7bVtbf3DFHvJByJ/NbQHfChMUl1XmDGf0sxePFtScXdsZvW2OKuzfaw96FWVhF0S0JoPPZXuexolIbDBEFBFJk30ohDAOA2EkCACgTuaAESIyXq0wSp8/eP39fth/6be/9MLJ7Xvv3f/ru3/77NGD7//N3/L19W/wV2/B5QDwq1/56r/5nX82Cl48evrqSy+fbk6un11e8a49bWlM7QAtQSPQZOIx5X7od8NP3vno4cVuRBoTU2hXq00/5hACoBAICjOIFnsOFAJh05ak2JyzMIMQBSSMmTiEQEAUYwihRHyihBBYOOfcj0OLQBRDbFbbFQSCEs0oUPUaqZWNjEYAlO5VXL3KIqI1iONzygKBD39yJ1VwD6bbPBexvoJGqbmGCLILwNM/lfl5mJca7sIpaVj3gjmllACX8d5wwL/rqyeBxusGVmxmsd7FgItVLN5V8X3K2l8sxPBUjkmxXiOyzfE3mKVj8RTUapkLfPHLMYZkkwH3Lv+IV8AWNMcGlAMLl983W6M/SuO4tkyjV3WjZoqWTVvPy7uvi6BWObi9KNT2fUp77QhCCDFG6x9lizXwM5uXuIALz1YtxxucR8K2opKksm/Vx
K5Nye2MZ7Vh9YPtCVTbvN9tv/msoXcHUh3O9aX5hiwNEP50DsEDnqPdiWMiC7hdTGDx7CEiH468uO3wwyFW+n/9NUmi7oI5O/NXpXuNcM45x0AXFxdPnjx56aWXNEaLmREzAFDQljB4uNijM6kHqus6orIq4MVAqOyACBjv3r1LRFdXu5OTkxMK9+5/vO9HoghTN0FavMVvtbj4INBmAQdqFSKenZ2tVqsHDx5cX1+fnp35RNnD/YF6vl478sjuASaLBIghNCE0IFoRmgBAyhFgBnleaP6EQwfih1vCDK5gDlqHwKbPtm273W67rhvHUcPnioMCY86imUTirsWsZrNE8YHb9VcmCt6PGmOkMIlVRvQ0h1PvHIbh+vr6+vp6GAaNaCAiLVGhVWRS0iqRSQVCm7YlMKsGqImFIYT1eq3VMowyG7X0Hl2/24xL6gEOOBdYDAfp/fYT1dKOMg+zzyhERDHIKDmzigE6bcAAmCt9Tfv+6vzJA2A5O73dxGL3EyAEqk48ijEioAiqGZ2ZI0Zb6YJ1YmXudrjoIt08Fa1XWbvfN6yWC3BUpeLXMTH4MJzK7app12IGvjBDJdtGdpGDKg2WVQB37bptW6uOozGiwzAoGPvz9eGKfvJ53hTdb4WqggV6IefSXZNCcRyUkY1nVfoQEIEF90Max8snjx/95Mc/apqwWa3W6/Vms1mvtu161TTNptXqR0LERBnQkgmzAIeA3QAiWduShYiSrhAxBk2P5L7vAYqHXLXKtm1VNQsRiSgP1zr/hjSbtDF8IcBxHEGQGazObXx2dSmEKfEwJFtSrcyRRVRkIcQizHHKq9VKa+AMw0AB6oFNgQHMWbm4L9VIteXaOI5933fdqmmmCuY6S/Vg6iPWcRhKl4wlXbNfFxBmIXYevk0RhYnnkcl2VOPODdOMXUkV0diZvjQKwmLffSR6rJ1YPYTZ25l5HEebiYdIQwPDHENFc3znnFmyJsvWtyDU1k8qnoWg81HPMnLVq5s2CKOUYj9s3cxzzsCs7a04cWhlE1rO/PTB/lF+eNad3mxP20/f/J//l3/brUBglLj7T9/71tkrt//ZN/71HvZPP7j38PH5Szdu/cq/+u/HiN/90Q/+6i/+y4P97s62ayWtAGA/4jgO+3HVbX7+zgcfPb2m1Xq86iGGJnZjyiRAArFtCQQhg6p/zIhCQvv9nqzSFwDnieaKSLdpc86CzJJi1/S5Tym161ZEuq7TZKMxJQzQtjFf8aaLu13fNl1AYNIE7WVdOMSSSKA1qSZIUEydQnYVMEqEqwiotFTxTZg5IMamsb6xWuhFM+CJEEBMoGdmRCJqtAqcBwma2ltN5gy1j/gMbCJSNNntdtqVtWkbrNm84FSmBaYAANCsgTtONsIgeeJnBp/GXTy30+lFV3RqYiyO5YR5+VxFB6o5IcZ7vOjgY78t3UXvV4UhuMo0ngdkV4XYZq6ESGU+45cWQDKMgy3NcyMNrzIkNcTMteWa3WlcxFZtlIpcfKl97/U0Xa9XPhFnlWOp9nG1s7AYLX0kxqkivJE7vUdlIDN24vyCGj5n8/Tae5jSEbOmHlleotn+jHbZbjNziJN5WG1c9qdGVYWIIMHYqjhRTLmU0kw8qO2ue5XSMgbECzqeltYB2SKITFT1vWdsAoewB+5CpwN4H4ttuwdFOmgfYoOgi0bxe+gxCOf6hn+7vZQPDDG2ZC//gWN/8JyraZr9fr9ddSIChM+ePXvjjTdeffXV11577bvf/54SmYjEB7qQH8QEbr9kEcGZlWqawzAMXbvq+51QZGaWvN1uh30PAJvNRqlEaBoAODk52e16oKgIoQNO2FRJAWoulAhUOkACXnfBaphbrVavvvrqycnJO++807baIU0Fu0l7JNO31UToACa46hRevNYpERQN0LgJgDCrYZdI0wZcmXg7XE8zYVkl1RukZsuxyyjGQgTKaez7/vbt2ycn22Hoc86r1cpsQ4RSAslysQelfeIs4ziOYyIMLDwOaWxSE1vlfSmllMeUEksGlL7f9f1utbphdFWTklbrNqW03+91K87OzgqPdlrcs2fPnjx5stvtRErf6fV6ffv27bOzM2VPTdPsdjsA0ZIhRsd0czQFSzfZflX49/qt7aq5LkMtQSki6qGyAFE7YuVNllih9w/DoEKsEQFzXW42m2EYwImUlahOAbcqTEzQknPKPQLknN977z3B/Ysv3CLAx48fP3jw+OTkZHcFAoEBTZOPMTax0fhADEREOIqTwwtzybXSplFUdnaNWnsfc85EWWVLXSwza9t3sIY0idVGaaxE+UvXtikN1YEWrWzsmPcGvXrWqurrTuoW6buKjoeTk1n5e9d1m83G6InKM2Y6D1EQUUWv1WqlvkGs0jI49oeIbduG0KtmYQgbYxyLhjlRUfOvxgj7/nq1bjebjc4nxpBSothgCVFkZmEey24AEoVA1O/3saHt6mwc9oCrjz9+9vDRg65pEbHv++1227ZdSqnr2tVqtVq1Xdd1q6bruhgJUbqui01YrdrNZtO2sZy1UOSGiBLEMQmzIK5DCERxtxsAGGAEvK5IAYjYhNiEaEJF16215gsRQWZ10iPimISI2m471VL3ZEWQCbH2wJ3IkOKkOU+RBDBUeT06vjjxM0UkY3sqLqv3H52twiBMy+7jXFmHg8Qtu6iGdBoCePSzwT3TNfroX6R8ztOXIjfkyTJhKOTlHj9JPz1wzgSeK662tMWHBUG3X9kZUWwz9bLZ+lfLgf24fM/ThLUbtf4UEJEEhEIImUfOsqIYV6e7i/6jH9w76banZ5sbN2/t6NnI+/Z0/aMPf/b0//Z//eqXfp1G+kJYf/lXfrWJ6+/+8IdDgF/78pdxu/pf/92/jW1752xz9awfd3seKY/cD0PCcNmPexaIUTCAiCb1xBhz6oVEI2IFS0sJANJ+uMr5BSMFAWWbJAkBEWMTYowD5RgRAoccRC0ZwEgEwBiobVuKmCVrFVwi0r2IFJjYhITg8qOSq7dpG55S0kYwXsQREagpyBq+gq5YljcBcs2qN3q0OHFmVkCQWubLhIMFOhg4tV1XC7HWdG0i5VIGAB4Y7BIX9gO4BMJyD0xNCOxLOhbE6GHbflrAoS1kYofzZ+1avJS9B+8TXSjTig6wzN/p0dBkOP1XGZXdQEvL39KJxHkpbdsHPzHbNE/HbL2LPUEn7i8O0U7tefufD3Ib7FfbVSPmtj9+cKrVII6O48VfcCCxWJGb8Cxd3M9kWj5kUHc0iF/p7J66ezbhuoTj/i7/xudtyBILDm6Ag7JG9iA7T7Wf5OHRLKB9AY22+TAH7E9Y0QJy/GjPW+bh5+ddAqTFV5JWRxMEgIuLCxXNc86EIYOAMFHUiR4dh5xL4ZAgLGYI86IATdMwBxV5+76/utoNw9B1nSDlnAXyarUZSx/CWfXaJRLV8YlIJAMSuBIsWmqxaZqu627dunV1dXV+fm54Qa6whp/qUdItTh5Y2N04l9w8EWQkAAKZknEYAUUARJhFsjhvm7gLnEi9gJb/1mvTrQDl5tnpuuuaECIR
cI7YIksIGENACKxlBZjVu68c01sWzE5KRCESYISqRFnBEs83AUvklx1TRecUo+aqjJo3WGyCIQLAer2+cePGZrNRmVWHijGK5AXB4bk1xBg3EcmkCc50bKuiaQ+Wz7hEVX/01UNeYvZ8hIXnI3isj1FRUKmYuiQzQK1xpSUlCQMEljCO4+PHT/rh6t69e5vVut+ny4thHJmoFSDOYlSLSNt1gZhCmGvnFfVBa4IMowgwQBIW15+WCNfrtUWbI2Lbdmr6DLXRn76KJt9JqHlubCKK7ZWu2jcXCXUzdXs1SNhra1I9FkWgVTO0KweqmxlqUqivW0tEsSkFrrG2PFiv16rHEk2KEFWLM869LIpelfgAIha7AGgYZxQetHYmFF4m1cOfVcvxp6yDc4YszELCQThSWHWrs9hsmZ+OY0DEYSBE7vsh5/zgyYXqROpfVaMGc26aQERtFzUvDxFDwBjji6crXWbXrVer1Xq9Xa1WVpnJVDAiiDE2TbPb73pSOCfJfHV9AXCBiOv1hpnTMI7jCEBd163X6xhj1Dg8LVQLiEAIwChLejeBdcQQkQKov6T+OkF/zlmq0O7x0DR7nb09a4oNznUeccZvcX35FoitZ7pQmYIrB+SoD/vBbTQ58AFi9Z+oxcIMDAZD7IooHMKEJyKLTfDf+z89KQFH1PydhcLCJByzC0k1GWWxCjSFMMNEgBCFWSCDkGgIlwgiC0loKe8SAJw228Dh8tHVu99/Zz9cffEf/VLzAq1Pbz48v7/C9r1HD957789++Ze++K/+zf8YYvMX//E/f+tvvv3rX/vNL/3GV9pV92zcP3z08a/cfuns9MY2njz5+Ud5n6/64cHlxS5nCQExlsouOYXYBoSRGYQRgQgIA6JWTwbgoiGB7r+uRUQk5zRwlqrgZZSgCQkszMwSEQmFgQLErmlX3QVfJGGMGEJIKSMSBQL1EgpbOTLHQICcGgUlBngCrenOPOVnGplDxEDUj6NSFnQ+bSNhHgiN+4tT7JU0e8YDB8KWzCHQHIx2swlbfi0zEAXxf84A0oUz+beUuc4vRNTMXnCqi0kGR19hgGpzrj/NzDf+XR6X/Td+W/wcZiutiKOKnx+5MPI0MSqPufbBz3wxKz8gOWeXiS8mJ/nHbX9sfL+lXgC1+XjUtlcUWle/W4zjd94mY3RVDpRnM/EuLgV1u3NBfGx/DORknqBoSxBHgf3uhdBi1dOckK1kDQE0ARjqh5mC9Lwlz+bvQGmxar+cw+nZgEdP34/vQc7j3eJFC8hc7P9icHvvYj5Hb37efH6Ri5klVs1HHSnCDx48IKKXXnmZmZGAgEpcxjxezs9w8VKbtruzIi8AaMhxDY6ITdTiIrdu3PzUpz719OlF13U3btx4+PhcRGMKYpUap30zEm3zsSXoDs12qf4RY9xut1pR5v79+3CsfMvhNh6FK3AnbvcQRRBCJKgxFwgBhUQQoZGSxcACwmqBes5h+ZHnO3zEmnCwSrAlIEEI8fT0lGqkd0qJ1lM7OITab4YRQGqRoXJGKsqrtzO40qBQ89Y4BE1MUscjIgpkrCqicUbmlHNOiURQXW273e7q6moYBkQMEbfb7Y0bN27fvr3ZbKg62Jmh6zotG7EQ5HLOcCA1iQhXV9JiW6SGihjXqOx7hvh2aQqSBYnYIIazC4uAnZF9UyiqRAQIhFlAJGsZdS29gyREFKQRzPthvz/fPzp/2oUIGEAakI4wCKJmx1CDGjCBBDXJMxCSOuA9MOi/XuT250X1THUheigpjWx5OlJIeN3YwuLlWISL35m6J8TMwEgleVKy8JhL4Q+G4pnQ4qgMwhWujPGJiOaU5pz7vldLpSkRRCWwRXmWKm+mt9tlQpFfrM45Z64jWJkZbdot+tLVqmuayJwtc2ext1iKHhIRNU0ngmPKCCSAiTFg063OTm/cjQ+fQk4UAgVOubiy19tTCwBkziIAyAJ0vRsAgHYDESFNvQM+Gq+KKowxhBBCo3+dnJy0bbtarZomhBDaLm42m67rVh0honpZNamSiNoQnz7bt20rEsei9csw7FJKcUG+RcR0uYoL7PhRAQuVvMvPnEx+hSmlB7E6ZINLPhnHUWGobTuo4UDGNUXE2s4aNOjgwSW0eFaq1MEuIxMLgPDYa2YJW6aRKvvGJE4VsMTJYfag5z14cJkg7jU0PMY4vdBgP1kdCNsBmAvoC9KzsJnZHHJmEDX6ihnwbFb1uIVFCAkQQtesqIWBOEkL3c11ywN/9PaDi/HZN/7l7+4lb7tb69Vqf9Gf3Tp5cH35cPd0Hbox4u//4e9//RvfuOj7yydPvv7bX3v40b3r+4/v769u3P7U8OD8erx+NqYHu+uMESIFDpwRkzRICIwCCCyljjxhIERCxpzmLNC2jhkjaotOIkKSnDOyBGyIiGs9cQx6K2CA1ap9JjnnsaEVoDBPVo+yGyixkoNcq25IzXWZBO4DZiDVWGAUB6piFohk3AMiICMRgmgGAAAMwwDALNVti0IBkDCNM2VeL2un41FVr77vGYvF69CIYEMdWkM+QXTwIxyylqOPewJikPm8y49vmGJoVSWGSXdaoI/xG1uOwbPp5HYP4tL6wzVQ0M/BNrwfpz6o4iQJezvMT3+xWHudxsCYHRRqQKZV1bN98H/6t+g3Oc9MsH7TFltaCM5cnvQEYXEKOn9yaZZQxZp8UHVmesopk0ZDTP2jebA9M4dIniabRLiAMbt8iKbdtgBsA+nFiSw2ZLG95Tao/5t5KpRET+du57gwRB7i4OIbe+nR78FB+OI2D1G2k4fnaKMdpQbHj8zdaW/5hJtFBCEwM3D58/79+xcXFy+99JJijTqLANRMTujME0eJw2Lmy2+0hjkKC4fQpLGHAvn59PS0bVenp3Dzxu0h5QcfPySiQDHnXPngcnu1Ql6AifNSbfAFdhYw29ibN2++8MIL3/3ud588ebJer8caDioyKYc8E1EmPLX99OTar1Gl51JMEiNCkGLsAkYw9e9wvz75ZN0O/0IAYLMdx9Q0zWazAQBmbtu273styWg3V3ljthbEgBgACECYeRiGIpfSjHDpT03TrNfrtm1jQymXfTOSSAQWiNi20Pf9brfb7/carNs0Tdd2Z2dnZ2dn6/VaGatZDInIF7nxa6zpqVNSAzNDtezZrhp99oZaqFQl4Iwc2YuMcFnOguq99nbbZONBReOqOnOpgsHmjLJYkpQ5CbWEKAJIgaijEEBSzn2fOMaWsEFsWQiEgBgjhExVGNEJoNrQaT5/46p2QBYGpdMbazKecatc0+T80ox0awsDcB169Vmp22iIANUqygJJmLRODOc8isYD61mknNg5hCyG2YCKq2dPDQe+hZKIAJD6maiUZomqEFK9bFhbpl9RzplZ1eCkMZZFPeMCyZDHGLdEpM7JCi1ci3lO5JooEgUFctGWgRgSiyBQbG7eutt2H/a7PYaGGobMLAwIF1c784ICQIhl62K70RcJIktmARBgAJKWM9TaoQzQKzt78OhZCMGafqmHMMa4WpPG2282m1XbChc/6mq12Ww2p6e
n2/VJ13UqDTFzNDypWzwrXieSASbeg7WLaNV2poSTBbkkKrTOnrX+kjnnYRhEJvO/p0Qh+ByD6fvskoC5XjlnqGH3BgG2ogV1XvAqcXYdhW/P1cRi7mVS/3jukcPn6HJw7FoYU+02jSkXkarv6PwlpRGPsvCqooSavihVcDH8J5qYtEbg2hbZ7tlCyAQeZCJKOWzWm9CG6yfXfT9sTzc3b91e3V299eCt//L/+Js/+td/tL27fvjkwenZ2cC831/8+ff+7o//4I/+8R/9swi4H3YP7n30uRde+dpXfmM3XP3NX/zl9/7qb966eBhubUYMj86fDW0nQYC1n6CEEAIEIRGWJlCGDCUtBBGCGn+n7sUsQoIsyCIiTQjIGAhCwCbEhoKUvg4UMEAkiAAkVp+l26yEkEH0z8Q5MEoS4UljZxDLFDa9gkHAlcwV5/q2O7VZjRHNYglGZGYNRTZ2YvBpwffgUsABIFX7yAIRqBrhzNR3eC3Y3gLkwDFRZw4oUvLzBsRfQODwlxb78hvomSU4lmxw63EWnL7hWbJRFeNwnvH4tSz0BJuGDaI3LxqsT+gwJyB+cI+5xlRCE+17QzRxF7sGoV4jAkdD5JiU6ae9OF//usWXC4OF3aCij1+yfjg6H7+Hi8smPL1xbiv1g+ScyRVcXWzs0UvEXgHMYjnSJVBHRErVMSWVYmv1i/KIsHijfcT5pT8eLtZP1a/68MMhli2GgvlhHSx8hikeNvy7PDx/wuv85W0H/sHnTYNZK3tJFo7MAPDw8aOHjx+9/PLLN27cePzkSYuYODdzkXGxOhOyF5tQ7hHtFosANasiCUzQTgB51a1v3LihIRKb9WZ3/uTq6oqZA0HOGURVlLpRrLESVdpDQakTmy8X57SOmV988cXVavXhhx+qrJmsrcIxvPvkTT52yiFnTmPJXM05MyDnDBAFE1FEFK3XoKUA2KXb+L31Bl8PFZ9wmP6sPVxtt9v1eq30U2VoZVKiMRqCOStNICKCNIKjXbpWRLQctoAzTVjF9+KxaUgtCObAGccxpQGx0WqQAEAUrbk8Im42m+12u1mfWBFUJdSmxug0FqRMF6IetsNNWFAD2w1VITz1MHQ/pLTWCE6zQmzk7Dqv2GUmTlN7qHaQChgEGUEEtP8Xa1LMZAgTaIACtoIp5xAjhdAEakWCcBDRGGMgIkZOkjApThERIMzyiTxJ8dGSeCAKmlOu6khMhFZ9RNUwky3NSkKuLkbbNACzTfBwS84SrcvU4xaXGqN/ZuGQIwYSBAwlX3cYhzJbEIqhadvYFrGKiDSry7TuruvUuMC1LqMZ08kCd52bCnHWh6NIYpocm1JDaAVXmDlGAlgyR7+x2rcTEZEaVAWHAobYrdeCtB96BslJWDKyIGJb7QuqdxEAAgJgHlKtA6KIj4gIAa72JetSoySKfQQhNCQiI6diXMoMYwbow5VoQRr1qeYsIYSu6y6fXYrIqtsYosXYrlarOHmvylGVAyMilcFtW3Vm63UXQkCUlAYDdA08AMfVYLKvF7EvhNJ8cxgGD6mGQhOrEKVziBAQWFhyEgSpjUMRhECk/IezYAC7TLuzmXBNYjTKyK5UQ66N5sEpb0TEabTZGvLb+IfM1eJLDd9kblpe8E52/kOaux/hmNSSZUqGtAGxxrjCgfRjIXxstX0ljeMYa5pZzdYraskouB+kQcZ1DJAGHnjMG1iv9usnP3/2F//2W7//z79x88UXL66ebm+unu4e/tm3v/Xg8umXXvvCjbjZUPOZFz91dnpy796HTPjHf/TNEJpv/+13Ygy0iU8CX6EwqFMCRCQCtk0QgMyjABVeTShYYpA9VwYA5JIR4ImLbV2GbMUnIGRGAIQQERIy5K7r1AIkAKnubc4ZGGb7TKSIavQipcRmbtTpHZMPlG/pTNR/nnMeh6Fdtx4MsAp5NPfMGOyRayVvnz1sGEjrNLquS1OHkpmp0kY2gMf/Fi8BayMdmvIq/VOHXHYBeOhkI/+9fwvM9Ua7Qcn04ksDdZnb4233DvFxcY9tKbjqi4cyxGIrjE/447Of0FU/W+ywTcBQNYSpJKYfcDFncSrZoUKrt5mBzAYsRJtm39g+5zxLvDEO7UvCLEjN0cshCpGz6KEjtn73jB765Sxu86/TdRnKGPc5CoFyIPKio7T+3OfzX55LffC5x+En7OEWHITjHMvsp8VxHM7K46MfZ8Eyjs7qE07K7jRWyM5d8Dz019J8tkkqql5dXd27d+/rX//6K6+8cu/+fUTUQhREpKnoC/UP5418jrxGRGwmAKDwicUirN9r8tjjx49TSqcnowiUDoGCauqbdDAhAwfP98vaYXI1L0piAkCM8TOf+cwwDO+//76vI4CF9RxA18EGLgB+sbFCKMXTiFb/RkSc4oGiIiYiIgkfIXdHD6sCwNFjPG4wAoCmac7OzmpOVEF/LiH0qlczgDaExOoVnJWtsmGLbsaT0B9cjTRm1jqHADCOoyaPXV1d1FbV2t6t0a6Dfd+LyGq10g4T283parXy54glt3mKRPCG0cIccRZMUenelIfvJ6+GBiMyjgiwHFAA3TfLizNZN7hmj6YS2GdTOO0nAEAMwoJEiIyUkHRiPKYhlNq5KBiBgsjAwOOYGyCBSBSBUBiypAycIWPpHicgETEQgtoXvI3PqITRf6N7hbUxAiCFJVeV2vghhNC2LbmsE0sQw8pBKjOaOJoRcIrB8tGERRBU02POCMA1YQ2ptKZ3XrhyTFqnxw5d56NVEkzo8uFRJuFbjTpVT7A4paZKP2UHpIwwjllvizEiRV2LZuipJEgEZkYBhw4eI/phUMBoaqijTmB7ehJjHDlL3zOnQMAgICCJqyY5Y5QxRn0DOj5FRKGrQqAQCAgXt8EwpCqoQIwxxLLA6/6aBSk0TJEgAjILjiNuTm7u90PKsNuny6vh6upKEXAqzCoAMidDMC/JRQFCKMKuosGC6BscGEniUgZz+tPjGBzYwolIGPywVHP5pmnM04qk5pQpxHtjg71iQRY9oQFnNkDHZb0cBpXDGfJ7743HPd0UmAez2YCeMR9lJ+D2f2G7slXbTBbk0l7ncRIAiKaG40qjtYZmoYMEQlRqigADIsFqv9v1JKsmxk3X7/YPHjy4utohN/lC7j998JfpW1/7Z1974Qs3P774eH1rQ1fjD9752Qf3P/6dX/nqv/i9P7qJ3Z//h//Pf/nP/+mf/u/+5adf+ie/+Y3fffPBxx9/9Ciw4I3T8zd/ztgRIhEkEZEE0BCqG5qREJAFkJmFgSAAYZAAUAsGSgGMBmFkVuYuxSqBgDKqtSmgiIzjwAEbiAGjMjwMhCjaS6Fov2mMNMuVYmasNhGDQHDGMJAJ4MEKdnHhcIfiiP/gYX4xuMGbRoceim7TBNwgcmD/FudO128WjMpP3lBgIW4YjxQQqSXFPILIMQFFL5+r4IGT52ZdP45XmD3YSzXBeKXRk36/1cF5ojza2mh+/uDIy0K3pHmYt5+kHSI62SK7spww12/xQOUAAGV4BhU2Q/86vwM1fW6mxS2sRfYIM2vfUX
Gsxa/i8Oz8YS2ozdHzVZOMvV2cwgwujkinWnizaLTLpO3b/eAoHgADiNZ9By32py1gisCgBJBBvYVC2pQKlgkOs3Udmb+K4E7Udpt/RPhenA4c4MLipQtF0UDRW3M8NTB6DrAkLIdvnK8C/PjPu54HWs+71I+hoRa64U1s9v3unXfe+cM//MPXXnvt23//957KLd6ygDo4ALzK+AFKJhJLXSMRppS6NnLmnFMI4eLi8mcf/ISZT7ZnL7z0skljiCFXKs0Zaha8+N1Gk08trFSDGx1tDygnJyef/exnLy4uPvjgg6ZpVMQsWDxfmt/qwz08ilb6dhUcCrWJjeSMAEBRpCicWIkRIvC8LQrMcdN/WBA3/8jhUzb/GOPp6akZnTS8cxiGtl3VY0UigprKXhcSFCRsSkZwIE/z8RJOygOFUutCp2HeQhFJKWlnwv1+r3Uluq47OTlR76X5puwsahkbleyn45BKnHU6BoSH/3qw9D95kz0zW7E9PxQAWGEbcFU6Q20sAXM+6y97Y3kpIzICsMY6AYBAZk5938fApNIIhRBJpBXgzCOMKtbOKuczJ/WHMzMIR4wYSEgyTSWp7e1ezPAQIiJcKi6UfbDzwur6029i7fomIrb/epVhARRCoGaHwdzbYd/LPG7TJqa7SkRW74drDwlwDhhTtkXEZqWMRmMjdW6qwHNJT2t5HmMI1dafUlKy770pZV+KWWrVtqVQreVbeaLtVwcALOqTIGbOPMYYgUgk37lz5/TG6fmTx5JGJImx0SjdlEZQ7VTfSKSqeJ9GnZI5M6V4erXqvn6DRJk0+LdpkTlzYpE+jTKqfi1Nt2qqeKmau4js+z6EwAxtiJlRICK1nPMwSvSCGiIKaJklNMTTK0S0YrKFmFbRzRo22E9YVaaccwhkn00tyTmrscfOScFuQfI8LfN02b9IP4orVWSiicdnIwFHOSJWD4zhNpYsT8Ywkz790owD+QENatFZSQ2I0YnUC4rjCYpiZql3VDGnWN2a6DmfTdhvETh+oJ4WE8JCCAKAulhg8fIHskAOsAVsUu6vYY/AIonHLMN1vx9v3bx7vn/y3uvv5Y5/q/v1O1+69XQ4P92sm/Xq4mr/+PqywdWzp+e7J8/+zZ/86Vd+62v3n338ZLy8/dorlxl7vLz10gsj1g0UyMg55TwOIQTJWXCS7ZgFVAiEAOAb/gIKBCIMkFWmDKVmcdu2Y5QEuYT9pNRzzwFDo4QGMcwiDIsVIM1sRZPOL8VMYGDm5Tyci/LMzCkr3mKNmSlh1U0zptEsDnCgV3hk0Qk0TanpL06r8RKkRwoA2O/3QsFeIbX8l8fHT2ZUAIBwRLq11S1wx5PC580qu4Y/5GJU/D2Lt7DLE2bXidH2fHGPf+lRtPLs09AEjpEXT9AtP8ojL0wSyUQHdMAsM8Hd8HdKQ62v8zADzlfpN9nvSf0wyR9wEDm8WCO69D//ry3kkFIF127ETG9yUIZhekomEQrmYoHMY/vt3LnUgynIpckqGqbi9r9U+bdNtHMXVz3LH66+NM+rqh5S48X8mVnDC/1m8tzOsrjfH+ICd8QpCYvv/Wn6Gw6nBHOmBgfcwZMgc1McPZ3Dy0QuO2X9/ihJ0e8LR6YoIpBLMNuHH34IAHfu3CmF46EI4toH0kPgYmSDZP239G+YllD+L8ZIqCb/qEklOefz8/P9fggBLy8vT852iCWMre/H2HYikz1iseQ6k4nq+m0vyAsMgJvN5u7du08unp2fn2tRU7JcDIAJwN1mfvL+L2gsJ855SmGq+1BmU/5kRmQ1gTDPNPxp344FQx6FpU++YqT12npVl5QtP22ioJZASyqrkkljZJkZ1BOVs9ZuXcpXmpRleXpVIZmovSqERCSStdDrjRs3Tk5OrBWZxrlRbUigZUJCaFQuVSodXIsRRFQFCeaqKR2cu96jkZ80z13POWtOv79fL/VhUpWt9TLdyXZpmszcCmOPaO0Q3TwW9gU49EPKOWalqJBGaaLKAyxcO6WZmCSc88gMCEhEmu5iapWnVF56OaB1mroi6l9VIV9E2rZNaVQpVzdK4QERlQN7Ut80jSmE9lMlrEVh45pIDy6M0/MOhRntMWNwwjWsL9fiQPZvPeUS3Yq1tLuRIA1FNA+WVqaheZa7fda+hWbT1xzCXEtgennMEHlxxHo1UdMcQNvdMxMzZ8Szs7PT09Oua/YyYkaKCEIsuaEQKCBiFmApKa8g0sSo6UsBKagxFAAEYmyd1s3MrJH76tssYeeF5ocQqO/7JnYhVHUGiShQCGkYiWISSP0gjAKEgRggtoHUJ6vHFsrZDCGElLLU9sScRBBCMzk35aCDsM7ZpFvDMTuhesAgUk4rlLxe7QPDiAh5UBgKFEIIFGJKMI4jksQ2qP1ARCgQiKQ0opAaFEIMMbYAJILMEkKjniWqBWeh1tQCx2L1A7seaG5DNQJeZY6s3CWPLJlDCJFCSgkEYghqxVdyGYC4qovsTDUxxtLfQACBQjX8j32KUR39QgRN02Qe9/s+ZQQAYd06BMAQ2hAAcmrCVIUZtFGkYBoThRBCVECpGy4YEhLERmMAOCUhokCEAIFaEOLMzExAITQkdA1PEIVQWmlaoBCIQT3pw+Xlx02kU9o8/v7D7zz44df/4Bu/83t//LPwZ6uw2oTt+z97+Fdnr//2L//6v/4//B8zP7u4fPjeW2/87L03/+Sf/nHzj/7R//n/8u/eTvtX//DX3/nLv5crvtHeyjvuqDuJN6721926BdqlMI405izEoeEYEkif6KxjTklYY65JIANRDk3sNozX11cbWHeBno27vrmEVd6161VoWt7EvuGcAgHSXlpsz3KGHmW9TluU2OddjwM0IkMW1fMRm9AY0UwpAZKACDNVQ6CFRgCL1qYSEQLEGoJvfNFAKEADAGlkQKvHlSqrI+YaiQGBJaUxjcMVVgsTcxZRuxSY+8JLwIiYCQIRi4x9T0RN03SrFboQaKkt45TXesJqfAuBcC7hYMnmpGA5plr0hxGFQPKUWyhAiFTs7wJVwTCSrdP2ueBe8LWFmPmtUt7JvVa5WkAkJfjGWkwJV5ifq3yIJRzaVFklUFM1ahFgFq3AW5hNlPLolNQhIgJE2QUXqT4RmkZ7p9jRG8fyJNHz5hCKx6CmIujtOYRgW4IIMSoLZ5GgXjdlbJVQt5VqmfiCiBRCwGo25pocYjPR+mwqpUk1OgQk0HBtxNgU4Z5Tidu04zApp2lbL1uY7YxcCIOZDlNKISJkyHnyEBrJtcNy9DbkVNObJaM2iWZmSVp9LkTU/RHImXPmUuEAnU0HqgC9EJf1LDKX+iCCUKKVAiHrbhOIOF0FFuBq30gtfmPfG2Yplvlz958Nwv2uekOhpyH2wRZoyOKtCR6bxMWnGQ7atqCzICwQ0K7EozKjQXoiwqbdj4li9+xyt9/1v/S5z59uTsb9QCyIeR2bnmfWjcW1UKehVmcBXFbxHfo+hNA0odcKgU17ue/btqXNC4B4+4UXwnp9cXFfaJ0FBTXUrOIXTTsAzBERAoFUUoCkiawYwn5/f
bLe5DQSEUhGxFs3T164e/Nbf/1XPKbEOTZNzpIBs4TivCzdZwERSYBpcpXYcQCATLl/AAJYcx0i7tqOKIRxHHIpIEYxIvMIglq0DARAgggCInMB+0r99EUCgFU8VnAqUbL1hukRVSvU71eCbJm1uwDLcHZ65+x0m9OQk6zX65QYGEK13QRSDQcoiACw5LHfCQgFHoYh5QGJA1EISETD2IdI6/YEqjwaQmhW693uasgpMSQGhiKan5yciEiMcbfbNZGaGO/cvokIGr/adZ026S0EliTlQWMRtXaLFaHpmrZddev1GkkyO/8qIoZQsrYApB6AePOKNh0BERBEyMKIhIGAMeWMwiGEgGI+BnCYm5k5SYaMUWvPBABARklCQEQUIBAQAOSU1c3Ytq3eECCQEI/MI6e8B5AAoW1X+z3wEFvaBGgaCiKJeSARHmHgEGOMIaT9nsgypIgw5pxTP6SeEQMKoVIMApZRcmqaRiMrlaWztrikWpxMSoTUBLd5JCzflYxQgTY2kjMBAhIzj/2gh9vGJjYFxkAyEGiNBSx+ewAQZrUqSgjYNE3OwsySORCdbLYiMo6jUAhtoRjmCtKDHva9RvMKCKccKYSmTSm1sSGkSCEgSeZh3+uDFJrdsMOR2iYAcqSAEQmyCARUlRtjbIFwGPPJycl1PyBRn0YQFECgJmVh5qZZG/5qYQkEjkG2223f9+p7NAVYW1/UDn4p5azWjZwzxDUKhoyMMTRtaFYoPOz7BsNrr7768YcftBSIYNz3sSUShFXROChQrj0qCTGLhK5JnHfjCDkrPx3HcdvFAtHIQFkEkwycsG1bbSuqzTZIQFggAGVJaZfV5aZRuzlLyCFQzkPOEmMbm1jsziCxnuWMdyp999FfKmtqS0dj4Z4bHTKYKjHMTNRQbWDGCRaM0w/l/cspJaqNbgygRQSnd9Wg9onpTmKKcUS1VVjALtXCm5ZXbZzVM2bPcXX+XhQQmYS/zLnWkp4tJOfsCwAuuDtRGSHnrM3fqjFG6/eoVCfMHJyd0qbBGq8Yl4GvJgrYZSKIviKlyQqoN4TSEUVAe1ARolCIcYWrEVIiEcnDODx48OAH3/3excWz3/wfXw4cYoyf/cxnXvzsyzdP71zCxds/+0mM+fOf/2JG+Ltvffvr/+gf/8s//dP/6f/0P7/yqRd+7+tf+85//XtCuHn37On7F+fXT9sY98NOWmAUzQ5FqYaGoHK/IAohkcoeDCI8jhkCAEDf79Y5hja2qy6FHgVTSlpjSQAT98IpYtu2LcbqCOKZvdaDq53IoQBnAOMhxJvePRJ5cQ2q792fe8UgU4EKVbVZeXOjPzv/FjtKkwXtV3Y9SGxdUnMXJx75D9mYxTFFD11+gQZjhBBcUTg/YXL+MaMth2bvxXsPvzHKY4uCCv8LPKVaC24xzgIj/FZ7fLHJL/RSoyd2cIcXuvxkW3L9fqYqLIDw8FqAotEZa5vhCa+opUwtBXmKo/OqKVT9sMQ75VnZT4OZMK/97c/dg6htmq1XP1um0DjOrANURc+u63LtyelXEcMKXWgW1k7ETdN6wPsEODl6zdFqWecAHfc5PEr/4ej9HjUW2GFvX2ygH+oT5owugwPnHPbobvjxF9NekI5P3jd/r779+vr68vJSKxCMwxCmGmbH3Z6H33iKsbjB8NTbINRk3DbNfr8/Pz+/vLx8+vTpMAwaanh0XYjol7VYo0c6ROTMiPj5z3+eme/du7fb7ULbxBBEOKUUqLHHbLYBkJ+zbf40F/tskySk+TckUHcDZmVa4cAb+Qlw4l/qSa6/QarwvdlsSkdBmkJdsIZo6QyrOXIwQ88hVTTCYlKy2hyx5pUNw2DFVzQ2T21A4ziC0Gaz0UKHIa60qo1pBeocHoah7/vr62utNyMiqjc2IXLKVmkGq/jqF24bvgAAgy6lb1w76fm6DxqMdwg/RARhNrLfVW+CAec59MiIRdpEo3sWYA/FnTUJpf5kPSDZErSoD9U4pupZWXJnY5fBtVWbWeJkeouxP679vf3G6hrVzwYAfrZ12xFgCkCo9tmyD2auNSiimhMIjmqN46iGS12j7dJCzvGsJLhUHd2K1WrV9wNWZqcuxxCCglbZYVE7sqjK6tlQSoPqe13XWUEdAy2ex1Qu0E3BPoQoVewPgbQZxsnJycnJSdZypiyI2HUANVJSRDqRDBM8EFEEEGIgVKIXQgg00f8ajE1aeZGKHx41D1xdFiEE88oiljx5EHFdNIpAqNVJolTJyVAFqjxhnmIAMKQ1qPLwOqfIs8tsWnX7yOrt1m+W1RQn34MzHnuxA6uIGUKoGeMIMEkeFTimmCWT6nxYmiEz1fTfornN9QTPw9T4YbvkqYM9gnO1Qb882tPcYU7Bxpzz0WJZuiYiAi4eAG8JVtf2IRHEKhfahtirPScwANBQ3oxVTQHJtZZPE9qcBSQRYEAad8MHP3/v6fkTePFzX/jCF85OtrdfeWl79+w7D7/3vW/99cs3z/74D/5xZPjqr/7G+q23nz188oXP/OrnXnvl4aOPfus3v4qj/Pn/+y9Pw9nZi7d4J9vt6f2P71GO6qggETX5AglEyGkARAy1JaAUxtznFCKKyNV+d8ZbVexHHJCRmRmYmhYBx35fRNWm7FXiHESBoahhtm92rI6cTc4HDzz+TttSTxo84ykgREtiDcVDpRUFpkh6P5oHsAXpQZyBkxEswxqvporTCuyDrULmjHMB//69dT5w9BKnp/n5yP+PtT9rmiRJEgMxVTVzj+M78qjKzOquqj5n+uB0T/ccmJ0DxMxiQVnsw8qSL3yhCJ/5nyhCCl8oWL5wSZHdxYLkAAsMiMFgMFdP39Pd1XVlHXl8md8REe5mqnxQM3V18/iyeyB06c6KL8LdXE1NVU1vm58j6ilQFmOhM1pwvvV64M3iNSD9PfavR6OHp0Gy3blEDiw42pAsIqEaYMZQUAWF7WGWUtHgs9mhb0OpX2jLklrirZDZohG/n5034TRUOIxpvrIzPNufftX0cYXZIpBQRZnlnVaDfPTjGybZZahaJ/dGkIKrL21Wn1xI6igJ+Vnbl5Ufp+2zed1tVzMIzMkD5gRvt9k3SzJezvTo1TzVzNcvkP/VdkmPh2ay7Pwp/oNMN89+vby8/OSTT958883z8/OXL15oIDSlJBT84J5OXgH8cu5erbdBUkqB6HA4aP7hbrdriKT5sMTG8r36eAhBMnZd98UvfvHm5ua9996zYYkIMhdeEkVJuRIIwHFWbfh0OUEjRK5ZcDP4q13tU/3hFgLwM8X57tDIwAYzfd+fnZ1pmXqG3MAsIoCqEUVmHseSVOWz0O1d3okGlffVJgyhU5UaETUzU/MtmTmEsNvtAvWbzabrurOzsxDXBvA4jrvdbrfb6UEU+q+dO7darXQ0rOVtJsSKNHAOF65J5nyMJAxXlrhoWPWp/n4H6bs10Kx3g1GUbdxWtGZ1KHa5VSgH3ENtXKnjM7NGvA02mac8mLS0bExwPbpNizMaNmIwSV6TBtFwmHMOc/XbXioyUbI4xPoaeKjJqzg5hWdJ
MVRTWsT5FKDKXk0+R1cMUrwSteJRAVbe15xhQ4vtgxRn6o2IdF13enoqcqV+BBa2cymGYUhcnNGaXqlz9bBB3XRijKenp/qsXxeYM7vXAEUkafOeUJZsHMeO+hgDM282m/V6/dydaxC7CHHqe2KUkITVt6JWmC4ZaL/TdHAWeCFdFPJWH4AgChIgUgjlXRVFhUZi6MT5C7hWz0a/3bqJlW3Y9n5LNKJjySFYPXzLLWrObzOFbHkhImFRaew2U8XA9fBUpHRdl0c78HQW92iEl7Glb1hkEKo/wOtttjzaghlmgh4bmWjAi6ZwLOS1qCLi9giZawn2FrtKIWxSNlaHfSSicT91BfRYyjkTTq/zH2C+K+jKWo5HnWYuef+h03QcEMkoIikDCGj9LkIGhNDHVZAkLMPVzZ/8P//89L++J2/jf/jL//jORz95efH06UcffHv19RWe/OBnfztc7X/z2795Peye33z65psP+s1v/fTHF1/92pcvnjx950fvP754H8bAPa/OtofDgEwIBCKEKEEYOWNOYyLtpxwQAQkEixcTEUFNeCoJ7sJqNRIUkxbUDmHMkxoqkgG6EEKm4v40d4DxgsewMUyDc65dZMTtx0tHwDRmHQpc72a9y5O6yTUbU98V46wG0j5kzcZzXzYeDZ0dz6uwjDhtjp4yG841EkKncHi6ctKsHcQ+yzGFqRnttmspUvwI/sUN8EcnuHydOK3LU8LRWYAjDxEBmNbCP+Jf4RdL5lsLVF3qNpFoSok5KaTWzvkJGngp5WY0T8z+3+x6JnuUQtXzvCSxCXqhJ85hZ9NH1/NNN/oyJpCwNmkEZhhqRYfGJRDKsaIYp/faRBq+w9ttpCWJwnw7EKnFig4/ywF/4SVzAe5R3SDTpG4D3i8zPril+WWYpVl3OMYmDVV7HkEsXTixanv2/dXV1QcffPC1r33t9ddff/fnP8fq0QCapc42cDazgLmDxtQRqQZSM4iqzrbido+469UIAYd/GwerAnB+fn737t1PPvnk2bNnm82GEcZxxNiFEDQDVBOIDZ5f+CKPc78KFdZJOhWnFdp8a2gltH4KG/YVswMnATy9eRZGRA3K2Z3MpYsJlmMkUJuLqlpPhF3XjTw7/9aoxcJTNhQiakQihJBSSenXtDJjMd2JYuhXq5XaexRKYpTmhd7c3FxfX+sHVSlXq9Vms9HjKFarFQGKyHgYBIGINNG0bHAA4npxF7DZuepcei1CsSERIFSnBggwJ49boxZwVOfRbh4xH0vwvM/Og5lSYtYDDJNIeUStXGYWCfZ2e1AFPzvHrrGPD8naG+tQxfyzbUKjZAYbuZzzhlahyP8jXazZRVaa7yvYIMI431YaBtQ79U/dg+YaCxNZnAZESE96yNmOiVIBXlRWiiXmwRlSShEJEU9OTnLmm5ubnHPoYkrDbrdDCCklk1dYXTQNB5kM1xMsQKYSM88Cfq3FWVbWSdjPMQYc0tj3/Xa7ZRHIzAjMHGi1H3Zqa5kfgZmlWJVIRLrEKvoIUGBUQ6BR59IAVhNUsgv7EEKA7DOVMOfMCChYg9g1CbQKmNKC0hwbSnwpJ3Ld9jQ8iIja+6uRRLbAS8GBztUhtTuC0QEAaMrcjH9EhGYbIVb1xb/Ck6NJW6O5pV5uPJbnPQ88/Etd3AOwZHL/jeOoGYfY43TLmdEoimctiNKmL7NuSP5mqWaDUaFULTbnDDTLwatgz3JRTC8pLhkqDXLUEQIAPCabr6hPtAiCLnQxI2POAREFOQNwimn97//ff/nVf/Arb6QHz4YnCXZ3H5689+LjP/7Bv/lffe2P9sN1D+sPnn3459/5j7Cl//Uf/ld//eb7/+pf/atv/eY3ui5852++H3H9fP8s73AbT4kjCBKLkABihjFBJts6WTBEAQ7YiUiImGs77H69kiTMHLoADDFGyqTV59RFjarGvsO6BEQEtUMDMZlR52nGC0cjYx/Bg/kOYbtOI2FdOooJ9FlEyAbxK96svmeHxpul7GUweODZdRIypsjzEsQlDS/pzYPnWaNhLhEBd7C4/WpRKUORPc7u/CUDqYHEpnN0jkdBali1Gcf40cNp/5p8O4qlZhAASKmcGynz8s5Q6/Q8BqxTVwOJ5XUsL5l3tbVkJC/H/JJ51QHm1GUItL3EbEIvORvHR8MaPI98Qt1Bc71wfmSLR53ML/OnqLNfwYuxt5eiU8Q11bZZL1jQLVQSyvVc2cYYzgt6+GUuWRg2/l0GJB2L9C4Zx77/ewGwfEoWe5DBgzVN65cc/yjA5RUAADCO4/vvv09En/nMZ4ydYc6GDUhHh7VFMSAbMYLOGmRmYQ61MYkpu3CMsGf4OZbFwFM5SVED7t+/f3Z29oMf/ODy8pK6qG3/QogAQHHqWuzn8wpz/ui6mCjWmhG/TDpY+Wz0fKyZk8dVQ4r+Xc6NNbEJ15M8QggnJyd93+ecrcDSlqM8C3kcWaTEzWIMnEAYEAgEEQi0kkRQvyQMCMScAQEQOJsdIsMwjOO4Wq0QQtWXgshA2vUxw7Nnz66urpB6lScAMAzDzc2NponqI13X6VkUelh5SikNo1Sprg32vCJH850LoIReTdobbdtCGEN5pc4TMyJq5Z6J6Eq9AaD2k6zBN728Na4v0qTZcRztLhFUpcvyI3LOiEEkGzwA0HXR8AM1Q8T8+N6Z6E21XIutTERbjwwFBkxu89SQHBdasZHfJOKCwS8ApNDaN1JsDLFtxWwKnnvbzfixxar4B08PNhG1/BU2dTpwjRLrOMKccx5h7Lqoptdqtdrtdro3XV9fr/oNzAW4Fz62W4V6nqE+GGlmbhj8zRLbThe6Xu0uROxqEm/O0nW03W7v3Xut694ZDjsAGDmvmA+H3UxPq+Sk61UCvyFoiytm9SApDQni5PlV0zGETn0rUvuzpMPgdH4YtUwRSrvm0v8GhEAQAgBEct01se7lJJPS0HVd3/d2uIdXXDyavAm0JCYNy7rApVT+PbIwTZq+/pRzJleBKtUFIlwfr+00qRZAzrMl0YhJ7zfdRelPw/fs/PF2v9+i7BXaJMM2GGOJLsbk2h/bezVl1P6cPtRo72SQCzdMa100RKNnc9wq/Lcp+h4DGkyyZaUpkbr0kgohjOmAAFhfCgLMIoRjHrAmG+QsDJFzYpZ7m9c++uEHmdLqXv/g81+45nSQPBxe/n/+9F9fX9y8ef7Gve3p3/zNX/3gJz+4++juDz/3vd/6ld998fSTP7n8t1/95hcHPPzHP/vJNsbN+bnsIeRArEXhjMKMDDggdgGF1S3EWTCwJIQAQlo0qj62IEFCoA7hUExZYUaUiCSBAHI5T8baCCEwc4YcJXoj2fjcarRkrnV5L5c4z5DxgpeesLBwmBlxUtT8ULaazYBYFU37yZZY7+fcqiDBtaiFeWctfxs6xaLhOJx79z08lZYyLEQkTvdKA6fncT/lVxPt0Q/L78lFqOwnv3bN9P0NDUhWc2jM2+yRzSM+i7iZtXc/2Q1ELUpffdmrvRvVT6dBS0DLuUdmZqntYbDsOhQxEAFKiQXRrEmYn7tXvv2
LPOXYU8ovTiWdGfx+OfSyDcVs0ZoPVkKL9quHoVms5k8Pofi0sblnGhxX/sKFsAX1yG+QwPMw6StW065mLv8JNxgwDdc0Aqfh3KNvmXE6iBRRI82SPX78+HA4fP7zn9dWAuVd7kXLYZdzoXnmBSwO4fCfiQjq0cFObZgSp/2s/V+ymJqXgaozrbrw6NGjk5OTn7/37s3NTezX1g8zpRQ7c2wxzmA7vi6NX6ZZjmputchHnA172xot19Rj2OOzLtkMmVg1mdPT0xhjThLLCYFOw67d7dV0EZm1ljBJaPvdUOtIpe5Q9quOMwzpcDiomqH7EdWz2ono+vr66uqKAgB2fd9P9l6tNjw9PdWbtV1HSkn7yuQxmQCJMaIUVdC4z2fQApTuaCZ+TU3yKbv6U9n9UdpZY+lg6VTcADDtj3BMJoBrRC+lmnrUCKGVVvrkPK5XQ2CmB3pGdgGVCf+2I4ATSgaMHiBBtckc1xRNa6TX0JXnKXuL1A4aRu168mFjb5dGtcY+9TLZi/N2CR5gLU1UVKhfQGG2rrBKYNa07DCWgkPORW3WKr7VanV+fn5zc6MtHvf7PWG0kjcFrAaZpo2s0nmwbqtmyhq0Jkn8nmLuVBGBzIlUMSgcxCLM3MWwPTsNIVzf7GNApWeB7MfHjKKNeupCiwgy53FU+wQpE1FHdt7J5A7oAGIkW2W9rOUKUWQE7VCnRnfOWbDGlrKupnakmddKgasG0ai0G3QWkfdo8qJn/iswixE8TApcq5MVpppT+ZxWxH6q5EgpFWtK67PtshnBYkOyl/qbbb/RuTsOnM3Ryz5/mxciVBsx+/2g4TobkNxJRMqr1j1Zx1MYAEDJhnCmJBm0fgnsVxFhzuZxMYBhUuAK8CodPPr0cKcMAgSCcMipC+WwzoAUAgGFlNLwIr356HMXF0//9T//N89uPv367/xK14WRh0ePXvuf/r//8re//q3/zX/1X58+vvvsLy7O7p/vXu42gI/u381pd3qn//Xf/trF9fMXnx4uP355RveJMUJkYRAEYiYgxhACqMTRaloWVQ2AATph5t1ux0kAKFCXZFShEIQCEQTInIUzIvTrFXUxEybhaGlRUjYGr20sScJLeXHFbw3xNxxBc184VgsBYDofRZ8WERbzXk9C2TOjwgXOkVkcH5VhPBNVkphuu81fM2O9Y5l4yw0GEZdakThlq3nRURTR7bELm+YSVKl+GVxk65l4gblyVkV8G79tZuqFO86vBnKZizt0m7FHkZeTNiZR6YLuh/LCYXkRBdMVsjtc3kKFDb9bBNIDj7XJDVb/MdeaWL+TLU1f2+rQiVwPtv3UcI1JFQ8eOPaZFLiq26lDKqVZ8rzMzbnlotyGOlvxo3Ton23WtLkaMvNf+ll7PMPt5pwnP6Pho3d6mXMbSA1iDavNCF6qNPAfAQwELH7lxo8xPnv27Pr6+u23395ut5eXlwWM+eOGkOYVzdvtX1uj5UzL9zXAxa4tbUPb80EQsVTkNcskmgaFpOrPer1+++23hzS+//77ZeE0xQaRZ1u2nqdd33X7cR0NLaGTh6ZiNcixf232zbrYDU0GQfOWJQJx3ptEuez09BQhqAuVaiSAayyOKJgNY1As8ewpyh5XNVTtN2bWxoyHw0EPFey79TDuu64DoL7vAUDpp+/X3arY4WryaYmaJpraNqFpwyq1CDCEoHmk6/W66zoU0DKzCdQqZwCmc1PB7REe+d6HZfQ8IVmIcSI8XWrPxVVeUS1XVPmmcR7RWI2+tzZNzaZy1IjFRDl15Kmb4ziWbvzmgeV6rNTSAyUianZm11FMP/vOKF6B9J3VwamI5M4P98SWczLBVe+c6lxkqinTF6Wcp7Mo7BUe7X7iFd6St+J5J9SuOZYqqN9o596Ukjbkl1SS3UKI9+/fv7y8vHh5ZXWAzAyGbZZKvWgMgjX1xiVFztKAbRaNI9ikEwmwiK6gTiHpwdcYYiAtnR3HESmKyDAMsSuDe21TWcA2fY+uDCMRJd3dWKQ22e77fnfYHw4H9fJQPVuhD2q7EcYiJdQaDCGgpkIUNkHU1kTighIKgTcbsAZPuVT/T1njy+VsBFZjW8tszwhN9z8R0SOJCQs/m0Sz+JWJaUVZiaclE6VTayMqBanTWnrM4rwGr7Rfr8XcZj4Zk3AN6RrDhBAOh4OJSHBeqJQS17bR9t6i/c91wcI/MG1vhWlJjCBqbaFSGxERyEQllmzQYrJim5k5ZbQydUAomyXk4nGYBH0ufcD0IGlhEUaQ2naFiIBAWFJKmKXTZqSCwLyS2B3i9SA//NMffPYzj06/9PD6kN/95PH9zz34u+fv/vCjd37/9/7o9c88ev3Ovbfuvf3s4sOb50//9/+7/+13fvqDP/+bv/nt3/3Wn/7rv+Kd4FVCFCTpRHswESNA0A5EBEHlcNmfy4r0QUT21zf7/V46AcBxTJtyZBzFfhOCDPkwprHDjkIQrGlIVILDEWIHnWZWGMl5e48XpYOy8IwcRb4hzbNSpSjfXszU4tn4xj7gdi+jQK+jBwrstorp+xCYp/zAhkgagG/73iZunGufG9VzYgVnCTfaAzjv5kRUdYSjMOAxXdwP6NnZZmqvxnpYrTE1zNvViBPBMFcU/KLDojepTcHG90gz5Hg1sUjh+am+nsyOLpCfuOHE/7n4MFGsXxWpShsAWKqShXqWCutSsNhoMpfD+o12RQNn31o2h+HcG8xeuVcsqdrXdStxeW4Oql+gATeXKYh+FhV7s29+If5lYU01N2BVjJb2mF2GMc/mt3GlH7l5aUO6zffLMZd3gleXbzc4m88hBO3z8dprr61WK1XowS2Ef8qvjv8SnMPIHle8DcNgtCQuLIm1QgxrJ3DtMNmwqhGwg6olEmbuui5QEA5EtN1uHz169PLly6dPn65WqyyIiBkkVtXCUF3e8sqV8mqxn69p4TqXBfMakAXUo5k+r6YTf1tFXQFJnB6lYROperaIKUWzM2DAdSgRmYobPXkYSMY+tmTq2VEDbxhSzjkgxhh3+7zdbkPQo4PgcDicnJycnp6enp8p7w/Dfr+/UTHSdfHm5krNy5xRjxAvr6BAFPo+rlZd30ci/TUjrhDFGgiTaEMgyDwr63Vk1TYJKy4kmjCpseG5a2nmahQRlf/oDJhQj3HXQLT1W1Ys7ff7RrLRwkFmL1Lk223iejXp+XhG4aap6okIRrQ1LFnyKnUcC7Wh26o8WYLb7wwqmDO7PcXFP5sNJ2E66wi8TmL/ajTYSCi7enhyyXpcE0yUcxWramObrPCDo6aGVfjv3LmzXq/TswuLsjIzxmrIVYeX/plr+xZ9b19PVzLzzxOM3uCoYoYQ0yGLCkcsQpt1H7v+9PR0c3rSdd16tdHDFTnvDc/sHMd6DqctmV1SSyEAgJHQrcLhcDgcRk5ZQ+7KVqlfmx1ejBeKMcZxHFX3DyFosmiRErnWARs1i0ytmUK9pB4cqc1XzHY3OrZv/NqLUwK49iLXPqcll11xwUgYIcA4jpmTHkHoJVRRguuU/MIY6jUmHuqp8TFGiz7bFDRvoV
nFo9LBltO0PXtKxZMpEESkS1Z4AKYBram6zEMi9lIiSoPaBhNpqvSYiy0w7g2ASdgBUyim6zorUTBgEHG1WmkaBhGRFdTONZi57VFIGxFDICBk5gzl2FxE7GMHJAQgGQIE7PDq5UW3wvv92cuPLv/4//Y/f+sfffN3/ui3nt18/GT38ny7+ed/9sfw+/C1L3xFEj9+8snuow9+85vfDJv+/MHZzXjz/R/97B/957/7P/+Pf3p5uRPq1/16d7XrKIQYr25265M+jZBFUEAThhlZGIU5xrjb75jzMCQeeXO63csOGCTKarUiVo9LphgC8HijEkGypG7VD7shqlBGMGdbqidVQu234RV9L5eV97w8lakF8CzCrJxS1QvTabwKXtJp0lh6sukjlqDiIifZRCHVpHxE1LNGDBgDcr/fqzhWyauO2/1+32zn9qwRZyO+2fXOmeTDvGW2Nx5wriB6CasfVOyiO97AQ+LZ0HOKAexrIbg61bIrqJOqoOht6mnWmw0/RvAmGUxoWlYJzyNj/jasqq06UMK8TEJcbZ5fFx0npRFrl3CuHbGptjVqUGFT05vtFejOCrIyRYVcXUh53upTwfB3KlpUCzdBt9zePAYaxYWds8/QazLZzk2yXdNn+8Pc5eHFoz5CzvldpzDzoToAYH7b5EQ3vcc/kjhpXozRZ3DVns2UwVm2DVY9fZrd62smm3X08t/f43mwYRz7qeEOA9KYyBR6nie0y6K7r43v5YD/F4SxoHQmKEIIl5eXP/7xj//JP/kn9+7de/78eRFxcz8CHAsd26L4ifincj1oy0/QbjM9wRpj2Cz8+PUns+ImZ5xUpXMYhrhad113c3Nzfn7++c9//r33fv7kyRMg4jHrRqmJXuwMuQlmgOw85jYjqk2elvRj7MDMMYYY4+5mr39idRDrf/TzsqlMQ41eBPkvvQBUrxBU6a23aXSCmU9PT9VU22632rEyl/KChIixI+HJgZ4zW4AFKquGWpKgxrka6qrCKmb6vteSuXHMq80aEQnj9fX1er1OKe33+zt37rz22mvr9Vp7kKZhHA8DAa66HgBAICB1IXYhAsswDspZXdf16369Xq9WKxV3hR8BOWWe93nWWZfSmCpI+76XWvFlvJzr1XUdq8INQZw8Vwmfy3nxUyl+cAXnXmpp4uJ+v1ecgMsIxWoybTab/X4/juN6vSYqh37jPINDAdbYqWXP6qtVToZ5SxvbINiZc57FDGB7NtcG9akeFJyLyTELqE4MLlpMJH6cEAJzyHlk14tE1RglEovvKWZUOPvtwzhUixJzzlpA6DN7/eys11TiPI6jNqEdhmHTr/q+OxwOInBxcfHw4cNnFy/feefdEEJpOjqms7MzRIQSy5WcC4+EWtZIBHqkQkpjF+M4jtbSlupRGbEeX6eqmulpoUTkYByzMFvXH1W9Ts/O3njjs59++ikJwMjr9Vq49/tp+VdkFXt2jbj1yPoBUuyiYjXGSISMoifhpMSa5k1EgvXUihhtFYq2zyCyF4QYi/0VQx/cUQuzQg5wO42SBblOM54TvOLlJSDPo9LGS27DmKIH+oEw6sl7UJTpWAonF61gYSFwVe4bDP4KIRD1pnA06pERrheszetsdkflsubQVyUJTdfBCbCps6WN4JHj0WVjY3Vgm1xrFAt25r7XS4go19pFvyimyOI8XMPFkkR0Cg2UnTWXoUQQgAgJAjOrVcamggEiEtMgOeMBO6ZtXo1P0wffefzv5c9/4w+/scOb959/vJL43/2//se78G//4Jv/2esnD37jG9/eHy5f3Fys1vF/+Qe/N4zpZ/LhH/yjf/DXf/K9Z+9dXg5ydn52c3MNQ7535+7VzSWD9igDFmYegTEQha7TSH/XdTdX18MhBQjI2MU1CQAyQABgcGdgJkmCGs23HtkYCDEjOPXCo8ghpFXIyHkQqFaEN/fo5e60VM8p2QDn2cV+fT0BNPlC6DLT7F0NffoP/t+/72WU419ndLtgvdmrvYZkDOV/dduAeCRApVtwBpXtl36mhrQmcwEql5ltD1V2NSM0KHKSahZd0fE9x/m32xSoFmn4GXmKMsmJziYhVwNs614HB3vQz7pZYvuwlGxm7zkZO7MSDT/i4rcm/arnYubNbZgCrK9VTVvQX/VgYgXZrQ5ILQyrT5dyJkTweh5UUs85W2KVfW/0Yhj22IPbObfxX3gjank1Q73izl/m8vDbesHCvDm6vn7ido/niIaw/eMNedvnJTy3XTlnZH727BkRPXjw4Cc/+UkBnmaVZg1IDTw4N0T/XteS3pob0MWuoTYrt4WLMQqzSB6GIRBuNptHjx6dn59/+umn4ziGbkV2wBcAqA9aP7qWBrrrLYm/0VUaOL1Ys88iFsyYB17mKesNzYND3VKINejy4pfqOdKIhBjsGC/1VxrHMdcwF3XkTuJejmzYZuemUcbXBp6qy47jmLqiRnd9rw+ebk/u3797enoKABp2NmPSBBHVZFF9qaYzdF23Xa272IW5e0hnBwsfHDMDUk3yJC4nYAMAMat2owIBiVS4CRGBkMBMbuScmcHMSL8EjSPDq3am0ItzoRJ2utI89y2GEBChJpHOMpVg0UdQXDNJe6na7Y1BaOtlA+oHE7+H3cFG9uai9wd5Giibl+q5OFMDRNDP1OvPRs+2jdpQ9kgu7YhG2z40pq3tZGAuz9FUIMJcSgQl50w9IEJKabfbb7fb8/Nz9TgMabxzvt5sNtf7A5Q2irka6ZMDTsqJ61Yol7Vyyk9B/0z1JEnzlXDJYgga1610RRCIEJCKd3Kz2RDR5eUlAAwpBcjoXMahustNV+FaZlyWHkZ0YVgQEhL0RU9CzMLAREBO9yAiwiAq5Whin+JHQFCYoyG6EeVKNBaYNkSYwiHVqypOE7I1s5ULs9LkKYpSbmPtsilz0pltnJMyNxeI5V0ye9wkglTfrTj9rNE2vBpkgQuZe/6Cq5yGhZz1Xxr8JulNCasun8kMtnGqnjepBYiTV9vJpYksDDCcd4dHmNQ4W0QPqmEL6obq4ddLnYh162MUC9UyoOEcABAQhJA6yQeBBDSGNa/h+vDsJ88/ffbp9u72K7/zqw8frMb9Hrruk4+fffri+e9/4w+ffvT9Jy+fv/Glz74Yrt9av/XFL3z+Bz/4u/uvn3/ua2+N488e/+xJQOFe0jBucDPsR1ptkFCwyheQEGKH3TAMALTq1i+fv9zfHO7S6Y6xC52kPOvuDZJBBCCXruUjAGOIhiV0nkK3UrNd0EjalriJF4V6WJNp7UuJLDDL//SpUyoMcb6f2eNcYlnRltLgMfr3FOI+t6aL3Xn0Wv7qOdqG9bKiIc4GXYZkcsElmLOw39cN7Uu2sjHNUASYCcrgjki1R5TZTQTJPAe4maZ/pIHzKBortGKUYL4zA2/SS6qWo+UlUm1Cc429Gpl+QFuF5drJPNVT5vuxv81mbRFs+8bDDHND1MBYAmA2rQo93RrUjdqsuMw1J09jWHuxqjvSHLFcI59+FYxeEFsdvXmd/1Mz/3ButFT8HLFScJ4iuxzztstu8
E8tR7Y/b/vcvMgzSDO+H7CREh515PyzR8TCLdNSUfPBBx8AwJe+9KU//dM/XcIjToU4Cvarv3n1dRuPeJIGI1dCqNU1/icRyWPiLCcnJ2+88cZqtXr33XfHcYz9GqhkzZTRCFFqys9sF55S+5qJ3wZ2s1geJLdqk4PA79rguPK21/ml96LM3x9jLBV32fdxAH2jxlu8TRJCAFFRRvY/kVJwpe4bEWGGnCUlRgxKWcyJiNQm1GDgKhbbck1rQA4h3Ltz9/79uyKy2+32+70eNmjGp6FXqqKi53prrG/dV/Mg5QRTrkSJ4c8zHZg50IwFvCPVMwvVGK/I5O7Ui7nUkNoCYTELiVnUkjTzzy4R0ZLIhiliKG07uriSmQYIXj3z4JkjzyMHqq5iQSRwWXg2WawWYDOgaTsy34Jhkm+NmD1G3qIVc2KbLFHUihjDswHfMK8B76FlZiQBZERAkBCIAiCJ/gsggIwkhIAozJmZUaLuF6iQjAlAhmF4+fJyvV6P43h9fX19fT3mdHZ6R6odlVLSYzaZRZtYWsRS6U0je+ACYEc35cpHxd4OU7MrMVrS/Db9O+d8dn7erfqXH7xcr9fMLPmgIUfDs/45VNdAILO/ChURutq/qMtKkibvgJkbRHTY74zgcznZCUhKXjdCIKIMIoVsMBqhzAQQiCJF+4vKpCdl4zdENCQaKI16Z4RImrrHo9nTOWfOaqmWpwrR+LNi6iCKJu8Wnuh4XqxoKuA4jugaRnlFyr5UGaSPeLYxSeTVNU/QImI5xFxzPyqhTJ4GP4ucs9pT7fZQHJlgDIkuw6E5ZhormzYsap+n2yo8njk9NioOp/kWBIqIHdGCgCIoGREDCiJwkZk6WhAk6Adg5JRxpIBxg7jb72+epn/3P/1ZEv76b351uNndxKFfdT97/N4LeH7v9PQHf/anLw5Xn/niW99//7vv//y9r371V5+/uH50eC1Benb9/NPnn9xZ3wXEl1eXSJ0AZNH1ZQCIWBx4zCyZV7H/9JLHm0NHr0u6xCSsDVo0AgOifidEiDGutpurOAAQZlRpkjj11eLSJDpbO49DWOjWnh685DVD0VsFdZFMThSmNXYzg4Gdl86/2obyRAhVS2te5IS7ePb8+17ifJPN3oAYmqxIe2SJOrsaKrUP9iL/rCGwoU9r0QZzprbL07m4wjlYWNrNTO29fhyTGNnl7nsBpUX2SxjAtUQG16tmpnvPJGRr6VWqgGZSWH09ONcs9ZG+5qgUh/Tcc6REptB7KpV5TnvjzvBLtlxN+zO4Bs6Gxpzb2K+BB3ObROb2ScOM6sf1IPlV8CzZvK5ZXz+4fdMQ6uyRyrZetPIrg4pLFOHitBj73m86JgpsFv7VBhguTCOb1JL1/OBwjAfh2OIevYhIG41+4QtfUBdYjDHNx1yC18C5JJsG4OVTR79vVtC+D4EAijHoR0gpIUCMFGMchwMAnJ+f7/f7Dz/8MM99OuDcrOUtbnf2b9Q1MifsK1CnAj9CZzLkVoZa0KdH71HJeRv2bC2knt9ARMiTUWQyRO2xvu9jDFp1pg6dMQ0C64Z+ygZQfevmuzFZV5RjignT4XA4dP1q1WltW7+Kfd+fnp6G0F1evri8vLQiN8O5vUVLoTRBVBV0Zo7o+juIcNVGp+x0mdF8g2dZlAj5STEzwCwGqBKMuQ0YMAuAunTJp2tqhp6my9buJsGEMBEhaGJh0FxQxGn7AADNxvROLkvnw2qfYLXx1PzTbEYzKmCxIZo3tvnANT5MriuBMUKMU8DAC9iGGhGD9rzAyUuulDlzvIrT0sEl0HrVtCxK1T1rBmYw/b8BgJnHcRzTobTr5DSO47g/7HY3h8NhGEYAuL6+fvr8xeFwoBhSStfX1916Y8BUCEW0R0m1HaydzDiOwTVpM5phV3NuwOtotcIZAhJwANDWGIEIEpOInJ2dnWzP9sMh9l1KScYEUJJ0ZOGtQ0QM5L+MnQAQAYqUHnWFzYk6ikYVtqCBqjQWTJy1cImINPZOKBDKIzkLYpo8uOSCG0hT8RK4ncZEDNc4oQm7RlwapzEDIhCVoNbkEuAp5cA4UNFOc8Fnoxl9G8BcY1lGu7ao4NSaxr43B4lRmNcyTb7bZXNZit1K0+4GbOWRVPNS6sEY4lVtt3GD1j+4CuBqmjl1vJ5LwS7sYA6Yxo+ocssRdFllJ80nXVx/GlOCypOqjSpeAqKI27+BgFAEDpIxAHWUM4MkgrjGDQOOz4fv/psffubeZ9946/Xd/ooCfPLio//2n/+3/4c//G9+/du/+X//H/4f2++e3f/M648evPF73/6DT+TZ//H/9H9+je78Sv7i9/7ixxcvX5zEO3KQVVwPSDmPDIwEXSDCyJmHnHOSGkKHm6tdxEiM6SDSTUEzAcnAzByABKGvuQdENPIoYi65WVENzTPuTBbo5YPMRnKW6WGLYvdP40yOwOyEUe34EmYlpkYnXsTD4hIRcjLX30ak6b1ylJiPDnX0zyUX2Nf+fiO5xuJycmBW1QBOks5Hnu4xxvdo8ZwFTnB5meMB9inTBokX6M1Qfnx0uGXXWcrgJ6KcwfZXz9rNjOzKrqeOwZlz1jocmDtTmw8eJOfTOVKatVy+JSR6+VRbE5vsCgL967CmmDaDYD3nSmp9jjliaX5wuV9Bz2gmezUOIPOtoVkacDyiBfENMN5r2TLjcfKfXUteaNjwFfj8+17/CUPdxqqvGF/mBu1Rkih33o4gInr69OmTJ08ePHiw3W4vLi76vr/tdr+s/sujsvEXwr+8GuFgM1K/oZJIc3/OOca+67o0DiGE8/Pzq6uri4sLKi3oGIhiCJknfi/EhlMz8F8IcHNxPbHdO2gqW03jTXxdDZ7s6qL/k/GGVX3SIiv9SScbQvS1Xvv9fr1edx1pc4dcO/ABzlBhqPa9JAwerHqLatWZU875cDh0XdByJoCoVXxXV1f7/aB5mMbRWl2mxlKMUVP+9ESKScCmRQofIgDoERdqtknmhMmQZjwrVXE1+cC1xt7vDuD4hXkSWSZPzEknIkRBu+iLaMIta+am+sSIQowlrKLmTRqLiCu6+8h+Z8HJ2XekaskLT8Wbme4NuxnZEJG92peKkctk8Rq47ZXeYSdO/5fF9o2IgOy/Z2arhXGYnFHRUYrFejSa0o9GhtUSya6KVQlPjeHLq5uc8zAM47C/vr5+8ez5ixcX4zj2/Uq3jOvdgZlX3VonFRxWqzZYkKCv0OpBseIImLrKG04a9QDc/qWtTgBQ+8YzAzNKTQtKkrfb7Z07d4hIXSRBhU89A09KoXu2+MSEZEIAGMZclXX0ahgR5cAddUq01exCPb5FGBhKam4IXYzx5cuXCAFjUHEUY6+sEXGuPfDCe2Q8H7SxpFOPvAJhokecEsPMasiKS2IuJOJ2JuNGIlINwYYy/mTmXFP7lO3LaNbPYJ4gF2NkbuMMykKWP63fmC3kJalRMFG7sZkoMR0FcZqIsDRv1D+7rtMlMT4vVFVuUIYUEQFsfcnoLlD+mxseVNP97eaJmFy6QjXugGtvD7UZ0HsmtEWqbks5
o1hSu+IBFOcYSAiFKQPGjoAQSPjAAJkyrXGdL+TlzdWf/fd//o/+y99/9Lk7GW8O8vK9i3f+L//sn/3Tf/pP/+Ef/NGzy+ff/t3f2tLZT/Y/++u/++7QD2k7fOE3vnCTDn/7794JadisT9OBBEAQEnOAkrjOzDwwAARATgIIFxcv05CChMR2OB4jRnEiLPujiqrYJSDIk11nMsKz+nIVGlIx+1BcfMwzarmZJmeKEyh22+TCMMFnxGwU1cDWgLQUuH6QpWLx6svAMO4m13nP39O8zkCyt8srI4f+G3vEtszbprNciBmbuO9tCt7n5WsUxdnM5LJrjkohv75LYMQZjbbWPPeMhnnHLH1FduU69q/dYNNZUqa/GRHtvCk7K8gMvCWW0Nxq9TLAljAY1xwFyUdQsbrYmFkJu0Em1wJ9T5NSI7GwIHIiskoPg6pK0cnH7JfDE+GMQuYV2jBjouMuvyXdLlfh6OUJTJxi56eWXXmwB9sWpXnqFfzbIO0VZHMUmF/murq6eu+9977+ta89ePDg2bNnzWgNuTZ/Lufy972aSTU/zVYcAWCGjRjj6Bq8nZ6evv766y9fvry8ubZscyyvYM1cqm/RsY68Dn45TBrNe9mI82QBG9bKfvzjNM+4ax6BI0ierTgiqm7thiq/qrUwDlkL+dSmUmNP9LQDBji2lGHRZMtmClVJ63K/298cDoftdhtj1LYL+7i/ubqG2orGImlcU900MHh+fn5+fn5yckI1F13FmsxLQ8v0554aEQEWqYp+sxYNeg3gOsEZz0pNPHbNzGrmXt2hFHJtPWJzMTNMMW9mGELx/ZnagPWIAq9I61uafUecm0y73IOr/eHaSQudNWjvhVpnBNWmKgDwtEF4m81/Xu7dXqQo0nxMqFILlSCuy5XzZgXVFhv+XYoczXDWgjWah3PU/NNGO8MwaPXpzc3N7ubq6urqyZMnz58/E5EYu9PT06urq8vrnWUzaRhQ3GWSQ80zqrFu/TWEIPNMZlsO73M0KmLmEHqojFw+UIRAiAhCArlb9ffv3z85Obm5uWFmClEERTSbEUWARd0QyCxZBEDQ732iTRBFpGSHiQjnko7UhVJhCFL7BehpxPUcBK3C0BNogGKd7OTH0QaGyTO2WmV22oTpUoYXYxifrwzH9iqTp0YQegXqEk+ODU8umsboSWT6qZIpz1vZLgixPGWOkDzvANZIMeMH45k5f85EjwFjTGjvrwPOks30FYU/FwSE1WPtZwGISotKQx6H4ERh82cIQZ1Zhs+i2s6zv0w+0nQUnhiLMnPoIgCgygkRBCBABETRwoG6aREChkyMXc8CSICQBSWPiTiGMdxbvZYoffCdx38e//wf/ONff+NL59s37v7O73374XvnH37y0e996w8ywOPdh3/1yd/8yV/9yY8/+kne4Lgauw28/Y3PX16ML362w5uOASAQQCRg3XxHyVQnFEIY0kgETz/59PryKp530skBRqhmLQIKsxAikBmEZeFAYpkNRnem9mQY37IBN6aF3xuM/JrVrxvLLFJXSa42jEGGudXqSXoaxPGLE9YzM9JuMJjtpUsT69VXw90N7TV3Qt1pvKLv4fHcNy2EqyXzWPXTxPl+75nF+MjWDqe9fNrYvJ1jm+VyLkdxaE81c7GfGqYzmGG+rfpZowtULgfx99s5hA14JrKaMdOYGhzaBnZ07TxRmXwI9ZBDWsR7LbWmwYapGiq4zK7zdAhOfnpJ6HVlO1TARLFeOc/69LBzafvFshsMYI+0ZiEM87dhBgCkAmOr2XD3bddRFm640v/qv4RZZdHxG45efu63vauRJIshXjWjcRw/+OCD3/j2t998880f/ehHqmo0FAtztB8lbH/zq2d0G5M2jonZDBB9nNMIQx/RcwLu3bt39+7d73//+zc3N4gIAkQEdd8PMbhUZzUsxPDm2V+cjXf0src7cW1r4RFSSdE9ZfNtxIh9b+t4lCDtmxCCNucAgBCiCZNxHEufCOpYE/DGkSiaxQKgiJn179VZqAKtf5rCjXOdJ7gODmrDENEw7i9fvFytVnfv3jW13hogd113enp6fn5+586d09PT9XptnqYiYVKWRWsrxqmbKNd6NoXDNgice2PF7fW2J4oIwCRX1Z5RyqldiFtxsd/vLVqlOIFqgRvmLd0UEUMAqUfDS+1gbLAZHoom7Dq62SrLMQXYxLu+1wwbN5fJsWVaNFQ9kF1Ep+6YM6ZeUrjHvzB4gd9ISPvTeFBkZkBmd+jXZrORWshnjbihNtNOKe12O21ENAzDOI77wygiWoaqw+px9jGmzWZzfX39/PkLEfROH3GdbwBIQ1a2HP6IzhCCBqQNOTY70yJgLoVsqzXyM0nIzICCiHfu3b13797V1VUIYcwJADT6N0VBERLXgxPdlUW6Gi7KWZiZquNAiWekGFHdFiXMm9LIWaD2QgcAxKGwM4ZKacXiqJWFVTtxVBtUKGTXY4NdzrH92RhXXlZC0Z5nGxvWnV7LbRfbBuU8puoHwWrUKc+knJU4PDlOTWnqd1bBQgTG8Aa5tr8Ht0PkepncMUFmqpjNzojDbEIjFJ1CxIA424ChKr567i3iRH8AwIUT9JGigpi0QlBfxTSgobFhzhCCFgDaDfqirBkFRAiAXJwNJDDs9lzjtyEQImY9lDgGEhAWFtGQIhBGgiyAuvlr8BCRERCQKOY8ch4RMqAgYGQmiTGG3fXu/un9j3720Z//u+F3z3/9tA/dNvzaN3+9h9VLvh7Gcb8f/sW/+BfP0nPsMVNanXdXV5d3Hr7+rd/51neuf/Ti5zchrhiFSISCSB5zDgIAFCmIIIXAPBLBy5dXh5vD5u6m71YD7wCLpac2GIggImPJ2uXiTxQREHd0ux07Qa6HuMe5kTS5WkGoe3w9N3I6AsikcCFmsli038Ura7guz8slbjYhz2t23fasJ91Gv/9lLr/xuKFm1Oi5uInwGDxejPhduQEVnWLRjO/hIZdViIt9UW7Z+KXqZOYC9EDqv5aBg85uEXf8riHc1tdTgp1A5RLDJrnh8WnL7RfRA6Ovi3FyEPhtG+o+7THg/a+2pXmBI05/tXEaxYhqbj86g1CcVmqYN4pSuaqtKRQD1qbCXOx6BXeEiY3j32tOXHuK62lpDQHoxPOiBZTRW0OiZaWEazqGjwPcat0d/Qn/PpH2ZrRj3NSyEtQmB8t3Lf9sRn4F/DjX0o5P8BbbhojUefnixYu+7x88eFBi7POMsiMD3gLPrQDcDv/Rb4yqodCGiAjDrLga3ZadUuq7eHJy0ve9lkR2XQeA6ldTaozUOXVoxpLmpPBk8wpHW3BNsAyeCVFT/7xprb3INXa7TXQb5uerj/YWZRYN/UFt8if1FDGVVDGExFnDXH0/xb7Aca7HNtc+NF74VC4GcCYWM2coskuF0jgcLi4uzs7OTk5OLi8vEUVtKrUB1uu1nk+opYOavyru6BceE9e+UyaRAEFbdGTXq5kQk7Sn73iJ3biloGzuR0iLXDhInJqueJCZzklkwcB6KqN1IkVErmm0OgsVhloyYFYuls0
6mAD0+46Oo0dK2grqG22VDQyYjkeipd4CAAGDX0db3xgnp4CJXHDnZDb7iEHuY3oiUjpt1ourB8HO7zV9wD+oPxmhioge9DWO4263u76+3u/36ke42R0A4HA4gGTz+IzjeOfOmohubm4uLy9PT8/VTs4567ykljYgBlX59Vl/iibPvajGTc2//lcREUlFu1CqIz0oCxAh50wBRGS73Z6d3iH6IMaYxpIsU/mRMVAIgbUXIhAicq01YxbhJKKZqFxIv2CVAIQ5IQWpiQAiEiMwCPAU9UmpkBODJU4XV2/XdTFnibFnHnLOq1XPnERkvdp6FOQ8pjRQPX8Gar8Ekxo6H1OsjTGISJIQEmROYxKRiCQgeTz0kZgZOHHivutCIAQWkB1nY4AQJ5O16zoZBpMORkkpDwICJMKgyoGKQBHJDHo2PVKMmhwLoGZmkQLFHUcUooiEGCmElCFlZY9IgVgGRQXn2s+diAiLh4MQA+WchVm1GaKAdavwFJNzDlBsNgCINUUQO2Hm0flIEAkFEILWc8h00JZExBRDdL46G1xri8ESAxC8uc6sZqaAdmgijDFSVci0oA4RIQCOo5R9UQAJQwSiEUBzqQkgAuaceRg6ohDCwESwYlwhcAbOmAZkkF3OQ3cSxnRz88nuo/H5j/jj3/oHv/m9Fy8//fX/4f6D1z+9uvzk6sXz4ebnp8N+oDvnr8fDgfaZeEw3N6v73Rf/i0d//Z0fvv+3H771+HPb9WrMhzFdb7YxhDDsx5wjQz+OtDp/LdzwzcUO9+EunOyvry7Px5ACZYIxE9AKVh0xcjjbnmAHQtyv43CdIgXI3GMUOGQGxND1KsJy5iwgRJq2rlp+UCcKIoagfDVp8wAIgOOYquxAp4qQ34oyC+OUvJeTKtkAgIlL5VUWQaKpL7itIEjmLLVfvyAIABKOQzHAvL6i38Sog+jBazKOI3MKUUBm5WFSu+yKtPIdAFIaTMoTBXWVg8vctp2eiDQFnbNYTnygIn8Jq2sDAiGVXh0sGGbBENv5xNl7XiMxgPVzYwHCXIMRkRjbo2W08t8iWvqI+kQQMaUp5AvORhKnqEHVwEIIOSdw+oSdUGobnvkIFFGxDwwizIKAgSbNVSRQsPDamBIRxa6TnFUbYWbmKXu/itkQgo5cT1ut7dFQjUAfNapo4nrUBM21VawKaCNmTVNhZnV8GA3Ytq0btu2gFkg0Mei2TGk+S7XWUkoAJrJmxxgiovKgEUYtYUKvihme7angDuJDRJKApdVoKcIAJEESYQC2tZj+7UIGFoQACKSLlQGJSB8B68GuJ3SF2tHb5m4oMjAMGACoYSFB11sbAMbxsKR8A8phdbJ7F5adPjjlwPsB/dL4V3RgSvHshJVxHLtVv9ls3v35O8P++uHr9zYdHg6HGNciCCK5ejMBAAgTZ/XVApQjwjWNiXEK6XtshPkxEjYZv5T2kz215HpGqutRq08RAViEN/1qTENGiTG+8cYb/Xr9zrvvDmPOSCFAPYUcukjpsO+6Vdl8C1EEohgCMicvOaGKI5rnWtuVgNTteBjGDTMRjcMBQXNYJAJJIAESQS1djCUNx97AlZyUltA834hAhNYW3yMNRJAgp7GL6zGNCHG96gglCpJoHz45HA6r1aqLq8N+XK87AuBMOaGoJZAz54AYzdOFOAWvqoQEgLLF5CxqjVDA/X7f9TFEGsdRi5cuL68Qgwi+ePFCIIcQLi9fjONhvemvDztE6kPsVrFfdZuT9en5ydn5SampztyH2t8hMwJ0IIIQKWYqTj1NjEIWyEwCeoo1IgJhwACCFtUwUle5kZImswQiqzMnPX2k0pUgAnPKeQQAZl3qbF3rQY+bKwRZ9hoqcYWCqpwT83S0wH6cDifMkClQkhRpnZizCKvvPgQgSuXc75XmX0ntrqeNMYeciCiu+p4I0VutIefMCBSK2Om6brPZ6JGzkYKdEJtS4uI4gBDULk1E1PdTRqtN0/bclLLaeCp+ba/su7XA5Au2DSJLyuOgt0WKqqmmxESRiJBFkkSMXV/qP7sONSqbc1LizzmN47jf78fxcDgc9HRHjRDmnBNfD8MIQkRRADPGIcfV5gzC9pC6q6vLlLFfbcZxBDh0XQcDA8cY+u16lUYRwRBXXezGceTMXdf1fUz5kPkAwDe7cRU3NVBsSaBT0KiqAVPua5BMLAEDUGnjmRkOA/en2/2YNnGVE5xszl5//QF8j7p1z8TDUM4VXFWr1SQbkmgVnQBgDERdznr+oRAFER5zJqJ+tTay5CgEJLnARmNEDAAgg6j+EEIkojzl+OgS4zik4TDGygmq3IyIqIxtB/UCAGJAnDli2fmqTSKDVz6qhBo5B22aVOv5BYARiBCQWAQJMwjArIG+yUIfcsnHOsCaMxvmG14jnd0uYpOaqb/mOfMA5JxrgLONhvM8am9P8YTo2WV2Mjh3Ws5Zg8XknFUAgIsui3W3A6QjugVUVczr0Aa2LZDN0QjOfmoAg7mugC6G3GBjGIYASICoRXnFbRzU0OxWq/M+XO+vv/e97/Xrbr1ef/xXH+WAO8iHyJfpgOtw99751cuXPBzunJz2Idxc7Qnh9ddf/9VfTVl4/Oh6PMh6Q12/vt7ddCH0YcuIJAiS83AICIcxvXj24uEX7gfqJbOIazWNFTlJ3ALVpQeBRXGIn6B9WC5pQ/A+suTxQ+4Mz+XjZs9MS+Z0ZY9zu62hT1qoVg1j+mdDCLmYoDOirWAcxcPx/hyefmShBgHMNgZx9ueCOI/UjHmibfG+uBru8/M1Lc3w0NwJjkH0BnUlGvINjCUk9hZx1ZX2Uk8P9iURAUozyPKzOBrQb0w+LPHpZ83zZjn+gxapmyfbvvfiwksDu9kniWBN6fRC2CjBTGiDvNGSxV3LFa9roTAjwAxRyyUoCFksh02hYRYD8pchquWLsCb4IQQQz3SNwTaLgNnEG/ZZvqL5dcn1Rymw+WzL5EY48pQR5PJiZsaWeaFmCaaULi4uLi8vHzx4sF6vNTtLTUdtmaYOCGH1QLfvNpJdEsBtmLntMoMWGwE4tcWxKBwiloAYIsYYN5vNgwcPDoeDZm2p34rBXD9VHhbQfqmQbMNubt2nLDLj05wzButwqKpPgbsxXfwcGz6FOQH4S0e2PV37c5j7XkQyzFo6aUaYQOk3CK7HhsDsZDKPf7VDDKSintVQlpXSYfmpFlSL9sYcRCTzyAQBBULsuu7k5OTs7Gy73Wq5I9dIoJ/pitTbJVhordgPwfpZmHsui4jEMDXngJmwOn6cmMwvw9hy9fWnuZ48XVwziQwwS52FWvhn35iAVTMYq25gebBLkvOtYtB1wtTuIIoQkmJdWMaKmbI+vqqmnRGYQmjOuIbCEclKzjzGcs7aJcEr0szspaOXUTpxTVjT2JRCiIgApVOeAjwMwzAMV1dXKQ2WKerLNY0gAUqQue9XzHm/32tSdO1Jk5UL9DwVtWQZp+M3FJnMnEsMber2r0ZvMCt/XhPhRYTtL/aUfqPxTCLoKBDidrvdbDZ2CLCnT48rdbuDS2P0Hn9j8Ib2PD
2b63zBvMfz/COUXnBERCkNitAQwliLVusr0WQHV3c4uNAEuO2fa6acQuVlWcNysBBqjYjPLk/d/2rMbJRnA7Lz0zc87GXaktbtZ/Ppigi5NxqulYeXr2BmuKUIw6+WF/HiyMjLrOUIKkyR2q56Ul3y/hsdzWhXan8zc+2YpDPpg4uZNsjxf06UlxKTehARSaDEJxFC2O2uGXhz0occnz/ff+97P0gp/dZ/+eWRcg4Sukh0GMb9fgTGkYNc73YRY7fqgWJO8trDu98++8b7P333gw8+YIhnmzPe02Gg1WYVhAEpYc5p13UMIzx58uxX81f0lFKCDoRQiIWlaCklaFCIE2oHl6DtmNBaUxQ1VOhV53EdU08t5djQZawOC6o20l1itRnEiMcGpCaeRrN7wPGXvcLINcbIYzLasilUXeGobTyJGDymthqxeQpfYswmaB/8r7CQa/7mo+TXTMHzpgcJFgTcgG1/FoEYO4+0Zq39gM2v9kaq7af91IzLBNmP04jBo4xv95v7ANyhOIYWA2M5DlZDzpOZxdOalTIK9CSKrnEoHuvs6j/kekJ9jDGl9oQGI07/vQFf+yIfF4C34cffg3Wf8uzQWK0wpyhP2w2dMCOFEECfYnjFGpHo++EWD8KSfZrvXyF1bUWaG8Ctqb+OvsivES2yoaRaQDDHMwtirUhh5ufPn3/44YcPHz7cbDYXFxciAvMH3OeWxwGK2fMKOfBLXs2sJ1bC6QbFGIsglvlqcGOz2Tx8+PCjjz56//33Y4zoiooNCtaNAQ35R3jql4FZzKggFS+lawOUZ6WagpMK0cgT/+dRDNyCwFJbDgBqECJitgK8+VZCRKvVakxsupO+jpnnhzhMnOLB4xo7ExHk6ThWnS8wp5QsBYMzW4XOOEroAsZiDd69e/f8/Hy72XRdp/LTKoptjnocApc/WQSn45EBuBoSIqI9/AinRHeYZTxOCSDzZZ0lxjdzN7mXXVsKWx1vM+R5LT3XwwCkamLiPGgW6vBnvGHpboAz6Ob04A3CBhgAyDC5NdXGTjWXx76X2qoUp74GZYKWSGlboWKdmUs9U5GQtf1BAKtQABPXVT9h5nKwp9eCuJTt2bl/KWlRWFmsw+Fwc3OjvptxLFFBqbGinDPo64RyxkCkacZQE0dzzqen2xjjmDMgY5wiIgCipxyClGQ6q1ps9sRjCJ8akSy4tT41V+fSyCw8juMYx/Wq0yrZjz/5KOLU8sdqvrCGqm00g8o6YqJzevKiISLOd2QvLRtZIU74i0gkomEYEIWolNitViv/ZMUOuzm3QSeeu9JnAhpmxZf24mVExcPn4Tbo7R6/GOgatRl2bsszUTZHJESCqbGndo7Jk0sQUSsFmGft3Q0h1nvdT7b8eiy1TBfSI2ECz21gE5wu928mDGCmwOFcNHtNDp1+0/zaYNUjFqptIFVjaFCNTjHVQUoRMyCKQMlpEQDIklF7Hg7U9/0JwcuLy7/4j38TX0vf+M1vnJ32N3hzdnLy/HCx21+enG33N4f9bugEaLXhLLvxBgjuPzg/+4df2P+Hi08+vCDpaLOlfeAcAkZgWYVxx2NEhhGunl9ijlkQI5I1fWVgYQYGgZGzFhCOnFcYAbRhD8/xGXQTRZi5M/RHv0zLyxyu4jLuRETbKDt2mOEcF4qRJx7D9pIYyIUWYdGTE53IgHlzy0rzR7RJ7ytpbtCLF97rhjaO8rK/vJBqHm+G9RPxfzY4lLmWvMSkz6lb/tqwKlTPqDgnl7lRPGIbH9tyjs2fk/8lAMxX3xNGw5u6r4hLjbOfTNuwcY66Emym5sn2U4bqq/YoWhqEfnBLtfVblOq4dr8JHCJCbFF0dL3cl8ePLZHFqc3LBYUFOS2xsczuaxjTvxQAUCQUQhKpYQmoQaTqM5kAED6y9S6pdPllIwqWe7zn9wZUWyBPzFUj74/OLudjqYbOYNM/ueqjzEwh9iEcDod33333y1/+8tnZ2QcffIABoTRude9o5qrlILfM3SNqefkZeRT55ZuRUBVw86lJcWQjIuLZ2dnDhw9//OMfPn/+HEMUnhoROGKQ+aS4HhF0K6hLPJskIZdK3ff9OI5mDorM6Me/Upwu0eDBaVzKjxnmF7vUErP9EDGDJGGo2ddT/I0mGqsWi1LRwiKsDimNEM6IR8S2TjTlsEQIZgEfROj7vusDRVqv13funJs1aHVuzAm044arSdvz7IQzhqJh1nhmy1Nmbh1lQK/9V1RPRwv4+ZpSBDVcU5mOTNz5CJJ+kNqKwiJydmd2fQpgHhoyg1BEdH0BwAtSnanBCYDmq9Ls1mmyudCJHY0YQojkjlWoQRdPYOK0So8EIkpJz+5GIvTrIiLCJCQeUaIENMlSEMG6pxUq8ofbQY24ighzHoZBKwYPh8NutxvHMaXBw6ZXCEEYmWugW2S3u2Hmco76ai2EIhxD6RaDgBlUM9BT61gYU0onp5uu66qlExWl1hDITxbccU2e2EIIGpiGuQhi5hhDDDHU08VOT0/v3bv3+PFj6ilMu+1k2sQYM7NItjCykZbf6agemeO9w1h1QnDlzehMAL/cHlQN7BTaIKKu67Qn0pgOGmY1R4I1Xwru3Cqqlw4XwqwKtpLaq5iwgckor+FedMGW7MrtjPf8XthsMOSyuWSxD03EeotG7gVrA89ybxaRgLMt3N8PLhPDjIfguvZ5SjoKj7Dk+VoaWZj+6oExrUJvMxHg8WAvNZj9UIY0D4afcugQuCbySwZAJAEM6XDoupXWofCYCTBlGMbxj//5X5+evPb5X/vcuOKEhxCECfbDfuTUn2yidLthhMzdOgjyIV0+/NrdL8nnbv5iePbeyxO4d7o+hSESAPMhBgrCWsR1fbk73OQcIZxQQIpIxCQkwJBz5swxT4q+4gwBEWOGsc5mdiBe3UiwomVGqMvLSNEzG87byi+XVY5peA2lGXsbTzVUQfNQD1bzABap3VUiTA1X5hRypMGpUYvXTRs4bTSPQCOn5s/m39i1bV091/h3+QH9PQ2XNYAdXa8l2u0ahsH2AKjF0s0gyzEbJmoWyE/f57c3vx4dGatOY7g1pYTnSQriLE//Cv1Xk5GMEkxPsswCL388vxtsqgZ5D6Wfgvc6m2TmhbBqqHS5CkthC8688TQpNdrvQTV56G/zqDBlyy+TGfxHLiGEQBgyZ5GirKhVCFiVNJzGzzn5wT0+G9KtPx2Zvn1uVa5fInej+ek2+j+Kdv1xuq3mbgEEkISIIJJz/vCjT1ar1RtvvPHd734vdr/gZMejE4dX8uBtlx/Bc7oXUI7g0P2n3JZSXq1Wjx49unfv3rNnz0IIgigTuqyRbLBH/L84j6h7wIzkmtn5f43jRARNnBoR6Z2LVN5XI6q+kSwhV5mBmUOIWBvN651919VNSjxtGFM0fNRQlJ+d+Xq4VszaDqgqrOYBasMYRFRr5HA4XF1fHXb77Xa9Xq8329Vqs1Ll+Pz8fLPy1iAzMwhrA323i01Cj4vRHpAIlH9pwjZBYJxSzDwP2nQa0YqIzKYPtEg2xdWXz/nbuKbOKdpt8/U5W
UjBiKERpza+VEXXZ7qZpq2Qhxi8+i0y+eg9qAAQAAFADSpmjjHmEIClHi6AppFqdSJMagOoBWDWr6lDAFD9BbOWZiBoMFf6Ufin5QPn4+5D9AE3Iyed++Gwt4aitYkRgOvWrm9EDIIoQaw7y263A4DVarVer4loHMe+j13XCeOYOATtZ6A8gESkvRstsk1EevxmKXR0yZ9GD0aoxhfFBTAyAIBMJonekHPuYyxNFpg3m81rr71WVv8WBhcRS3/3O3Ij+sSytRfmos7Sj2CP+D9N4UfEaHMDgL7v+1VkST5XtTJYEHceuq0czPewZovCqn36CRh9+6eyr6R0sB7B0Xw+PvvR2NW7xDww5LJ1/SC4qK+zAQWy3W9eHz9rObbPNfs6Oh0F5pvEJN38TjN3Q04LyaznXTYLDy7XGauir1eox9R4FDUQghP3eWoQMkuGhsVVFjQnEYTMLIiCWA882YbtMAyJRwDIzFkyUFj33fV4+Os//0GK/KXfeHtPQxr2sKbr/a6L630aSSRSCCFkHphTCHxJzx99/cGLYffy8qe7Z7s1ncZAkBgYESBCGBMQwvXLm4tnL85ev9dRB4LAJd5FRMhVqQlB54GBUFhYCDGETtx+j4iWUGocZwsL1YHricqvpq2vSRDm2fd2Nbnd014751u7vEMBnF8GcSIkW0RPIQ2tEpFmxaAz4eo9t5olhh/703hExG8hYn4p/2xDbwakzNWm5r2v+NL/2nwwPHjEeoS0CJ/bGLZ8Zrk5p1ib4r8EbwmtZ39EzO4g5kYONCzvV60RjP6nZq3NSiSiBgN+WPvJK4L2DQBoDo//ycpRlLBD7RcqItpBxyPQ4WoWxlx+bpbjlmvWRLcsFSMIsUwejVcsBMwpx3DuBSY4ep7uBNR2IiDCOQCULtAgBFjiZyigSZVwbBYNYM21pEa/xM16HX3c5DbMK1j0anYrA8mz84wHZ1kAjdzA4TB0MTx58iTn/LkvfLHkg4gICAowuvI9QgAAQtAW2QjaNeXoor8aRUcvLyTtcdJz6so33iAsTDiMo3aUSSm9++67IkKBsgjMgpxtpN22AIDjnW9gvoIevBo0IhEZhmGz2QAAM2vqYykfrHsKLrZaT/AN1y9QohJm0q8QETF0HW02GwWg6zoQTT7SHmPFilMyNoVSpc2Q0pCSDwAsZ1cY0Xm0Y1dq84jIOgIaiLn0FElEpPr6/Xv3zs7Ozs+1D6SkNEJtYU1Smpb6QyBNZOXany8EQJisa0SUEv/MIsI16uIVMJ0N18CdV4o0wgYLmiTXZVofMZHumdQirnoerGmVXFNG+/VGqsHjdxN0BqFxrnnwZxwqwszrbkUuKckWyEpANee2AMdyOBxy6QqTND6ZR22kGeyNXCtIRUTdFiYYPR7c3qEsw4W09MsMIrkiM0dtDuQudFcMkYjUZFZotfv9OI56xuB+vx/HIadEAggYQgQAO6kbEVlNL3VyVCSreCMijEFH00BXzlnXHZGFQWcmEkOAENU6KMFSgSmwj0V4kCdmv5E1shQhoOutausSu8mn3/fre/fubTabNAxwLPcqpcQiFIphkms9vxfsfk9kFyRs5IMHvhEyfoNAdR6lPMSOcs5I0vUBEStOy8PmQjZ6pXn/ElNVjee90ejBgnmAsZGhZfw56RvxodNfvbOkYSGZt4fxs4Xiwc1+zGYZ0NmE9i5cqM4NeM2znvoNgDw/TseWpOF5cDLL4618c/TL+VP+QtegX29bGrQevR7zfo6NtuofLPiHghQE0ez8vu8HPZaHEWMgJADmjGs6//mPPjzAfnu/u//FU2JKzH3fA9JufwDJ59tTATikASX16+7i5sXp+u5nv/LZm+v83l9/fPP8MoigxEA0jkBdF1HWIe0u9y+eXnzmzc/eUMcJeGBgpoAhhj72GDAcghYulzliYBaSmR0FAFo9WPE2Q2+1MFtXt9GDoctTCLlSfv/I0VUDAJx32zNObJbMkK81hA0MMBdV6DTmEALiFKSym5kZFm08AACR/J92vYIkmjtNhIGjrub+BjmN2DUsLfX+5as9Mj0GlkPBgn0auW9bQnbnQ3qRgojsUpiWKII5OzOzFv2rjJlLlFDx79EVYKpunck9S/X0s7M/vZvJizubnZc/Xob7CfoZ4TzsbJ9hLoQb9VF/9zhZEoC/vyGM+qH1woCQ1pgo/MGxjOG5oU/73LhR4RddBBIJIwWmWmBjWBWEmmHuqaJ5qZeoS3o7ykRQiecV7O8vL671G6MKPU9vOcLSx+dhQ9R2D9OvLNjHOAJQiB9//PHHH3/81ltvnZ7fudmPkyUFk89MPB3rEiOATDAspehts4M5JmGSV0f8cWrbHcVq13VjGlar1cOHD1+8ePHzn/98GIZ1KE2kKoN7wThzqRBRs1yvWL4GeMQSXFUwfMF/HV//L1RjXA2izHHjlxgAGnigZg9i9TluNpvNZlO1YakGv1ghX21vA7X9CYzc6iTzV0CFZ9pc9H6up8YZ9/V9LznrKerkDka3+1NK2ppIqqWnrdqp9PdWk9LFCVH0uKgpUy4QTY6wkpFoooxl6vprcqa4auvl9whfPraYMmi1ESKozKsLO63B9F5mX29mL0rVIZjnbV2o9nOGhVvHr7hHnWHSG+Saum8Gp46Tc6Ya/dY1mrJegTTTEuc2xjJjwgY32qi68fRTgZanhOTgzAdxkSgiUvPNStP1jfv9XvOKtZtoPep8FqLIOWceYZIwpan+fn948eLFMO77vtfj4CvqtE16rPs4ppSk5I+X7Nntdqt2MqeBiDgnTtz303khxpLz3W1mpYsIMSJOW3/O6jQUBOAMmbLEIAJd152dna1Wq+SOb0GaqVXKp55flJZ8mR7O7S/7fmJSOW7KsqsX9dOJOWdrbRTq+XvguuT5ZQAAxCla2hTvLaWVt7A9WhFRo7p+ryoTm5tGjX9Cn/XRS3NU2/Q0V8FcNQ0iJky5whtYNAUxkGI3tSEyeHieiedfIW49PKKHYfANgiZOztP0pzFdZM/vDfaNx4nJOPvTf0P1TBWshweYYe/HtM+hnkmN1UfV6H/N26MaDIGABbSpNWcAur6+DiFs1ieHcZ9ZQiBBSilxovOzey8+ufruX37/t+7/2p3Xzl+kF5llxHG1PRGm68M+Aq9WMQINaZ9W+Pzw8u7913/lm1+4/PT68dOPiTPhaey2vB9Ct+kwdphurnf7q8vz7foJEGfOKYFgoNiFQIEwEuUQurL5mewTqVpRbUwKk4e1FFE4KcAAiNh5tNsCaTmyUZ0tvWaNy9xzAYtUvYmN5+2FwKkCVrFd5enMv9i8V9wW6KnLpAPOowQi4nX3iaR9wqT6khEAAQOBeigaJ8XUFR2PjObkgH5jkt1u8L8uuawhP7tkaVojIiJz+6DnDn9zfePMG3Xbew2lWbLUJBbwkEPVjFm4nIsmzLzpi4fYQ2LLscSYTcQ7+WRhANj00V1GP1LdZHZzgyiPbagKgb3CC8biXa5JU16gkTuG0X5lzlLMAD9HcTZe+V+FZ5Y5ouq9IUFEDy+Y9hRzSjYI8X96xPr5
suuv0NCS3ROJuhAoYMiQHCXUR6T832Jj2lIbYCpnwulncR8AykkMDdjGsOJUPSOAZi52NYnNzeejzOjvnACAyRr0g6SU+rOTvNn2MTx9+vTx48f3798/Pz/fD8+FMQAyCDlTkE2YuDJSIWiGbdbrl7+MdL1lWwhaZjfoT+M49iEi4nq9vn///jiO2mIUZrv5rTn8S8EFC8QevSz0oWCFekb8YRxESj8ZjXIrIlReHWXJBmNQ9L8lDKRBG2tsvlptiGLOKaUkrCcMlSPs9Gxkj8wMLHnqhMl8fIJSO/QYoymhjuOovRw1QtX3Pae03+/9cqgdeDgcBLKkdBkjImrEUquWQoir1Uo/a6dN4/FY5DYz5yzCLOhK1xDbomXjIHB2DhGFMOsZYzJTaOmwEABtRiCEiIQAxAkQIYNkdzSOIUG1dm3WYnRC9dg2qZq9VFMfnCXZwGMU3qyytdvheZwzq5HsCE/Xuus6zSnWO3OtG8xZAhFCQAggalELIh32I7htDgCYMzOrPgwuvUJluyaJTL45IRHJSXKcmvlZ9gFM+whr2mSuB6cpbBohtCC2DuvLN5hZ48YBGBBFcBzT1dXV5YuLw+Gw3ZxuNltGPXgwhtBRDBhIOKtZmHMGsbPahVn6vicCSVkjqFVnDjZTqkUQdnkMmFEHPJXlMwtAOUdgVU1uRY7KAXJhLSIid2Z713UpZ83WNJJQpPmyf1sgXmT6FKJKYqbEUc3f1AD9N3Zdl1LSXszgmshJDW5YArE+YHnhWh6t7wj1BEyjFSdi0ESGQlyyh/veeMNgFZFYQ8/gonP6XrP9zJ2AiKvVyojJsNP0fDcxJC4Urpc5Udh5dOx7cuEjH533+5ChqHw5D1QaTqyDsC1JHX9q1uqNWEOLNcsOISCS0CR3bF31jepOUw+QMZJhxrMT1Yw4T0C259mGIbUVr97g/QX6LhEZ8yCCAbU0AscxoQARUqXaGHtIg/ZWiRQPmfq4vXz5/Pt/9f7dh9sv/+bnISJ2SlcHwRgQxpxxyJmE03iI1K/6m3xz7+G9r/3G5w8vLg8fHK4H4Qz9drvb7ajrIQc4wPNPPrm+eEqPSHLarE/GQxJmhLi/uey61Wm/LWWxYNkyYci5F/ITrzTPRJONp86Cvu+7rjscDkaWRrTeWm62cH8+ods+IWc7qH3W0HIchtIcWa3Puh16lrbVx+rLsGX1N5gQUTrEqlPqoTK2NYqIeivABTqc0JgUkWaj8t9LNRsIZ74oI2yqXlmTJ0UaOkds4+uShRUtxY0+qS+y8EwRkaZpKVTaXs+vVwN/s17NAXrWPsoMpObq4jR+s53L5JJGACAkCsEEly2x8rgdRG7IKQ8788CWTL08DWay6yBn/K4320vRhVYUIXquYK49eI2QrPW0iBwOB6n2XuOehKphrNdrcOqXbXWWesq1ibyJU78czv4xJ12bydl1HWG0QWwcp5cUgmSnedhk/TI1rGo8QrXNt8GW8+Hs7AERfXq4YU4AJAC6aQIAAAEyCohmpiFTWNk6SlXIPHhGafpvUwIgtpEfs/D9CP4t4LaV5i1qYNtmYcLEzsm0lSrUmFnmfX3sQT0MehU3Oef3Pnj8K7/yK+v1GjJT6FJK6+3m+fPnq+0mxjiOI1d7uExhWoi28ZW/zUOin4Or8vALanVxzWgsJULoGVwZUIn8zp07b7/99o9//OPLy8uu65hB1T7FX1cz3BCC1Mc9nZs3fElyS6QpPtfr9eFwiEgr6lNKUQMRykFASNTFDoCycM4Zj7ntYV4RQC5bIdTz0AyHIgKCmdPJyclut+u6brvd5pxDiERxvVnvdruU0nqzoRDGcVyv11QPs4ZAFkrSSQ1DskO6rRVKfW+2zpmG8MxZWcPS8zRa1fdd13XjGLuuS12Xc97tdkhrrOZQrpVs2nMSANbrzcnJyWq1UlFWI6tia2Fbqr49g6DL80+Jc84UOqqnftuDMUbmqfcb1rQ4Ikoy6w7qGYdqz3au0ZWcM+SJXI0SVI55tmXmlDIAUBdEhJCQgAICSuaUGVarlbCEWJT7nLOwEJEOR0QBSYDHNA7jICLCQRtp2kuLHHD7ozuTBw/7QUSoTCRoC8kQQoDoI5k+v89mZP+adi01R1S1I4VQVSMdzZ1vISFoWLXEqhSx2rxN44caEtRc4uGwSymlcUSBLkTt9gQCvR64qFIaMNS+BywpjyVn9cmnz25ubrbbbQwxBNQD9/QtwjgcUkoZurTdbne7/TiO29WWGTSR++RkI6lMJGetgMJxHGPsVKdglnEcsh2wHIKK0NVqhUiHwwEA+37V4+TIAz36RYCIdrvdycnJMIwdIQFSgO329Ozs7OXFhZLW4XBAiqphli67rqed8YiF+qn2qjVhGEoXw6KsKmJDPTdY3LkaXrags2VyzhGAEYEInC2j0q2e/F4NA6VsbzEa6TTvmFESoPpMRIRFjP/VFa8QTR+ObWwmH+0Cv4Et3D+KrM1mY5uKYaShb9tFoEYMvE1bN8tZEyrbPrNrDkHOAE65TSHzmodNx2Azw4yqIwQRCdCzerATWjITRYPWSyLvJTIaanDobQn/r4eqUU1mS+nkPlQ1K2DUM+s1562ggiRATQ5hCKHTWm0RWXf97uXN6fZkgOsf/uXfrc5Wb3/t7ZvLi7EfBuDYAWpiJwIhhLjJacB1t1mtn3zy0b3PnP1nf/jrf/Lf/xknzImfPn9+enL3+uaK1l1cw09/8PP9P3yJDwFGEeSAOE6nqQ4SJkWc1C8fgqDo+ZqO7IuFJnMtxBhBqjbQEPxtf/oN21OCrbv9WgwqmTUL5bofTJv9/CKnQCztUn081xbSobaGM7PBdlO45VrSCTgCvu1+cCqdffA/2eMGs3G30XAzFMz53QOw5Dh71rqoe+mBx5pD2M7diEs/F1gKEDcX/yzOTUTDA/GkIthCeAGFzkBVdDRA2iXOxDWQzM4UZ077NHWf7KC6UbMithxmjIGjqOxqX42EfMcamNNkE0g0ESfHkg7sRkOAX3fOADA7R9HPXeZ2lyfRhvaaD/5xqllqJkthPPQdYaCIcJAsICyYUtLACdUj5Arwtx1WswjsTP/K7IaG0Rogl/8u59VcPG/vaSQBjuNm6MKp5hyxVEkiMBFJbec4DsOzZ8+6rnvrrbd++pOf9xgRUXIpYvGvmIBHgBkgSzjZ+4PsczhWvn4UP68QYvYrEZ2dnXVd9+TJE9XnGqgKHlSRu6WS35tAS+EGc/nTDC5VgTuMg587TK6x0oRjAT84jkBEENEkf9Lc6eZFhFG5Q12K6s0sviFCCkFcTmDR9ISJSy8TkdIoTts22t7nBay/wDnUqJbbce24a154fV3OeRi0yG01pkOFedKIpPg4JKW0Xq/X6/VqtQJXe+lEZbWNaZLeIpJzCRvEDolIXcA+YZVd91HbVpZCw2Dm2uzEPuuDaZhqDj0xGBl76Qdzse9vG4bBJmW3QbW4oDKIRh1FhLOoBWViVkEKnctGRiSYKr9U5PZxapQSQsjDpOR7D5HtjyIzrxxixxlEgMPkRoRj4lSvw26wBvuEIlX
sj+MBERnKnjIMe52OJuJqHa8PhRER1loec8oAAGEc+bDfH66vr1+8eJFS6vt1jHEYhtBjCCHEqC4GqsGkcSwZcwzCPB3ACKiFzmU1A1Ko1p15Ewwe2we9vAInOZuNxs69FEHl9L7v79177fEHH8Bc4illEmkt0KzVU1g0pxWXxMfOj6NLw8yBZoLLeIfnPtOJfxUUK/+VSSED5un8SoNY286aQUgukGqImEkKnBbV+M1n1nmuA7djHd3ODcVmgyV3MKi9woimuQAg8xTirwqICrLgZQQgIKGuh9yiE8tCLTaMG0LE6T1+LrZy1mZqtsa+q019r1TaNPw026EZDy7SNfP3mHChRS2WJ1/P4TJJXvHfT3QPAXHWqr6Mo+fPcJF9AYpnousx79MmbiGnZz85/PTOu/fuP1ifbmBMElIAJBRAkpSHlDsKfThDoZubfbfthMezN7bf/v1f+49//N3LFy+3d++NKWOg1Xr94gXwHi4+edZ/uU+JGTlgBM7MTAE417JlUi+aR2DxH4smfMsMJ3Z5lrPl8KiThQYgNfjQoA6c7b18l3qALKTmVxDn13LM5jZ2Gae2B2iTpEZg4TEDyQ+7ZMCj8/XfN+CxC835e3A+gg2yfCMUIp9+MvljFkiD1dsw4+e7vN8o2ZN6I4XtdexiBR7shuvdrCe0m9d5Ceo02pzp7CIXDbb1bYZqmB0XokzmwTRbnUlauip2dKkT/lk7+bexBPxqeuQY8OBqCtzEsaGBSqzBJCo2ImguqZpvGlry7/JvMXhCCHYaGADEMJ6frhkodgH2rI9lIK0bzGq8CiBqR87AC76AY8JhUiCkhVPmRnUzwmw7uMW1N7+O2L1G3p6SDSf2rwBByZdDwomMhzF98MEHzPzFL37xX/7xv+bAWsNTAq21K0M7fSzz/f/LdbtEOm4WmhP94cOHfd+///77wzBst9uUUaoKWEcGmDKB2zd67mi+fwW0OIWJ0qrr+76/3t0gIiEJIosACODMO7aUS+BWZ36DLtNMwyOicZhiBZqiBpkTZ3QFDnUukpgDM9RwDUDp1NJ1UZM/G0+cZyIPIdEkHExWE1FKI9fcdQDQMrau6zCXQTAiYYSasWnpSMMwaBbDyDkJs4wAwPUwM0YEJgbqgio8JZc01yI69WJYwEphSykhkjcCJ3EXJ2eW/Yo1xQaglWNeQW/kEri8EhPCuZ7/JFVx11dY4oNJ8kaYew2EmVPOlubjl4PnsWtL5AYTlS7wgIjMoKHUuh/FOshkeEspAwGjcRHJSYimVij1zll6CDNn8WSJiEWuaoQwlxjyaMcGQsV5jL0naWaGeam2iWhESintdrv9fq9BsBBRRsR6LErOOaWMyGPOklkPM8QaESWCrgvMKefJwAYWDLqIbO1/PLV428fWl5mHpPSAjLXmX4r5h4haIiBV83/48OGPu25ICUsm10R4y23U6HBaSrcL41xrMqHk9TqPPXOU2086QgSQECjGQO4gGp6y6idCl7pxahaQVcc2YDVEHKtp7qWYMVIVHIxzDcCLGM9gxocGT/HzuTt1R/eGoh+B8/SlR2sTkcBqA+hC8jxuYHB6DEj1kNli+DXzM/JDeYNhIn22GvdCCuXPuda7pJ6josSTi8wrymSuXYGzaT3qYEGXExL0O0HJUz9ZsMIMABFGFK1qYOYx3azWIe9HAOwQPv67Fz88+8k3f+/XGFCQx2EUyUSUxiQZINJmux1uroZxf//8ZHd9nSC//fW3L18O3/m3fychw4Byk3OGDjEGefHR5QN+HRg4SegqBa5iEs7MUo4YyirbmJmRAZZtMCZ+M3Jtvl+u5lHS9QM2v97WxcvSLSwmb+Pb2vlVs2W17U2cQrkE1Z8z5mXZUbCXE2xmdOROmT3V0E8LjPMiG1nqn81WZwKEXOoUVIeLyZbm5iWc6C4/vruh7R2nH5rIGLg1Nf5q2BDmqltdl7Zox0/n1QvhkDZZ12YSU83nsaU30jKHtAOjUJHp8R5+njdlWYZY/TRl3kwCXf6/n6CXP43Y8b+ad8behViODIW59NNkP8uLsps9bfu3NOveYNXWN9eDCnUum647O9+C4EcdCiQKPSFxMRuU4AWdke+BMYCPvrE84pZVnIDF+eZylBiwNQyOXH4dm+8bCNG5WZegGskppT1+/PjZxcuHDx/2seNaMWWJZ3Ss6qmB3I9vwy7Bu+1aCpMy5i0vjTHmYVxvVm+88QYAqEHbgOFhM0K/Dfn+Kb8K/n6RcuYaIuoBfUpmqlQQESFlO99C8b+oCfSE7YFciGVqbNic87pb931vHsY0JpZC3vplKXJzrGFAhkhes5eFqrCUWsZHXruT6jhTLVwzPw+HMYSw2WwglbS3kuKOUUQYZL1e6yDFu40ANepYgQ225YWgDZBL9bLIVFtBOZv0NkoGAKJ2XvprCK1EwnlCDTghIy5l18tGrP04bI1MHAmKeeqxtnXwCPR4No7za62P5zSLcNouoAa/F3QMQjATJqO+XYSIDruS8afrgrUhTdP3CBY8K2oThoIflf9QxMiEw9h3vg5LUplCwVfpDJoswS0AgkBAnU6n2w4AZDsfEkSQFWXCzLXelTBGJApBRIZh6Ls11UK2YRhDN5bGOYSJM4gUbztIH7vVaqVvL/a5zHR7i6N4DFhFRvEbxljuHLT2ksS6QwEAQNd14k9MyQAE9+7d2263N/u9qBXdHjUJaiJ62jM7xUMIterBzFQjoaOZaBYqh8UmEo0VEVHLRo3sRASgPNxYREsq8a56rI7zpbqDziC8baiGoD0JylwVtu+Xd1oNjEeHOPuqeYV3gHkKNuXGo09clz+DWb+nMB0E6Rn7KPYAQN1vtk7lKdc82o+AiFQliIEHbk81SJbrZZg3tHh90SC0L2W+Afib/Q3M6hRU9ubaUqT6FOqriUgPz90fLu/05yll4nDSnV88e/nzv31877X7dz53xtsx4SHF3K83kAkyCfZyM8ogZyfnl5dX6361OTm5+PTy13//159dXP7dX374xtmDm6sXtIO7Z3c+fXJx8dH1A36w6VeUETKICOcc6hk4BQxCScXZxczsuNRQJDXVzaPXLhvNU6DcopkZ3pYk7V/a8FHzRnB07hcIFgZ88150mlbxvFChNJ9p8PdVvzx53Haz//co0tBtwx5Fnl/8v1wyFwrpGtrhF7EYzFfBf+9lTvO951/79xWv8CPg4vzJCQkMPE1KtJmR1tbrIiv/104rEzCeAJbeItMqPEI8AhuyAVdx2shGcbWF4qs9K6nYzbav6CFRhjRT/nzOjwegITm/lMtLuHS69TYe8xHSgjm9GUkYYN7gadbLwFaBbPt9iHJ6sgIMXUSRDCAqpMchi/Yg0dNOa2/8o5wCC/pxYEysjW6/aB6HhSvwNkpeXkumW0LoCax8CTPjsAhD4SQQQnj58uUnn3zy+uuv37t379nzF2VN5+l8R18Ex371U1v+e9ukYIbGX3A/ESWRzWbz8OHDy8vLp0+fVpV9sqKWPP4KfvePNBPx309j1jJaEbECSBYW1LRe0r5T6Jbj1TKtARIRwR
1Qrpy7Xq/7bs31EAIjA2HJNS6ThLswVarbxJTxfd2vnxfXrtQmlHT8zMkwQDRLPNElDu709hBCxF5EGArHhRD02Keu7x3zTkf/OVLJAFrqnIhoDwOzglHxX1ukGELYpX1iPXTX+wWYGYN1ezY0IotwFkSgxWns3vsmC9eGF60FJwHtQe2a0/e9kYRhyeSwplaCbhX1cMJxHAOVzLKqMJS0OK3dbd2mCDFEAyO7Qy/ywGoK0jxoudy/wOmWtqzMSAHAZeoRkYg2Yi3an65AzhlYIIOAluoBqEXndGxw0s9ktp6SpVgqtyWQIOrKTJkjhb5br1b7vu8zSEqJQQJ11EVEpEpXMUYQSnlARD2XRL0kugQ2QQLEepIFlpBG28sAXO2658Gcc0AsZuDiJ5Gab1gmQmdnZ2dnZ88uLvy5TThT/9oluG3Xdkw3axkox2SUl7fN+kZyJ0oxs7gu5waBH8VyYcF5H7EaUV4OKkyZC8sZ42H19FM9BdIbJEepEOqqLOfWPGuk7NfYHpdFAqcNaAFPs2xz9S357200cp5vWwYjbj+OPuUZzJDjwQMvW/PkjvUCt5k7uxzXUFtQwlRy3Tp9DRiap7DbKjfiG+b86YnP0IIsQCCCpUge3WZmJhaWbs0558Sc+bBZncoYbvbDVk7whr//Zz/85uarq0cRV5mHkTEirDkTY6Dx0BNhwhg2KcvVIcEmXvLl137zy08/+eTZk09pBZ2sxjGTwMfvvXh7vz/pT8a9HnYEI7MkyZmhRvBNzAgySuDSffh4Dq3fCBud25OcJyQ/jl8yQ6PRm1Fgs38Yx3lSsctWVj8HCs09R4UFVqte/VZl81AT8ZURBgN+yZ4GoUcIwsRuxuD+EU/2/oNB7gG+Ddv+A8yDnA2cwR0g6/nXM4hfXE2Jb17awO+/l8Wv6HTro2LXbjZgPJkdvbm5vBegwZVR+FHe9896IgFHew3wHqTmsw3Irisyuc7SIYSUhzIgOscWALNrU44iwvMaqglL4JrI1y/RvLZ+iTyRHAV4iUmDQZNKzJRF68mch74P3WrV9QEYmBNBMDGJWM4e1MxPElhuwLAgJE9dcswn8opHjoqp22YHtYZwiRl2qb+ebicAYPYgUkTgNI7jOEailNJHH330la985bOf/ezz5y+8KPD8btsJHeMmnG/cM7TYv7fMy8SpyNwGu8WA0vneuXPn3r17H3744cuXL0tnIFnV+O4sJRLxFQWhMxiOzsivlNG/VGXa6VScQUqhJhIzB5xJSz/4TOwcU5YQUaSUFIpr0GKvIyLA0t1RamB/HMcuribexGLTlG4r3NYsOW/yzMXjZaMZJwaYZpUDTFIXAHLOfSAACPVLswlz1QyhKHVJwcquJpkLbAgAOUk1BYMFGwEgVOWT6yF1GiqwwjzT1vTPMaeGB0W41JaJqENDG6epvw4FzLgV13nrcDg0dKLDMk7tJwzJUs6Fn1VdmTJZRbqYPx/0PA8nD7127V3As+Vwkt8MvxhK90tTSGDh0DSE6PfelYyIlInjlHxbIZlmUSbOrJG3Sj9JRKAGS8t7SQjbJIXqJCmAFUrjMtR2eyoiIQwppdVqtR8HfUPOuQOIXRdjDF0UkXHIzDVOiFkya9KyFsCnlLI2FoJKXRmEhbpJSBpu0Sn2NE+B0XMdAerpRIySGEkIEAC6LqYkoboO1+v1a6+99vjjj/VcFpZJ/SYiQESaeYUMpV5aTtvoXLzrPSFEI2ZbNeNicIJX/4wUCkH7wKi6ZBsZDcXwIOvI5DFlosF/QETvRrIBYSFM/cbmJYunSOMiG7BRZ9FZROS82vNXTJB4RjWcYk079o4QW3u7zRtUHjbJRcn2U/azw/m+SzHMPS7MzFy7gBgDeL4yxrClpXmEx97iBY1HcrMcDXk19zTP0jyBAYACBAZQWT9RBYAAiwjVc371/lUAgNyHiLhJB4pxJXJ48cmzD372wRvr1/rXAsswYgCIPACx9JQl0OX+6vz+vZe76xeHl2995rNXzy4efP71X//9r/7Fv/ouC/RMu+f7s9W9l8/Gi6fPtm9smWEcx7CexGKu1cMTiiAAzGrllaONC5q52xS8vX2Ulm4jbI9bIza7FE6Vy0SzOPOS7T1H+NuM9vz9fh1TSuqCbR737NZcnjXsS1noprBQ4BoS8n+6KUw3+zubVGq7z1e1yULrWsJpbbU8xvyw/kv7vBzW25wew54S/GX3N2/0r7bdpcHYDCFzMdhMsBGVhrcGqqP0gFV99+KisRsbadAQuX2pqqcGE6hqk+Ium6+9yBQdjzRElOaoOiGBaYKIqKliVnvTkIe9zjtZ9MPS9+HXcUaLjjDG4dB3eLLdrtc9ETBkZk45IXTT04DlWBERmJ+05FfHDz7BLC1UpqHasx75y9FewbnNTOHYgvphPW/K3LQSEQvOYwzM8v7776/X6zfffPN73/sBT34QCESZ2/OEj4LUjN/A8OqpNZziBz56/ziOAfHevXsnJyff/+73bm5uui5oJw8p8CjBuCrxY29Et5c1s1gyFwAg1Q4LVDKtbNcuSls9srGMgNMIv3DWx4irXMxsVQkx9NZWKsSywamCMXIehqHvkhoYSsNeLlnAIGfL0QihdEtuJZunfKjal81XRLRGzKwmZmY8IgkRUWprtzIUAmg8CaYUOE6lUhEA0qg0iCF0MUZhJEqI5TgE3w2VS2n9iO60ABvTaquOCs9leIBAfLCIahGyTaRZxwxsRqAZXXqbj53m2trHiyZySWTa7Keu9dTr0c6d85ATkS/WR5dN1uMKnCT0PsSGtirlz9zNYLaae4RdxYErtWAU7TmEiDgMSUQ8ESOV85uWZC01Y0vqmUZcE25X642IxBgz83a7TdcsgrEveewW+cxJMow5Z/2GKIzM2seViIbxkFKSxCKiEU8R4ZxRBEJkFybxHKFweoOQiDSFFQDYrA1CEB7HUQl+ZBbSbBRAxIcPH27feefm5oaIxEWSpLBhu9EbchqdHxwD+vsBp2x/25R5HhX3A2pQtQzHNc/TK/3mVygEhyWJGV2gCV3+ocxFOdF0TLyRvp+Y/Su376P11VPKtTiF7CiCZC6djQfSsfMJlcFMeNm/XD3fXm1qboPqt/HQetQZ7TY36AcTVeZ4ICKGiT+hOc9jSvhH071CCFb5beNwreEWZxUbYA3AXiI0iyJuI0Tnly1zV0gQRaJgRiRAs2913fVOsycBQYZhWNP2dHV6PewOu0N/0v3djz7p7ue7q/MDDSFHwkEy5ogpHRgQAj759PnJ3XMK3afPnn77619/+cnTz7718PLrz//633yYD7s7p6+9/HTYrk6fP794+PobnHJKqaN1CCFBBsI8jJk5C0/9zo5pDpXaZ+2zPC1ZYA2c0t/IL/+U4bmhfKs19VqyiORKct6M4dp8yC8HFj/WLGvcVs37a/zCpTERtnrhL3PZCHhMU1leS+zBMb5GpwD5y9959I1Gq4Vl5h0CvLzLtd+Jp3nftt7TeQhT7cQC1NZgICI+ZpYY/DZ9cdfSVmlEoseVzCuyDD9Kh3lqCzEBIFNa1Cz4T
POESazbP83zi/SyUjqcS2/bXUyqmHqnbeKoNrWyLC+D3NOkx6Qn6aqdVzlTsWRrjTgLe7oo1IRSmRvwthY25WalcL4X+IXODCGEzXa1WnUxgibe5ZwpREQsiUECACCMMgdmCRg4QiqLlWcGoSdj//gvZNhGa3EPhmYErAZ/syKeXAFa8Zhz7rs+1H7XzPzpp5/mnO/du4eI4BSREEJeAMNQz4ufT81zSgMn3ML4t8233EzhyN0AKaWuX52envZ9/+mnnw7D0PfbnLOJhAoDTVmXMP10GxjgSM7fuWRt+77KmQAAuaxCQAxyuynYDP4KkLAECafEP67naUFh1cIX+iW7w2Bk0TkGXGqcOD2yMRv8CiK5vaxe/gblzb7vdUfTE49Kr+P6YJifrcVojEmCI2I1qFg4ZeWYnES3gmLl1XaAqoh7MKTWj9mkvEzI7iwKj2Rr928qlt4TZIYiw9h6vT7K1zF2UGumbO6IqCW44noHNOJdRBAlzFtegROVZV0YZn/68wmIjPBMtgfuDSdFQ04p54xYepwYZanoEmH/xorRiSzn1CsyWRCsUbK4yMaqoyEi+kNE/QTFaQj6JWGkgLY6emQlAIhkkeh1M6yblMGKEEQGg3YYBtsIFF2cgSDLIjXPbvDblgFMRNKceAcBCBEjVqVrHEehkHPW1s13797dbDZFu4OpVrC+d2ZKGAw439nL1rlIBEV3Ap8rDJwdom58rVOI52ev5ZxTYoDQ9+UAOj0KwxbXnBbDMACJgGAgQWAQisXe0GJWA9FoJVTmAafjKncp9GbE11mQTn/KBS/lxFVuCqSUzUOMGEQ001WwpjLqtGNHIdRTQUQIKYSAjGbfm7/Hwgi2xhMXVTeSWVa59u6zxfDigLooauOhurUAAxHMiownGsrc911J6U6ZAbuuYynp0UYEkzJE1BNpOyZVwiyua+Orr8igypIFBQiyZLPtOXMIAbDkZ7EG9wIFCpzYIsCe1k1B9EhDROachBGQAgKgoKihejgciivRulkAsAjle0QEMSa8wRWu+xwYV3wyPrl4/jd0Lz08v9fnNcIWDzAOcPWSrl7b3F3v44O8Ptn11y9ffOkrX/xvfvufvH/x7v/1v/tn3/jHX3mRLn7y729O80XPfDrGF3+zO3vz22OAHQ57HJmGLma8PpzS+kqRFTB3xDkFlE2UJKReYUQwf4x23zVW9ASMCDEGgKn/r2mFRGROdGZWX1rOxqWaTA/6opyT7T22dojAY+bkGkmJgEhAgnrAFrAgAIquHmYx8xtsExURlXHee6LdICcHRIAOA5IwZ8YkCCIZimoXVJYJoYxKM1BfofPVMW03CpP9gs59ACDCgQISCksumxUgYuEOBHMtQe07pC/yjlsv5jCQ0hIiaqdHbR6NBRoUUcdjGScd/LGHmFJxLmomFZUjqiaPY64pQ/pORCzuy6LoTP0AEFGEdPHRGTZS9TBjH92NCl/XYx64ntCF86YF9pR+oK5XYkJr2SLCmXOxnEjfrf/T7xiEAYkodp35L7yeUQkgxBCG/SElfXVg5mEo01cPtJgbGy1gaJ6dyTOqZS0AelpU0UL0XYFWZntw6d+NRF0X+2k/AyEszn7OWiZKIpzrupjGJiLM2kFeIRFrAmSj6Z0Wq6wkOvnasKj7jPVIBkQUZo04BIwRJIIEYcwpdtuf/uzD+xfDeJM6DIElpWFLASSxIAAJEOqRDAACFDTpgGuKrJ665J04AuLb1qtn3PZyKk0tGIAq1XnzBOcH0Bv8zWf7l9Nkk0sDiXM1Gu0llVdFkmPJ+wAQyDlnwJDzyBQQ4ocffHy4Hj77+bfWZ+txHA+HAwAFQB6HGDsRYS6GYeE+RAalX2VRsM0FwHho1pFCBDLrrzP+qmrezNQv+GYpepUGl4Ehi0je9D3n4e03P9uF+LOfvwOEKUPs1sl3Q3Wf86K/br30C+8CFpmaY03UWKaWQkQhCsAYiHDE/fXu/N7ddb++uroSQQqBMzDmEILUTDHwQ8/cH5VlKj3vD6PBJAAgGRFC3wEhxXBydiqQWdJqdZZzDn2/HzJQR12QnDEzc2JOFCBERMoE2EVSLQcFY4xDPgAwkRoS2rifc973fR9CHwIyp3EcBViAAYEFpkbQhKGLDCLDmHM+HHYMSLEPq/Hy8vL+qmPBzdkZaDKWQACMEFBItOUJoijmdTMFEEJBACiqZhpzPVI4lgXUNo6IDDlrbzZc5Xos1lg33AwCAZP+GQOFkIEzMxJqOxMVqFoepepxzQXoiIgw6EnuCIEiUjHqjGYhC3Zdh3ikSKGTEWNYrVYvXl5B5n67QqDMkBm77mS32zHrEhBCTGOKBJAZifTA2zyknDPn3Pe9xgBiF4mE08DMDGGFSCgETEQY7DQgIAqZRUQCdCEEoWpjEI3jQUAQQRCYmSEXJR9LBE/pXURSzkgCAajGvQUkc+0sqIyBmIU1J1l5IWgdHxWzOScBEBYgIQYgioja0UCtAKyWqbdbMIRO+wDZOdiqAENEAEgpXXz09GZ3mfJhtVpnHmKMmQ85Y9d1CFGEAbq+75A74Syc82Fk5i7E1Wq13wsAQEBElECjsKrNSCjDoEqdMmDXxa6LRJTHBCScGSEQESALZM4c+pBSykkgAyKJDHoDh5BJqxZDArzaj10fuo5iv7179/Wf/vSd9brHEhiEEJAZMx/SWM8CCcUG3h8yIiGglBNisqqdMfSM7N18xSEbgJkBQVAERUAYWKemTQpCCJB13TmGGI1kmw3GvrdfbcFNQok7oKaRVv4RE45+2za/vteEzDL0Ln845pluNkWazjUuYTF0pycbjgy2Bp6lu0VfzW10FNG1Umjw02Bv2hKg5hNX7vJ3Zlex7aemg3gk5NqnIddGXoZzZ5zM3ouIFMmwar96hNirCwFJmyppCGlm6pcGqhMCnf3cRCQK3nJ5tqlROd2eXDy5eOenP3/4pUenb5zzIQ/p0G371cn6+nrXwcnV1ZVw9+DuvW99/dfun57/7fef3F2dPvv4xWdfe+Py4Tsvf5rPz7YvX950V4ebmxs8XWsyd+KMWZhhP+4Ph0NKqSs56IzMjElkFkkwzHuCwZk/ZkYqXlX1KXNYNeBG0W+I1kja7KIQjni4TRMyYvDfwJwajVabERSYrusKteDUdpJFQKa19k/5bqj+pU4fakHFeUa7kb1Hr91vyGnkxnIhCjPCjIX9S5tnwRGnR6O5Qo27w6xAXJqRm2cbweKdo/aUeXDAkVChf3vHPGTkidBjj3DGv0Z1Dc7tRTY7oysvGP0b9csGeIOlFQL11XisKzosIjYGCfPMme1LZexLcX56rtVNhrflQjTYW36QRYrv9KxHmswozejKxiciHuHli6tA3ThqSEHTQtGtNoiI1aHBLZen89kbbwn1zKjQDdIQpH3p52jfLIX20QE9ywQ7KLJ4xxCqrQaO0URkHMeLi4sHDx6cnZ09ffqUiAQqUbnwMjj820ubz4bzZhWaG/wgy5Ryh14dpRCuzmC32z148OD+/fvPnj374IMPlqc9/ZLXUYL8BXci2BQSNNWwtTIN
7J7jA972FqrFBXoDV4c4WdeWegpcdcdMe4FCoo7+0lhSynYWQrC2JbYonpH1WasctPnmnG0KjUg0kvDXMAwkNiAzcxdjqAcpl6+kbKMskHjKfagOVtPZgJkzTFnrAJDnOV8KjI5mtXxe6QouE0dNbEMVABjJKPBEpXtKzhlg5sEUEYAZpxehzQKAOQsz2Oow55SGEDB2EAjG8TCM+z7ELnD1kkyZ/OpttJgnVupCF7xqKEStLP1Wi8rMX4nYtng0c8Lj1jHyjHiMOM2B4gWviHRdxyBQD6/HekRZ5ix13dnlW2GlqEZgGq3a9kG16QsAXF9fX1xc7HY7hYuItttTjIGIchLBUWrbnv1+b3Cu12v1ezZSyH+2/jLgZJHORTUoWwiFTQWLTqU4T4UmJk0lbKBhgBgjdt16ve77PoRQWzLOzoozPhJGweyValtuqueTsasYMoC9tkM1QmCeaP1G7bic83QqsWdsz7qV88sOLa7mwSsZYXF8QiGj6skDp8ewc5+bUVuVLWlebeQocwtNL57OG23tIq4p70ajXk6hsz8bd2AjJhoSqXiYTdZP2Q9ia+NP6TA8IBdVwmwDQ2ZjEoOT47ao9jnnKQyCLo3Wk7hBCE4P8CtSGABm1qOfFCwucaEArwHI3EFg60VEOZcVzCihCHQikpP15uLZ9YfvfpRjfnMV1ne3MOSRD/s8nvabr33t1z57cv8k4ecePfjKr3zxyU/ef/Hup3/w9d9598OPHndP4Fn/w+c/3z1PuZfrF/Ds+YvzdU+hS5JyEqQszFBPLiIiEhJEgNZEt4mjO8TPaLVS1PEEM5OSVFPm/JjLyxACzp6Hua7frL6nOv9v8xaTAjK/uB54WsSTI1FLD6tSrMpHACT0w3rgl8TQgOqQNhvBM12MR8pfjUq9HCiLRbeidAme/WssA9UssTZr/lcR8Vn7sJAYtmp+sg2/2Ky9EMjW93yh7+p1VH6CswGaR44SgNGzVyD0J2NwlS0WOfTAO7HfMsVyXo1Y8KIGZw6UyfL0uqkFgmyyJiWacWarP6+phgWx2WhLg6EgAY87Mvx0ZktDdHOz77qbLBwojgRq7eRsb2QAEsg1OHF0+NmLllj9hddEDPOafE/VzSMyVwv8OMuRly9CqWUwczvQ7j8cDu8//vC3fvvb9+/ff/z4cQgBoG49C8x7qJbfNywPx9jKLwo4LvCYnIugSbIpId29e/e111774IMPnjx5sl6vc2JEFD6+EJ625wAf54ujIwCAT4FTok1S5DAiQluo2Jq+DU0243uWaZ7lmvKgsRRTGc2dbc96g5CIEKdzTbl0dpm1pDIulmIQcrOmBoaXCf4ej9txHDsKJhZEhJyCWxs51GnWVFJEhBK4It3K9D2eNgow3AKQXSP+JcubohxCMINQTU3E6WBqvZgZp5HZFwGKCGLJxkfnpyOMzDwckuaIxi6INqPicb87xI66fgUyCA8x8M3uAmBLqDZqFsGckx2SriBoJFXH7/oQaKot9FsWIlIJfhbJb8aDXnl+ZjURWHK+Zzp2DWleTZyepPPUgrWgInMKRc2YyjKJCISb9YJpQ5kcHxoeRMTduL+5uXn33Xfff/+D3TCU3kUYUkooyBkQGQMBEGIgiiJJRJTaN5uNagLkDnw2lBUeKSrjQjqx6DFmnuxhLqYQJ2eNsUzO2ThDOXSz2axWq5xHzgKoPtxsJUXGznogDVIwFkBXieatdEO+zsJXlxgYzWJRzVE6ciq6/eznYMjSNv009/8dpYz65axIAOdRL5gbWgZ0lXEswoWaCTWDQIkVACxiV+XaZFaVxxGWUNkbjXVNUPrb7DM5D5yh9Sjde2x4E1Rk6rQhjfoiIjJJWxsnuHbtfkY8j3+isyj8Sz3+fVKQn1cjxE3xAp4m4pHmDRU/ggk7LzWMfhqUighDFmHgCKDdumo7VsBt3+3S+Ml7n4YuvvUrb3dxNd4Mcb2+er6ThL/1rW/3Q1oNKV1d9wf5R9/4ndff/Mxf4Hee/uTi0YNH+6+G//Cvv3vn0Z2b3YunFxfbB/dx1TGnDBAYgWJHvdbWE0bEcnQY1a4yONctsOZei1M9seShtdrnEkWewpfn+cAxYXd0TOMLf3NzDzrSskdgvtz2ee4odbLeBSo8z0rJ1p7GNFQcfYtxesPdjePKYyBG8sDDQp40SPDvbb5Z0jbM+ZFdQMxvfv4VVGvKGyQ3Bp4sVDGYSwBxlbp+p/Fr5FfwqCQBAI1IezRSjT83SNbP2fWj8vi0Z62WgErFyEw+1A8zS2O2CjI7RPEoDsF5ATQr3RBylFz9s36ZDPLGY+ovT3INJsX5nv399tH3kbTpRArBHTsUKaYx58TC9awL0R1Bn+VadUaaVQvQcuhRgGFBY0duu8WW84+/gi/0S5xbcR5LR4lQnAEjlfzQSchCxoTjOL7//vt/+Ef/8MGDB8MwrFYrsDM8XeH0HO1TOkkDsNcomu8bOL3Tc4lJEdFYDQuTQM1hl9PT07t3756enn7vez/QQUZOy01tie3mS4+fV9w5/aT3F/dj0aYck852YZzzhUkMIrJaIHBaxJI8LMBrLUmoXuAOMQMnGKFmlxFR3/XMMI6HnLNgESYhYBN3sh2/0umRy5ha+ddX8XmYWURdKYioB8cBltq/VE9HwEAxRuoiVn0sjZnJiqI1OwwQJ/IwAHxXfP+9T7DyAm2cNdssa8FcVCxxARIiQgyBrBJKrFYTiovfpRdXjPXYpZRSSvvD4fT0NIQwjgfEfHLaHQ7702347JuvdZFyHoHThx/mjz7W85Ojmb5FXQI9A6Mzw5UCaIy34tZax1mZrG4KnFLSg4JoOlyg3c11Xz4qUXM9vtLoqmFAI84Zn7IASNlFSYhoFEaeAtoFqxPxzAQFESFO0R2qNV+73f7x44/+7u9+8uTJk+3peQx9Thko73aH0PFqter7deiiUn5Kqe97zeRSR4lSmp3VqVOIcYqTVWQSui7ZAKAJyQIla8+WvsAGylnqkkBE1DFZSkJlSimlMAzDOoTtdrtara6vB2YOUddo4UCBUHs0lhAtEZGVMmpFSQXRaztSQfJBYBGJMXqPMNSi4mik4HnV/7mADA1T9u+RfWiusTV7mMk7MySWhNXQ4nx5pst8bO4bs4ynQxShGlo43wj968R15/NQ6cWuf7F/I7nNbwmDv20pmKC6e+0pcRagh81jw2t7hkzjEHtLoxT6dZzDMHuLLBoPeo6FBfPzvNqQs9ViFdte3M0TJWAGAEaKGAKCEHDmO9s7Pe8vrq+evPfpdn1ydv90tep3L6WX1Q//5gf/csA7iN/6ypc3d+/cXW0Q8eXPPr35+GqLJ49e31A8+8sf/ujF7gUAvHh59XDMEfsxa2lgiDHIKCKYk5TCrZwjilabNDRp+IQa+bRVJiKZe7CM1HXvLGxf3W+esJfcZDWEnvhlbmn4dfH/NmziiQGqR6rh6JZtfTtEnNXP+Oso5Ec56OizhkNwvOwvH+o5Oh1bF8U/y6wp1FFI7EsRqW5RtAVllzhgXMPzzN6jjAPzxG/PpLgQUzV1ZDKH9M/
s8rv8g+y+95IkH+tiumThozd41FVn/ywibU95EaR/2rlSdUSb3az7pfHLXBOa1Kacp/5VlqP1issT/4xc517Poz/5y7zgflFefam1bPEKhX9MjAEYMGcZhsSi2jZPqd3IwmjeYsAjDiC4xYoD+IX246uuRhSX8Rr2lBkx2M0N4zjB4vR+gVytSqgIiaFyYuYPP/yQmd98801r0eRJaAmbCRnPv0afcMzRc5vNtmT8xRtnsyGiz372s6enpz//+c81LIBFZzrehOYVL/KXUWAD+RF6q91W1B4oIY7Exew/5um7bXCYrdeR77HGBr0hp/LH+NrLN7UQlItjjIiDGoSecpqr8khFggBM6QMza9A+LBUnIhIKiNUetHy2MXOWlPI4ah2gpoMKEY5j0uZVWLRHE+OlKltqXNsU9OWKiLOCDDyTWji580yOVcTCpLUCAGKwHjpqEGI1gUIIAJMr2R5ZhxPmNKQxpWG73QIAolAQGHMX89nZ6jNv3D87WYOMKQ99P15dXxXVIvTsusqnlLquCyEUjyqUkICaH4qZOtlpubketXo4HMRpejDfC5hTCD3ONUCWlDmp4dS4dWShORuqffM2NLHDvF6vc84j5wCWoZNz5j5MteX+35RSjFOTVQ1rD8NwdXnz7OnFxcurLBhjZIbEsup6hKAhQSIijCCUk3BOfeyVNvq+twMnmnQSvxebiaUWqW1PtoeGWCwLgw0AtBBSI4TsDnQ0lOZ64O16s9lut5vN5vr62sgmBMw5g5BazggBaJZmZczrSdrg9HgzKWohR1tQP6CtXfTuAS9f/Ms8QftzzzzFeE+eB1qOeeUNeqwWiGkqnhqMUj1x2Dd12eye2Ys8rcMxieApwH/vZ401fghOvpTcdJdj3dDukVdg+4qyKiKqYDVqsXeEWFTH8NwEb6WGnv3ye870Bhu6qyEavWwdDclye19EcMFJBUazO4Jrzeq9kswshARaso01M01IEJgCymm3Sat0c7P/5J3H49X9h2884iGf3Dk/XO0/fvz49LOvb7fr3c3V1dND3qf15uzLj774+v23//R7330xfvTt3/vmn/+rvwCCq5vd/jCeyCYxZzU3RcYhp5TUUYnQITCiBAxJJieZUUien20wJ8WGZMDw4E07rNEYmV92vxls5j7kKa1iUqkN282HBoDl+P4bTw9WxKUZ8LjYL6X6KTw9mIg0kIyoGvCarcLWvSnaMXz6yJt/RfNGOapjLVbBc58Rv6FiuaY2fb/KsJADzVvse5z7jBpU2EupZhksp+93iOZ7z7/LKSxXBKp0akwyD2pjEPpF9HNsPBr6n/IrlxvQOTt0fRvI87zTaeOHbihkQp0jhiW6lkhubvZ05YlhQt0ki9H7QMzjE+vxAAV7ApBYBEPoBBGBQhchl+bixVTCaSe6zcBbEtir6XkO6uyRJeqODljukenPRp40lFNQVP/WGzT6UFgSFI2UhAMii3z66acvX7783Oc+d3p6utvtdGPKOcti1xCnXR2d+FGQwJ3Xat/w3KUCAIsBUSeuKKQ6zpe+9CUAeO+999QnuOrXh8MBjtVsN+PPv58MV0QEqFsYNSII9X8Zirmj30liZtZIBThiaNxxXg7A3F+Dc3VI86emDaAiR5suztM72fMgl/Z4ysj5cDgcDof1aquqrTKvP5zTvYF9tMFjjOtxI400biZF7lL8a6mWIEgNQJnDRS0+IeScx3EcU7J4pggi2hlRJc6ZcNpGg2t4y/PEtOC6r5tASynFrvPTqXir8LeHxBCIOddmpWhE5KsKbfo7GhEx51FDxiQQEPuuf7m/jEQhCEEWHobDDSBvN92dO3cOh0POmVDTOorvMtZLCY+NvSj6l3LNdxURrS9NKQ3DQXuAUS1vme9iLDIp2MyspzDquuR6bOCSYWEuZAqCls0XBDTzttAhQADQxFFmZhIuhdpOJQDIwgSox/vklLXz5TAM1/vdyHmzOen7NQAdxiFQt+o3+/2ekRBHEYwMXdcrp+z3exHpum673Wo/BQA4HA4eCcYm+mX1kpRLQwL2ZyiHUExBFIeH8l+sqch1pyg7PiIeDoe+71erld+s1ZxiZgSCyaeNBhK4vdvkpKdMWwgT7F4XNSVQCQCrmZNSmh2ycXQfhblgAiER4EIQSNVhLIxlGwH/+Oz0p8ZFBM5h30zP1kPc5XdTk01Esf40073QJSnRIkDhfwXXw8Zgw7lGbk+F2noUnK5j8MDcWWJ6lczPdYRJOJaKXm/jNYPYohjSzAiEapf6mkMTtTZBexCc3rD06NQ5zFb86GVEJk53t/HBBYXAiQxFFwnClM6u7y3GfBZed6s763Pa0+FieH541nF/763X5cXw5a994R/+/m+9+fDO+++9M768fPu1h48evBFxLXH12tnJ+xdPf/bkgy9//fPvPX/34+8/ubneH8a0ZuAMQMgMOUtgYAYQikRY/JiAkmmRPm4CvVlWw95RUeib5frb/D5kBAbOoYDO8NaHvGQx7jCEL4hNwIkwT11+uZcwo78AACYaqCz8iw0wmCi51dE9UeHCvm2cFEuiMhw2QsBCVZ4UXzE1QvLNfuy96qc3yWDAeM9xMzVwCeTLGRlabJWNN8EZGD6bbvnSBm8AmoU3yQGDpNHJDACzvmwKOo43yBsLs0GgtwaPCgE/cb8KNnF05qg6qsWJYnR57A09AwC6JjrgWKZ5owcGjlGjJyqpLkIiwqkGbDZOjBGAiYhgFpyPcXVzczMkDt2q61aqSAWK5vgtN8/dKMurQeYrpOsrpgkAvqmbR8uSFGFuaTSr1kgkRwkCAFgn07CVW1bKwhcXF5988smjR4/Ozs5ubm6YWVUNsRPqsHn1VKPbgG0irkGaL3nwjGls2CBqkirlzvLl6enpW2+99f777z99+tQkMxGlW+iqAW82ctUZlmL2yHIoKrSLP7DmYeoh2lAFuEg9x5LFNHM/OMxbjC6B9KibRF/ND/daiukGFYfMtU/Jfr9frw59v9YzRZexZZOWziacqYsq3xrSIiJxgtfPy7iSiEhNjmE0cW2O+K7rGGEcx2EYVMvWyA9M2+uUuoWT83PSc/R1jSPMC0lL6MCZTj8ZhAXDc4EpgjkNUIhzOhusUgg0mw4AaLKiro/Fb2OMZyfngCOPfH11RbAex6ELcLI+2W6nA7H9vqbndoQQNAeZLFsbJnmrHgBFkba+VJtZQ6wwT5OprxDH4ygimuUIdcdsNCVbUIsE2lAyc+xOJhagEOIwDNaCa8iJqpWlcTMA0DPZm31N21lryWu1YRiA1uv1fr8fExPFvu9zliQQcs6JM3HNGhYAGscBADabVd+vtbUPAA3D4DJ6dHMsLX+6SHp4vdusi/mq4IUQAkWtjVwyJsw3I1hsZOM4dl232Zx0XZfSQETWmdlzlhfvXhv0VGHr7umQKyVYfiwiFgaf61H6IfpFbWbCx0IWdpE7jqOx9ExCeVFiDzbQq1pGtSsUzhUUP5oXKDaaAYw1g9wgKR6HesFCmPqRvYLuoaW5AufpwE/WBve6jq6EjuaLRKcxWfSQSj9ZE2Q4v3
QWypNcXfJQtBZ3nEBdrOppI2ApaoT+DwQQ0jiSKygSEQFhmTROQ7VfL5mr7zBXQBFRBZNZ2lxbj9teQiXyiVSa50oltoQiESJSyIFhvB5eDs/lSUj8a9/62u9+67e6EN95953v/+1f/Orbn+/vnr087P7iz/708vrwe//FP/72t77xg49+8s7Tx1/52uc+/uETlRdGbswMuWR9FDyzQOYECXjottTUherCTR5Kx2+3/SsiboOf2ngsmdmGyrVnrOcgIvL+FNsJZJECaiQnMuMF/4qj11RbUlUlfalGfhw3VQce52bw+n1b1uVfigvB5zcJc3EBACIY0fr7TfLYdqUUHmJ7Jy6sEXtQf2eRlDOJRJejDrm0syyiJ5ZciWzdX+b0b4xvb5eiYbQquI5j3Vm96QtzIxudUG14bVpinjV0NVJpHHngSNFjHp07TFwzW6O6OK+ZNKpTekYnqPWeGHqDU6WHF4DinFB+Oiaagjsb139wAM82ES8Y5fY0OX+n/mtbuDFRgafaKp5JpOTqFH+mZ1iK8ZDSMIwnJ30I8Wa3x8whBFzUEpsR9YrLL25FzvGHFNTlfMvhCm6QBoyjVyM3/IfF5+rvKHDMEAtQO2YTMvPNfvf06dMHDx7UIhy2880reM1OPb3qdgDmr5vznRGkpR830oCIatktWyq8CGs31O985zuHw2G1WuWch8PYdV06EutqQfLfeHiOitl2LghAiKwHpCDUVmZg+6kACyCX4lx9uhUCLiXbfwkA7Hq6EBHUFEqeRzlMibSmMsMweGiJSE2Fvl/XoMHMMYRO54HJoakG2jQdW2+YS2MPPLsrxKKEQGY1VNQkUPBijF2xFoWZx3EEDIioyNBvRBCxlLRpsEXbNpqI8xLYNJNcT36PMdoRREU6TXMBYRaualGVrzYTqCfCi4gZCZrJqbeYkDQYIsa+j3rEgjnpiGKM/WF3eHZ5vV6t+riK2Meu67vQ99cxxmEYVLMyu2W1Wuly5Kw7QkU1BtvImKfjH4oRVW3CXNsaKR/VuF8292VRmCGbiLYqUNtDDas4L4kyYjBc+hiGEtF+v9cUUGZOqRQ0ahK14o2qL9UuXe6bm5v9fm/rKECZYUx8GBIzn5+fr1aby5vr9XodKGpeaBdXiMQMzCO5DLvD4eBHJiJNItWpxRj7vo9Ujrgw+pGq3RkeiMha8lLUdPrS3FVEhKd6Kxa1qN3myLhe9ycnJ5vNRiQr3ZbDYJxvxUjXS1ePfJMY7Jp06gjDMICl/TNTCF2MhFiM6iptgpbQg+szu1RhbVDTX7PzuNiRd5445hsANG3rsSY1+U0anAmB8yaZinc7FMvjwm6uxt6U2Kmn0+jRwWJHaTl1aiZDKx7NE2BhcQBQe1VfpPO1THGY70Y85Y6DFwdSK5hNMppMRBYIk73ql7bZkEytUXFj2NDPSs2KVeVbJ8HB7gRnuRmJZ1ddjYhEYRxHdYSrrISaeu63E6glN7boRgU6Ow3HOzvHzkXBlJIgYkQAZOYAmqOCOfP1y6tuvYoSA8eYZHwxPj88/hGn3eWzX/3mFz588k7Ku2/+xrfx/PTf/vG/+eEPf/APfvv3ujUdDtf5cH398tOH904//6uvvfvR0zSMwzBQjBnGNDIMIw643w0A2MUVKNkyAFFx+detrgroCSH+JyIahsOS4NmFwT3xMzNRaerVrL6UA20Nn8oFk6OX3BEU+jpbdBvKxgdnmesNuZ6WKc4M0OKZQk5SjuNDhMM4OL5APxecewSm7+cqS/NhSb0epUZ4Oi0TfCZttEBC7zcLzbZV3ch1o+WazlRb5Cm/cwauDpRBBYg3DKRmOYbF6aChHkBs0Hrpp6jwhD25QtwldvZgdaVPjwdUtY+Zk65LoNh1wzAQUW1XDSIC4nqLVb3fOM6XBHj8+43ZgBGXJZLd4YchBI3V25f1lGRu5Vv9r03c5LZfbs8OVc5PkEDNbiAi57KZGXIw30T8XNjlqjUEaSRqH9ilvBrfjeNIsYNjV0qpps/N5OT+5hBDf9iPmy0ChnI+NU0l+x4ePJbQ6G9ooIWqYNp0bBY5Z0N6c38zvoksqR5JG4SIgKedznjQELuwxmecLlD+0e9VdKSUAhEjxBhfvnz5t3/7t3/0R3/0q7/6qz/5yU9Wq7WeYDxy3b6b9XL2XkNCflIezmahbTRfIQZzmkHdrMexW60poGQmoq985Sur1erx48ciMgzDOI5Vh2u1qyVt+C+9PuPXZbkQBljOjIgkMIqKERpTGlM6OTkRwZvDPoaIiEkTWhY6QJVjyXOH8e8wZvL+67kVlOvx6/v93nJBlY808pZzCcHpU8MwdN0QQrfZbA77Qa0yZqkiVA/9m9poqxUkzqG52+/tYAPjcfs159z3/c3NlYK3Xq8RBWoR4/765nA4BCz6XgghdDHGKISihg0ip9x1XaQggpwyJwEQDUt2Xb/uV6u+jyFQJS1y4a/gDqBer9em1ZhU9zkjtoLMnHORn+x890SkUb7KIMGkDSKuViu1r/REAUVa13VdCKvVar/fX11dXV1drlb9ZrMehtTHHqHnvH/8wbP9dbp7doYk42G4uMYQwqrfVNMXETGECFD2QWYuTltIAEBVAcuZU5pK/vb7vQqHNDJXfzdPOWWQEqdUNDd167Akrla6yUMRJsLadRPMoz2Og1q+mrdrKoeSkMNMSTnWzMJxHHMuMasi9LTJImHmPNS81mrY7A+HQ+ZMMUguwN/c3Hz66ac3NzeI2PfrELr9OCAiSKHArluBa8KJAuv12vqL6qvnAcDpfDsR6fuVzTHXLkcAKAKeEbCqZIdhH0LAQClnAN0EQ86567pxHLMwEgpD4owJQoybzWaz2Tx69Ojjjx+nNIxDypxFJsq0HVZXnuuJbnnegcLiajYLJX7TeTQ2qHNRSzi75kBoDmLbcZsPoZ7VPgdovpPVR8BZ814uw3xjtvE9K+o0LHpulGdLwjy1aW4QpDxpqrDNqj47xdzE7bVegpv8tUGMeux1XgtBZ2Eu9wNc1EQVQswz7WT6jJBcUE4WupEtgQe1cuZsD1Pt0+O5gjHhHJ2i4HdQe13ZVKoFbggx2PyfHre2IlLlkdrkugZGPCGEDExGJDQxYZ0O8shZGDMRIwq8fLzf9hc/l8N1vjp5sN3n9Fc/+vEXfuWrj778xbe++OXPffbNkYef/fTddU9vvf76zbj/X3z9qzfj37zzzjtfe+2ku9PnzBnSNqxIiFOSlJkyScDqg5e6Us1q8txhYZ89gTV48wsnczXLqzhQDXLjQ3+nSVJ7UEnIarQaHsnVzvFv90zq4RcXoAPHYjLXt/zVgHf0+1/msok3D6ofh+bmIh6LQB69bNWWC6F/epZZAm/TnyTpPOHeVJnb4DdfFTpC8rDRPBeOraundkFWJztLF2pKZwmlaI9kyjDzNZi94afj3ztjRie9c60C8oKIiPKYvHhsRJBJPDB3Bk9GIDnHZMMmMEmM43RiCDd8lqdYjn7v1675ZimE0e0d/h7jLFErzFFCSkkkIyKGQgD6744PzDzm0spCk5Eig3ZPhdsPH
vwlL5xPxuAhIryFw0zy3zamX010X3pxjc568YuuBmQ53AKUQEtEhZmFGVGKtg2CiKqNPXr0SH3bYd6s+zbg/Z9GTh5CDzNUvmt+WjIjlON5y2dmFgAUWK83n/vc51JKjx8/3u126+2m6zoQHIYBqIO52Jxho17LXaCZxRLmejdA3RlBgFErpqYtjxbY8kPB/4+3P22SJTkSA0FVNXP3iMjMd9SrKlQVUCh0N4A+eU7LkNPkCHdk/8F+oOz+wv02sitcWdkRjsgeM5wmuWyezWY3Gmig7rvekUeEu5up7gc1U1c3j8wqdFPWpfAQGeFubqamqqa3rmMBmjeavue/129KwFvXxdrWr9C7ywb0l9qUq7SzJLQbx/Dw4cUAuozg325TKqKUW4tU5xUXAzqllNI8m5RlRSCpJjiphXouUY6dCKZUYBLWdVNtmQaWbWi9LYdqoEqjBC7zl3ajN4eImEJIVOs0xkhEFxcXGoKuCqHOLcYYSfq+Y87quwohABBneHl7C0ABD+Pp7uMPv/osPO9jCCHA/lEIWoMki8g8p21YClHUphR6g1TVxUdXKhxM8DYpNAQkAvWsohODj6dbjRE1WVeXrHM2ad9LkrkmGVoUEiJqiR8EQBHmBJWTUACh4GFuCONRyHg4Vzmn2Yib492YZiEExJnzaZ4AQBVkIuq6QVFImFEgUMdcSsiYtUJdAp6ObLNCCH23AwDOnPLsK/ceDoeui16TVIkXXIpHtfKz/irVbE3UE1GM1HWdmn1Vd53nzJxZGEDmOVNt91KvgIjIylFAJ0sCkFlEAiBBieuwX5FXtV0MpFJbFYRa7MP2cSmJ1hzt/lhtKBzW15ZmPLLmda6UZxxYrch2MzMjCaBOEXNmDXGXnIc46OEkIJyzgBAFCgA19qfZUX2VBwE8eKETtb0YYRK5n60N6BdlN8Oaceg56h1xjXpwdnpmwmHngjD25x/xrNZvQYX5ctx6yPuZeyXWVqqv9nq73WPr3e6+vWuBwzrWWTWAZQTELDlzyYZn5nEcRRAZA3TCctEBn6YXX82v8vHd8F687H75wed/8f4Hf/C7P719/vL9zz8kgavL3T/5h//w/c8+/lf/33/96OLw0x//1kdffZ7mESemHucpHVN+FB6lufBElICIgQJI5k0Olc6/OR5s7Wa58CD1OIBrSQI2pOQ3yGAlToL3BGhfGg5gzU2txk5uXt3MYbuucgOuMBD+K124Ftq2X/pLRAAWDcerwf5ZWBNU84095cEL9Syhe1zuyu63Y3JORmjZVR9l5/nxELalmTnGdm071S0C2E+hFjloqbiqDA3Jn12UPhVcnrMHtX3pX20WR3+z8jFDdUQUM3jJasleJzy7O/ehljcB+NnyepnN/Lek6t/bQJWd99LTjg3pozXF4pp48TbbTya/CmmftorPsoLbt17bfbnvavjI2SXDuRvab6T9pmFoDSJJVSMFgRB9XVAR0U4dAqVSixB8+OGHz58/f++993a73e3dkYhSSlmqwr8OiNX4TYODR2yP+Q3L8vP3j2yB6emunJUCIHB5efnee+/d3t5+8sknUAUgzksk25ZYzmI1bFiZgdT4tkdREaEQ1EjqVgRn1ysivqeRfekJBNdqvIhoCOUWSrhuQnjWLmb3G3pzaV6/FKDesizbLwAAYIBV1FUIQSsuKcWVWC1c+LkpJ3UoTImnMeWcEUKgbs4zZ97t+q7fDf0gIjmlaUyZBTEgILt4JRNUTAHW+E+pjoTMK6bnPyhYYu1E7/m20Y5+cBrjKikJhGJXyo3ESDaBEMLFxYV6CBsnUqAUY4gTdV1QdR0AmQWp2/UD7rGLh7vbG04ZMIbYn+YZQMMdS3mYnARBZsiIiKSz5bqDeZ6Lyq1OV1MIC/e20P2IXb+qs62TQcSU0pxG1SctfiqEoG8xjLIN5RqkY95IQxhmxlgTwQip3pzyJLN0XdeF5ewm0lbvxreXyg4G8MYym3O2IjFElNKS46Z1iUzbt9nGGDUbwkKl1FteaXNFO6rvFUrj8kFJiQJ1Xd/3nVkfSkgt6rmpOjMionAR2Pq+74cYY1RNsuDRqaRuMcM0TYjCki0fFRdfQqFfNdzYikxhCS6cuzkLGghUQ8xyvxdaoqcTr6jo94vhvNK8NxgYdXkq8oxjy0f8OqmGh4mzdYVNY2gbZ3OiK86Jv9O4FREBrBi0P0vsFbQO8GhW4V9kS2iWbMzd5LCG5xKRCPgNWGiGBQI1r7aJ2av99z781b9FSdce8XvU7JdtqMGkUVD9Zfc3ki44JGkmCW6zoEoYyyaqGUPq0giZBUVybX2WEkMVNJl5t+9ub0bOgTn91Z9/9MZ73yMc/ud//r/+h3/z799549l7b7/5mz/84TAM0zT97o9+upPu//ov/tenjy9GeC2PUy/hsN+NkGKkLsWcUiCSLKjFqpFMnvMIbGdA86V+6Lq48IgNyuHmuLXv7U4Dl41sbBocp0OnIEEtjozO6ODf7t9r33hygzXm+73WMfM9Hjmb8BaXGoSpb7lX/WuG3c7qLE76wXETqm3f40Z2adbbjGYjsPO5iWtP2hCRfdOMaYbGs0CGDfD9hP1PFhl79vH7Jn/fbWdn22ycD5cqIvKSjx7Mk1AGwYLhGg/WwB/WeOLf3lS7tjs9G1/jwHllz+73FGTjbDmeJwoPbRdRWfoQGr7lDMycZInNyzlnEEbIIEk4u6Lzbp++k04oGx2jLISWHqB+yVSrrzTr9R/OvqghhO0Ixq8aJK+/1g2C1Vts1Sjsn3r58uXXX3/97rvvHg6HFy9f7Xa7B+a2Xf7DF25ECzkHK/86IlIbUwgBWBCRc766urq8vPzlL3/58uVLLeg3zzNhCCGkvDruG5557rPYmQbOA6Cgs0liUQIrwAlVkUbEDELrYMWihMvyroZRy7qti7H9MiHH7e2z1JRdb8KWKi7zEkxUsFeVAZWPY1RlSWphghVYVm93Gr7eoAqhuFoGzBxjK61SDQpTL9Y4T8AlzF4nHKt+MnNOKU1pLswZAtSOaz4RuOuGvt9pAiRzUs025ywYYU0UCyav48n9Rpc/ASuUVtKsAZYw9kPxZMZafUR/2u12AKsCFoVTcZLMLKWmiyBlkcRAGDMDimDoLi4fU5Vd83QTAuvLhYGzOgkgJcZa15RLK5M556xVZLgWazUNzXxBiGiuY0QMVHKONKQQAFKe1J3lzSUhlD4Wdj4aG1HVEXxYpg/EWDCZ2QVeVnQlRNStDxFDCEKUi4abDEsDqPLfQSBICdWPLcy8KI0K5rJBgXLSUkllkHp8pP3+YrfbEZHpgeqO9t4/dMYUn/EB0Ft7jz52fd/3fa/wgHpeJJ4VYABA2pGhlvnZ7/e7fR9jjDH0fS+SU0pCEzNbCGsIyMIihjlLjQl/3plkaKzYR2gWrrLmnJ6b8TrksCK5oLadOMsK7cUNFz7L69El/nmxmGuwsmciRiH2vRd/Pb8z9LI12CNcA2Rl49a3V7t4mWWezWIda1tA6aGpdIJFV3G5NxuB1RiujS8mX4blNntEqYUcQ9+CmlxIT2N98aPh
RmByS2vVaVmH7G6Dde0VtndK6h43/AFgk2lQ6OxVf1ohqNRMF5+QREIIkDVhPAfk4fnnd8LPL4erj64/m964+P2f/vinv/vTD37xy7/4z392dz3+k3/yP/zkhz99+2f/5aMvvnh8dfj69OoC9nmex9Nd318EwGmaEIJWT+cMIYJIKbdj8Pe70yzn7K94Tnvxl6yzgbe77E8awzRPL/prcq1gg0siPTsrrkWlGrD7mWhDP6zREVot7b/i1VClx1W7wdOj1OQfrCFP4OjIw81vlv/mLB9ovrenTA/0aM/MXQye7VjWjecJfsBm+8wr5XOc/G7SOkivIS67HOBaeRerYrNdLLriWDZbe7CBPyh/Xmu/dSZtRwrDK7VJN0ze41WzpzGugm8962gGbz57+vIwbG5odrbhPLR2jMgqx5tLkpz3TzInWQppcJFLwORLuw2EzlXhpQeCSOU+Pn/uTr8uDwqB88G9frtXNLVUVV3xHNspQ4z6wUhJ1cKldmJ5SoFWObiIfP311z/+8Y+fPXv20cef1APFjAvN4hBhWY7frIY/+DU+cDVsAWqimojEgGUJzI8fP97tdu+///7t7Wm/36meg6DCyXl5pkEkN70WyGeXYOBlBBFAUZ1w2SbfWWE9QnvoKNibSD9Hbuchg1WobahYUw9MUSkSOYACapomotj3u77vQVD9ezmLZU2jM/XamCvG6OJcTENwOrP43Cep+Zw5ZxUBAgUtl2LeqmmedBpFWXXSF68ztO0pcVGpEFaHCFSpzG+xcSdaN2oHMG5m6118R13XBer2h4FIVcFgkQWI2Pe9BvDZfpXszcQiwjWBWsXUnLMQzXOCzCI5UhDCPM/TNDGKHimc1bkq1VuoqwAAzllSShpNqg0GuVaOtcPFb59FEfsllyjTNPrkgqr+AdYkF6nHtF255p/bQWCkwVz4B6IgRiJiBIFcpfeUcznlgwBg9Nua8yzOrIA1u9szOmbOwnNOWThoCUkJCCA5A5QcwpSmGLVSC+ectR2LOM+KsXp7neJSIZDc6E6dKq6XhwurQWB0QEQR1S2kAZkxhBCo2AiGYeh6teljjDHnmZmFSER2w0EFNqIgzIhRZDEQGx2JSAxtc9EzNFgnBADZ5Ys2POqsChPtPhOD/KANZxSRGHtZXcvo4vS9yqaZ6IxMpqTuyQ/dZfd7/aeZp1TXolo4/BHr54wbBYbWBjbPFGAdPAlOK0MnpjfjoNPOwXFwD3eU4qH2M0REwmI1hM3lYSJOavQKmP/Xkt2bq6b/LovijW3Yv9TKgqMz85BLarc1bkcTEetaYQoqNC9iQClMjpmzpnkQYcgiAIQUg4igCj4UZDdc9Y+nmSnsRaZXn959vXvxw998IyQaqMvT/LO/+PMX37z8yW/89nvv/Mbzr17N0wkkP3vy+OtPXt68fHkRdiQwnU69DKfbO7Ao3JwDsTBjXAX62kJsvVRTTPXXeZ5sRxrMv28fwckN23tsI7YkIJu8RP+i+udqfxvW4OmumYk4yWMrmjTT264Iz8mgiAiwFnY3xgJx8rdUgcaoj2sorF/C5hVrkXcjH9vMPZF6LiEu/83OAPvJzgZy8RGeq3pQNKAzUvUK7RaADRDYFd3x8pZISSX04+hebxXChul5mODas7pCOTjD+bWMhB8Qa3shK1dtiKqSh+WcG1T93m03FJFgvV9bXIW1xO+Jwr5ZeOm69h2sacpuDkt59GJ/ghrV3yCAA6AAFHt8gaSQU+N+vajR7Uq3KHIfWM5+2eD8A09BuwVlB73aHAAzAgCQgCAE8CtV4x2gcxkx80cfffRHf/RHb7/99n/60//ctMPaXp5a/eVJ9eyKzo4Da35iUwIACIGZ1c6vKY4ffvhhzpBzxkAxRk3e375uS27bVzfzlxof1DyFqDnqldw0axhEXC03xKXTIwH6INuGRUBlXN6eDufOF7SMtRoYbwRC1T3YkEZx2gjNNZ2vBgouRUG3u+bpVC+vAVrUQymNLiqgLw0Pcs76OmZGF5/Zdd1ut9P+6WYJtbdAjWnnGu9n4rtxJLPipWr4tmf9WeABC472653LSmt4TtTik8Mw9H0fQ7/b9xXUXu4tur2hNC3V3cyYi7p8Zp7nsgoIICxJZsggkiEISSSMAgKQqrGA6iBYLTMEov9JThYrO9tphdXGqrDVydO6TBcA2HyoOtCIyDfZM4WN1xWDxOWoNx6dEJe4uYLwS6dcrmUpJfMCl4YD2ypCbG1JzCySmZOIlcrPIEEAYliSa5iThlwiYoy9lEp5wXp46wcRQAwhdESRKGqkrjfPI2LXdX039ENUV7BhEVEp30hRj1o1WwQiiqF3pgpFWqWIlFJKp4k5KZ2yKxCa0iJXe7yiasHxp7bFV9o8F74hq4Z2ZgXw+ogn5LaojB+rofayqZtqSw1LBcdVbQxDCL/l/suGrcC5a/vGnDNI9vDytO3n4x9sWICnmQbhFANsWOPgth/i2m8Y32xeKiK47m0lXlBer7HhVlsIeLW+2Sx/2dJCrS4oVfTRR8zLZDFUNpT96XUenztn84SqEPoprcm1YCGaDSYtVS4AICsaxAAgnDMKxj6IYJ6TCIfQSQcZA4ZhOkKAHR7nT37+6dUu8gk//eCTv/27v/WP/uF/l2d5evHG7avxr3724b7vLg97oHB1cXh+fPmELp48e3b7/Hq+nU6nE6IWzQ8iMwAAnXfH2UrxnDTgd8dvpT3lScljpr8HNpfHnOBKOtk3hmN+s8CFvTWo4qnM3W9Y9C1C53/dq8F/+x5xBTH7yejrLCE3SO4p199s0o+NaR/IBV1UY3mBmb3C7m/EEb+iLd8Irsj4dvLN5Sdgc0N3mUK4RdSzl+cPUs1VzQHgkRNqzkZjdxRnqgshQKVoqDvl8aphrbAmk7Mzb1bhfm15kd9iv+kNGLcjewq19doMxbnmEDEvxeUWidDbifwgefG8VWhbMiE+VGbmLEu5786zDSkak4RdZ4eVNa/e3u9Jr4h9DGjOU70Ny3vrK8r/qYQ1jqePP/6Ymd977739fm+BZ3Zz89oq1LYWnPs4mJdmmvnbzO1+W6kuJzET0W63/9GPfnRzc/P+++8PAxKVbkuaUh5oqeTsp/EAlZ2dDK6lNH+JgHj9UIBxZaT2DITvOSxoHVR1FuH9QuoqQ/NT4XGVu1YMLzYsrJFQKSWrziCLTihb9Cu/SpEJuS4/13rCIiJ5CceQWrXOFBKu3rkYYxdLFRyN4rO4R6gmA5Qi+JkyozdrgwHVZKx3BSJOtfoiOttHqIXuvZkMHE4WtrzkZ5IqAF03qDaojqZAnfp8dJ89CuWcq+pUGiqoEkUsIpJT8enROIpI4jzOXArIc845I0oAxCDEkYiYAVE1LgRY5EMRUP03pSWEoexILtEEuq1a20Ydm8MwWFc6gKURhboHyQJiiUIgSxpU5hlqdr1BuOHPPvG+4k9mXrEgKTaCVPcCsMZAVVvdgvBlbvOsSY+GLbaJABDCYtYHZIQgoFVWir4EkHX1IjKOo5ZgnKYJEbWrR4PV+mtKiVzRR4WJ7unxeDT6amyRiAg
11zTnnNMYQhjHcZqmlCcRgeI1ESKiSRDFhkJE7f/ZGGQNIHlaolSMvho24lG6dnFa6dKsHVzOmcOW0oXN6xvGYWxO3IWurJMmdy50i6gb2fcl54RcjoqpwugK+hWyd9GVfiVeGTPhWFygs9aIA+dn8GuxNcpGLosr8QABAABJREFUvPAvMiibiEaucbmxFV4CVgVc0wKpIjitzfy5FeIr2knJIfRf+mPA/7umsUVe0c+2wQ1CRCL2kpzuWlU5ipnHAA5gliExH2wI1qPC44af1SIxVwTUWLucs7gV5ZxRADJLtZIyMGAgC/UhdXUip3KcxF189fL49PL1+TgF6N58+vbXzz/68sMvnjx660///X/43utXP/npb/HMn332BU7Dj3/zxz/4o+/9L//qj//jz//yjWdPb7+8PR6Psefj7d0+UUqJcECsUjYiIiWnxuPa0b1skzvm1UCwRKTUX60tpCdLQzlxItcWA/2fRlNYvUB6j3LwdQHoe+UST6fNPbLIK7U3zjnTb4ONW9T135+9bJnNnc27tNPGfUDzDG6rZTUA9NRhVNxMoFGzm59SSoHQkznUs+e+3N1zATbtJGFNmLZHxmeMkL2hzXMhG4pd8Or2QmftMjbl8cdfelsXljYbuJQdg2maFMOrcFCMETEsNkR7RNYhvjZtEbEcb3AHjYikVIyaDUVoxsV92AJrjGqAvL3T07VOyR2EgrLaDj3FGRcMMaGzSq5JQI8DbcS80Qbvv7aEUJYMC/v1UJKqEJ75/tzlkXlLSn4Q22vPcwwIqs+KU/3saKufyeBGiHPOr169QsR33nnn8vLy1atXKkn6nPYzq1479rc3wJq6t6A7u1i9jHAULS8uLt5+++1PP/30k08+6fseMXCNkWsiKitjNIH+/OQfWBc45oPuKhP2R7/P/pB2kAZK4OgXXazXlhU4ZF6BiF2Mg12OFZRQui52qGVF5lkTXWz3eZ1M6OYrxm1ERIun258lTYsXHIPqA/TzEZEQQ9/3fVUIcy2LMo5jztnkii4sIY7GnUwhDCGkNHl1ZU5i/pmCtGvrdrOiiEukhmVwCeOu67uuG4b9brfb7XY1By8ILcv3O86stfN8RVYAgDzNjFpJlVNKISUAENCehCDCWZLWrWLMIkLccfUpqdYnIlmLrKqunTmleZqmaRpTbVKtohcAhFhYtOp1qhCqgqe+2b7fnU6nUsY1lxIyfd9XIJR2fKZp62LtMznHPmxoVt9i0Y+60RYkrA0n+r5XLJuVcBYQLm20ipJJnSlgVO0dLvcPtCUjIhIuQVtSo1qIKISdiW0AMM+zDWKHvl5LlmOaQ1iCgXPO0zSxJAJUkEL1VxORCGNQc7OeqpBSykmUpu7u7sbpaHQQI/V9/2R3RbSUuPPUZKTnubT/bJjc8AovhEQuKK1JWVT/JVn0WHDHR0QIbGkCOqJoQiuEWhZWO5qDQJpZMJnGpXsmJb5OsBAtqKZj8aJeb/FsS5wUZfva7I3ebxI5VKFn2dcKC+Mv6I5/CzE3Lp+TaFwvYcEzkYK8CGj2QhAQLhUyEYgARYSTIAIRhaIrljpmeWatDk9Q+/5lBms7DqXOkM3fliaIyPryhUNVTloYQQiBSO1tDFUA9WAsjMw1wibv2WdhliXPm4IAZAHRLwGxim6o6DWX/fUHT9FDahYEKz5gkZWZOXEmohCLop5dgqLqoogYiBAgScI+UpU1+9grl0TEIWiQehbm2AFiIGI48uOhz9OrvgeA05RuDpfd7c34i//yBY/f/3/9P/7jX/ynD//uH/7e93/w+pvfOxyPX77x8of/xz/6p/nl//nffvgfHh0CwzTe0D48zt9IOHZhlAMG4Tns9y+PN/3hCo8nEQlEpnEpT7TOdaURHACIAMhYHapEwcLe0OX4hbBobugCdQyNpaphRszgWiTNOQlCFhaRLCwISISIiTMzCwjqiai7gJDTUr0W1uIjOkXd/MCEyDmjclXErAtk6OOQUsqcgaCEeUPayLliQrCtBSEsXIm1awBibdAki0WjbSyx/lM/EzOZJV3HsTtFQGSFlsbZc21L49duq9bvbUNNTfJ2YkdKyCzVj6EGJtHk9QrRlSQ6aRm9EDpNeqnG7Mo5a9nGtaBmTN/YvR1ODZ5oDgMBdiFmyCKijSLA2adyzmw1JKCEJACi1nIAADWvhBCUpSuFFk8YLSeNfsg5j2PSHey6yuFnAQiEAXFpbeoABYjADESlc6lV2U5pNuT3a98ibZlqXsnK/oBsMNw22l8eWZtvdExS6yyAJRCW3UFkxswgFIBo1sLuFELOjJSzaBARiOyG4e7uDn2/AOcVNFVKoEZlIwAi58IGwRsIEGKIeE6tbRdTfw1O1vEHv7Ae29XGDKX4iXEhgyQ4o61XLfRDQqcImcG93kOIWoZEBBAJkC4urz748KMPP/r4vR/9xv6wG6cTMx/Hk9kQAUAERQtXQhESABryJ2YlHL9fANCGOtuH5mR3c5Y+Dpwmnnm/253uTu/93h+8/ebb//yf/3NJEkMc09x1/TRO/bCfpgk77yNq7Rc2vOktRFF31G0LQ2k/j1oNwh5jBuFJ03Qll7FFSMuNBMA4dAEhjaNo0DWOIXRe7NGhSv0P10ZPagAbcM28cAKTAEQK+2GHAoSx7/qcM2FEIM4MQoQxFywq1SP1CCOMXUcAPM9jjH2I4erq6vnz5zqfnAuP0m6T+j5NuSKJajLSvq8CKJmFGYk45XEcKRQOwwwAEbGbJzym+e502/d9jFEIOAjuIyJNaU6n8Xg63s0jL55KIESRWeccAnZdl1LKeQ5h9/jpo5wk5ZyyTHOeEyNizikzCgNh6PsOlc6B6n+iGpgKdQAgLBPnkhsGMOeMiKpqHi4OwzDs9/vSaQCZMRFxhH3mnFJiAUSIEWOMkTAJpyR5nlFEJHGamBMyJ4k55XniNOYck8SkaJcxMSJYFaKKA/2AFj0okjVwVSBVPwqnNB2PR1XqRIQCq+xBgWrMZ6j5aWTtJczJkfKEJMOu2+17xEuoNj5XOwOmacaifsRxHFV5BEARnqZJVvYmO/FK8RuUDCIxxogQQLo+dLh7OY0MiTRWmmfBoDJ4Eg6ddFFF30xELCwg3a7LGeY8J05EBBlijAw8zqNwkszzOGm7URTmDGEILAlpyJI5cbFPET693F3s9qfTKQACQB8iEUUkRhKWvu+6ECXlWcZQWxxnhCiAmaECkMapiNm1FWENJ6YQQqwpWkAIAFxSjvn6+lobFym3R4Gu62Kchic9EfVDt9tfhNgLEqBKjNHS0Y2fkABUWUv/tW1qbMcLd6WQdS8AQwhAAWsDakOzQHp8Z2aO/vmt5rZ9Aa7bD7DzfW0vcdqgHWP++G9+glrtCtcKYTMlA4HUXBS7V2p4A619qfYiLwJ6S7afQyNz2L/iVPPmoHL/rjLK/MheZrWhuDobTXNo3u7djwCQ8+wB0tzsD0h/3vs7G9j6ebI/D9cAzzkTLrFw5iKjjROmGZZdWSTYhLA2E7YlO4mQbXFQRBkKEG5v7z744IPXXr8YT9PTJ8/eeON7//
7f/5svvvjs7e75j//uT/7O3/5bH9x+KEy3+U4yzOP8zVev0jT3dIgUGUADPAQyEIqAEDKCAJTWiISMwAiiv4JBqOCPz1KgGr3g0czg3wgudo8PAd1Cb7tlRmjb/bG98+N4pcLjFTVy5AaR7pvMvXNzlRIeYB3NU4Yz+Z6+Jts7/fK3FOoNLvY9rvuC4kaZ9CxCqqLu7/dLM5r1b4+1/6cHqfFrP5Nyj7ShYnqppGXsqFmXH81gYnMDh2CNKadZ11Yx8LvjF8ubOjr6Cssh9C9Fp096xqj8gRZvIbvBW/5cP+P2S//Gb8Wxhy/P8M8yruZiZlWIlMzVtv0dacRfW4y18XUPGmgQ0dmQ0e8yfvN9QxewRuwtxfnJoBM+YB2walh0c3Pz8ccf/72/9/feeOONTz75JIQwDMMqzA9JFUIAsECeZtrbSeoVwsYqBSDrKtzN/USkQVj6f8+ePZvn+ZNPPpmmab/fO2sAxBjzua1p9uJhzobrsKmzs71vd/wjIoJrvuTJChzS2skLsOTBNnzAM1WbJ5qReu00hvWO+wOCkGKMp9MppUlBp2efrx4MTu7ylEW1/lnXdSnP9qVhlPEHdbbsdru+7yEzAKjiUc5HAfcISC3RqbPtuu7i4qLrunk6HeslNWbH26aDK/TlmSFUOzszW8URnY82MVcXmbnXPNxUI+IadzmOrFpB5llEtGU8ICNiAATA0+nEzKq8SY29nOdZ23giQnM6T9OkInHjCdAA0WmaTqeThiMq8GOhgmAlTy0EN9S8TajHgVbRlBrEaxATkWEYzDCnk5mmqaT5uHI1/iiR9VWmCkvdUazmJI/SdsCdlcZN0jZxy3ZKA5t1jToZwwoTqk0wU6bUhSg13dfGyTWb11iKf0XGKlpUf5WNr13+LJnwrEKYK9XO86gYojF6XYjqiUVEgRKe2kDGGVtN1oLGS2/3+3PZ/2TlABaKg+3jC8BXIROwTo0zwvZcA+tl0PTTss8eLfxndBdsWO2Wb26PLr2oZgdpD0q9V1GqGcRYSZmA0wz1Msr0nBQcp/MkCmsG6sFSp7GwcgMmOnlX1mcGogAwFKkaDULKMsBUlwcBpZhqPM6Dy2/i9vLfe1K2eS4HwzpRs4GD/5NrVJ6ftmGOOugtIKfZMjl3/tWfan0XJIE4p/Hzz59/8vFnrz27GuLwzdevfv6Xv+y6+M7vvfna00e7t/urP7345Qcf5B6Hfn/x6NEvvvgVJwxAzJKSVhxmyTlUiHkXBLqquX5dXANv/Fki7iy0yRt7arbbXhFqlmCzO83abW7NN3Y1Mr3hsKc4f4PZF+AewvTIcB/arIb16rLIuaJ96/vrxTUqj9x19o0GZ4BWebb1+v1qSNgPYsBpiNrUGFlHkzb8Z7ubWwvO9in/oZGZDHTeEOa/Z1dvxk/AwWRZsoiklCyOxb+F19V67Ck/oL2xIUO72eQJv6GeRcBaZ8ib/ORmIfbv8qcslOUnBr/mdXazpEoDftXbO8/+2XVd3/dqfQ/3F2F6YD5n/5TNGxfi/RtpvstbGrpuZuLxDTZrt3uaHcGq+c95zjn/8pe//KM/+qN333333/7bf6uSt+fwJUapSt5+PuAQDM5xCSst2EyvQT8/mulUSNj3/fe///27u7sPPvhANw5ztnM/hMDiPYTtudmsffuuB/60Zz1R10EWdWh98+K89W/EdfqA5yFGtzaOcYOtgUlH0+WHEDKvsnhkI6gwMxKp2KohDCKkJODjzA1u6BROOyh18DlN2ZW+gBp2pNhi2leMMeVpnrMphDY3G8qgag9eXFwI4zzPp9PpdDpZwUKbocVMGm7UjhoiTqa1M8iGvbi42O12dmj6U6Pg/5xsYilNzEwEHGLO2ZoEKlsVWqBRzCUYpQoPFsoewpIdx8xzOjKX3EhVL7EqANM0HY9HBVTdu4ItVk/IZuv7znv2bqoIrSNp1ftqgXu2ZHbxqLBmGg1dF7zKixEw1GboMUaBbDgAqzQ8yK6krT7bIL9JmFD7CXuqVyUUqESEElFKue97a+2ANSja0Klq4wsbNCSfpWqMFZ30+67rSIo9gqovJ4QQsC4kEABXD6FF9okmYHRdt9/vD4fD0A+ZkyKth6dxCc9YxHXxsAnrv56vGnUgYo28qKVoJPva2ogIsFigiKhVCMll+p1lviKLOXw7M7vs2eAKVcHm8kQLa8VmO77/BizJMpkEUxDaBau0vgsRbY95Xg8Bx8Eb/LA7V7z4PA0ssqY3sZylFqihnltQGIp7Noob2784dRdxtV5c5236kZuVOoS7VwaJMTIs++gQbsEW9gHfdap2wm2vRidvIOBhAgACLFIieAFgv7sAyD//+V9dXg3ffH39erj6u3/nv3333e8/o9e/+PrzT+4+e/3pa+F9QAg85bvT8XQ3RYjMMI4nnpn7ACiZ5yCd7ayH0tazZ1Yxs2h6O5lXSDxsyeWy+os3xlSodju/Qby20i1IViGzVQg9PdrjvO533yCJrB2MD6BBc3m29R3uXJ0f+q9XUP2YZ5cMa+OW/9BA2PZiI3KJvbeZiY12lo814/g3+S9p43OzNxZC9uGgG3ObnWoNOXudEJ3TRtzRbgjDNfXfAyHX3sGGlrLJhfZI0mih6FK+m63cEo5UcdBG3s5fu1d7EBWo+s/uhjOQX6NBczV7at+w85T6VZzFnzJhQETUyoen00mzrM8eZw9c981TN+8sf/gbju9X/fDlF7v96Sw3tklKSjHGjz76aJ7nH/zgB7vdDhEtYB6KebQlNI+0zVvs+2YyHu23k7ebmRlJqOpXl5eXP/jBD66vr58/f973PcZApchIra9OZ/bFiGuLRQ3E/E/bueG5s9V4ptHOdqXNfJpd8AA8extWh1hY5w3anUV45WDKiWK1n63G5giDHnZ1tnNKmYh8UYo1dbeeIpPv9YOqeVJPHNXUzJ3FzOM4qnzsGYuIABAAqfypjrW+7w+Hw+XlZd/3t7e3Spu4zhLUeFTTjtBppHZ2k6vIqsL6oV61OKTsdjuuiYvikhjTVHhmhTADUAnHd8xchMcsAFmzy1TKta008IqIyGIBYebMxTejaphB9XQ6qXtQC6KEmifJkuzw8hvEzLZAP5rundroTYMyDcf0UnHSnUUsGyP1eN7Qgp8GIobQhcDaC7RBEv1AsGR+yeaIMcjbZ8vxcQRVVLKcFk+muuNEZJqmvu+5phCLcwU1LzIkKd/X8fWKMWr5A7M+FFkWsJySgYi0dVpBfiLqumGIXdd1Qx8vLy8vLi6GEHLOp9OdLFY2gTWrcZPRkPWFUXhuSZtUO78LiMiSG5WyyN4uI2/pQ+hFARvUD1d2jpd0HZsEuVrGzcu8odr2rDkP/NRlcxJs+Z0Hhw5vgDMdoymraiNkzmHTrwY2Z4B5gXktcoGTcpozzF/6jXFYE8iaDYZ6WG6+VMELQDM7UfOaAHERrdhJluhqG3oI63v9/npQgMN7u4jO9tcCZs61kphnN35AIx6vNG5RCCp6mL1KFm64Ovl4iZcDEeFa8UlYjvM07PbH0/zhB5998skXb7311uOL+PKbm
xevvv7VF+/DFf2jf/gPXqbrP/6TP9kN/fjqlE8csJMMKZUYLQJJPAtEA5pHA1MIae0aVfr3LgKv99rMpeZ0eTry18IBHRyYOXSroh0GGXQKwBbT/P3bGzxhbne8OTy+qy64vBrOLvDhy+43SNrZ3MzCcwaAll3Yn1vPz/bxZpIeGz1aeogZ3EL4Fk9FwxO82dK/0fDK/qVzvnfPW5qbZSM9ewCaW8DIUP9U+4UeSxaDZCfrdlF2LnisQ9ehxKjGbtjyt4Yi4P5rud897p+971D41qt5kJ1HGl2rNP+67Z5ytUbTugHPfQv51mn478/+cN/330poWyQHx9agLnA7fLOPftprGgSPGFp34JNPPvnqq6/efvvti4uLV69eCWI1Pei7QOB8sdDveG2Pm7OqFABQgBBiAGROAPDs2WuvvfH6z//iZ6fTKfTF9qcVZTMICxOcD+LwaNxMYMs/DUSekW7vbEYzgPtV5HuK2RDRdgT73GwQAKiKhU6qkSp2e3Hfn8vi5N1lPszapaDve20iL1KS9+x11YyFIsBcSoOaAMa1OYTebMYCROy6LkT02uA0TafTNI6jrQtL2siSDG/r6vv+yZMn+/1eBDVHC5ytP8Y+BIl98YzZ6sT5T2zhpG3iuu5wOAzDcDgc1DHI1RcqNbzTdMJSwWUuHjOsSri+CLV6MyxdYUtgFKa70/E0jV3XZWHMNOcE6hjIkFPinARBi1uKiNZ9tAlY5dXj8aixpsbQClQJdb2qZqsipCqQ8T2ufleu1m2ds+qW+qDUaoKqw6sjkTfehUbs2Z4CiKEx4xKFrutOp5PH/IXcNCGXARBKWRJBQko5SS7aYM4ZGHLO02ka747AHLvObA26xoBk9X6IqOSpitUNKVYAk//FdVTytL9I+46EvTXZWzdMIcw5YyBE1CI5AIBYcOxit++6buj63bDv4hBIEHGe55QmKHojIKqVtq1jZz1LYF05RtZFwsk5jRZRgYBkqdFat7I1O66kTzjH+PwNHlLormbebu/PxP/Y4dq8VH+NXbQxt8xxO1UTbhCX0nCyOb3sEXGV5biW2bT5GI/2nNfu33Jh3GQGavmWRhD0fzYAtAn4YWETlOgf91OyobxyZfc3zrpmg2zwBWlQzdWtQVepRSt9mQJjJqVmNHAi1PZc9N8rS0Knj4HDECPmBnTlewh3t6eLy6vbm/E//of/0sUBMB0Ou7/zkx8OL7tR5tefPP37v/93/viP/3XX0YuvX6XbueMLKiEqCEEyZGHQmjEettv98ltjFjUjS7O62dLs1KFaLQnWdNTstf7Ezk6DzsgizmzRjNBA3qOun4bfNd64CpvJfMdLzsnoeL8+6XFp+5PNWYqGfO/j941sJ7EnLlgXYQJHtgsirUnDM1P/lu3J5wdszBnNYj3A0zx5fmIMpDHM+csjiT3l3+V/8oUTPb2ETQH67We/ZFp7CO17Zj77LJzjco3EabflnPWE9ZP3aLzFLgPOA5jgL292bCbZbMp22Ga2EEgDtGDNln+ta8sJl/HX99jnX0sh3ELmYXLebrpH6e2duD4T7deu66Zp+uKLL375y1/+xm/+6NGjR1988cWw30MBY8UWtNEA1hD2u+/fUr95aG5n5llAV+b8ve99r+u6Dz74YExzCCGlzCKSmBGIKM2MzqD8ADDdlM6T9tmnHmaqhudehNje7wHSjC8i5NoO+Q1SCZhqfIpFIW5H88dQtRmtwuaZWSPccs7jOJo+6YbSQ6qcdNrI3g9ORFinZxEEMdIwdCGSaixENE3T8ThqWVFHaKSl0PRdXJPfuq67vLw8HA4ApJGiXKPimQsnDCGErrdjVOoBQUTjOGJVnNTTqKrgfr/XwamWl9cJ397eapyhaRQmXKWURKMKsTOg1fkXAAAICOeUjuONNlfUMMWU0niakcTbjnPO1dICFASQBSDznPI0p7mWdZmZGZApABEgCSAD4jDsUNvl9b1lD1oRS4W/qutek+m6Lues9T9V/ZumabfbqY/UIG8CtqGZ4UDVkUr23cLWyg2+5fjqgEcVOWuX1y1OmmHds8pqOzipW3gYBgaYnXgcgtZyQhEZht1utzMHlWrvelCqxivOWGBboNPQDjEAEFwzRoWq1hJr1qJVskWQsyCaOwmHYei6vu97TZSNFEyKZk63t7fjOAKycFYTQEqTcZX6IZC054Uh9pY/i2t2ypLpTCnsEnPnGUJsTmtwXePQiSn2gEdce7E/1LFe4BQD2PCsZuOXcVjrXqKIgJCWLAIAEIJa3UtEAIpqB4vmmdk5zRr+bm/RxsRWP9cuk+ZNiAGnnPg5g+PafrGyVif8S3mdCASOUaY8rocCREDEzHPW8q9CiFgKvQJacWO/a/ZeWWtWtqFnl9AASucD61Au+4CIWlaUnGPar8s2lGp9Xh+vgu7Y0/mbkmMirIekmBVEAF13pnKbhMurixev0jxzPs3/8n/7t19/9fyf/O/++7/1B7/3+pu7H+HpP/zFf/j840/feu313/r+j7749NVHv/gwn3gvEYUCdZEwyyiQiUDyympgsPVl+j18zJzmD1fcZInw2q3XYKP/IC5eQkrB3oXvGAvzm2UjNFvZgG4FMYPtxrLuKVExzb6Uc1qfPbX9CREfFpVl87MJ7v4CaGvkPDDaWSDbfDxPgLVdjddBDW4JLQ/ZvsJzLXBHCLlwcT+mXUZB/qezdlapiq59r5cp9jqUuNPLlmaL1UfyUhqX7F1UXYVbpiFrNm4wbL5vZuWnYZZIA7VU2bdBZnGXgmS7ifdiwP1XQyn2wdhX4x19eBA1foHrePTXmNJ2ZABYNIz19ddYNZ5TY2yztngOayr2EPOfz87EM2pEPJ1Of/EXf/F7v/+7b7755i9+8QtZZANToswV9hAd+auhwe0kt/fr1iBLAgaAruveeecdEfnwk48Nx2KM4zhDoECdIHvjY7P8Zob3XVtW0Py6ZbYKDX8D1hPE11VuJnZ2fD7n1KVaIyTUrgzqJDHvnLigR3SBPFKPNkQkbcATMHMKIRwOh7u7O3aFPepkyJR2h05UZ1cwJOdaN5IZgNWTEmNUD6H6asZxvru7S9NkM8kaZlnNIyKiek7f91dXV0+ePGGGm5tXGmJa305UexL4MNHG4iYiquXqtdvt1EOoN4/jaCDyka7qoPP8UJ1pbr8ZibQPDQYiAdWFcuachRlU1zJVs3T4IDrhyW+6O/6WvnN2v9X2tMxzXazucghBFUK/XvOPydps6j9Tbcxj3T7UHwsuoMmUPX+Meny0oaQ+sEFmwlrlMmO2CajAYjqMZ9FYa08YNalt4vb21qtzuFZJQihJPZqeqt/HGM2TnFJSoOk3Fgpr8CciJVUi6ruOaixYCcamxddi5x1KtS+rGhGLL1rr0+73F6oQhqoVI0rO+fb2Wk0tGhQ9z0vp+Ib/kKtnuT2LwfV8KjAJmHNWJFozbbFpYyVeZo7Lzq3lwi2nLlCmYESOLr7OuJLByK/EszwvSduvNofkGoluwQEb3u2J078LzrF1EQm1xY2sz0JehxLZl02I4FZy8vCBdb8gvTwJ
V4SgvZNQeEg7CQQK59a+uTFUrtgOge+XPnuZs4fzI0NdjOlz1pLIBdWGkIpIBS+E/TANVrIqXWOsoo6kjkUoAiAkAAxBBAQAgJSoj44u4mxX7bdX1TVikABYihbxRGz47aWzVbFRAxJuJy2Uqh2qZFELuu2263/bYGnaotjAA4Ny8a0lgmAO1yMQNC13VagV/bTgiUEELXRW0rWErJeRyGU5mGUjJLTttN39dAZUMqTzCpOQDpEtB+uQMuz9aTI/+BWsA/XIR/iT4qwD/q989v/DRdHJT2K3Zd4xcEml0dy/m5vfhA0F12JPyfz1NJf7TWb/GYCmCIvrSY0jxL5+rlx1+AaEGeFlReWrE++8ozicUqRISlqGKA8zil9eq8YIorro/z5HWAy/G2nxvZguauxDmfs9GMxPhyw0RLJXMBK5yzxgXmiDNGzP5sxqSIVMrEpQASBQhEGk8UoIp3KrdcJkAMUCuQYitxhoKZ+YMPNooAn3zyyU8+6z///Jc3N3fd7QebzWa73VKA8/n8u+9+8/b9u9Pp8O7du5hCSulmt7m5uUmbdD6fx/F829/Wc54L8wTMfUpd12WGGKMAAhB3zNyL4HlV3F1WYoQHiO6I7gK7MpULvWKOqNflkgUhcO+a6Yr2rw8qnp1HuQzoCNxaWl198cSUFtfVyS+ef+pIPnXqf5+LmTVofjGNhYUILmf5AnPj/eg84bZ95PpWiYuwKq1zmhvz8mq/p36LvTHLNLRpmpRVLCZpnLuUoiEr0uzKnkyZEmhLAGdxNLpBzouo0oaKASJIFI1uGA1fQHJBAHXFBszF5O3DGh/m2D671g8/g072wIIKeZJ49VocpTXvgJV90P/k6nlfLMcPvnjdAiZPzfMpKKGVUnSTvArnNk4zQs2ry8jsmQv2tgpb5gOBPOWszZRRkYRFQKBoJD0QSqs9r9OSlioSWrlaAowUBKlMOYTO8tn0jU2S1Goxs47V0Prv5SKCoEcAXHyUNOcS1tRz8VYhg5Kda79YavE7+EPa4Bz+M3wDZ3hdEK7nBzTUsikpd1A3Qtd1idI0TXmcuq6bJnUaMDNrKRcIlTRpddb3798fDgdtIq8ulN1u9/j4OI5jjHG32xHR4XDQCpCmxIbWKQRb3UEESmlW/nfNzkJNVoyqR/jiKDlnABLhictY8pinpj4BhSiSSymhFmwEzvVn0kzh0nxo4GoWNOwAJEESliJC2JIeKWBMoes6bfAQQjAPYVWiRFKKXeoVttrtLGfRBoOHw1HbXZRSShYuMI5jaLUrTesGJ1ZZ+tlU8jRNXUyGV45ykolehhsKSS38oxVNFXTTNI1DNiVQwZtSb0HU6OQ6adG/hr3+7aUU5tFooAAzM7KUS6hhs8e1lmYtVZix+ZaN36FLWvPGGhUXi6suIyJaPMZOhKHEzc1Nrh3etXZRLFPOOTNPZRprz8waiiQa8wcASBARJCAXIUSUXKZRCgOyVn9Q9TLGmLq+1F4jBECWoaOWEaKWsNbaRaRNa9UuQkQpJSBkZg1kpbacUgoJZORWJwsbVC+7qZvV9x0iTnkopYwjp7jVPT2fj8NwQikhEAViZi371Pd9IDJXagrVxLDY667rLKoutASo4qpRAIDGadgGiRMm7ZhHI6n1HLUi4wvWcvlB7Lh1IDHmh3PBSH+idByadLsgFt42YGpucWnlHntsDv6oQC0ZCh7XDTu9AlYuqbcXquo5t4houzlD6AXjNDkPnhC17eZVvQhdo2e7vFipI1ygSnXaZhUwnmSlvfS35VLfGdbwNFLuuamBl1pQ+2Wz2nzIBRvolIqrUy9y6UVpQqrtjg1lJNu/V2diPN4KW9us9Cdc2haUstvtFDeaQ/KiYE951Ptd15HgOI6iIXkUQ0zAwMzIHAhV1Jim6Xg8ImLf9/f39zc3d4eRT6fT97/99v3796fToZSiRPDmdt8iTWiahpzHUkqikHPebreJwvl8BmSt3z1NU0BNEqgnLYUwBSRAitE2wqPuQtqwrzQUBOeqvuZpyFzma7u8FD3twPvXGWBjuMzHH5w1bstcyUdXAbXwleqRhoSLk+t3dnFkmNkqY/kT5+W/9akpLXOd5pkSZhtbmJnsAVtm+3fm5VOkQ8AmKFwqW4QQSskGMTs4isBGc7iV5DZCyq1zAzo9zeODHfPNZrOA+WLyiOgrA3t6YrtArf2DztBoe9d1+ryhhJm9FdF0VMUlPWtEFKMa2pP7FlWXW+CG3+vFnWAdkJgBWgm7wp64+U00krXAZxOYjCDrZ87FP2Zv95Dx/0rrnOllyqu/1Q+L1InFuwxjF/NHJ3uZRg1zUrwYwZbm1/vUtR7HfrsiKUpEZzZfcAWi7PkGlugH9KBgZrUPBVfAIMY4TbVf/Lt37968/vZmu4mqLSAGjBiACIggRCICBlGa2XWdSC1loe4RRAQWKQyR1CXS971k0dNxOp1yzvv9vhQVP+Dh4WG/3/d9fz7XfJsQwlggpn4YD8MwEAVBOh6PMXQaWhlCKIKlFKIgrIm+4PHK1rugbOIs9B5pPUkRZ9y0x7jFelxFsAWiLv5c/MT+tRqM2rpaBccylhij1lpTYlLKVErp+r7rutLsgMxF0+E05O+bb77ZbrcfffTRdrtVuqSup5TS3d3dNE058+l00rWbZM/MqqWcz2edhpFl/TkiWu0Nw7HTaVCsyFkbzbHm55t7sBQxPVaVDUTkIqXJRTFGqGapYi8y6qpIZf2rqpSMLWQ0VLoXYySClDZEYEoUBaiJglJFl5RSDJ2udBim8/msVUbMjZknVlAbJakiSkraQYGIVChPfUcxlNo88+IJLK6TlrTiq8E12QohjONYY1MBSikaGKw5irpScJLkZrMpIIuaQOiUBL1jOClNDq/3hZEvkoAyNGrYruhBzfUqzUetW2+KonE3fwpM0jAVRUFhKO0XTi3jTl808hQCpdSfyyTARBSDmp4xhMggfZ9KKXnKzDnGDpnHYXh8eNd32xAwV4VdtceCodNWE6WUUkYR6LpIFIm0PHwMl2qFKCIYa36miEy6XwwBZ7pA1Q6q8R99sR+i2mdVVbuu64I2/UNNmp3G4TBOur9MBJGAmQGrIWC326WUzict1IQAoDV1AASxtoJ0VKISHOOVXdchuj7D8SLPHA4HIkLShq2CJISAiNE8hrZnOvpahLX7/is7ikbgTFwzcUTmLNYzIRtzfXNxeVrpKa88kRVFKw+ezRZWVnm5lmhh4yzYADaJZDFh/ZPm/fHsWg++4Dro9NVnbIULxcDgZq4k+7G90X/wq8C5rFxHm8958a71zOxU07zLeXCN7MVJXV7VtE00M61/vz4fG8URKSIFsRopHx7fq0tdRHIex/GMiDESUixZpIgAI1JExEQBabvZx0R/93d/p34/nfmvf/3r798+1DCMnBlU54wh0mazQRQgJKLUflJK4RGS9jKWalvSvHMiOp7Hvt989NEH7x4ef/WrLx6Px/1+f72ZQFvsGuHX0rZBY7F32OT+qxjjz8sF8ljbW6+38urRE5E1OsuzjkDDQLzmM1lfpfW3XCxzj
QngcHg9/6chIAsc049rmKFmqfLyVPpTPwNmE85smUZCbfv8hBvVuqjW4rj1OkcUKxsuuKISi/X6zyZIeQLlkecq0BbfLuxfC6rQpjGj7dyck3PgXy+7T2TKLfjn/TPrqfqN8NTP/8Tj3mKBfjSZX3DtegZpr87w6lj4rHVjMUN5Oj7W/l1ThquXA8vMDuIPjgfd4tXomFF9RhHeQqGonoJxPBPRZrPZdn3JMpzHRAE3SBhBk34mAWTEEgphgLu7u1qfVsQqNBLRtt8cj0ctdzkMA4ps+z7GOKEw89uHh77vUwincSSi8zQi4uu37w+nQR1cWv+2nM8U+lIKs4SQWFBEKEVVPi9nAbSY7cwS9wwk7ag+s4nrU7mA5GJDYY7zVwdfDysuUFYVHk1yizGSkIhQLVYRRaTve2YGDUgTAIBSSt9tTIhX+5EqLR988EGM8fb2Vusrqn08hDBNxUSgNqvqyH3//v0wDNoocjiPhGG725QyIKoJ4OLCAgDVatSIph9UrxvHsRSZptJcNXV10zRxUbni0keHiEoBQqQYtZEJNjepp6V64vRzt+1Nh8EW9RpCAKhVQLRLIbWEFARR9QAApjxMY9HWc+fzoLqrirUlV5RQYNoVUlJ/lErqpZTMhTjYPo65Fu8RvshFnlybYbEGCuV8Pp9V/babune2I6EFiyJiatogOMu+Z16e4BCRxmRWoxWg2cuqUdXRWI+K9jobfyHDG87bAbejZAqCx3NqaZ/aD1NrxmoFF+Gs8a3MGQC6PvYxTHnMXFJKdTBA0mYbgZBESinjdGIuPJ3P5/7Yb7fbzW7bdd15fK++x0DJpA6REmOnTcJKyZVHRwohFmGa251tgbbpKICqTItA3UqoUELXChLV4DKez+dhPOWcmfM4cOFJLeN9n7b95uZmv9tvNl1Svz02B0/JgoiRklyECm3SOJVS+n7LfGkpTNhihvES6YBASpJVal0QJb2iKRhGbvRzceWePQ9ehKXat6Vc0k/tVDMzXqt3tECvNV9fU8YFGcVL7kHwgy8GWWOe/uWXrJdDjtnGL5bvOasf/BmWv5jY1Z94toGIVqnleV7lr6vw9K9bc771pqwXcvUZ/9njCTQUEhGLB1hci8cWdgeDg0IAERMxIhSs4mfJtbDVtt/kPB7GAVETVRAAESBSEJwKIBHt+s12u91t+74P03gaxtNvf/tbCvj111//F//4v3r16sP/9r/9P6vVN0RMXVd5WNKC0ea9yVLYCqBF6CDEmEj9lrk1rL+7u/vo9n6z2eTCp2H84IMP+u1WRPL4AwLcQlzwcrzfRw9Aj95elXJbsxT7HHiXwre9d/GKulNwrQiKzN4I7lBje+b3PBFGcBaWl8X8/Ro9tjyD0v5XSyDU9c7aIiGigNgsFq/Da+KyLYFXkQ688qA2qlhFJZut/QquHTFTJ545yOvFSrMxe/CunwQAXrVC1j9NubWb61fZTMGd68X480EacF0HqjlsGcMVWwDOw1nBsarLJK6J3U8BSmYmxRnOLGjm1Z8/c/lp4EqtWl9XEdhPwPOFxQKfP2J+N/0g/kWLaSCi4oPIJRkeqwgYmFnmCpLK4tBcfNq2m5kPh1PAWgqCCFMXUr/Zbqv3hgsEopKLCDBDpBBTGseMGDZpw8yP58cY43azOR4Ooe9T6qbpqC6O82nc7XYxdF9//fWXX34JAJvNTmeiQnO/25fC+/3+eDrF2J1Opxo56X13LpN0fS4WZ1Bk5rR/ZvvWPHcBf78Rzhi6fPtT+yVNUufmc+77frvd1pb0oIVNUESIYs45hMTMU86lFMRqZ9S6L9gqyph3bpomrRSaUtKbyme9h9xCFqepPDw8HA4HDe3jlvjH5VKziugSsiuuKuY4jlq+kRlKyefz6PXAumTBPF1i0NqRRwDCCDSrosmx67q+N9dHbNVrKKCCCKu+cVEXiQAxUIAYTWmtFI9aMLvC6ng8no7DNE0KpFzT2FhaU5MaRKRpe6leRDUWscgl3YtbVOSCm/jOwNBIU4unhWGatOWGzGvFeyp9KZhJFOeCvj1Z5+M62tt8AC4t/PxFgOi4z0IDVPzRjfP6nu0jt9IyphMaj7usfSX8UAu3gdb0suTIXGohVOZAFELQAi2AjKjzoRCAqjoDzHkaASbKw4jxFFN/Op02523XdVOhlNJms+n7PsU+xhhjRy3JMFBoMKnglQvALjZfABjHMSAKkcZiR6wGhYkzIgJcYn9auE1U88fxeDgcDlMeRIQIxpFDxO12u9ttttv+Zre5vb3Z7TfABRE1rkE0kk4YEYUnWPFZcn1xREQYBWscH0tN7/KchZktxrAJKkKERPM+hDJvFeBPo0PW62qMWfrtV5f5te03IWNBKxcEd43QdqjseaObnvguCOuC5q4nBo7ONqJzxVDtn8Fmy/GDzwD1BI/GuVTq57xYNRHNUzb8w9IaXy5N+xaKAPM+JIvV2TSuWpoRa9Vme+OaIS1mrohl4xtx94aGBSItSJLeWwABLWCSq8LZdanrktRQvdL3SUPYUoox1vwE7Zt6c3Pz4sWrV/ev+m6bcx7Ox5zPCqJvvvmGpUzDBACaTbG93S22gLOmBVfzD1RLD4UABKFPm5yz2rGIaLPZbDab3W73wQcf/OyP/0GM8d/+u3//69/89nQ6iMA4jgDxKXxY31mcOwMytQDgNZJrvB8s8V+8x8nIGUINhIAVqq8FI5vVAm8R0V7qkOGKcvgU3V8sYXHcVmv5gevqwx6YMIc2Yq1kuT7shqOyEsjQKTZ+OcqqF28Mq8Rse8Awf7GEci19FwCU4S3I1DPL91SRXVG7dcixnVnF8wVZ9rT6GVAjom695RJ7+BjMFvBRrXgdFSlPxXu4Q+FJDQDgiqKuCex8oMtXfsmLnVqPsLjpzcb+5wtCt4bheslXF2s7iBcb2aXc3+JfP8MVNbiiXcDTBWb1IzYOdBnH6Yf2fLOQo3If9e/lccw539zclCI5j5lLLiVJBMAQAgKM4xj7XmHIuRQpRNSFOLGcz+cupVvNI9LmlkAsKEDv3j9qlT8MRJCKwDDlUsppGMdxLLlG3GF4BwA/+ewPU0qbzVY0eSyQT6ZBREBEKFo/cw0cv/AfpEUKEHaxfziXTK5ez+/7emsWh0iBrx3bVP9BwpRSQGJm7ayt2T2sonxMIjKOY54ytnhOHUF1Dy0zcz6f9/t9jHG/36tDjCgBFObJGJBe5/NooXQaDAkAh8Ph9u6S22yQUe8WADCDFvSXVtQktzY8lTwyCIhQ5V+IGGOHzjHe9z00nqg/0fgdhY9qaDEF85LFqBy8tQEIFYDWsD7GWs5Rp6SOVtWQz+fxdDqdToO6Mc3GR0SAtRIPadEjzVGrhSub7Q9r5F1oCdheEP9BygAAIhrZeHGpmVZmuqgqvUS17SFC8Gn/ACgCApCXGyj6WT0k7fZFwNDGekgXCmmGSxMvbQkKAP3KfJjG6diFidnPvaGBXQk9jflSGELzi6hy0fd918WSR5aCiBSUiGn4OomItv0QQQAszIjCAJIrph2PRwzEEKvZP6XNZnOzv7u9
vd1u9znnzWYXN1Fzf2xKFWTNeCTO/q5B15wLAKhCGJAgKfLXTirjkNWDNwyDeQinaQJkxZf9/iZ1Yb/f393d3Nzsdptuu930mzSeT9KqudpkAIBgVqsGsWJCKZWPg1z2KOcMeIncWZAmRARg1aiUTTDzJaFoQYYWIYi2edBsa4uh7YEFebXZGzunVgNnTfU8vVvMB1Yk+4kjJHhNdDOIkGtF6qm2/9V6zPUrFp/lh/jH1fuyEpQ9bH9Asvn9Lv9DT4MWm2KvlpWEsfjWj2O/XcCT5lFSC055dZLrb+srQAAEsaI+c46RkHCcaoyoSJmm0nXpxYtXd3d3+/0+pW7bbUNIw3kaz8PpdJqmE5ex79NwOp5Op5/8+CdfffXVr371xc3NDWAtucxYpSFEAKEUIsXgDyS1rkTKaDZdpxRZFcKvv/76l7/+zfvHh/NpQAq3L+4f3h8OhwOk6wohzPHZNmIBH2yC4NWcIgO4P/DuG1ncF2bhpYav712MbzurrYSXX8Fs359Z1w9e3hMFDrWewpNFsOp6+f5aPOnvMDO4OjH1vXMD0OK36wH1MsMBOz+hJ0HgJBijvOiULnZpbAusWANhMZMFzuDcDKRXaNWbnyEjeE278D+pi+Em6tFCQp0RQBEBuBSdNdrfID/zi8LT+74gC55Y1QnL7GFYbfRymauHnwfLM9eaxAGApQxIC5axvfjBQYzbwmpbF6fVA2S5QKcr+vFxxVOujuAMQ3NjqEuGNOMFEWlsoQrHmnJGRHd3d32/y9OUSxHJpQBrlGmgCOGUz9iTFEkpFSxQGAOEFF9/9/rzzz//8MMP/+inP53yeDoN2+22UJymiRl+85svb25uPvvss3fv3otIij0ITeMYQtA6OJoqNpQSa5XFFLuNpgOUUkLqLuvF2nXu6ibqv3ZePLN7BnRXN+6pbZKV2LB+fk1w0PlnjB+ZeZpc/wzV9EJIpZSkeV/9RmX0GC6FQKTFnSqWnk6naZrO5/Pd3Z2mPMUYh0E7FQO43NFpmm5vb3POOY8mzpkvwRxxBkkdliiaQ5JaIzvTsrAq53WlKSXzwqErfKUaLACoCqak11wRIYQQSTFB39510WalzjxEVHFc9cOmKAZ1ywHAOA7nc63qOY55ysM0aV5MdccBAEJQYYCRQ00bvGiDpRQgLC2XzB/eeKnKfsX6Kc6Ql3P14FW669RRaoVhqbUe0dTfUJu8XFDO62B+HCMOpuMxs2YBKIgKFwAgvKCiaba+LqN95ZF2jb36agucsShfmafd2iA6n1r/CVFEUkd3d3fC07u3r5lzjBGBmTMzp9hTIGmaMyLFGImZkbB2p+BcRhFhEIobdWYDQNd1N/vHu7u77Xbfdd1uN9zc3Gz6HVW/a9BKpPrDupCm0XRdV6ZJA8SGYdAuHSiAXTB4AwAXsDDjEDHGSISbzSYm6vs+pbDd3IaIu93u7u7u9na/23QpxZjIKugQEbtatZseCaFkQxtW2SS0YgxWDqdB40KUDMKVYtBl+0wIiZcD2axcfmNgfiGi7Ts4biEth81eZtssqxH8tKTpD/NXXM6G/cnzYhXu2ysmc5mbVD2jXUx7fXMxkwUc7LMl0XrqLyJPeQj9fqxXt74WE7YPmh4A7kjr5T115MwYxtLwCXs/OR/9Ym5+zuj4oudb3qoETqaxOSygvUCw9q34sHgxFgTQ96lZnibmLCKA3IVOe3r2fZ9S3O129/f39/f3210fMR4ej2/evHv37uF4OAvnlNJ2kw7jKZcpxng6nb77/nsp33/xxW/7fhuDmwyhJkggYp4YWJhL4VoqWisrphRi3FZxNud37958/fWZGYquLkQMlHM5TyMA3L24f38Yn9riq0iyOLcGZ7uMRF49hraJvo2B/3EuF91jMb7dfB4tES+JiODwcIEAMtdznrrI5U74317Ff7u5Pkp62SCebnjoVsCqhwCWZ1CqSXXZjQbmeLuYf2hVDcy45heymHAI0YQncpE2NA/qsB0DWJ4a/+f6SPrZGj33q/DExO6s12uT8dio8GnAIk3jac/PSKjIrM744oOslB+YX3MKMzPALRaF8+fh2p/+LeJsmp6VXH0Y5mdqAZmrzy+eXG/NMytdjCNOeruKe1evxTPidA+ck5Rnfr5YY+VsIhf2j4rMivtQ5S3mGCNwRgjYlC6EQCQYgqbunN8f3r55w6VozZguRCHpUzcNIwD85te//uXnnxPAjz7+JPabPE6QegGIKb1/ePju+zeH4/n9+/cfffTRZrM5j8P7x8fNZpNSIkTNS9MYMEY4nE/b/a26etBdWsPaSw1Xt8mfCC/vLggvrA6jR6oF9vqLnL/3me1b3LdVKMfXeEgjO6UU9fUghhij2l/UhRj7vta6HGvQo5UKV3Wo6zqVbrXu2n6/3+/36oGUS0WAWtAuhPDRRx99//33OY+qlmh50hcvXhBJU1EunfdyzjkzwGhRl5pBB4IsNbldmqyo8eQYIohQU+MMaClEnbl5R3W7Yov+1Bi+2H6Vkja4u/Q5QBIArcwZnCEPNJ0SGc7n8+FwGCugvEvjQu1VIdQtMJUSmmJsLh0LsjV9VVrpARHxoZoQQBo1LaVMU6klXvNkkYdoHSYoaPkcrIpu1MwpIioNmwwP2QJ62+W/pQj2GLSueQGpqObT7lAtmsIUQ022RATXzuR8PvvTZFKKnSb7rNvkKZvHbZ1hzjnnECk11ZEB8na75XJ7Or6fhlrgDYCAYwghEmmiIbaMDGYuDLkWEyYNErcCOSJcCqu16PHxMcZ4d/dis3m3399ut9tNv9tut1oqyUwb6u8tpUwAzNzFWBqsuhCHYZiGUYE8TZNaENRhq9i42+1CVMNZ7LoudYo/mOI2deHm5ub+/naz6YDz+XyeHod3b17/5je/+e6774yMKKiH4axoHC8N4XVZoR5q9pyIALLfi1ZATonSIrGIRSCaOOJ5pKduK1ZX8WMtvkBj2D6FBp2zaM192yxnXoKnLpPtDI0QsYbCzcf3D3js9Jcf2b99QZH9n+i4slpW1gT9qSUsBn/qh+3f5568eqHj+sYz7Ie2QR6AflhDg8Uq/LB+CfbZQohXeDKbmycNJrI7LLo8qQ9rCWQACChEIAEjpVYfLO332+12++Ll/cuXL/f7/WbThRAeHh7evn37+vXbYRjGoTBDiCgl5Dw+PJweH9+P03G/3ZRSvv3mu9v93WazOZ3ON32HVexGEhBkZkDEXMYIXYxx1226Td91HdUOpLmmFpxO5/O5TIOIMFDXdTFGKMwCoet3afd4OL19+xaov7pf0myBdofndXf8gVrs8vM4gIiq1XiQLtBj8Suc15Wdb9ysU1yb+VLotDuL8/KDqOsR1c/ZlKXFmLU/9UoR8hTJT9hPfPEJcXkuTGk0QLn5X7EKgaseCW7LSusT6DfU5oauqIwf0GaOzoblR56Tb/Cf1zTWzH62HJgTanvXM9Ow+/YV+ZkICVxyzg3CioEeAm1WJl5coX76ALiF2Agyv2nzWdC0xVQXm1XvP5Hx6K/nT9kCzrDancU4NA+XeP5dT+2sHVJe+VT
Xo83hJuuRn1mjf8zT/Lrh7nRRixG1kh56ZzzlaZpubm6YQd0AgEVE09Xo8eH49VffHh5P4zhu0qZ1/Q6IfLO96dLmm9998R//8hck4bPPPitZcpHj8TwMgwgOw/DFF1/knD/55Eddp+1+Oi3oZRJnjJ2W5mNmjefvum6/3495fnwuBGQGavvArie4R6GrW+lHXlDL9SFCdy0OL1zDpTWtayuNWq9Vb1a5S9R5ewnvspqTIYTNZiN8lma+r5VgQ5VZdShttaddFrbb7X5/j4han2YYpJSJW6PIEEIIKTSrgLrsBEZyFSMBqqLVrLoFANQZCBrhOe9AAxdTUS0ME1tLPZ1qwBhjBgB1kRkgq1M0XjLcmkKIFl1ZgU81piPGCoRpGqcJ1I1ZxtwKe6rHCUspIpcDWOvTQPWCmmxp7SWYmUG0VI+dVsNPLpfYTd0p/VBKsSbmXn3yuX/UYpQCJfVwGrgW6MHOJWhaysxP0y5C9j4hU5prSCcUu6mrgHKpSG8UqbhyqXrfGBzPMwyNUFiVbJxf3Cp1h1CN8zFG5jKMA/Vd3/e73e7IJeczonRdVzAZBAIShWgBuoDABQFFj4MgiCDGSDWjryrtrcdjiTF23bu+77eb/X6/v7m52Ww2+/2NEplWWiJIo+q6Hdt+I/uScy5TZmbqI7dKuQCgGToVOKg1WkPf9/1GHbzQpZ16C0XkcDicDg+Pjw/DeHr35vUvfvGLr7/+2s5+CBEAxtyKAuDF9UJErUJvURWUiNSWYbS7Ap88ObpIoVLpRvMQejroBQX7Fy/jXrbQW9Fs6Jk2OJ+Nd0PZ6/wI8kPeCc+DG9GZkcsFhV3PYcEg/bdXXypOlvL3LZJwOc5TLsK5dm0DLiQAu7mGw4VwXGMz4ODpT+DVN+K8QKVnYwuLu61u/ToPWz8Ou76Z6x/6hiX2K3D0qI7TahuUUlTs6Lpuv99qb9C7u5uXL1/e3d/GGN+9e/PNN98Mw3DUdN3DSfsjbTY3m67POR+Pj8Mwas1xZXIv7l9OQ0bEFy9eMJ79qxGIEQDo7u5Oc5RjrDWjH94/Pj4+Hk8PwJfePmlT+//kiYswMqauT31irjkewxNlRteig14LuNkzV8krESEuVXRs4vgCN0QEkNUq6e/7XVjsuIg0/WthO6h7up6/rNjSAm2egoZ9XuDV4hWLw4jNue2FQnB0ADHYTy8j4OxFhrqwogMGh5yLxxM/c3Ypu+TavSyeb/eLIbn90J704j5Wxnlpd+FneBVDYIU/4OjAYhc8rBouXUCxJqF+fA9hB+olOQIAkQtrsEpROC+i42eCiz/nWiU4jNWvCGbI77dscafed3/ak/IE37mK4Vcvm6fwdXT1Zh1/kYvvXS/c5iaODa2n9NQxgWYQsUH8e+1d/oeXP9sZaQPWbxWNKahIHUGk73sVnkop5/OZc95sNt9//6YhPHd9dQ/2ff+2lMPhkHM+HA73t7d933cxAUui8ObNG83N/ru/+7vvv//+n/6Tf/Lpp5+KyLfffvvll1+GED788MOHhwcA3O12yhd2u91ms5nGMk5nYez6XvPZENES27SBm5anrgtc8URwBHZxoNaI4bFlQanWuyBzZgfuQF0Fux95fV+cyKTdJozmgMrrrIoW55y1rAu1+IWqFlJUIIvI+Xw26mQqkw6Vc354eDgejzmD8l+bqroTv/vuO2VwGmeoDDrnjHSx2jPXRhRWki23JoRGTKxKpy2TiEJIBmdzvhFRoAQiAFH1QmoZ/loBTrMHjZopGnRdqG0nqJb7169KmWy9OitVDMbT0BxyU1XugFTqBjAHXbIdtNoNpVZM1a5XqN2tAMA0t5xz5kJSTDH2Vj9VCKnVG6PmmxocEirkN5tNDN1TmAmAtiIjvJ4feQQWTTMDaJNpwbeAfd+XUliqyhFbKvuUs0XE2DiXtjErfPaP+Qcsl3JxIrj5unPOsRRE8s8oqZGSD4cRAbbb7VkCIkphNSyYeUKPORExIFMQkczCXKiFNKcEoo0XCzCztmEI4Rxj7NJBbUkppZcvX+12u9vb2+1+V20BratKXSYJxSQteWHgKYQQQu1sMY218WbXdYUnAIgxbLfb7a7X5KOSccrD+/fvz+fj6XSYhhOAhIjjOL57924YhpRiVhMziRq5xDmfiShG0RzCaqkJUQ0latGw+pSNptnn2ohVmudfIRwZpJSC3LSI1onavLrULhF18M/4qO2WcS9vHyqlkJNUbFUrPIYAAQAASURBVByThMwuZc+40Oqa59OwJViItkP0Yj5QcMVt13hm35Yys0x4pPQrMtvPWqTwE/Y8ozJLuM741WcteFHZ688RWBhqodsWgjOvvzczKVENyPErVYuIBoq0mahdBACy51IeGthCk3HlILpsqBpKdeFEAJBbmVkBKMwIyRane1FKEWFtRCsrSZSpCDIAoYCABBQCJoRQcmCS1gQpS544p5Rubvvb29sXL15st9sUolKE/X5/OBy+/t2X7968PZ4ez+fz+XysZTygSyGllITL27evh2FCxJS2H314G2MsPOU8MQD1CCiQSsoEaiVgLlBIS6hv97f3d9OYp6l8/+bh9es3gVII6fHxKMC3N/chpPNhmMZCmKTgmM+73a7w8PLDD4joPAxC4SRDYCKZAEGgMKtZPWolJwQUEINKQwYuZekgMpa/RjlmRrT+bPqVlHl1uPlZiOxCfD0VvqjijYKrYXUqeRFhBfVVs8NSfyWiCic01m4/kpVY3+6j5mYAgOoIIVzap8JcnyECaRnwl/kTgkARBgQMpJipdhkBCWTI3wa8YLjeRO03ASBBpOWUtuPZLiKrmhgsrYWZVb/WyBQiDDEis+RcSiFECiQABnMMRHjRGBea4XpTlMF7Vc30CpOiSiml1EJkmtlvxBCRpBWVVYOokTVmzbUIMNuLmc3OTHszZRtrXYScL1lDzKw+Gb703AtmHDThBsxi2HqZplTLD2pRBOYiEhQGfhdEZoquJ8tcQ6AdheF6oNydln4TQDiI2DiBCEUKc/HiigGEmTUlQwCQJCABMCKIFC5MFScJoAiAHcC2j0EA9CQBQAzdOI7YCg6JSFBWSLkiJqqhAjEAIY7jxAhICBgQsZBiKcbpSu794kzZQRMRAGpCwIUF265x64NyOWikzc24WQYLCogIAQZhIhTmTd+PpxMJpNiTQBd64pTiLsAmRd7cdIA5QAGWTUgqu0SMHe3Oj9PnX339m9dvOOfbze4Xn//68O79z//oj/+LP//k7es347n8w3/4px//6Ce//s0Xr9+9/ctf/XrzwYebbr/Z3f+Df/TycDh89dVXiN1ms/nit1//5Cc/6be3U0GGWIDTZtsTHY9HmqYynBPSi9s7IppKxtRR1xdGplQwMgbAQBSZMzARFoOe0RkPQ5hfMs/PFNf62Nj0QrQAx0+xVUlpbVcY4MIHEBGxLH4LACAFq04uKVZL7nbTYRl7dVwAjuM4TVlVqeP5lFLiMiJiShrewkoAGYWC9h4H7RghIi9evNBg0VIEMSMyIpbCOU/ffvN96sJms1FXMFHMeUTEGO
P5fIyRzueRAm62Lzdbbea+SalDJOba0KIURgzTNFiP1pxHXcIwjrvtnpAILx6n1F1K3yskQ6g9JIgg5yphRkQiJIJASAQkmCLHQAjak6NEAkSErgMQgZr3gRhEME8CkEoOWvf/fJ5yqeGsY47DUKZJACIAqjgXYgClYBQzoKAQYQjEiHk6Ks3JOWvMZ8485ql1nmAAmKZRKSdhEK7SvDFZIgKoVShZivZiGYZhGIaSOY+jhk2qYqwCMwXo0sawrpSSEoWAOY8AgACRRLBK4MCMglImEQERaBI2AoSAIpaKxVyYS6USu34nUDjXCE+oEaSQui6XklvlTEAExJiS+Ho1rsDmQm4x1SC6voWhdQphZiIg4hALhQAygcSUUoo9vEeEbrtN5+Pp1Sv65OO7777/3Xl4f3d3//j4SBH7TZymiSh03WYaSx5zv9kfDxlDjxyl0G5zczweic9q6A8pIlIpZThPE0/7zTYLMHPJMsEEgDkXZXyvX2NK6ebm5sWLlyqIxhj61CGipggKCFL1EnebXqV6QCCkRJwghRDG6Ryp2sVCwMLj2/eP0zQdD2+1rUhQ2wcSIsokERALJ0EYObJsUy8ik0iEVLgwYwwAKpIAYuHAqBmPzKDluAJBl4IkakQJSNmicEDoVFZnzgygbBwQ2OUQBpcESLVw3IUHV7eP83/5E6sEguchlHTN16G80OjmmquZRd9LnDYBdP50bg2UjUx7+usp+FP0/amv/JQWgsiSUrupenFtPY2FKPPUxcwLiBkjoZrsuyw7u/7Tbtp24FzHMPP8wnT0zBrXi6q/kjbOPGaAW46yzCNRWzYCh2qLEm003O/7Tdcz8zhkEbm7u/vw448++eQTijX+Hlg04Pv169dKMc/n43geWGqZMqWY56HkMrFoAIZ0XQwhdDGFEHIZT6cjotzc7kLop2mYpimFhLXjTYoxQrN8f/mbLx8ej9NUCGNoEZh3d3dImVmkTPv9Jt53BDiO4zBOL1/cHM7y5vtv+u02pf7tu++7tL27u5vev20+tYsfYCU01EMEqEVkr2zl1S3GJ7JDYY7b/t+FJQ8c8izeogROYwyuju+fX5ziqz955vJLEBE/wTm+wfOjXj34Vw/LwgJiT64tI4sZyjyugeViES+tsqjRAfuJzSGXS1U9ceaSNST18na09bfYophs5uvoHWganZ+VXf5sLuCD1wKJ2eVyQ0Ncb4kz+BieL2CCiCGQ5QwbHOCHoisNbh6Azz+54j41wt3e64X4BROBi9VgPjIAwKUQtNYbQAkAgAACuaYGAQCQ6Yfa7kw/h7bwaZqihUBLldSQCTT8CQERGZqiWD9cuZ5hT4sTbagoKwd+/cp0kzn2ahkGAKj9Awhb7hkej4/DcM45pz4ibZFKjF3fp8KZCHjKOYeUAhGEgPub7UcffzCezpiZAtzc7DbbbhzPmaebu/1Hn3yIMfzBT3/y269+R0Sn8+Hu1Yc/+9kfllL+8i//ElH+6I9+9uLFi9evXzNnIoiRYqQQ+lYEPyfsbm5uur7XeWqoPxGdh7E2RqaLgPEMr3uKeiwwClz5mcUDFT1aYUmzmvle0vVhR1twLsx4tEREOzX6vOYQoiv40exWME2TspKcWS0RamFPm+4iDLTam5qPl3PuukmbxWtJlZpzyJM2J9QozRgphJBzHoZBp6GmKFVa1IuiOo9WxfDmbGiRxka4NGbYvDp2ZO0ZNXwZ21KnKCJGpNpLkIgIAmgVkIgAiBJCiBQQsbTaCqpzliI5T2bJKqWcz6dhGEwhnCbSaQNArV3Z0v8Wp6m4OtIqw3BRARVEC7rmrLGstc6COieRPB2uIaZNbAyuUQcXyTmbHnhJkmyWNSPROJe6FeDZtUfHFk9ne3HBt4qAs4uIrOchrOjG+pj44+AHCSFoFW526qI+oyrx4n4pRavClkwllAuPANhut+N0Rig//vFP/uk/+fN/+k/+/PHw5tdf/N3vfv31X//1X3/55Zen0+n169fv3z+mcEz95v7+xTCWECikkAucTofMcHNzkzbbOmdCAFRtU7dg0sgvVqyrTpfj8agTPh6Pj4+Hh4eHu7u73W633W43m03fbWMrhwsAKSWWSXP6RKRwAYW52kCROZfhdM55tP6fgLWdiVaOUaWRgh1nQpRcKtGgWClJm38VhxTcsCJn2IoD+Tsax2PcPzguLwg1CtnMAIpq6OJCYaZCtKI2197t+Yqnp4Y0C41xwdTbh9mf/rdw0TQujGqBoOA0WJiTb3ji8i+ypS0WKE/LHwv+uhah/Kz8sPZG/4CtazFzI47qC5G5KCkr6Wc9lD1s8PTLsY1er/2pqdY3mosDLg0JyYWELbaYJSNiiERAAVBIMEYMoe/725vbzWaz3+9fvXp1e3tLMSDim7ffHI/H4+Ph8fFRy6C1hRQprP4KapGTiHhzu8tZvYAigoAsPI1THg+j1vYVKCJlPA2Asu37j+4/KrUTkJRSDo+Hx8fH43DOOaNWw+pSSt00FQDY7/an4TVzQYypQ5TpfD4jwd19//2bL1+9evny1Qd/9md/fnv/6l/8d//y+9dv3r9/pNhxjeOd2ZXBuao8MAFmaO/3cUFwYU6FF1gEKyovc0Xo+VeAFRVsucv22OKMr7H36vgLpFqfpgUCe5R2IAKR5SEy0uYf9qYZj9uLJT91Hv1ndLqKXtwCd8EdTK/w0LxixOJE4yqoG11I4WKe/kXe+yeuvIqNtua7MhcI/EwW1qv15SfsF2Iz9Ni1foxb++yrwDcpeQEWXlX6WW+WOP3N79EzS7Cto3AJ91oA8KkRRDTWeoGcAfSsMiCqtx8FQVqbmaa8XWq+mY+RcybNa0qIFJmZa6ZuQSJl8lXjFKiKJov6jtYc6Cp6ewRYILbMqdCM/gCwIAADVNOLyCWrU6UTadXztTKejI/Mpevj999/ezy9CwHH6ZgCxRQA+NK2O0VNNfz0008++eSjRAFy6WLCwrvNto+p72v1ywLyyY8+un91T0T9djMxIMput/nss09fvrz/yU9+st/vX7y4+8UvfgHAIWir8bTdbkVkmoYgcHd3d39/3/e91n/WKLv3D0eVj0GEmuqFAr4DzVXArgnXAq8W/y6+5XmZKxXIWavv2MMkwFdCte2D4hI2VptSUmFdK5ogYiDjua5W50WZ104AzMznaTC9IlBsdbO15qH+1atCqK3kDzIIFA0Gnqap67rNptZrVVm/73st7kIa/4kBBErmPJVpvAQuIhAIq5wdW90XRSSrFwoAlpMcW08FaxQhVQfe1DMroK6N0DTGFEOMUaVxUzKxFQUFABHUmi5VC+OsVU/H8VxKmfKQc2ZOXpDTeV4iTlvJMTHligdm1l4aJTMiau7WeRyYOSVN5uwbOkguVRGCGZ2sIYgW7Wzutdh1pmxf1H6sSogPvITGI7wG3jDqEh/o0dVTUcM3ah5vbEodOv0B2udg1VaUMjvU9WeBWq2Nq+drTcYVh7kVwrF93Gw2m23XJcr5/NXvvj6d/tFPf/pHH338wd3/7NXw/v27d+++//77r778+ssvv/z1r3/zm
y9+9+33r8fhzAIp9B+9/ODjj+M08vbm9jQ8VL8lQsWvbdIkQ1UIudSlNGaqe8qllIeHB80v3e/3L1++BIAUew2lZGtT1oJ+NW82WEJji93NOY/jWe0FzByTMDMZfrRMUbm08RMAUO96SgkkEwYiEjQgK4cy6LOlaYj1vSRAUOdNISQKUKaWAhMApPJoBrkk7Bp2cqvV4+/P3v2ELLXARWg2LXE6CVyzzXtMMinlgmoOTe0ru7mYDM4FTZEfUOQWT3pMXTy2eH497POiyXqQNT82gPv5+H/rclYamj28mK2/uTiW3hj/jDx0deaz+YBNDxeXyJV1acN6YS55YpYupe3tbtv1P/2DP1TTy263u7t7UUr58ssvv/rm61//6m+0i6ieSW1hRATbfoMhahcgANCkWGbOw1GN2SGEAEEIhAkAXr68z2V8PBxI+Pbu5ub+Vd+n7XbLJxwOh4eHd9rXmDkzSECMadP3/WmYTscj7XHT9YghRcpChIIoIGPJY4rw4uXthx+++tkf/Y//5M/+9Kc//emPP/vDb79586//9f/3+++//clnn37xzWuDmHILg6dc9u6SdgwrJ4Bt33pT/IZ6qmp/+lO8GHMhxJgusTj4MEf1BQVf0IFnLhvkmQ9P/dB9WGoLfmIwV5/8Mv21cGfBNbc8zA/gmqpc9DcUH/EiLkF3PT1wpduNWi4WsnienUfOvH8Nl1icJqwT8AqYX75n3l4dfQrsXr+1f4NrokUrp+UCbTy0l1OSCxwW2EXXiN4aPjbmVafZgh56TF6Qo8VbrtxBrmaIqjyo069WFhARTbLAZpHiSwwNAoBADdDiPAWiPgUpPBFvYtzteoD+OE0FgKvDIaAgiVZLF65zEGCpIfbXlBPbL38qr7KtNVYsLkSstQ49cBAAwHJR2oZi7Lu7ly/efvvw0Y8++ujjD4ggj8N+3w9jFCmAQgQowJxLkcdHPp+PIjIBk0DO+fx4AJYUYh8iM29Sh4gFpJQSUuw2m5Di8Xza3b188/Zhu93e3O53+23h/Hh4CCG8+uDlbr9FRFVibm9vEfHh4eH992/v7u52N/vttiei8/l8PB5rm0QiAgQBFia4bhcDh3seCRd4taYJ/rwsMEraZc8uqCgoKpXZKxaX7Z0/76H1BiMMLQZP60tLSql1JDMqJznn8zSo84eIAlX642Q2VJedxrblnFMcVSGs/tVWzt5KTWq5S2g+Ky5V6rVYMJxfij+qB3JNgqruL8CZRNTE61kAQu364LrexBCIIMaYQtVeqNFkAGh9d4tKyaWIZlupRD6O4ziec86lTNOUrX4MKboLEGJACkggauJBYcl5smDIKR+ZuRRRhVC1O1UIERGxtxWp2HZpxtei8bWnvBkIKvQyK2yptRxUhbApCQacmrxQSmEGEci5pkRZ8pE5PBUCa5akNxd4q38aa7MHZP5b+xxd1X1b4CIdCS9h6mI2U1tOe0U7Qa19glZJOZxPiULAJCLff//mbz//JQUZhsM3X/7m7u7uZr+/v/vDP/3TP4HNpjwevv7q+2+++e777x9+/esv/92//6vjOcfQf/3N64d330mobIu0Y2XsU0raN6QAMrOwMtZ6KLSqJ6LWxBqnaXr37t3j4+PhcLi9vX1xf9ztdro7ulmFs6qIegQSXVLAxBlJ1QJSSgkRmDlgK3REWsc+SkoAUMpkWXVQJSXQqkgsF/qjZ1yktC1W7CLltUQUUDO0AFggwKUW3NzkB9Y1W0R8tQ9oo3pihIiEhLKUYIymgBMrwcofXTMPeFxcoBe0pEFDGnvAkKaB5klB2V/yhLK0/mxz85xgMVVwpNmeXxL3p9+yfp391o66AtCnHYvbDqTr5nMD3UL6sfPsDyfijMgu4GPrWrxivVkigjKjIPbkUzIHAHApRHR3d/fRqw8+/uij+/v7bdd/8PLVhx9+KCK/+tWv/tW/+u+/+uqr8/kcY+Qi4EIpQgjMWSOtQ7Seb4WkpszlMiEAYkAQlkm4OcCx7Pa7D17c95tus+nGcXz//v2bN9+XR9TS5CJFc9lS0K5EjAIp4Lbb3d/fAdDhcDgdBgkZUVIIt7fbzz7743/0D//oT//0Tz77yScvXt6O04SIIOcX99t/+POf/Zv/379hZmw5Y4gXRo5mwnXQ1Q/+7lV4+kE87i0w0P8p7jIMMTT2bzHcwJVkv7hMBVq8QsNs1ngCT6iCa2QTqaxzfab80tYwWXOpBQG5ChPPsUREmod2ccC9wA0+kxkqe/M83r9a5hLeU1D1ZMcvx1p1+R/6N4prhLj+ObhsQ48nRhbW8LcF+vWuh/WI5Nj5jJIgYrVQzreyqlqrYqp6dzGl5am5Bjr/pzTuY0Dja62VpCnYXpT3b6kfWADAq51SW6NaXCiAEKiF1mWAISLVEuCMAgEFuUDmGML+Zv/i/q7v+2EYTm+YEAUKgiqfF+6me1YR2KxH4TnjHc61FE8i7Ca7wmMGMbM7aEytU8tRg8YLFMmXCC4trY5I9/f3H3/y4Z/92Z9pjOjufg/IQACnI4QARZtB1AsAjsOjlHI8Hofb/XQ8g4jkAkxffvWlZldOJWMM2+0WEKdpOo5lGAb9arPZvHr1SksmNCEsxRhiJK0ycnu733fb03B+9/YBEbXDXkophZhCi2YUwJrjIEggAAuI+Q8L1MK5HcGj7lWiJ01/89SmCfHrzWOAJ+ntggRRq60/jmMIoUvUREylDEBECEFE1BuGiMxCRKGLalQNIQSKRsZb1cdL6ULVDAk7bahdWkfHUtrPQ6DaLrwzcJWSTXv0R88zF2rBosxsIZEUrtIrMYXBkLneQVHvoCqEXdcl8rpHBVrhwgzN4aZJYszM4zhN0zROwzTlUnRdWqfRiZ1wCfSwOWisrIILEbXMuKafcS31ASISkDCQObhKKVMeRSTSLFKjJfSBFvjRV+eppljHGKkBzWIIA13qahoGliItCPPS70eabqb6/6JDmEmIni/AXBIw+nmJj3UEyhtSY0v7VCV/wXYXjjJxqqM+qeYDzS1ULBDN7WYNuk6IIec8TbjbbhDx8fE4DlPX7YLwcD6eT4cYYx8TYiCM97fbH//4HwOm33zxOxb4/G9//e7heDq+P53H/f2LEEKMKXYppRSDOrdjSkmoNhQx+ADAOA4KdsWZlmpbROR4PJYsj4+P6r/V87Lf1rrHkgsBkIAwSy6JAjMXkBAiBCoUpmmaBGJARkaSGGMXE6JW6lAIVx8phpCoJmEZKkotsBJEBNVa2T6048aIEGNtuYJSGIOAIAlR8DUmKlYQIuAl8U+cZmLykuGBYcxCkvCED+ZkFFuYODjG7IdaTqgiaWuwuCKI64cNr/1jsLrmP7xiMfWDeHkCF8Li3CK45hbrCdif11Y6k1+9VOe3w+5wDeFbqqky13gXr/BwRqc/rOdmuwOrfVyPcwHIpe3E/D6A5Ssb+RCR4XyMFG5vX/z0D/7gj3/2s5f3L3jKx+Px88//5j/8h3/33XffPTw8pJS6LookbYBbSnl8fFQci4QUewAQaC51rgUX9BXKnDzT3aRO2+ymlMZxfHh4
+Pbrw/l8FpHNthsHtXdSSj0Fpa1ZhFMMQHy/39/fvRCR169fC+f7uxeM8POf//wf//mf//znf/Tppz96cb/vNxEwPz6+Rcw3N3en01Sm8uNPP+m7eH93c3xzbmCfbQpqgFmzy1aoIoJzwnssWohxnoI/c/lTc/XzepdtzDrJJ5wMhkv271XEXhyuxZPPTNuP5jHKqqSuedL6hzivhPzM6+xbcp5zDxaee2AuP2xFtq7C0G5CA7tVVsAmeF0Fix/T3uj1Oj9PWyC16i8yp9jGjP1Z9vY1WBGuNUgXyhs+QcD9n9gUQotDqSt1NSTIRdvC3CO6ACNc28Tnj4DMmQ4RFr4gz3ot18Y3ohdEBAiFRRjrEq6cDlLmbAsKwihACBoTsQ3bl7f7j17di8h3xwdoBlpEFKniHwgioLV5REQrk8MrLmNLMIHS4EnzeC2/109tvYjQRYdkQ2VEynSphJRLORwO33zzzccfbH/5qy9Ow/n2dr/dbm5ud/e3N/ubLRHFSJD6lPoUIzBDKRDCHb8AAcgZKOTjMY/T6XAkgJ/+0R9rXOLhdGJmLfJehIepaOzG4+OjIpLV5gmtd4K6eqowPZS3b9+eT2O33d3f3wNACOHdw3txBnURkWaDu0q47LwvUMJTP3+uPQIvsMses42gGiCQLZzYv/0pdNajZHuaUtput8ysZlMQNIUhpaQja2ChGo6w1pHCTRc1eSmlFCgaYVH1Ulq3JzvaMUaW2rFQRd5SUN+rs7IUxFBLj0DOnDOXIsygOhgRaQEnfRYA1K8oIlVcDResDq1JRvWZOo0IEXNmAIgINXmQKEQMSDFGJCIzrSIJah23IFJyztNU1POmlyVJapygJRbON1qL6xSDPABp9pe1+VWFUKVWP1v7YAiAzQjl6B6IiG/AQBgEBGAy9AvNfVp1QowX52eVz0n9k6oQhnBpYm7I0+A5u3SElC6hy375Xp6foahjK/4ndljIuf6aKfzC/mSeVGWE1y5m1pZ6QIiFNEUIgLbbLQkQllL4q29eD+MRpHz66Ufbbtjv931MUDhDFplEMOfy3XevMfS/+eLrkqfbu30WePXqBTNkrMpb7JJWi/Vl8BTCbWmEiPv9znGu5r8tBRGHYRjOVUWUVh7l1f2NphdWFREJCQkwpWDGR5ZcShmGIYYgWJgZkGsJU0QUBrS+1oSIMQXEKFmd0sWQTUQQmUh1QpBWElZE2NLKSJENUAKBCGkye0VRZrbAE1UlozBSCIjAyuUwIKAAE6EZBhQ6ACRSU6G84cc4upFCj3ALJmS4YjhhD6+Rb8GoFiz8gqMrlrb4c06gL/cXP/TP23IWs/JMxb/dk/714HJNaH7qLQbedjxmzhARsVK8dhNr7PXsdf4V1LwWT/EwcIcT3Faiu/zM0SLWkBYwdMg6k+F0wBTidru9ubkJIbx9+/a7b7598933r1+/noZxu9sAQC7T4fiYc9bKTsfjMYag3TxjJBGZ8jBNU9/3JCxcPEZJs6vt+k3XbSpJDSHG+M0335Qpn04nEdntNn3fn06nh/eHV9v78/k8TWeCsu1v9jfb7XabUnj78P50Ou226eamQ8QXL37y6aef/uEf/uH/6B//6cuXL1++fEEow3A6Dw/DeULibht5KIfHd8MgSNuf/+ynt7c3x8dDSpuhUfyGKi0hbQU0JTprhDQEg/m5QCe4LH/2BIYvWMLiHKGTXdb4/Mywi2Pyg9fVOTvMufITEQGVlB05s2mbrdGGWh9emB+ZxXvb808qZgto6DQwLK3XurmlXCy1V2kUzj2xC1oB13bEooCg8Ww/uB/k6vbZTXEE/OqOyDXPBsybLi7g6emzRy1a+wDdG50k+pwR4eqOXL1sDt7vtwD7VYJ29QQtB0cAERACUN+f2nKBWvUs/VO1QRImYRRG4CASCLbb1Ke42/Q3fegIxmHk4ZBrtE872oSMgFWhrHGEwS1ksZynpi1PiHSLH+KcuSBeejdDiyoSBGAJgVSCFxat3vHu/fu/+uu/3v9Xf/JX//EXf/Hv/u1ut7u93d/stz/60SebzebVBy80aDMgbbadeo26rhvHMzMPx1OkcDocQghSeNtv1EJHRCFFZnx4dzwej9M0QQzMvNlstrvbalyGcHNzIyJISYC0qtMwFpEcY2TB/e393Yv06Wc//eCjj4+H02efffbtd29/9+VX6mzHWiedhUWomOtXRBawkt/P4OWl3jXNQRTNyWy2fB8csTz79uvFKwCEiKZpsgYG2idNbfc5Z4SxKf+qORAApKhpmZbKi9M0QUB1Dqgaad5Cizj1R56ZkWIu7M06IlXZsFWnlLTpiI7GrW8eNCFbLxFRyywiqlOFmUOrAtBWagXeRXPJKmQAkQURtDRRDJcKKxTAes9kYOQLRWKWiadSRHP8prF2viitRYQW3C9cVM+0jLwKAaiU3NxZ6mxkFwcntQEVhhAiEaakMbCKFVrYpiI2hBhjHluRPCCASxFO1c/Vcqe/1dVpf+MaMoqXNEKo9aVFwzAdIl0cgwuz4xwtZ1KZXp6h0DxSlCywc67oGtov0Bga5fehpB4fPL+esSpGpmqTUqzLOaNoXVkqXHb7tA83jw/v/u1f/Pu//EvY0OOnn3662+0Q8f7+5asXL/t+G+L00Yefvnv3+Nvf/vb169dIab/Zfvzxx4jh4ZybR7r1jVCCgAFjzc/UmRBRy2C4TJJciqDe0fhPbIaD4/GoFZjUStL3qe97PbbROrtIVpoGAKfhUE2WLeUuaC0xyWpqiYEUGTKA5NoPgghQBLg2CxEBZgC8pJXmnEUAEYoUZkbRfoyISNg4ZF0UAAYQRgFh5tiO32Vr1cdsfTaMtVR0W5X/luZf8iMYMsk1TcPEpjVBVDsWrK5npIqrpBycYOH/XD8Gjn0aybbj4Udbz3Zx335y9S1+zv5aPGyGBPvT/0qkqEzsQdpmfsXeaUTW3uVX7aehFEQPoR8B5icW3O5L8zNIE+LtyVKKLdTEdyIKmsUO+ObNm++++WY8D1IyCvSbbpqm8TyIiPbbVWoYUGsuBxEZhqGUQgG2XSp5Ks2rWYNbIKDQhx+/snju8/n88PDw7s1bY3uaFzGdp1IKMGz7/nh6x8ybbf/q1ctXr15om5dcxki86eiDlzd/8ic///nPf/7Tn/70448/DiEIUi7T4fERIHcRt32iQEjlfD7uNv2YYdP3iNvPfvyjD1+++u3xqxDDnHpW4DOzXGjNFYPI8yi0+ApXEu1il58f31NwQ7l6SJ94+9WzAAD27FNzXk/1qUXNh9V3zXRFT7XWP/S454+wfrCcDf+wjrGYiQcOuuovFT44i9MGl0q9WAJUi2yyI0kumXBRHsBfvGrsaV6vhdomrYiLf3INT5uep1cLIknzaAVPGO23ftUyv+x1XdeJCxCqcxC4pD9VXjDz0T1FHn//yxs10LEAboyQW90CW8vVdyEwgCvlUrVBovZ4gFpYC9UgBdp6mwklAKMwSQ7CPcWX93cv7+8CopTMwyMMY+JcSrPQAVTvN2gRGcUfAQAWIazUIqxUFFmxJ9umtTHew2d
hlm0yn7Qj4PlmKQVrjGKuaS0PDw+ff/75t9/+drfbnc/Hm+3u1Qcvb29vf/Sjj4dh2O+3Nzc3KVLXdbe3NyKiMZ/jcOq6rpSy7fqHd+8B4Pj4iIh3N7c559il/X4fQng8Hs/nMzOfhqM2BNOuXOrb0UOkQZIxRiu22fc9l/Ly1au7uxe73c1hmIZh6vrty5cvP/+7X6sOECOBIGIAECgMFzJwAdTiX/9hISgb9Nb8sYHugoOmATacZLhSlOG6T5JcnbbSevMSEVI9zta+JYSgMq64NjNd1yFSSolRagJhCFr0RYkPVovPBSX0K+GgnmpuCYTjyNM0mU4iLSdK5wASjAKQSxfULdP4UgBQr1cpBSVTqztqECaqI8+kRCggkGIfKFjJTc2nQsQyZaziTUFEZEEWZj6eJjVGqDaomqGVuAQARLUqK86HUiYTXwmr/UvDOqZp0tpyClVdfqRaiTSEQBQ6xJB67TOubSfI1aEBgBg720RuTXoUFAVq20YVV2KMu90uhEAhUOyACIgEUQgh6P5CKQUB9A4IgUijzTEEMdNk3UoRwFkUmLF7cZIkNvnN8wjDw+I8536cMM9LNxIUWs0V+8qsio68gE1DWsCkooPeL8TTNG12+5wZISDRNJXH4ykS0oZ/9+W3Kit+/+3bvzz/tYi8ePHh3f3LLm2//O1Xwzm/uH8RA6f+Zpom7GobpCbYUKN7UaEKcjngpgZjVVAvbJ1a7VnNtjU4xIDTND2+f4Aadx32+/12u+36qNYQIhLhQNT1Xd/33RCZmXMZp7PKt9CcqyGErkuICLWnKJaiel3bQRJARlQ7wqQlO4xZVTZRioD2oCMKEAQ17lRRGtXEAsBQ4R+NE1jFAiICEfXVqu5rNjBCEm8dcUq/UoQFGkFrGmaob2jEcw+VIwdLGc5w1A4et3TVGKN+EPcWnJsl/G+JiDn7A2C47o+Hf6MtVh+wNeoZXgyi59mkHLvPLt5swTDE1WsyyS/G2OoRzzyERBT+/5z9aZMlyZEgiKmqmbn7uyIiIzIys+4qFFBANzA9fcwpy/2wHO4XUigrsuRfJT+uCCkr7NmZnoMz6J5uNI5GF+rIM653uLuZqfKDutkzd3+RwNIFUnjp4W5upqa3qqlaIzwt+ZAATiUcoCgCkVG8nHB+t9QaFaQxtfeF4rhUngYXJZUkJhCNyVu3MhFSzD8gxu12u6ibytp91wmzIbq/vWPm1WKpTVqMMVFYhEPotQcrs3jfc+irylpLeqqEJYrAcrFYr9fWWmuq1WqFhgjw9evX2919VVX7/f5wOJyfn/d91+72WietahoR6fu+a/vLdXV9fb1YLG5ubn79q1/Ujfvoo4+ePXv2Z3/60y+++OLLL79Yr9fOOUCJMRwOD7Y+Q8TaKkMJoQ9kIhnWk4f9w85WhjA8ffr0Zz/96W9/+zuwoDqx9m0jIsCRu07xIqHH1IrTH1VVlYklGZdETmsPKk54FOEfrnxn4uSjsY82D8iPxPEyoy8xSjQlYTyZ/Fb+TSndkR9N+JTMdvJsafDYmcmw5Y9yFXHcj3HCcMp/zidZUmumnRJcxTYNCX6kzCGkBEU4umD0pn4hpMa+mnRkUl+mTG4ThgOFDpqZeF5m6TnSHzklFcaVljn1A5yselL7YeI5kiTCS1aAY40Bx1oFjN1PbdvmYY/8XyivRRHJ+2M7OJhdODZmYMxCy5vp06RKVfmKjCPPmLrgTpj85BUZ3BBHuDECAsYYq6oygN53HKO11iBE5th3i0VtCGtn2+22b/npRXV1cf786rJ2VmLw3pvKici79mH3sK8XZzFKFzyzlo6xhMgIEdS2J+Cg7JUEBCLEoyGdETtvSt6RvNFUKGolQKQogJmf0byMEqoyeHWPsqmua89RAMHY7169fv1GixZQTmp1lVEjzaIKLLLW5s4B52drRFQtGVlEBFPd4KqqtKhYvWiyN71aVIonzKyu98PhkGvZp7NhR2nIEjebzXq1aZpmvT6zrn737vbb777/7rvv4NigxcTIAlGYQCbkjFkOwiliLCFTOs7yHLhIDSWi1ORLXxcATaTM+oAgSq77QkSDVjyumYwAak5rT3m1hNXq0LZPSJj6PQz4EGMU1oKiTslQBRDjMfjv+2NniMGpSjazdzXkEFzXH0le1RL952az0ZNv1lotTKroKIX5mntR5CKZ2V0rwxmQotrqAOfBclaRZ61FkhACIdV1bchlfQlp6GIVQhAQiDk5fDjx6L3vAmoDjOCjhgd1ycofZDh2NURXtPhQ7rhg6GjYDHdS4mVms+oZ0qigViQ35IQw89uqqlhi732U4JzjollFloMxsB7rQkS19rXDBzPX9UKtX0UYvY+IfR8y89S4p06PZJikBj9zIjEiCoxabqp4jce+qVjKC92yDCjdvhCCmlDZEaB1bpkZ0vnYrMoO/WlE1FrL89TvKrboSsuCnIqZ2rEj1TcdwomHvltUjlliiIjm5t1t37Xf9rdVVS2W9Wq1Wjaruq4r17x9c/v69V3bhldvbnovhIvV+swQUuU6bhWGZIwxDge9VKy1nvUg6SimiqlAd9/3er46hKClqhR8Wo0iM4pw2EFh8Gu5I2tt3bimadbr9XK5rGuXYx6rxVIftv2g9uNQ9B7rulJrwtnhJIgxxtnGGAMgRKDpAOoiqLDyHDkF5yNH5b0kYG3lLCGitkee6FQyZBYMPPDomCm5Xim8RxxQWADhlMKU/RA8Tp+QcSoXFAoWnLqUJsv90Ks8zQgzyV3eSXs8eJiybiSFsTdXO0pSKQefqFwydo2UImQCtxI+RDT5Vn43Zd4fxYbSQ2YWWRxiOt45kUyT5Z/858k7+SaNHcnlMsulZeBkdYSZCXNEAmavTGELSe3o+94ZE3rvvXeGAGCxWFhrmYPCm4iMtfWi2d13BqWqqlWzAKz7vvXeu8qKyNlis1qtVqu1Gn677aFt27u7B1UdRKJzvbFYW/dwe6v5NqvVyliUGBDxycXZZrMhfhDwu32/Oau/+vGf/+QnP/nyyy+vrq5W6yURNU11e3tb167v+9VqVVn20VskMCAchZklAjKStO1BV3F2cWVoudvFZ8+eoUZChh7EQ8ie42CrTHCvhNgEtUqamrwyud6DFcUcjtdJHCivGMPkjoxV8PK/k+lN1Mo/ZML5dmlqlvOdfKKk+vK/8y9OnpSxcnx8F45l4rGwFrITpKRKLAxsmSXG5BEmdARFjG6yF/lzeqe0b2FsaFERoCu3tes6KoIJ5VCYRH42ITJjKV11yjzLT09ge3Lf82InGznxOwxTguMpyjy9gdOOA19/4IVTPD8ybRwRzjETJG9BBuYcRRER1FxBBEgFlWHQZUPXM0pVVaZyEj2HaETON3UMfbcNroGrdXP2rPn4xfPzszWB1IZCCN0BQgjehxq5Iej6FhGtJQDLglEkaBQTQRAQMWp5Q40dypQh/F4onaSFky/mkct/HvGWCMd+/UyRIBgFRBSGEpmJwv3dQaGt/mwtCgkAvu2INONJRNR5fTSfch6gZlCJSNMMhRazHM+e60wseRMREU
2w1mqpQOOqumpijLvdwUc5HLoYGYeDZilKJiM/0RiLTjOxyf0SzpMnyzELDiAwLY07FJcuyW2ya3kfEVEFWdM0xhhA4dT0T08P5icVwzWoxcwhxBij0JHJgCCnM1HqqNKKl5ldAABH8qHLNT80yqqDZ+NQZfrg4gFAFGspn2hSe8a5bBAqSgzJ/1hASVmIpi1nb4WxqPYpDccjAUmQRCBquf6sBA8UnTq/6+GungeDMIVDjzmf6UcEADKDRe3bDgAyN0QR1FUov+WpuHGV4p428Gg0sVNE9l2bDDEkJDXVnLFxloECAMJQJlIiorVV0yw0qzArsRnUAFTq20ctHwxLTFsPiMdGREiD7yCzRy1jPMHS/CP1rhxM35LQ8pP5AWY2yeWXF6Uype/7TE2Zw8ShyM2QzzKi5XGuGSICsHpVAMB7HzwjMKGNUW7v7hfoRehw6L/53Str7ZPzi7pe9D6en10hUt/xoY1v5K7vwTULQ87ZmpCC59B55n2iNYwxlhHCUpSrbOq6TsvXq0GoVS20MX0uMwMACzspCjDo7XwbNLO6aZqmqZbL5Wq1quv66vqJRg6rqgqxjz4wByTZ7R/UQUAGjLMSemRBlLqpiIjjkGNKhJpcQwYMHq1xDRs656xr8qbjUJFYmLmqnaaohcEMGaq9DJG0vCX5d+YII0SRaUPMbGiZ8fmW8veAkTOPO5y6Js/PcXHyu3TOlcJ+XkQhuStG9AxjMZAVl/mUyucn2nP51twHX2pCI2Cm50vkg6SQlSQ6AZeMr7xf8/mUw5avJ6Q5AYTJPyUJP6JRJJ1SsjuN+vweAyyTH8VoxCH0fahMjDGGrhdjiKip6pTFxxrdUg+TQUuE3vsYQ90454xzzWq5UCe0iBwO+7u7u/1+H/vonNtu97H3elTdc7dsNvVqtd1uXWWAxRBXtrJNrUvousOz68UHz1989dVPvvrqq48++mixWKgsYQ7MQSKeb1ZVZbsORTxzIApIBpGMMQxRRLPMxdqKGcigRWIREf7ii8/Ozlf3MDpujjioVY/hf76y7XGSXsp9mbwohb4yR5ucqlcibf5ESXST8eWUGpQvPFopAAXS5ufnrz82lF758P1JkoQZip5E4wnllo+VJlMeB4c8LphwMyl0wcyRColVLl8mE57QrxnbeJCsxzmI8jgT4ZSnMVkgFqUXyr+Wc5MioIQ4ssqywp0VrBIZSrhNBoSiwex8p7IqU26WCGdn8JS7wnQH4RE8fw8+5Gpsk03Xdc755+Q6OezkJgmgQYMGY4gxoERHVNWV7O8bB5sVPL168vzq4vxsfXl2ZkB8e3AVgKWDUNfiwUcKHQWwziMYZokCQYQFBQ2SFa1co6cDQAYlWoxmZMyX9hg1lTzksXWVfxobhEdjyRRFKcbfJR7pTwQMBIxoASCymjpq6jMAONOU+JknIyIMEKNAjJBCGQDwsL0vcTVPO7d3zzMZ/oo9Dok8EtTXDkaNwxgjAg3Hk0CIBC2CP73LmT9kaJQYfhLaE3ZXcoYJGyy3Lz0p5agy1pcAjuTPzFVVrdfrxWJhDAmwiMZMVA2wMcYQ2BijHdU0iTTDkJzJbSfUmauRn8xq9MexbzgbPfKk1qBKRkwhIEk6verERISphEnWEvW3TY3mMSmNeUoFlsYMAQ2UGTs8o1YlFKxAkkWXrdlso2pcS8vGtD1ngzDGXJd/2E0eqnQg4BD10qqtpmi+p/PUh0GGc3HZQ1GpQWicHhkzxgzOCEAZzndJSv9WTj5FG/1jhpgxRgRt6j2YQ2cZGUJg5mF3siUsqfzT2FcyFPwUiIjDscbMGxM1jfD8OKtT8Q9InCTPJ5vWYIYIJxbVj7KWmE1WKZTzbOiWdCEStVEoADBSBDYizEHTCbq+931bOXIVqvNiu+sDkwGMAYXl/uHAd4fd9vC6vr26/uDi/CrG9rtvv39pbz/44KMnT6627YOi6749dF2XD83u93tyVvPRyn3P5xj7vleDUAlBJ6+FfHP02BgTOCobSiikvhhvDPbed33/sN1aR9ol1Vq73d9vNpuLi4vlcuksmaqytrGO+r5fLpeu0kA9iRgAsKkGqQAnd5umgEZEq2VFlRcSaQhRtNlMjIEEjEVnrR5MiDECsrByHlRHDGSDMENhwKdxJlimW+ShseYchxR3MaWeTiRQiVLvvzJWZb4gyeCcMNNyZBmrL+Wn57OdT6Nk0BMPWR7kMZqZjFOSzUTgzaX4Y8DB1L8xj3A0ehFEIO9angbNonzvgfbJ5ZR/krElgEWIMiuOGbBptFHJx/ILY54ikixea6tAPQG6qslCIqcfD3BjWK1WzNz3XdNUl0/O67oigru7u5ub7X6/b1xzeXm1WW5ev3797t07ZxpbkzFoLBqDRAgSm9oYazOEqsp98PzFj370o08++eRP/uTzqqq0Ak27P2y390S2rp3KyO12e3Z2ttvtrKWhnhuxgLAUwgw4BqnrRdcdfODtdhu4PbTxgw+vr67O7954GDFTztpLdhOXCF9eel/5eOkjnGx0iQZzhMmDT+5MeHHe30KciIhQcaqkvJ83CKb49qjxNqfK91x8uu3B76dBGPsjJordbLZzfnI8SFlCe24i6gPWuZLz5KnmIHDJBPKKJvExSAYwjKNVGdqx6Bmj+DBhdDJWWCeLhRlHKt+aQOMU2KGEZ36mxIfHEHjyCRwrzZMLx76234skk0lOliljlouogb7pdRItR5SlTAtFlXZAJAGR6JwjjqHviYO15BBiv28ifPR88eHz58+fPV1WTthX2IeuBQ611MaZCqvekEXe1bZxfU0QhYGjCJCAaFE3jgJILIhwQoWc4fOELZTX3PEBs70YQy+Wz0yen3+IQSv781AtAkkEmRkEEAWAEIXAALAAAXAfBHHoalgICFBlS4OiNMhhAgDbDCmsg0qRSoaQHdqjS/qviMZAtNAIucoY5qH0IjlhESQk0nS+vN7H6AVm6Cd/sDX42FU8wLnBQwn890yAU4aC2jxaNNsgIBkRiTQwhOQHV5Gt5hMkPxHHGDW7cZAmcIzSJOtiUNaPXiFwevAsZ+cqSCGxaM3CzWmWiIKgTd6MtccSFWXGhIgwq2uSYDhiN1g4Wa9gZlcZY2z+Smm4yrhjQTZfY3ElJpACAwQQQVOQdA5JlzGKcMwcQp8jHDi8M/YhygA9zbAFAGOiMcaaylZu8Ggj6npJSOjIiGRobVfgm0xxDxGtrQCCWv7e+6ZpFIYJaY/dI468TihlrlKMpYAb+UowaV/ppgCAtvWao1zmw1klGCT+zCOZkT+H/ko0HtIgH5W2x7j3cUBkEWBEEmEJfQTLpm3bphJnDDgXuD/c7x/ubrfb/aZa9l1AxKpqKmuFUZiXy1UU431EY9dn5w/bbrvv37273e7a5XmjlltVNcwwVJrlIfTXd9MDZZq3nHAMMpXFGJ1zzrqMDBrlszikkSczfojAV5VVx4RaYsy83+8BoG13i8Xi4vz87OxsvV6u1+vVamFs1TS1rasy31DPUkHUHG/dGgWaujijoqvI0OEsaQsRmUkAkFFM3lDvvR5qM
USIlMPNtmRkE36X/5kpOSumpTqYdZEMxMmWlwh0Um8or/LYGxQqXb5TzmeOyplISo94flh3KCPihAWXlJPJgIo6K+VK33OVGkn+1sS1X3IZGhcFVvY3lEQbK3kiAnCCtBJApvdlZoXmic099+VsJ9oDFqllkE5nPbYRxYCjOShUJTIA6eGLuq7ZBwJsmmrRNABaPTkiekqtipDt+fmTs7O1s4goIfZ3d7cPD3cAcL7ZXJydgZD3frc7hBDOz57s93trDUvYrDbPXzyLsdvtH87qsxD8YllfX11+/vnnX3311WeffXZ5eVlV1bu33zBz37fGOFs5dTsZY6xlAIhImvcuydXaQQeROQIROVMhSYwxBC8Q+z4gUR97dSdtzprLq7NfvnqtcItxEKWEdjjjekzYyBs0TRPCQv5NMKdACXjsd0ZmGJMSFMhcbnf5Y8DGYk/fs9cwRsJyniUtlJ8uH5tf5dng4nX5Q2zC8pX3U+sE2qAKbNJIylWMJFZxHT0XiXjnPBBm+4WFa7ycZzYUJzQIM/rNUjbfz5rcHBpSFNGZfDoz9jzt7MWfT6bkruX4J5GznPCEx4JMK5NlgE125Pcy2/k1Z+yTv2YgnET+DJPj2iFbGwQgKFEArDF9eyCOjaWqromjxB4i/+zLzUcffvD08mK9XETf9Ye+MQ4WhsQYY4RMNBgr5yz1h3Xo2u/vOIIEAESwZAOYntnHCGQBCRBQSCQikKraEyR8DN/mSy6h+v4HTj7MRx/xEKgZNCdihgiAoFFBQmAUgBgjod7ECKxGBQBYskgEiFzKRwA9WT2kWwBIik763sOIfIaJUTG9ch+NtZ0PIpFIpTYSGetcjJH4WEpEtZgoQWu4ngTge/AHHjcmM1mluxodnQ/Fkltvn3qx3OIEeSQiPXCVrKzhiKaq9dneQDTOOSYQEWP0bOEQKDNgj8ifDhxmFMr/lOSdOew7rZKfFWUqetuo1qv5olJ4CfOYOa578hwyAHD0GZfKQIIhcMZaMlEABQgQBSQymGNf5cyoy9ig6u75uJru9RCfxKjaucYJNY1IRJDUDRdjNLa2ebE6dx1fCxcJH+tg5V1DjRmmCg4IQ4BxOP3IjKQBdsPMqr4DDIHBCdcFIMRjgYZkyWc5SIhQeuRlsAahBPg8NgDDuZ7scWMiGOyHmbzI6FeiekZOmzLaqDg9Ya0darACQHKeKtxMOiSfRYzOUAqpB4VUQoiICIYRQSAyI1BgAGOOlljXd8Lx+vr6iy8+e/v9t7vd7rDbt23fU0BEa52zFQTufUAw52cXu0O/b988bLfm0IK7WCwWy+VysbKYMp811t0F33ch5+IqCi0Wiwxt7XSi2zEcsjVVxnbtpNK4Y9cNItKs3exrGJAWWa39IUeu625vb/u+77qV913fr6q99b7XbNuhpkb0zAjJPkJkQ0NB1JzCTXSsiAMARAYRa+vQVaL1bCMzBxGklH3NIAykCf0iwgjWpOqrWJ4BYyGkoaOAgAgIILNwFFNNU0cU53IZVig4KRFxoUlAIcAeE0IlIsK4SkRGyvyVCQPNSiSm5AoprqToTNMvJzy3vEZENdNuJzit18R8TbDFcrbl58rv5kyt8iotNyJCBCySteCU3Hr/HX23rLIIj4ufDJ/SKs48PUdTmVnG2slcMc3wEREJMcZYWVdVFbCoAZzPnWtpGaWxzz/6wdu3b9++fhNit9s9IMnz509/+MMfWoKbm5s3b97GIHXdhBC7rgueP3jxjJn3++3Z+ebTTz8U8Hd3t+tN85Mf/+izzz7VIjFd122323c3LxFxtahFhBliUAgg++j7oNA25LYPu6qyCKauqes6rEOUKCACjtGbVKWOmdVTE6IPQZBwuay+/OGnf/V3b/UkfQjHc4PZy5BRKKHHCZx8bI/m98u/ljdL9JvYhOWUSlLKCBZTY/HyT6UaMSFYken0ShdSXux8FZMrf1RG9RKlfOk9PKT874SQT5Lt8bt0GnpDQlQx89LFM+czc0jqOJzONmfHfGlNzWGSyT9fMu70M1lyplkYG1RJN0rVw9M+TlxRWZ/I/yyXwEUKQAmcjF0ThgxjpC0WeFxvyplP+sTv8xievEpmmz/3GEjLyU/emv9TEAYpC8eGgaCQlGhQnCXxfe/D5fnyww8//Omn9Xq5sAYq6QJ3BH5pDZGprQss3sdOQgxsIa6XTXyyeW6g7fzDfr9voefAGIyxZF1kQAEEYBbE4beIaMPV99POZI2TfZnwh5LbzCGTrxi1/5WacsAgLCwiFoEIGASQGCIIikRmcc4hiXaBQ9ESXBEAIh/dB4NHTI7yZS6njDUn97HEljLyy6z2pAgQIgGwAAa1lwSZBTASkSY+iAyprfNVz8Hye/jGKd3g+DoO2Ybj1zlLeygwdjKCiAAMdJ1rhKhLEYrU+rTFKpSHUACkXMTMc/b7fUYJbcqXP+29F8mpbpJkMYYIMRUXMcZo2b/sIBYR7cuHQ1oEEwGzYSY9xkZEAKjqbCk7RFCEOB7lUclgjRkyM0sGC8kZfdSth+ORISvxvZaQSQZhjFljJDJoZICGFrZh5hA9AFSVNaYREYs227fZltb8VY0QlqxbRIiGDFVmTttsEVGLMpohCGOMMWrOaEkhVjM3pU0mw8/kRemJmKqqgBAQhYWVFSkqAmjqZ1ZmBQBwIKjMoOSIXqbMMcGirN38iJNeWRctxdnEkwhJ4pR6PhTWXX5Ambz+UxGsdDFgSi1mZkNGSBAJMKaztcTIIuJDF2VARWvg/Pz8ww8//Pj5NQB0XXd3d7d72O73+/v7h7vbO1c3cbd9/faVGLtcLs8uzu/vtl0Ir169Wi6XZ2dnq816uVyqvTdkCwfOUIoxtm2bD1KmSZrs1xii32gluWKHyrfJ8zXgc65GndLvERFgSMAOIZBw5MDM6rPw3j88POBORDhGXzVN7fSjenYUbDphq/E9jjoxQDRgCABiyPqGiDAiI5EhK5EjeOWyivAxSgw+clQ3PhENjU0yUzi6FooGBqUSwMwWp3WaJSmaWdso38JCW9JvlQ6M+ZU1laxAUCqeWz5W8taMmlJYUzQ+z5MZSsmpR4I/zRBmymh+t3wSCtZfMvHJrMq3Ti42VzMr1xVS29N8FR8aMh7y83PhXYL9f69+VQ44Wc4cGiJSZghO1A7NC4ICUMyMYAgpRun73hlrbSUxGLRtvzfGNK6y1lYLPWciVVX97d/+7Xa7vbp6cna+6bqDD63aV998/VulWEMuhNB1PRFdXJy9e/dmuVw+e3791Vc/+Cd/8tPr68vlqjrbLM/PNyyh79uXr24AoK7rs83KWnv/0BtjLaJWkSKyiNpXDLwPf/8P/9AddjHGn/70j2NEZkD0pDxBBED0QIX6AitbaWGzGIVMZY384EefVtVfSypfRkTqeJEExATJBE9mwhNnzDIplVtzco/mmzj5UXLq8WaNqhGOtng8DoypD8aEkB0uE8wscTiPgFMN6XjRODyV+MY0U+Xku5MpTSCQ3y2puJx/yUbyperFxD9Vwm0CpTkAS9DN2RHNwnFUONcnk4RHBHl+
uNzckmZlbEtMOBs8glf5NxYmIpzCt8ng+c58pzKQ8zKHV2boMOfV86Emz2eNZzYfIXM0cUuN5PGhcP5lQpQYalc5FIiRgJ88Wf/g808///STS/PKGJDYVVhZIz1FYm+FAJiQMAbuWx9EGGpnL87Of3hxfuja27vdm9v7dw/7g2cRRELPgDi03QTW+NmJMNMEhx+7cGYTntz3k9s0gaQqncfnkZE0XieIIhAFEGjI+xqqjAMLRC1poZX9Bl6UsyQQQTROOHEcQOj9Y/sCYwzUH70XY6y1BnGQkzGy+E7lLGI2HtP8ZfR6ufASaCfh/H6QTgGLGmGeEN0JU/CxT2gNWCLSWESMMYDoGUItcIaIqUwoERGCGj+DZaUGxq7dh2Qyca4QDoBD3eMhdKD+r6qqVsvzCqwmxamRowwMkgYsImorOudCCESQI3UlcHKLwrx9iBhjrOiYFIdFXAULdzMU/sQYPQAwQ85czQmizOyL8KDaVjF6Im35rRn+BmGwqI0xLFE7aOiBLpHoqC7YwvEAxTABOUpPvYhQUq8IZTssPtuQBoxB1KQgkVLiHHFAl6wFgSCF10y6AqeqpEIwoPQoPUSvx/zv+WJmlZ75tFhe5skr2/wpjjJsHKdIYOKNxxNnpTTM/seyMnZ2Iqj5lwunZ6sYAAJ6BNSCN+kMZBTBh+19UzWLxcI5CxJ229v/9nd//4tf/OLZk41e5+fnz58/r6qKQzy03atXb+4fDu3B73a75y8+Wm/Of/v1d99//z0RqvXY9l3f92dnZ9oKAoSqSjLkldzUzi/We7SNtaN15Ahj8SGFTS6iNXKQiHJrHERUP/BwrjKGyCEdoSKjhW1AiGi5XK7Xa4EIMQwJqCikWWZEaAgAQipJyiwwnO7xeoyxFPoSGVHyYVRJtphOBwexjgJyrK2MpYY0PqKWd5qKaNXkk5RKhJWjZVzJRFW4sk5fODYg8xwmP/JV3skYPJln/m+5qJIvlzua9zK/UjJNHNt+pbQoRyg/+n4uT6lcZ35eGZx5pC8ZM+dzFDCr2TBZzuTmBFbz++UPSe63jAA8LtUz/DXFxAFHIpOIBt9VAS5m1o6CMpyNZkMkQoj4s5/9DAC6/WG7fdjv913XxeidczUtLi4uPvvss2bh7u5uDu324eGhb/c6E+ecNZV+c7FYXD99+vnnf3r97Okf/dGPP/n0g7p2VW2cQ4597/fGoKvIuiUAkMB+v+u6rqrPAYARtK9sjPHm3d3Nu3fWVFVt//2//w/Pn169fv36n//zf77bbZ1znj2CITQCHEJAGfLFe9/Wi8paq2ULkNCHbr1eVlUVknfDGMMM2jVEkljI8FZGUPpiJky/vPkYRZTC9eRfy3/K2F6CMV0M79LIuqOihtNkKEgCYz7bjGZY6FglA5mPZ2Z1VqTwbkzmDEUNnnIhcKp66kn2ld8KIWZeVzpHS5CW0zZj74mMY3QT4Jz8a2ZZUtQJKPktFFwiv5s760x0rMkX8z9Lvjrne5PZyrjS2HgLcD6CJL/vBBTzT0jBV8tdKz5K81fmkyyhV85wjnjz5yEFZpOPn7KVm0eb4LCgVrs5cvUQQl1XED1LeH799Mc//PLZ5XkIwVVoSESoJgoIPgYkJOf6rre2soTWWhM9GnRuGReC1RME8+w6XN7e/+7719+/vXk4hND1ZGpCZECAQYNDMCJxfqQwz/zkeidEUULsPdf8Aa11UWS5D1cEr3iKSWcSFETDIjh0XBDF8UJfAgBgEDURVUUY0l4AhLk0EcgaGR4sE7Ew08Lw36SHWVPlRgUxRiK01gLYyN4YrfIRZUaAJ5FkAtX3AK1kKZMXx+xuuFNg2sh99tgm6lvaMwlTURNm7mOwzoQQ+k6bEJK16tUCY4wahBqS0gQ3Inr27JkWze+6Tg3CkhAQTe7To/Ppuk4r5htjtA9kbkyv9fShaGkTYwSIACgSY/Q4VBPNQcKcV0JEqGfhjDn2cMsWIOJIDePUZkwJF4aeN5J/q3EYijYkuZNHVVW5l4Yyde3/rnVoLOlBR9CjejF6ZFSrG3Ggd0hvIiJCed5SRMSY2Pe9nj1TH0Tfe++9Vk13zjIzGPXTpbM2rFnVx1Wno2KUuTqmsj1Ao4O+InpEJVRVhWg0vxTGnZyoKASQEDS5wwbqGan6E/SefE4vhXCM0VibcSNvjYhwjNneiEXNmJPsNJNwgRJYyAUGEEBO9UWdiDRN44xj5hiCiFRV1ffd/f29+MM//Pa3HGNdu+VyuV6fXV1dXV0+/dGPfrTePOna8Prt3b5tD4e+aarPPvvs5u6d975t27bv1EmxWq2stU29hCQXyhZfpTTMcgOOonmkHkjq93CkfRgoveu6zL70WK92L9zd3wmM4lhVVRmLALJare7v7w/trus7EamMJUPIAcAQEdnhCCsIGWP6vse0NSnZDWKMTe2YWeJQPImLAj/qhrBoiAyL6Aliy5EQCCB150QmIiSw1inlo2WW2AWPiFihNRWrUSJ6pGFAoBACxxhJ12YM5RowrEfIB8UFAAEIMRQRP8UFRYjehxIdM6xz5kNmEzK2MLP2pn8qU1hLWRKH0gCqLaaKfCjCEoVFgJBU9VIMiFG/OGyxzkUth2T9JpoDYAZjaoWbIlKeg8JgwvsQMbKfFLrNvBVxqFOktxFBBBCMRpgmVIQaPBzoEABA0/JFjjpQCYpQ9Csrod22fbKoKe2MLnJqYytAAguACAKiEQAWVHtVe/choiFIbFoEKIYdggFrQ+j33qxXZxfXzzbL9cXzj37+859/9823BoVAjDHr1cJay/0DoX396h/7vn/35uVysbhYb5bL5sUHzz788MObm7d//4u/qyr7wx/+2aeffnp9ff35hy+qyio7FojMnkMwAI5sDFoUFLXxA0G1rlbv9tvb29vN+my3O1S2rqrmf/lf/l+b9fl333/zf/o//pu6WvzRH/3Rq9ff/+Y3v/7k4w8RGXgzsEJhAFZFhEGsq/suioAxWFkXhINvP/vkxZcfXv38b/62RmhBuhDBurbv69pxZBJWxzEQCpAIRjGObMpXIREIgZkFgDJvL/ENkhdzwnNFowyK2sPmZmqaOkr0ftk/E5KTT0Q0Ypkz1kAEBUDADCcQBIdT+Qkr3NHBUV6l9gaFpClwqbyMDOEQ1aIQQMOzI3MUsRRdyr84D6sB6uRUHSmyANk9liGW6RQyWLjIU5jPdrhT8DF1gehsaPyiJKWWDaJBgCEiQpaYOXDwbVDCp3zYRiR675J+NllCCB4Gnp4twygizlWjIr1GlcKhb2rehSyMQxLeiCjFNiGRAMQs/1IRvDBECY6sFZIjoJzeQCPZU564dwIjW6f5WgEAjEVA5C5y9BCHJLHcJGrgkzGqgpuhql+PMAhvKfIPdS9KTpv2i3ThR6ubBQQ4pEwaANJTWDDEjhZouxgCoVRWrIvC4oPz/fWigYe7FcAPPlj/6LOL680B/V3fH7DqrWmgblqOnRAsr3qyEWgfd4ZJIEYhaGpB8pG7EK7e3m6uLuqr5e6yuvlw9f3N5S9//f2vf/sG2fYHFCSoa29
5x3tPPdaw2TeqZQAhEAIwChACiVIDiGBEiCCMJAAWRxRXkMCIJDNiI5ryfkGwZcpT+iFisTkOjiAQEZFQCU+/ovDUo2sjBcvQMWuaMMXujrIFQMSZY0thpW/mIfSEg8pVXCJEPXMPDAhgDUAqm4EAHCSjRhaLXtgUOAvJbBUN6c2UYyZNChNDYMlY1f3UYgEwekQMlQOQAFmiKBKZAyCgYULRYiRI3HcY29qaTV07kti1vvPYAhoQg71gLzYaK+QiGO/9+uJ5d9gaMpvl4nLdYHdf11XbOxncMcPxfv3h+2gMpCxEEAFjrDHG1dZaWizqLNMHmkz9GJhZBEMIWqWTuI1dD8ZUxlSLoZtcjPFuv1vVTde14n1d2647BAnOGB+jsbZKtqJoGZtclFKGmInW6UZEoEhkAUCDxXqGX/m46qWRNcs4MIcYo7CLqR3icVO0rnfk4IOwkFBlhjzDiASYghUKghShyn0LaWikCWIsV0cLTf9qkbKJNZBPYdiHfl9bFxbc9fHQ+d5HjxiN64Eaa8VYZ4SQxXcx9A1IVe3F4aGL+0PPbKxdGXQQIQAQCQtEBkAgi2gETLC4TPL0yLQNEceIIIYGwafERUQAS2aOMSSNUWIMzBw5KvVpM718LlSTHqVQofUU/eFw0LBwTNa4JiZm/s8xSpKPXDSalnRoQoYzXKLuXf0nR9VgMEZ2to4xhtinJFIfQgAWZgES5ypTGUADQADGBzLoEMAYMBB920vY1ZVE7NzCSIAQ5Ob24fa2/f77WzRfA9D5xdWTq2syLkZplouzp09DCK30VYyu7/f7/Zubd29vb64ur589eyZocsS7PGpmqAFtLaYIIFEFw3q1ybyrFNCJIwAzC+hZiEjRJz9LJI1OJ2nY9WSMi9ELeAFAA47ictN88YNP/6f/2//47ubVt99+8/XXv/3NP/xqv9++ffuWYn04HJjZOGSGtusAaFmvgW2z2HAEY4II7Pet9365OAd5DcBAYlUEMwdPNCToRg7RoFgLRMSENZDNKxHRtvcDYzzKDzBIv7+yXOlNL8fUBkRYHEhLiHI60FF69EvZkx/ILw6CZPx8/vT8Krdt8li23LIOMRmznED54vy+1ss68o7imswrSaDjk2mEbOWecFKWCn0mvHznuJW/z3Nc1i8q4VaeahiDaPS7GJZPPsbMpZ8vr1FTMjQRHHtzoIOI7Lfbv/lvfy2RjUERBoTlYokI9zc3yIcnT57UdWVIrp9ePnv27KOPPmAJZ2dnde0+/+zTH3/15YsXz66urrREL3StftQ6sraK0bctd12Xur4OZ2pjjN9+++0333wDi+a777770Q+/+vl/+ev7++3//D//3+u6/vKHX9SVffv2zZPL88Ph8Md//MdN0whA8N40RxdDBlQ25vM/UUQdjVdXT5hZ+xQZH+OpmNURH2ZlwajI0MuPlW/RLBheoNbUM6c7Uz6ZxWop88qRk2/shIN8cif9czqT8snZZP73XbNxpsOWXx8Qm0eBsjTC6EVEPO7nyT5RqRbf/E/lQsrfORU8Q2CgbhmhSjl+iVFSKMETDjMInuEQ1LGWTMldy1cwhffzXuf5TNC43B1OVQ3LQ4bl81hqSGP+PEGk/Onyu3NX1PCtZKuXz6e4EOTWZSWvw8GSH5FVua6TGwdzc+LUFWIk6yxBxxx8i0SVoYaq7cPdh+vFly+uPrt+8uS8IsMGrTELkChgCBxqpX1wiIYEKus4+uD7wJEFmQxaWtgK165eV2dn6zPgi9Xyol6tpL5Ynv/8l7/D2rVALXsWWNSuAvR9N6z9OEESBGQoliqDRaZenPgHhQTnwMFTJw8nT0rRSaxEIZzJ68nrJ0c7+VhJBeXG5dNlf8g4k2mMphS5fHH0/IzxiggJwPA6AAADoQzZsIh49JETChpERFMhM4EY0V2C4b3gHWHj6pUz69otHDVna2fpcL89dP39vvWBnaXauZah73sklEHUChIwgAEQwKqqFDLD4TTm5PsbQkzGGGsxMwq0R3JT01HfcpXLjhJEw8xN08QY/WGnD4QQBrsr9cjWqwS1FJ76fByLU2JR5kXGHD07yoqJSOBYr0UEQvCDYX3khKPkrHypuxmgNGiPfHI42ZXOXubvWmu13k+e0kBUbsjYzAzEFDJlWELRNFIqAqA+sLG9rThE6XzfdR6AJXLofZRIyAakruv1cvXs2fMY5e7h8Pbd3XbXR90XAd0vtb7IJGxkUbM2r6s0g7P3tnTGhRgTnkBJBAXbVsfuUL3mWOt8TEFlMjAWuaAlZ55sffnfzD309JOkppRaG0ZmGSh5F4bzESSabCVp7jTkyAgicwhd1/V9D8CiZ/MYYAj5IBEBSlXVNzfv3t7cuaqp68X5k4vzJ6ZpmhcvXmy324eHBwCoqoqZu677x3/8x81mc3Z2dnl5uVgsYmQt6GKtNVRrPjCnfFctdpijiBMSABzuq3UioKV6c4ldHOqOJpEdI8ToQ+gBgwBFButEpH7x4sWnX37xKX7+p8Di969fv+4O+zdv3vz67//hm2+++eabbx4edn3nraWu8yG01jkf9sFHAEI0VY3WVsays5UwZhRiESAxYEIIxpBzKS4aGRGryh4NQijYOgCEELIDvqBe4Hi0ifMrmUGXKDsgyszLzkXINd/MCPQYK8+PTe5PnoFCJJxkVTCWeeXCS5aRFiiDS2imk02kbP5dJlUeSXCIZow+N8CkWAIWnaNPfuU913x182si3sbTG+Y2UXwfk68wA2MJTADQw3XlviT/05DzjxiH09ghbJn1hHvtXGVd8P3u4ZYIOMQvP//QVaZp0G02n3z6wQ9/+MMPPnjuvd+sV1dXV1dXT6rKRvYxRtATN0SLxWK1XhDR4bA7HA4qmfq+Vx09Rm9MU9VWRO7v73/+n/7jn//5n3/yySfb+92vfvWbb775Zrlc/uM//uPHH34kwn/xF39Bwp8uPgYYzg0LjGrDQOElygxamSAZcs598sknAIwommDDga216m7HDEYBVXWpqHNNqVq3jn+SNKDA85N//b2YkAk5c/ySfMrtm3yFH4kBwnspFMY20h8+4fxiCQplr6MPpYP35eTnrGAIAxRXQQxYPHZc6aQIE4zrXZWryzAsEQOSjafhQRgDELOtM4NY+ZVSDUpUduKEc1mbKkv3ws10vIgIxmgzgRul3J58UMcaM+aQQ2pQ1sNOblyZBwvjE+ZQ8AdrbYiZybP2OofCYTFEAcdAPrW/ML+Tpvd7mOR08mQEEYEQ2QgQiIVo2JPA0yebTz58/uxi5SAQdkiC6ESayBUaR2itsUSWBCQycfRd67uDiAAaITJc2crWV01z3qwX1sS4lLheVM31k3Wz6GL45evbtw87qCrnnO88xnDm6j6lcHNyXRAMhwsLoo5l3Yj3X+/hG3NsnMhBeGQrJ+g3GW0iquZitKQmGG/3yQEf2808sVLHKL81YXH5LZxJ/OOAiIBMAqpMiwiDoGi6FACAoBmaaRIikQ8ggiwUk32JwAYFOVYENeCS7FllnqwXm0VTO+svzm7u7szbW3g49OKZDQkYEA
CDMRCKIaqrRjVfOFYdRJEhBy9GYWaOyn+UFRyrenTd8ViNwi9zAzmmoB83sWma8jBVzsnMm8J8LJeaWbQ+T6n+ij6ZI29QZDaq4xgKPbPkciXPy7tQGkJ5DvlPmJSoQYDWDaVOG0TqKEFEdM6hGb47WIyAiBhpeOBotbKIiHMuYxEV7hcUC0DWR7K2FhSgQ9c+POy870Sk771ET8i1NRfnmxcvXlxeUmComl1k7P3t7uBD7IXJ1igievDRaW9nEdaQYYL8hPObcVd3velDme54NCa1QKYxRiuaHPGcj4ImA1bxKkdi1QTi1HkyQz5vIo6ryOD4eEgpKYRkgjwFBhan8UkQMTJHZs3IAzLMDChEwsx933vvq8oOkkVoaKCAGKPnyHd3L0PEzfnFctUIw5s3b24f7pt6+fEnH2oPz/1+v9vt9vt9F30I4f7+/nA4HA6Hp0+frtebpml0Yod9p1SgCoAeT9V0qpLVZPzEghUjojCJhBgVUEpWGKPPRVkRLXNELUJtQCAKRGtNjL7fP1giMoDOPXt2Dbb65PNP/+xf/sv9u3dv3ry5v9ve3d198813v/nNb77//tV+d9jv97HKdW4ohNB1D+y90p2zhmOytwGGlCge/ESCg9NkquXolXTQE2y91EpOCoySenFm5p18txwho8gE1rGoIAqJfRCN3Mn54Uwt5VYNm5RSqPM4usT8e7xQ9cQd11Iu7STo8l/zNX++/GuMjDB6TI4y9YQbhuj4QOllmVSe0B/0yHGm8p84FuREVgrRO4dJebME42R1mX9NHgshREBGEUJDrhoCKbFqnATvLDqLyFE4Xj05v76+Pj+rjTEvXrz47LPPnj17dnV5uV6vRWLTNNYSAPjuwBybRQUAu93D2XJBBpi56w7b7bbrDnqevu97bb/EzN53VdU8f3GNJP/uv/6XFy9e/MNvfvOXf/mX19fP/+7v/ttXX351dXV1tt44555eX+63t8yxqmofOkltCSSpy5I8eZhy1I5mgIgIfPbpx1VtD10c0niZjTUSsXQ0iAiKgNETekdmXbon6FTkPP+ebBPM0L5gyscXJ8gweX6yxZO9nmPmMFq6N5lP/tyEIk7O4eSUcMhC4YKpieTeu0ekHar2lBObEOPkmzK4fnRWp+FTEgUUYJ8oxPmLMV15+/Q+pRTfPOeBXgo/Wp5t+cwcLPmZCTAV/ZL7I2LSeCbzVNSKY//dZGuyGVnae5mHz3n15IH81wmXhkIdL21aIoJR/8bjrNIIDDw6WE7j0q8ltstMysiQif2oaTG/qKr74INEPVgkHIzvIYaPnp1dbhbE4fBwHw0vGkMIMQSyDbDjaJAckrFo1G5gwi703HaIyBhCRI+9cf3503MykeMe2k4OnWG4cChn1Z/90efR/fbwj+2Bo0TBGIiFgEBISFP0B2eiiqjATAAoejYPBIbThohHx2sJ+cdof062JbQfu8bEdYT8Y4DF9wrTybcmO3XyrxN2NxlzQkojUBDijOmVCylHUwxEGPLxIZ1tZAFAA6hJvAnCCAzC0apZgxBBk3sRrXBlrAO/EN5YebqorlaLxiKyr55crBvbVNbSzZv7h0O/JzRLW3UxcBRDUFuzXC2cc9qmQ8uVmdQWXPVMEYlBcsMk1ewHeo+ZXx1Du1iYBGntqSxf4UECNTm815o0+Yexo+3jVFI/H0fMXAgKwk8W16DiI6jOgJCqPgIAosQYWRiGQiPEDEN/RMiMdMjvSG3Zht0cqoNYo8AxWq8j7WNd18hHf1YWtSGlRpuj5ytKEfAAAJOEPQAAAyNYQSdi0QChjyGp/rr24KNHts658ycX3t8Y5xaL5XK5ruvDoYsxFB0UmY1FIkcDNKJwy+nclxROOhEJIcZ4TIkfQkCcifHYbg1JAEbkgIjaliCfN8sBusleS4oG60kWGTv+Jr9Lzq8fqqoqxuj90YnARcR4QtqUKxQksyr5+9SlCNYgWeLs6LQYe20iEgEACZlDjOIjVlVlGOqmqqoqeI7todv5ruu6Xx2ePn16eXl5dnamhm7fBO/94XDY7Xbb7Xa3211eXq3Xa231uV6vc+Q5x6LL6FfyoSSss3lFkjYFgFl72SsfipElsmbX9/2WmYmAtDNZ7OyB225JKKH3WFFktkLMbFBABHy7PFt9erEBABAE7x8eHna7/cuXL7/++uvvvvvu9au3r1+/PhwOIeB+7wnWIhIDhxCAgAwNWmuEGGOQICiutkQ2xtj3nT06d5GzjZ6INnFVoWMYWibq11EvmW8w4uA3y3x2oujACcZ9bL5XfoWKU7MZe/D3lVeafEVEcEYYiAgwLRaf1HomOLphyjEfE5A4vtJtzqdajpBBBlUoxwYhPiJE5+DKU80bAWNQn5znXHZO/jrfRBzrneV/SyFavHjMMzxKYhIBUf5jjNHqzMYYIjBY9e12uWgQQtfuGovPnl99/PGH10+vfvTVZ03TPHv27Prpc02PISJjXPQ+ylC6hsyQjbBY1ES03++1UaExuFgsRCRycFXuw6YnpA91XX/88cf/+l/+i9/86tf/4l/8i//u//Cvna0//+Tzq6ur+/tt5czhcPjbv/0bAj4/P7N2g4iHw2FhqznclFQyNChl6DHz8+fPnj65/M3X37JxoIX+xeT3CVAQBE5w2Iz2MHZwTHa5fLG8WXrsJpx68u7JPc0XnUpwLbG0JAdJOXglCZygwT/YFJwvcILeiLZMoBs+pweAAOBUBEmlYb4/IWeW000gHqN6ToWs8pRKA1UHySfoTo45vG6m9/VHquyHeXVHUBc6lo6f1b4J3Ob2WAnDyUf1oiLLNHOYUisqf3CREToB1+QTGVAlpvE4y6t4kfVZIorJUMRUwX+Atpm2z+FxxuljiDqHxslLyHjxAGINEsTYHxaGnm6WX3x4fVU78PuHXVtZJFnVyypGqe0C0IKQMKniaACNJbOs/d60EoPnwOIDeEEyHtoWDMXasXRMPRqs0V6KIWvdDz97Ujd//euvX233rlow0X3X1malRQgiDIFBFBBMR8x1T0UA2KgJAseCq+/n/L8HDidMI4CiFm4J55Os5jFxU+I2zLZpjs/lV+aDyyPm7mMOtVwmZ/LiYyDSPFxCBEl1cUCGZnDJIAQBZoQIOOR9DJaDIJOwFbYEKwtLV5037nJZXa2qJ42tjGBg42Bpl7U1FVHlzLuHw7YPHXeGQfqhOOZqsbS2EhEG1uIoVNTBGo4xVkYV8a7rQoh93yvEnKkAhnAiFnVcurYfA0e0r0GIwQff+957HyP7GHwMgWMUjsKBo4/BshEEQWBhLR6R6ZqKYpWcasBAYpIq/ZP5F0EIMXA6zWiMQRRmTlk5LCJ950vsymZGadRBYd6UBqEpmE9VVWacsTVEFFNRDMwOXzPCGVRhn9hXFFZM0NsCADzYPNagMSiR1Dj3vXZBFLJIaJ1zztbORUWWoJ36AAyStjSMITIzyKjHRpYj2vBdGb4eTklgUWrKobmBbTJz/pMU6n0uBT8hYd01jYtm+BhjaJxUmF/M5FOGZ0XEWh0h95mEDNs4rlCYENjqmTdEB
A1fojHGRh9EQE8dD4R7nIBo5VhElKElB6xWi7bXsHZYb9arzXrftdvt/s2bN23bbrfbs7OzzWazXq/bQ//w8OCcq6qq67qu677//ntr7Xq9Xq/XF+dD6ooGmTG5gLOJqEHUjDN6Kr6AADMzRIncJ+TkGKPEwYrR/oQAgmSIIHLgYCVyXdfGGGsNs+bcivQBrQH2YAzEIEMGuN1cnW0uNy8+//hP/umPdWLv0vX27du//Zuv9/v9zc3N7e1t27YxSuh93/fL5dJYAmuEh76Z0jNgsMN+4CB6AUA9IilFKsFdz0gnDJiIh4ngH2tUOPkTjg/pTWR2KV1KIUFF2lW5AeUzeVb54ZNiac7rEU8o0PmfMLvmg5T8Yra0UbkFGpqTQOmzmcizEgKTr2SH/eTTnFKc845MJpynPR/5pGSFYh/fAwoujnEmJ9ZRig/vDl2IERG104mONvB9JINSOdvudxwPz68u/tlf/NN//a/++T/56R99+OGLh/09Ig4Vi8gYwK7z290OAKrK1nVd17Xv++39g3Nms9n07T7BUHn0gDLOuRiDIraIxCgh9ADwx3/8x0R0eXkJgGertTHuP/6nv3rx7IObm7fb7Xa/3z69fPLixfWh3S0WC2sJmPOOlr4JEcmV7lRC6+ouztafff7xr377j2QrIjRF8ArBqHNUNyZrF3kHM/ZOyK38b2bEkw0tI8bjS8qH8yATkswI9hieHMc6RVDzdycjPPbKY1exzBM9lPSs9uQrUmSTzvlAmW+jwnJgJjxyfMw5mxQbBIU8m/w3y+9S4MHMQM2gNmYQtDzOAqUxg83vlowiY6B+TovKTKZa8kwpsn0mqAVpT8ujg5AwxFqrRd7y/UzLUGBsCa4JDEuwlOPgWIkvgCkajsn7i6gVKzMc4skJTFLf8wRSfd8/9GpDQERjLHHkvnUsz6+WX37w7Hq9OHfWxNDGNviubQ0ZNZgJwREaNGrmkzFggZjJWkKBtm17j4yG7JKw9rdtD8YSIUl0QhAJxIaw6MOn9aq+fur2/r999+pl122B2BKiIKA6EYeKSQjCAkSpHA6jQERKne9+j/PlpLzI1+99sYwSnxzqpPiYPJnVXBlbZRPSK4d9vyP4/ZMf3R9XcYQCl6AQl8fHgWXQO0FPtTEMNiKAEcVqEEEZAlDeq9cSJQJHwuhIapEVmuv16vnFelObpYEVhVVV16t6b4UBHTlnnqyWzevb7Tdv797dbQWlj9EIGgDnXAL+MDtVigrFAdR5ai0jYtt2ajwAgNCxfnJmHURUaiN6lGMo0wrSdZ12eBIZVR2f2+rJijvhUCuxJX8l8ZkwcIN02ClGCSEYg8elkQyvi8v7lXgCIpLWUM1XPjdoKktlymixagBQlbbEh5KT6KX7WHJCKThq5MARAscYOUrvI7dtK5EJ0FprCQkEJJLA4XB48+bNek0A6AO0h344WkKEgCEcD34PxgYHZiZ0WUaU+5WFRYaqGicSjm5H5mSrIBtjtFBizvsDBsCIcOwLkKUGAIQQ1Io+QRePEFT+0xF0KVMs44Bq2kSkdpSuJyHMcK5VBGBIiNbkXENkQu8R1enpWbTrhp63RBA0ye2CiIgGCHf7ravq9Xp5drZpmgWSpYMNIVh72XXdq1evbm9vLy8vr66u6mpxcXEhIlpIqW3btu36vn94eHh4eHj5/dvVanV5eXlxcaGtIzRSWtc1M2tpxkwsIgIY8tqTNz+KFi2jmHYtQO4UoKWPYoAuOGcMDdpjXS1EBIUgAgKSUAjRmarvAmJERDJA1gIRRM8hkO1FIhJS455++PTpR8/VFfh/+Z/q/nB49+7dy5cv375+/fL7199+++2rV6++/fZbRAw9tG3b9YeqahbrZn2+soAMyFqcTa+TvnNE1B4spYCfsIP5HREhM20dMVE1MgKlMUfFQsuhSrUge68nth8UwiYPmynkPeJtPqthDjwVCRMgTNZewk2xNgNzoGcDZbY04uhgzJyHlnekUOzylYMDeaXvId18vzSA5wssLy4SUydjluBNf9eq0wULIFFXtn7FOIsCZUK8MQZQ2oP/pz/743/1L//8X/+LP/3jP/pq2djd9s63Dz50CIZD4AgM8eFwePv25v7+/vz8XCL3fb9eL1988HyzOWcOals5Z1Qn7n0X+4goxpgQehFR1U497CEEZrh68mSz2Xz37Uth/u6777755psY5ca9vbl5u1gsXnzw/MnZedM0+0NgDnVd+9BmfOJCMoUQ9KBHCShEXCzqL7/4/P/9v/6lcaZh6j0HZlSehwMTlDi03QGJWMAWZmK1xIoJqkz+OXcZpB8nzvzAmKeXn56QYTlmKZNOzmGCKpNv/YFXnkxGNtH618miU05gU9pSXoJ+5GQqNcBxzDTVtNjH51CuUe+UkdtJFC5zUSpSLkV5/DgZAQq6niAPFMVpYMwQJEUykz1pcGwW5uSW+fznDHw+Hyi2LG+0pP5FmXLzHE5uPY7PfJZBCUx2qaTwpj6sWuCYKbEWbR1tCWqfviHsUALtMXqBgi7mzPaxKwg7YyyxeG85PDtrPn365KPLc+PbpcOmdjUsdjuOvus6rKoqQCC0zlhC0KCRGboJomtqt2zirtt3nWexDTvAm5tDH2EZI1UC2FuSRqCK1Ah2D7fnwXx1/cQYQ69e/26/Nc5pUVVt23xENgANlBOACCAiIQjwvHPRhJrmf52ImzkkJ3/NhDABNRT0dfLr5bBzfpUHyag151cT7J08MEHpxwxIwSFEUs5qELIzqtGLBEGEEVjbhSMKmqj/nwYgEC3hWXEwhI4sERqkylBjqbawaezzJ5tnF+tKAsXWIdYGGkdAwCAdCNbkzNpaAuDK0JuH7e4QkQSBK2MBNLiDmPA5H9NNJ6mGLM2maYyx+RCgpq6VjmP9sV5tyjVK8rynzFBfes3mAC8ZY/lMJjQpijbnSIuq1JR6FYiIUnTOcYXUhQsSk7S2lqQKmlRCWUSstdn2KFen+55TJ/KV8YpSLVk1VCJMhUUuQntELRkArl/s+74Pnln6ENreazB2tVot6gZRLGHljITY+f7l96/3G+Oci0z3223fayvFkedO7Q3mwBJJQI+a5CXnOYTcj66IVpUcW4uycoqvEhGz9hsYjPYBmen4VhZM+c7EL6yfKcFYUl/mKiV153kOkohHDKHEnHTEkZiBJQ7KI5F6ZtJvDiEAs7W2qipjUMAgogHKg5NBa4gjNE29XDZI0rYtGbdYLBaLhfdRU0O7rnv37t1+vz/bXKxWq8VioTmiTdPEyCGE/X6/3W4RqG3bly9f3tzcVFXVNM16vV4ul5CSijV4qDQSY2TWOqk+B65iDBJiVVWcEAYAMNWWY4mUfDSVs1Xl6rohsnfv7hyZ84tNXTtYrAiA2hbQVJsr8J5DJyzsAbwXiAAIDGQtoED0gML+EGM0FgHOq1X9Yv3Ji08/UdbkHx7u7u5+9avfeO/fvHnz29/+9ptvvrl9d/Pw8LB7OBxrmidaBU05SBcMBTAKQT5n9PC4tFCzfsKmJ6ynREGYWYP6I9XLsnOsnXy0/OvJeWKh
arSNU1jTk74njMRssZeIqgSy9SEkgh07OjJ2RX3/dN02gfjhijMYbIgVZ4DoElMAcCvL7Z3txclWXRdV0MXQx905wkOhnDNdOBmPv0aOzTqGVORKIxRiR2XYND5KqIRJGK2erzXUea+2etNcbltrYkrqu2jKOfMIGlPbVmbGUhcs4U2Gw21tqiPHfR0Of7LkT2ihh+vFSBT44+mWQfnGGbBgGAyEOUo1aUSUc/9x0AxOiBAJDb7mAtvXj+/P/yf/0/V4X73W+//Ol//Nuvf/fbd+/eHXYPt6eHdX0lIsgoIgapLEs0BsjsjyfnaFU6a21hSwPIIYYQVtfX+/2+bVsQOp2OyfV9fX09zjBaa41FaysAcDwp8677pHiScFWrESiZaBHRbr9vOi1ze2qaY+9bSwYRtuvNpl5dXW3W9coYIzH8s3/2z7que/r06U9+8uMf//jHf/RHP/zTP/1TQjkeDi9fvvzFL37185///Kuvvnr39q5t2xhj0zQAUBTWlQUi+tCF2FtTPH36lFxhTaHdLwFQnYQte1CNmYQEEBFhKJNDNOHliEIEZ4WQpmX9LsooMp6xM4qFzLUI02tGV4kvyCLkLGMoc4VnJuLMWMbs4fecCpDJTOnVsDhILh6Z0z8vF+7TReRiR1pUchrMjvAcIKN961w2QxY5hDFemNtsJnK+dJJz58/49rNKnO7ndoEcFBrSkK7EOB7LIcwXm0yPxhjAswqEiACkJ8TQhwAIkc+VZoBE5N27t7e3txpg6ZzzPkbufv6zX/3Hn37+V3/9ty9f33khQBOEQzSr1TMi07XiI4s4MoWIBK/NCn0I7GxZlKX3Hg1Uq9KHTntPWwOAwBJKZ6rartdPYgy+69++fn08HpumDV3PzGKeWlciWZau69sQO0So6xLRiFHIYx8DABOhLQqMiEiIAjChr6UAPeDkJZSb7UW6yZlEmO8LXmyw/siVBs+R5IxAi06kuYU4cfk0k4yKz5fg5ZA2JZclHSUCwanmlh6ccZUEhPTz/IIMPukBZpWq5s/n0BjmjKmR9rzvy+y9aY3vYUFmaiCDKUmmkdM+xjhhGrjggTPulxuGltPIF5jgMJsPnI27k1BzWThIL46fj/aeB2LWMzrBzWQ9S2BU/2ZbM7uDNNpTplPK53ARVt/xwlEtVTQeLhYENkggWhaYyBggAh9PTdifWgdSEpVUWIrGFc6Ym3r1fMPfFof77oGDoZWNwI2P2HoJe+paaau2Jozt/f7h7Z73J4gWEIGADYJoggeaE9C5mCfkbEFvEqStxGHJA6BGS8pFOJz5EshmtXrxgqy1gAhCxlofAqBujQFgQSREEgRAA2YEspAM4Q0iGGK0SIUpYu+7U0MrF71njt73KkEygzVj5B5NckpzVH9PjjpkVJCv9LHnZ8ufEUI+bHp+RlyIWGPFAK1IzwLI1joSFIbnT5/+6LNP/vizD57WtCnj1cY5G9rTw+GI+92ub1sfut3xcN/6r+/2X3x9u/ckdcUgIcZiVTx9evP0yXXhqCxLStUXha0ry7qKMbKM4RXMLEJRiIUAYSwrr3Us0oRTwZXUgJGIUkTYlKkawJgFSQ4N68LQbSKoR0KVwFyHCVknsAS3WRgIjoLlQO84D/qXTBfFrAG9lvWHaRU9fZe6CrXuqPe+KzpCq9HIq9VKF07kR057rgiiVxpWo/5yi3yM0bkiIYmyplT3RWNQ84KrANC2rX7oug4iqzHW+66uivV6/ezZM0N4Oh2Er7qu2+8fGIZ1mbFvBw5FKd3o0R3aMh2Px6ZpisJWVaU6MCLWda3v8t6n8qeppZAxpus6zIT5lDSYeg8mnNcf+s6n9obWGq2goxvHzG3bKnA41R1l7Pqmbduu65JTUeTctgTG40NfpMmVOAjbZ8Uvxjx38ZyOqLl8XdeJsIDvuubJ05tnz6///V/95Z//b/7RX/zFP/6v/6v/HUS+u7v99Rdf/OxnP/ur/+XztvdItFqtjHG97zTeU7uVGBh6TRky6kOOBM45LdR5Oran0xHGc1Pr4jjnXGHMUEumJ7dKCK8WBxRRNEhKNcpALzHG2DWn02m32zVNE0VJjwWiK21dV1XpCjuYKgwKGFMUxVdfffWb3/z6b//2P4rEq6ur73//+8+f3vyjf/SPfvKTn/zT/+6/+6cA0Mfj4fCzn/3sb/7mb379m5++fv369vYWCNU9qBcIoXVEhGC0zaNIFCatSHTmkAIADKIdNYegGxERCQAWAKw1RrSXSIww6JKIiIYo5wIyWo5773O3j4zWDhhlWR7Jm4isc0spB0aLS054MNp9Y1AWM1RNHREL7Bj7nl+oupmwCAtHEAaNOxEY8kF5QP3R2n6ugDRj9GnwOPYMnLG2nDGlpNFEdYN8j2xNcV6+ndhXzruiEDbGWqsEjGcR/Kwti6AIMKtly4pIjMI8mfl4qkG+rpEPI4CN3NFYmDgdgeMPJyb5cUWzwjOpgZik01Oyqj9EpB+ZmTkmWI03A7MGGwgiMAftkaKTMcZYO/QmsoQStTUFRmZjC2Ptoe2wfvKqKV8eV4fiT3au+5//8pe//vXvf/rzz3/729+zGOFrkRsUUC85OsQ+iGFrrUP0IYTYA7B1CKFHhsIIIhDDqjBEINIw7owbYFLX9Xq9FZG2bb99/aZpmuPxqJqwMQbICBKZHTMCAyJacgatmi3VdaEIYQEQLTAAAxivCD38fKAdgijqOYPRPEbaxgCC8ADhXKhNikR+dmKmASYJfvxr2Ij8uEWwuh3LK3f05a+e4sZUUxoRIFFxGiHNDQAAlSolhkE5TiUJS8B49vyDMYrwHKOua2LmREQEI+ATu8BpKuPIEM6TgdF6h4gsQwNAEGFhBJCpODjsgoFRAwAaean2vxsgi4g6Koi7pHirpRAzgTVBDxGZxRjSZUpmSfU+NY4fOJ6ObIYJobaUSxyJJc1nEpMZ/aRvavrsjBt0YAYUVMcsCpoBZxlBu0KOU80ayuktRaskUZ0ZBAAi+rEIW5qhIXLWqp07oVBivEQoomgJWe9BNlaIzjxZGId2jzDoxsYajEYhbK0NsTd0jukaQGrIe69HQQ4HWPR7/IPXqehQXw/koByOT5IAcJQANLq1fCCAEmhfUXfqH9gewXl3ZTZP1/WT5y9W26478fHV3XHX9A8RYjTGFKWtAeVI8GUff/euRS0TKlsm4RUzc2CV5wCNQWsBwGLeHjZX7QYi0tlSWp0Z8rcRRKObAIBZrKW+jwIMwjFEIrKGQbwx5el0+vWXvy3Lcr9rLJpVuXrYH4uiAgCtsDrmUkZRDzMCICCB0NBiR0BMYUKIbd/bsupDXJmNb3tjnDPOGRujVEXFEcBA8CoLqq+GBMakAtF/5x7OXMFYXjmbyu8nNpV/lXAgfzjnOelKv/K9EUK2jAYYo0hvAdaGn5XlHz+rP13Rk7q4unJo4rHr9/vj6fd3AeAU4W3jvznw7x6aV/t+hxWuDEiAcHxS07NN+WS9fvrkuUMoQPZNf+jibRtenvz2xTNfbXogBrEIRMTAMQoQBbKAhN4POE9kBuARA/c+q
OTGzCBI2vA2RmtM1/XMbC1pKrsAUGENU9M0RMCsFSY1rNdI9KHrkaUwNggPYTWIMUZnrWSQGVmZuieToX8waqsAQwv3AMC5NXkSmQBAC97ODjsACDEAorEWELu+92Oxx3W3dgenQaTq/VPdpnaVQXSOABhBgCOHEJgMxlY8cK+aQxz78nXMzjmDBDEKRJUqSZFBPIgHEDAQQPbNaXfYg+yjeOiFIlhrQbjvuui7cnXTsdsdO+uIIfSy6+VbKvcIq1GZNMwQg0BkD9SIaMKYMUZjs51zaG0Poe8P1tqiKIhM2/Z9H1SzTTqzgsg5N2qJNjXC1RYao3hgcsQOoT+dGmMo8IAq0MPhdEwpczgK+al+DBFx9IpUXdcpb6exNwaPp48QEqnEQnbI4YzMUVt6pboyzKHrPHMwFq21PkTvvaG2a9oY4ycfffxwd4ziP3y6LVl++pd/9euf/u1ms3r+4un3vvfpp59+8ic/+aN/9F/+4//xf4z73enf/eW//1f/+i/f3B3Kau0MHtumMMgSRNgYUxiyxqEQMwPb2tUFFd772tVd8CGE/X5XVEPhIiQQFokKW6wlCIsxhkM4NAcictYGP1jANZhWCxF579u27ZtTCEFr3qBAZE/GbLdbY6iqqqvrTekKAraFY8T21OwPJxY0tgyREelhf/irv/r3RPQ//X/+1fX19UcfffTJJ5/+4LPv/fCHP/zxT/70z//ivzi8/e0//+f//K//+q91F8qyLOvqcDiUpQUQ5l5pCgYJR0x4CsP5O7QaBgA0wLFzzpARpta4aB0UhbOW7HvOxVwQTASc31leSR/IMTWNljsh85LBOUdOBpv8h/kDM8lvNnhWXSoTXKYznK3xPWvJ55aMKwoBzambzQFwLoIsT6YluBJIaVHoZbZwyTwz6aVn4Wlq8jyzaZiIyxdPwXSdJekpZB7zHKaHE1tPeJL/HDOVO/2Qx/zjOHYIVTmPx3xrIooBI3c/++nP2+5ffP7Fbz//1W99wKaLzlaqg4kIChINNa+JY4y992gdWWeQTO/98dit1zWSKAICMnPwPjIzY9hut1dXV1qG63A4vHr16u3bt3nbpWQTGYE8NzMv4bmE4QxiyQ88oxQdfymy5NuXHp7RSBpfxly+BOREmMuZ5Puek1W+hFw8SvEt+TTewxYeW0s+pfReHoqIXBhQREvvXKYmnnbaPL9uyoLyf3MndvauOcHOYMuTyOcLa/yD0OAxGye/mVNlvtGzsAJ4L2otX5ru5FEYMMe3+QzfwxVnL5XRupS09PRAUs8ugkWEZ/fH6T26vzN80/s0RoLNpo2jDjvfpu+mB06g8Xd5vg8syE3LOyN3e3O9Krfr2lqLSEIObAH2hAIs6DkGJgkBDZEMxZZYMQRksqLF7l+83rNrM4RERF3WyBBQIzy1CaGgERH1AACS6gDKmQeS0v51wCKaf3KBEfGoyuYTxjEja/RLAy7mNp335TCc9y82//Yir5s9A5eo6eJbREQQmHJdcWDgZV2LtV7AiwQxINJFOfXhofM94EPnv304fv1u9/p4OkUQa/rOI0WLuN2snl1fb6rSCEOMnqhp+/v73Zs3t8djc/OJrVZ1zwz/P+r+tNeWJDkQxMzMPSLOcpe35l5ZK1lVZJNFdmMWcUYajNAtjloSZwYQ9A8k/Zr+CdKHkT5KHwQIkjCYRg80aqlbgDAim83mWkxW5frW++6955xY3M30wSL8WLjHOfmyqtjiOBIv48bxcDe3zc3czc0nB1hEiFAYBTjGWLlc8+tVkTyF6BOR8HG/CzgQUdM0VTVSOe16ZRpJi0ZFKioIj7PVSMF5PoIM4RnyzQQ6I4cNu0jMkOmrU5RN6vHu7i4l+UiX2ldVtfON9346Z3WcxDcxElHb903TJERVVRWFq6qqnEdEN6bVBQAYOIYQ2ra/3+3uD/v73WG327Vt64QRvfPooXLoBCIhsbjDHu7eBA57RygS2zb0nePYDLFPcs0R1O9yjg67vXPDeAsoOCIabyH3I2xp800PTN7cvKqqSg9SIqJ6I1rBe484ZpVUMk2RsYjTPoSeEQ0h6MKxTBHCycm0O/Zgdghh+jVtF+lukL2GgYBSJgS9xSFGEwmCMTHkiNth6Lou6oUc8f7q6qpZVU+ePtqs6r/6yz/frtY//PVf+8lv/+YwdM+fPfv8809/+tO/JML1er29WP/wB7/18NE7IYTtdtt2QvWKBRkhhF6PNCeOmgYybtU0TRNC8H2niYtUy4UQ0B/zCQFA27aTTSIxxrqu9QaIZKnGGIdh3C0cK3jvPG70TKwEJFpvGua4blar1cqTg3GPBJ1zutcqEquqGpMEE3lPIfSvXr16/fr1n/3Zn21Xm0ePHr333gePHj1qmvjp5188ePTYe397extibIC228u26xARwCGhAyAcpYy8xMgxxin16LgNQ3qRp2OklXNSN+q/y4JDaA2FtOcwahpzEKg0mxKj202DtCuYxF5/sgarNYt1GSMzcbBYiT+lKbA4bYVzyzWbn6AwgDKEWFVlmsXMnMJpWR3MDMdLYaVWo+H8IKX+XsIwV6P5VJcgSdUWP09QGVFcuJqs1MLpc/smYdJuqOqDTi3JYbY4xymkTeYWsH2QefG+3reHP/mTP/3kZ1++en2/3w3b7aPt5qrrIgIJAgrqWiQiiIBAV9WV954ldH3LHIng4nKFqBcEjScuEMk5BIBm06zX6xDCl19++cUXX9zd3QFAMoBUEVsMEyLkYS8J+CNLGPZYNlysCWXlqzyGatQZ2mI/t8SFSb7EnKBI7digkewhETERKGMtixDLA3YUlmfGPycELAva/I6ZqRFG9JZ5bHenZN8yasKJTFkoMw2Acy/C4HNBBEr0QiFlqVhFl0r6M1kw2Yjm6gWSzrSRBeWQoZi5S9lPD+W4ss/nIvkNfMJFwKTIOZHpEDALSWb4y6HOPO0kp170EyIadzsN/DBieAbq2P43dAjLAcJZn5kFI8BhGNx9cMyeCAjvusF7/+xufz/AABAIIqEARRh3aM90itM646h/5mf7M4b52oGIeR4JoweKRIZhOBwOfQx15QFGUzWZeqwxEAKIggIMMu6VkjslDtl0o70554g84mDhEUEAGTO6z0E+xc9nxpgxm2X7DNTyIZORpefEh5NgIjBjG+Ku7e6bunIY7/eM3aHv7tr4Ruju0D17c/fZqzdf7vZ3AYLzgA4cO6BVQ4+vrz545/GDq02NDBK7QPe7w/PXNy9ubroB6tVmu73Afhg4QuQpewdwhAjIkcHNchCI3mYEx5uy05BH/Hu9etsxB11yretagKeQwjg5h2MgiV7RLiLoHZq1sJEVTxwdL9FYYjg1RdNtb3ZuKpWhVe9ivMFUX69noCmKUh1CgvHS+XSXulbWfTN1DBQh473coJkkHRF5IoejJzBeQ3+3u727u9sf+r4P4030W0Jw6BE9MIoEYAdYHXbDG+x3d4GZCYWZuw6YG/QOERCRI/d9Pwwa6SB9DCQwcHTBeSTCcae045A0m6rT1Wq1XjeaTVSvJteJTH0zdWuJqKqalAnG
RsbSdFOFTi5ucghBJCISosZjVN6PloPO+4hR7Sczj6RsqCMzzMOUILIAVN7HGJnHWzfS594TM+oG5BD6vu8Qsa79atP4iuIQauff++63b1+9RJAnT548uLp88PDKI9ze3dzd3d3cvH727Nnt7c0//+f/HMGxeF9t6vpiiMwMF+vN/QGYSU/PjlBpjNR0H5+iRaNiEn6UCe1UonKBiOow9+3QVePB0ZGfw5jUR2OY13VFRCH2AuCc0zCKuvHD0DdVXdc1AY77qCIisl43zmHbhshBRBwCIjbNVu+W2Gwu1nWjPtF+f9+2bT/cAUDTNJEByXvyAgRIVWODFqcrKJnjsBMRh+iU/8ERkfPY9y0Qk5O6cZeXmwcPL7bbdVVVPjMQk3hnc7kWZnbeZ/UzrYrzqHFbLbH1og2xaLuk1s6USQ/mF3Dh3ELNerG6snxpv8oG6JwjM4vPQUWOxxg2q8LK+dL+aiqUrunMDi5DZezaPJoCc90q83vGEHUOBlgiYglqQm+qnKgJZobA+RXYqabFv/08IaH8lab0viHybncIsdpurrfrSsAB1YSDhu8xcIwsEFmiE4fEkdvQCQA4j7XTQ9g8DINAJILK13XtR6VA8OzFZ5988snLly8Ph4OuvTFz27YXFxcJe8mgoTEUdoaipEqUUouYFFOy9xmZ0DjpZVM4L2Im+7QKo0WDRmRuDeAUs1dSOR2xQHOh7SKoFuasWEJbamaNlFJmsWRby77NJDrrOqs5MtUcdSUk9l9EBDzngFlhz361o84GsljT1tel1gy2NLNm4pwaKWlRDhDmPAYFUSyHLAKcFdua/SSFNNuXJQUXwZhhb/YBpRfTLIBp+hj1zAl9lVEAS7l9uyLFDuEpyo5weieAkXkXBtm1DG8Ofby62TPz69u752/29wEGpAieiRDA4RFvPIFKuqyJ03LJnPdswWLpoQCS7ellKeqkiszQ9/3u/kCXY2aOqvKA1PeBfI3IiICot8giTGmiF/GKiACjthRBM+MT0RgZxBpPeLwgochWjcesBCUPn7IK7IRSKpYzzLnYkRVS8z6ZjLqAgQHwxes3X17d1OQDR9qFPh4Y474Nzwd8cXv47OXNy13boad1jeh6jnW9cjLUjq/W60dXl1cNVcLC4UXHL+7uX9/tuwDgPJIH8lTJmmrNmx9CiFE4wjDEjkPSNmYiRpFjBFaMEQS9H0+pVa723ovErhuDrlliCBqSM6QbJiaWYN0h5CkUGYx9leYDmBt4S8wwVkhxHPqn+iTqw/CUaGCROmmYiexiCkyCoPGNKeWm9x6FnHP7/V4v7ktXsQ3TxYD6Rl0CIpIxNB8QsXLO03hb4DAM+317f3/fdh2Rr6pqs9kgYkOXiDhmRuPI7Al9Xa36oSWsw8AhTrGX7IiqCIzohCWEcd8MEUMIKQtOjFFIiAQicIS27xQwmGytvu/3+9GFTmP33q/W9Wq1qnyjLqKmCRUZjwuZrKSOx4sK4jAMTe2SF627W4nKWiGdlhxzQUU+BvU40vrplARN4f6TgEvfHfTs5WREqecfttutJlxhCTEGZm6aqq7r7bYK/QAADx5c/eAH3293u59/8tf3t28+eu9phcDMTVVvHz/94J13fu37P2DmIcgnn3z613/zadchuPXd/eG+7RTaGGFcxUIHoMfhXIVHxwkR9cBk0zQalKGYsXMTRxEBImSGoQsd9yLSdV3KkuhxjHfQC+LX11dN0whEDWaOPERmX5FzpKdXZDoBzhxiHDQ1kQA39aqp/Gq1Wq/XDx486LpO79usqLKKf7W5UB7o+55cRUSBkTXPtiQ7AWMEpdv1lQshMItzA6EIBSX2o8fb9aa5vNpcXG0vL9fbi5We3vLW0M80o90uKIUzU5enZib7lf03PSTaJFVeahNbM2szMZ81FHAyfO3eI8y1jJxwUbJhJss4FV1uKPGgbFaqxAzybI7B3MY9gjTVnAHJPIsyheJ2R8xcaByT5p/X1+khC0ktrZDsz3THYGok7SBlveiblJymRIKdWtJ47+53db1ypDnocBhi2w7kgcglEiFFACYQgRj5sFqt9HDmod0DQF3XVeWalXPOr9fr1boZhuHly5fPnj27u7vbHW50qri8vNRNf+fcxcVFNCE6MHmDRCRwPElvUYSImWG0iO1U7ObPouwsikOqn7BUdqQfplMEdhUgo5TVjLq4uLh/a9h7FsRou7Nzs33WKhY2ABCxyzH6pkQdlwlXy5J6tMsQMOfqRb00hcAdmW3UBicsxVQt64VN9tFMY2Sf65/Z/iEWEQ0l/JbWMNF0rjTyNTX7vNiyJb19ztTyKYTb1tLnPF1HuQg2zNW+VaGWbU45bdOOdzodOiIhctRPZ+2P1fINtF/AIfymhQErV6ETjrCLIdwf9kNs7nYhhK4P+37oAcF5ISeAjDKd6RdORJxW6hJbotk2LUX+vJ4REZiu5RBJXtxIshgEJAKIgMaWa4ggV1XlqAogpFcYzwg9Am76lWwtDEZ6af/H2VmmLLtHH3U2EWQuq+jwYC7Fi+Jsx5vJQsaBpbbEGQxfW6YzzwJIeiAZI9Kbtv/sxWtm3l1fIsYhHCLJ3f7u0+d8e7d7te9aBq4r9FXPEoQ9jCe6vKNNU9cOuN/HobvZ4Ys3+zf7NgBF9Id2OHQ9eV/5GiquIod+iFEiA8AQhDUrC8yzuGWRSgiUNnNour1QJ+4Qwm6/06PyXXeYrp0AxOlMVAxBjhdOHNd8DQJlnsPTzlxnqKbguZS73xxaTp9kf6oGOMXwqUcRUU8GEQkcER0OB902nC619yl1ikZKp/WIwAOixhpRrTtpoFfkDfv9/nA4EPqLq83l5WVVVYzQ7Vs33WfIDBKnm3H6ynmUICSqFWOMgwAI1uPlK8LoqHbrkQnpqME0DkdEhMNms0lKcgpW4uQ8Jx3b9Ye2cyq26cJ6zYTpnNtsLtSdS8t2mgVDRAh92kxOm7SKljSrpiUbIoLI6eLBpPCFSO26o5Aw6w7q/nA7OYQzE+LN3e39/W3XHYjIV44IgNbV0Lv7Hgk+ev+DH/76r73/7tPh8IP/9v/zL198+UXz419Hlgpgvbmom0qjTAHAbRr/vYoj/ps/++v727ZabR8+eBAB+f4WACBOU8ykrtAU55xDaJoGANq+U/OvjyHtc4pI7Ef/MA5hv98rivRk5jAMCRsagisi21WjzrnmT4lMQ+xx3CvSzUa1ASRG0DWL6wdX6/X6+vKqaUbardfrvu9FRIImatGY4cp7v+/uqqpyrmrWXt3+IQyVr2OMTELo1DnxfrTj0d0iM6H4qlmtqs3m4uLiotmsN5vV9fXlw0fX63UNyG2/3+12bbv3pURZAUaTxCLxq5Xnxc+zaf6M6OqfdnMjNYKYzzGnILQ9TvrweP91BsPifHCmQnIsYT4tWVU1BwnLX6Xwxyxy7MCzmQ9N4FOqYEsSWjEzvUUvIsJ8/RWPHuOCG1xqWzSOIhz1cn7hWBrjeX2dvrKLXkqv1BqOS1lRRAYE5yqOApGcDyC1bs13g65iCjkmECRBFIAIGAS6EPvIvXOy3qw
uLy81P/WrV6+++PLT29tbveJTB/X06VNdbtE5UhfGNPgEC0OfmZHEXOV5tOaJSGQBmYgzKqeflCJ273EkGTKYldRF8bHtpzp2q9Z+mHrXNa2qOiZytPRCRNsIFFKThmZ3pE+VuaKYUT9r2cq+9mPQFRHzWxCxcPAs753B0hlQsy5gzqtWssquFwduxRkM9jIgj9E1c8N0EVep5SOrFKC+zdCy5+Ny71KbZbF1Snm3kwUWChyNsUh01BslvwGA3R6EIyanrh0AggCEKItXF2qrRx2oXwHACU44xSEqwNmbc+wEHAERgVzNgG2MfRvdoNeCU3Q1Iwk5TkI2n1MQUQpWL8khp2fGkkURWeVIRGyEtjrMHCMiII7J7tW1W602rvLt7uD16gLkMfKag4wqTdR1RUpIZREEjNNJhFFeOOUnFGHmKTP+AEKIAqN5VCgTzCfQEgMnSVCITHp5SrLKBkv0Ks4F1SFMeokEJQAMgF+8fnO3373ePWwqDDIEHl68fvXVPUWB4KrBY0Snt7ygr7qhrzyK6KEmLxCGLvSH4faAt4duP0ggP4jsh6Eb4qZZxRh9ReQceCFi0sSGUkEMME8JRkQ4hQ6ODgAQurQsGA6HThN1DsPQD13btm3bxng8EkYEGjiqFjAYyYXJoqD59KTPzGzvAASjWPjE8RmdVdX9SKOw7H1K5Za61FJKpgR7zsy/OrMfDgdE1FVjNyVEma5CAU3WJSIgse05hIACzMdNMOfRIQELh8ggQ7j11Xq13tZ1Beg4RBGPiF3HRDQM0ldeRIYQsRMiIr+V6dK/BHbKVjDhkOq61mpV1SjYUxqt0XM7RoohhxD0Zr8QQjv0OhBE1LsciWi1umuatS580xRSqxuJ9+1eY2UtAhUD3nvnPPoqYdg5BzAmD1M3L0YWiRJC7SiCxDieR01XIwYe9AqNlFtY84geDodhGERis6qbRpHPMQ64xYpcXdcPH14T4HvvPq2df/7VFw4BObJ61wASQgXUNM2+79d1c315tdlsDoe7GEIUJ2o0IgBAjIMIMgLgmCROKa5bpozjSkclrHvjfQyWpYe2CyFIZL15UvE5DENVVTJd/ein8Nqu62LfhRAePnzoa1dVlfM1BGV+ANHcXYQSZUrJ+fHHHz+8e1hXFdF4LnEYut1u13VdRVVVVU2z0hW0SZFC3wcA8HWVSCbAVe0BQAgQZyfRiOLFur68vHz48PH19fV2c7ndbuv1artdi8ih3b14+cXd/Zu+13Sy7EvDLtOPST5x6e6EVMcKsF1pyFWJWeHOdPT05ricmbSJFNbSYoNawe7plcOxjo0d7+Kkm+qkNQM7xmzanpo6jtduziziOcOb7cIo0HxzwMKWbLJTdIHJRy2/SjaSra86OXWXcJsp6EQgu5PGJsw4MwTThzQd4LZbkdl2GRguqrYrZh6GiONFOiLAw9BVtaZaHhwyOQaMIANzqCppuzs9ZPzuu+9dX18fDodXr1794R/+leYa1mmgqvSUoLx48WK1Wq1WK5Xnruvqur64uEjZX8HMW8xcOQfFWaaJB3LOtC/RWMO2QvZyJNjceYbThedHyCye7VSK086tPdsJc7bPSIDTckPGV2V3luJWFmzjNsQF4CjaVnzSXl0al8h4EbPpNGH7yM8y37csOdCWTKElEoy/wlFAbC+lpEywOnUCJu+F6JiJAEb7W0afQgDQZKSwwpWJs2UPi+oMvRbnpR4rn0uclPrwLcsiJBb/Fs+2wsRXxwVpaywiEAgB5Pnp9fQU4jG7FU7LMTDXVwaenCG/0QBTsZDD14skIwMjCoAgRb11TaY1ON1TAmEQQNFUM0cOpNEbRETBo++aqG77/VrNAIVs6itNQWqwMTreMcjQ6xEyrptmtVrtdocji06rECKCo4wce5kUnUxd6A68iDBECIFjGDdsj66p+vbg1ILRlSg4rgGkddCvcdWWabB08rYU4QxRZc1My4kII2haX0TSnJlRMAgNKDeH9u4AbQREiRLEwZu7/gA1eofOC1IUiCyI6AiBAYj6Lt7d79/cHwYn7a4bDt2zW77vYicYhAJQFIwMfd8TiMMKp00eQgIhRpJAanwnQjvnpnNSbtT2gpzSRYao96N1Xdf3vV4H573XrUONJm3byMwptz7MjkUsrJGJCINEYWap0GWT2ljB7C6ObGdurkrTffZJZstZApVvsucRTnMZqZrU+q3a9G66qn61WmmGT/TsvXfkhZmjjLcLBIHpVIWItPv7/eF+3NWkg69QoCH0AhGJUe+0AA8AkcF5BEAZc/r7MIx4VgiPiehhPPavW21UedC8qCI4zdqIx2TjmgOPJTBDiu6pqmrwA477exwCqlfWdV1VqeNXq7ep6Vidc044ZWRN7SgkGYbHLWKQ0A8hhLbvku83DAMRhRC6MKgRla4qidKHEIYh6htlQhGp65oIvPchBERhDiJVCGFb174mT7hdrytHlb/46MP3nz378s2bN1eXF5tmRYigudljHHphka7t49BvV+v+Eg6DRAYhX9dI4kPoQ9DNofGAZU01zfL/hzF6Ngbd92uHXkeqscfDfowO1T1J50hhhjhGBvF0MYluKnT7HQC0bbvaNFdXV+tNY2cN78k7H3pFWh85eKqaakVE/dDGGJum1nzOjW+YOUbu+179D71XcLvd6rHe0HfgfV2hdz7GAYkBmBCd181hV1XOOffuu99dr9dXVw8uLi4c6TZm13b9Z5//tQoZObi83L733nfee++9q+vLmUNobaNy+hwHNje8bM30Z7L8UrWsDiKmdKBJtkeGo1xrL5pK6df0E5nbNlX20k/GEj0CaaGyY8xeJi2WjldlXaevRATx6Dyk5X9rxmUYK0dhZ6D0e1bHDpOmkkZkFTHoxDxBmNSrfgKQUweWvILyvf0kUTC1nwXFWdyWY09bYWkIFgPMfH9/W9crV63Wq7qqiCN7T3UNRBJkENAAnAASInch9CL+gw/ff/fdd4no5ubmr376Z5999tmLF68ePHgQYj+EAZAFHDPrzQHr9VpE2rZVNaGqUG//TMi0u4VEarDOaAfFyVjLHgVBc6fXvrTsZKmj3FVi1XKXbSS5OoncE56hbME2laTJ8pIldGJCC20iXAa5hQfmXGQ51moee+QJVftPn+C0YZ5BJWYx4mtXuBI8JcwwrRBnXZxZ0HHk0sI2GHlMCyU8DxZIF8GmXjKBylUcjFGsCa0yvTyid/pVRNxbO4SZSpkBaQITzpTEITKFM9n9Rp42hRJdLP9kUfdny3gzjS5vW6pZSOwAp8st81XF49rYWxfEfIfwfKkcjqtqrPCMG/KsYdKgd1YxouZwQo6jIhIQ4SO0aJKFICIBxNO+0OL7RF+azvIBgF4Di0gpa0hiN00ocmj7ZtWtVhvdQlHrJ0xnXEUEgUUMO8238gAAkEWEIxOBCAOCEZApgx8jQ+LZ0dtdHFqm3KyyOkWCxFRWphYRlbGHFcnFlmfvkWEKdBeEgUHQVQj3zPv9fohhfbEayGFTM3PkGFEAHXpi5jiEhlwUbLv41eu7L17dXlZ+2HdDF5+/ub
vrhp4hAkYAQDfEEO7DqqmayhGgqCZHgooiYIqJs+KclO24LROFYZxrukOrp6S894DSdYyIdV1X1UY/bNs9TzkkwcSwwFG4ZqwFAFEYpk1gT5V15Cw5MuWcZiXrIibIZ/e8Ge1hLUHLCfZfWyHGAREjjwGNkyFHSKQHvXSk+0Ol6UarxjdNU1crRySMIupJizA3TVM733WHu/u7rusqT03TvL6/2Ww2b17f6XYcT/fp1XXNzLoPhpMJWlVVCIcpqc94zYarK+echq1OAw8hCACEMIg4771vPFXEzDje5AQDR0B2vvLTPc8AIIh1XSta1JZmAWaUabEbcY9jntKNWsjvPHqouEqxuylOKh0otQsBKKzZpw5dqza8BljpsbouDGlvcPwKx2OuPAVFKJWZ2XtPDmOMMQ4auMTMu93w4PLq6dOnH330URz6/e2bDz98/0//5F8PfddU16t1PXT9MAQS8L5yzrVdd3Gx2W63urHmq40QHfp+s90SBCIC6iSOvOeoWtWNjkUH2IVBd1+7oR8XSsLotequ5rAfTyQqinSXtXZBRDRNUbL2OUYNP767uxuGoWp8COERPvC107viCIGIADnG2HVjatNh6EWwqhoRIYxN08QYeQg0bjzg8SpAXTocusbRulnHOITQe+987WJkXxE49BWtVvVms7q8vNxsV03TrJpeGXIYurv2TdcNIoLOXV5eXFxuHj169OjRg6vra6iUbQavnr0enTpuQYooT7A5IZM0QvJAjvmIibruePg1KW5mTnKi9dNPx0OZE/9pp+VKkvITmdOAVuNP3JbfdmCtNKt9rHoCs1mvBkcChpmdc03TJLNGt4bVYYhxyFSw9sgx6Dyepk+toPy0CHBCGsxX4NLZPOuCqiUhs5kexVw+m3g90YuQYLqPy+4GhBCIfIZkAHCOElnF+PZ2Q88adml5D+b3TKbPjxOoUdMKQGISTXemv2Y+AwCIRCJg7rt+R1gjVUOIIYSmqQC73f5N5fHJ06uHD59Wni4vL9/c3P35n//ZV199dXNzo+p4taq77kAETVOpiZZAZXP+IXUqUxbchPY08K7rHFVJBNKMJSIp8MmOOtktE/mO93PAPGBSNYvzo4jJ3KahKSQVpwkgE8xs9rUbKQkAmDxwW9/CbF1frZCxlohkH84pNeZHmXECABHZi4btqKEwwtRHtkA6p92N9r2Nf07PSXZSajV1URzOXIhkZMi0Ops+H+dFOua7sybLIp4TQqx6xMlBsvrn+Dkdb1sRcyGqVU3TWZHj1lkauBgv2hJFjLteEt1KqLW6mBlxlhIg6ZPVaq1wHrMFzCPTsphnMOrOLu5YTZhGioiaSMDqExlDCrMA+BEVqooTcUWAjymsZ16uiKgTMl0RaPot3I4MUfbN+H5O9GwsCyX2hChAgsCCIhGcn8w4RGBE0gPPIAMwiszkFHVvEEbFYYFx0+RrIUlCnTjZMow2NHGR6jc30VGpIHoIjQi8rw6Htm37EJiIvv3t767X29evbiKI9P10xRkwAwE6cjGGru8225VAZAZ1WkWOASAxDsyxbhqFSsMuJOLF9mq/awGgqhpNveB9fbyXGNSlzBGeob3kfy3lwg0UUpAkJanZnOhLJB5rOtJQNBIYuQlJBCIK1fUgctcHcRVU/hBAqAIOgrozzAIsERGAPMbA0Vf1xebNfvizv/n8W++8g4w3r3f3h+G+D+B8EIxIQ4z397vNanU4BJQhht5776gi78ChY9dsNnr8YbwUzkxMMOnDGFhwuiogRJoOaOi9BRoHqLEzGvKnIw2xjzweRB/10pRBJCEkyvGIAZlUgtlkJMbkI7NQrvO+uhwZIez8OMP/UhboVKw9o//6eRJEmXZKVTWpia/d6QUDsB+Vm6Oq8t5RVU3zSN/L+uKSOcTQC/e3N4f1ZgWyev7VXXfAzWajKcq3260e4M+MIkTsXRTqun7cYKiqyldUiydy63XT933XHWKM5B15x8zoSAKn25vnUTac1HWidYyRnAOAaroAHBG9ZxEh9OMW8ejt32vMZNdtYLp1XffEdrudOjZt2+52O/WXNGx1GAbg2E9OlEz5V0cKUtqZdmn+VcYb/VVjPCBi13VDwKqqnEOtUFWVd5vb+91PP/mb//K/+q9+9IPvv//eOwCw3+/37W5zcdEedhKZOdb1KvZD13XVqt7t901TKYRXj95pW+kjiIBzlV9XjaxijMKsO2x9P6hQKMyHvru/v7+/vxeEtm37vmcAzSirlgwIVVVV115dQe99RXpP43A0KtSXUVoLd10nIkCrw+Fwd+fW21VV+dW6Bo5t2+53d72GnjpwjroWEV3fB0RXVX7oox7YTpqKOUpEInKevK/6/tA0DQAz4rpZMQxE8eKiIceXl5vHTx5eP7yom4o5DEMXghKw7/ueyDeb9cNHD9995/2HDx8+fuedcS1vPA6sZh76zIBY1ImZuWOE8/gmpWkCo3nB6OtMUK35YrWGHmaY2wRIZsU9UzRpHct+sqgsLBjWcrIWTFafzZUYidFFBGC2HXRsc34nhK2TKaYMzwY/eaxLhiLnZvsn6dfkz+R4plkjSblnXvdsFMbySHRcWCmcNI59j2Yt4BT+LfJhmm+sE5JAJaJhaIkoxoDYV86Tc4LMESIPu0Os6+p73/3oe9//+MH1xbNnn3/yySf/4l/8C5V2ZZiUVazreovP4/OJJWe2YWx2M4ePRwrTeMV4BTC5c3Z0tgWLZyoCO2XyfDKoxGxvnOJwS4J8mMd/0zU7s8bSzryMi3mJDxeyEGc9JmYrhSiBNFcIy5DDkbHzvRxE1LMDmCV9TG3x5Dkr9yIRIE4+5FzD2JHmQVBWQVlAeX4GptQ26U1qE0054gcAx6uiMBErExaLTzYL5EkxWoJaJKNxwKys4dyHTItczKz6JNMeOF02AAXFF19mGLNrOllNMa4pFDwzx7mGMIyjyBb4RHSjgmlayLd9TSfeFoqlF5xl7HJ0ZyqkostvMu2wChCByLjLjQCAEgkYgeGYPgYAYArEPPZ4ChL7UAr7WxYaM7uwRAQ6Zlrf7XZXVw+8r588udxutw8f393c3Ox2u5ubGxFJCQOVJzUnMwBMGwsuRkknBp2rnHPC463fjjxir9CbWR51Z/I43jmDpaWTUwjJppJFziyrpcpZsyU/JOHVhwg8BsYCIAgDqGojxIiAglFPmI+5WIGgRwEEIRAGSlclOecEIYALwm/6WN23GOPNbtgNfRAgJCDPzEMf+75vqirK0BMjMA+BPHsBdJXzXsIw7XrhdOHTaJonkww9CU5L8DIulrFERNRd9ykKg4830eG4gi+MiCiMQDhdaKwIAUEkoIky03IqH/nTilipDxcRbmeBksowIn7x/YJXLyLxRLS/c45FQFcyiMg5daXabq/aZpDYIaIucAAQYFU5lKFvO+HBO6grQRk8brerTeiGu/4WdOmzDXpPAGiMNBAiRlWMkRkO3nuYrv1QgarrWhBTOKVu58K4Vj5LqjxhZpyM5kvPIhBDOG4GKMr1w2HolGq6M6ZzkPf+sy8/S3uDOHnp6
lypT5jcdbWp1nUTpzyuxwkFwXtfeZ+uwWDmwJGZw6CnQxeiuBGRxtvSI6IoPIL04sWr29vbVd18/unPKk9//Mf/6tB1P/3krz/66IMnjx/jag1tG4eBIdZ18+r2ze7Qo1tdXV397LMXz756UW8uHz9+vB86EWVS8N7FGOMQYmQeRl9XB7jvWr1P8tDpITqIIs65zWYz5nfxq9F11+1TgiRryooyLZvqKVwOo/bT06pV7cgjgHT9AYVjHIa+Z2ZyQOT0EgwR0TtFAcChCAqA6P5TVWm+nYS0uKqp8iICUrnNtr64eHh1vXnw8LJpHDkBF0Po9/s3XXcIoRcRqh5sLq/eu7p69OjR9dXD7Xa73W6hWUHoQRCANfIEcCTKMa4yE2PLf/Zf+96aGlaSF5XsqXkr2RAybi/Mkk2lRfosO2X2YdYjGg87G1eqYDV+trCUzPRk29l1WZ6HQ2TDWVzBtTXt/GTrTBWO62cJPClOGVkkZA1m+MmGbEFaNCay6RPnJjIYlZ0Ua9l+ueB3Ch47Olsh/dk06+kS1T2LeE/kHEu4erBerertZlVV/tOff/JHf/ji009/9uz57WZda7NVVWlsZ4xHLZm6zp4tDy8aChZgEEgmUTa07MOMWBmq7eyY0CXCiz5qZmQnpFmYy35L2pE5NSfFESNLvgmkXKIXR3EKt4tgzHCLggggx0amcUm2wwNzgyMrlt+yxYVs1kxdpJHOnFXI5HH2oR2jpbX9RP/VM2/J8YOjyMyATPUzfXssZ0W7xDZNx3FTs1bQ0uZnGnvac8vIpGthmdtmG7eg2lVwwzlHAbEMNmnmUW+kb/VXs8Mwwz9NwSMWWiKKkdnsS+AUswAnChpFaum1uLME1mE73WbWPiKiAKMgAIIAMoFahOj0F9AZBPQPyIZ6tizqpfMVTsE5/p9IxlMJGGPsA9/tdqvb28vLy6qunzx5cnV11XXd7e2tnrHZ7/f7/b7vOxHx3kceQgiRh9hFREo2aIxRr8MOIRwO3X7fMvP+sI9D1fdDjFHTJChnzeYLnIWhpp8yni/nl1JMsvelqpR5dmIxs6HMZ8ZjI+oOAeB47fWkrVV+kdIh5PF1ag3AqfISYmAkinqZI7q7nsPtTkJsD33gGEEceQAUkX3X3t3dEUDtxAkTR3aOojBg1ThyFUxbgulAV9+H0UCvKj0nRuQYprMDHnH07AjH1JEgoj5Dr0nwIw8qpyEEIKXIwjQBkhMFETP9Wf4LhbZZ1GYytzdM1zPiLn5rf0p8MlfCxyOLqqhTVMt0hzvoMhMg6lKjr11d++128/DB5dC1EvvDvgmhf/7qvq7rruuYeb1ei8T7u1tErJpaFZE6EgnINux9TIFmEYRcNd0KONl+RKR3JAAAsCTwpkZm+mocOPLow8+v0QMAPbur+13OA4uEoec4TothOB7g1JJQlBbWLaV2YzI/iBNHJWeJiFzlATEKD2EKN8VpvtAw/vFYlciozCVGZgl6JaCrq77jQ9sPw7DebK8fPv7qi8+evXjx7Msv/uhf/9HhsNtuVk+ePHn69OmTJ08264tV0zxZ++rucDhIH/s+DNuNr5r6fn8g7wPrNiaDbvrt267r+kOr2WJ1dIe+0w32yHG9XqtnXlXVxcWF5pVoqE4+iBupk9hpRJx1CIExxiHGqDtvevKQObbd3iEwB+FIApoUCjhW9UpEYhwQHUoEEABBks2qBmDhXk8da85YzYRzdXV1dXWx3W5W2xVSFAjeQ9fv+9C1/aEfDohycbl6+PDdq6vLJ09/qHRRxtbgWAyD5shFRBjvlB25ZXZnQGIFnLany+nfTaGMyc6ApQNUts3EuFYRJFbODBTEvMckt2hit2w7b1/OqI8EjB2XzPcQypK+Tc+ZQ5j9u9ipHW/SvdmOXGok2ymFObZt5WSc4Rz5dmjWPdDPk0JICMmCN7LhlJsbSdVmiukUEk79pN8OQ3Su8pWvqnp7eXF9fU1Ebd9WVXV/e/fZp399e3vbHnbM0FT48KphqSeVLzpxmybNECSZsEfnR8xGq8WhhYqIOILlf4OQmTU/9QUJ7Yv0Sm9GToNjkowCP7konbF15oAtAGy7TvyWEVfMwcUSZij4s2whA6NsZyxTNo35KMSeJzyBk6KlYphZtcwymBPr6BCWYywbARNAlQFfMn8GDE+nZ0sgLfYy5za1bBcI7EPaUYGCLmwCPnEKQB2GPgGctZnBT1Oos2XjtJJt0Z56x2JhLgFjQ8vsAK1+g4KlSyp474Nw6mUR51nXJZLPf1J+LnNvYV4DRUDvkwBAAEZBAT2xzRknCuIpX/CMqjwPtmXaM+VIaxEAEhjP/LRt9/r1Tdf16/VaL0eu6woRHjy4Hqay3+93ux0ze++RomaMCAOHEPs+pMMgzOpTDHd3d19+iXXt+77nWN3f38coiByDRI7Mx6RiqNauuQtUrS8p3DOeR1OfH6z9MFW2XFpi+5TsI2qaFgEWBBTWFf0xLwhABATN04OIJMoPQoBHmwuYkABAM4IgYc8S2k5EIjkHAcHztB6y39+/evVKQn+5XQNHgrqqKhQEckJ1RX5VVWhCnNq27fugu3wJM+iI3HRdBEeazvuIyQHbtu0wdF3XhRDiZOKKCOJxRcni4XiAci7FMR5vGlykV0YsG08B1mhZWliEwiGEE+o9NXWK7rb3FESKiHXjQfchnO6ver3RfH93ixCdx2999N7ldnWxWV9cbtZ1/W/+6tMY483NzatXr/RgWNtKN/QAgx7gBIBoEg16alCCxCisqXsjBBUTjjEy0BSJ5hS/bty/dJNlcjzkr0NwTq+QGK21dB9I0s8ymtDjcaEUOIpRMBxvbsw2/VKDmthznDVoWrtFrJwbt87qyjm97I6HoRu3PTlapwARNVXYKLOMzGKyA5BzzvvauSqEMAR58eLV//n/+n/brKqh3w/d7t2nT7/36792eXUxdP3ffPbpT3/2N1VVXV1dPXr06N133726fvz+R0+/++Lu8+d3A/swxLvDDh2FEDRTi254dofRIdSDnSmvjObXWW3W6/V6tVrpCks6HVZBDQAOjqtUukxaVZVEjpxOukaRWkQIHOtLGCeyw+HQtjCEjkAQBUEqh0heJApzjAMAOwTnBGlsHElC6BGYnHjv1+vV5eXl5eXlarW6vlprmDdLGIbu9v7+9u41cxAIl5fbR4+fXD+8urhYX1xcbC/WtFoBb2WUZUBE8lVVe4AqDnsAEInj0fJJOry1EjJTNZvaS9mzJZ17yT7MBDX1kurYmunwWzpBJ/P9CjbrHzKt7C4adrZTMeZ4FhyVoM10R2qWTFZM05rTdQWYzxmElL0U41mlnywaSxPnlFmT2kkVUvs4d2CsTuTJIcmaWpxHUZc9ing5KHYwxvHOIx7Tr1KEgJZjzOpbNNoBtvcHdOw1IZbw/f2b/X7/5u7m7u4uDAMirpvVgwePkKcsajzyzAT1WEZDs8gHgyZU0kKY7UVMWlWTyuR3P2Z4AEPWjCdTU6l9MhlWR9yaFuafzygLhXxZ7NH8JAkYPs+mzCRisCTg9v2i+GesaD2E
ES2FNpiRO3HaFCZq8SlyPAk2IgRyGddSksDiuVRHi4MSES7uMDiFcO00TpvPSWr0T12QWxL2oxDJCS8dLDXNHl0aRSYmtjXrm6Vm7bpvNhxbbO8yLf9PduGRNDkFi9WiEsmLCLRApmp2p84yarkQpsU5xzE/c7vIqFqyhbYMpAVtBgtXU54pcQqoAwD9SERgnHcnvQe6j4TJ9dFe36aPhPlSkC38i+rdlqRzaNzRcnoO4v7+XkOqdIupqsb87HpbV9p3IiKPrmka8GpEUgxyOLSvX7+5ubk5HA6r1SqEEOMAIG3bPnvWOodEFAZNFj3eFRw52kCAiQ8SrzLRTI7SoOwOyZlRnyJu6iN7cxb3AKBhVhNLy/iXYDJ+NSI0IiIyAI5nREWEgBnHGGgCAY4CiASCFJEHXXX1BAFBU/MjAUAcht3dXe2QeJChRtmKiKswYqigY4D1Zo1TkgIlzW534HY8ma9WCqHTi+mdczEMirEp+Ue6J6AbTws7iAzGPVhAERfLGIgI09ZJQrslR9J1X0uCQmFC+eeCnM7PcqeHcm1aH8qzzdq1XkrhqHJOr0gRIXIE1w+u+vZw2N3e32+H7na/q717UtPlj374cVVVjx8/rqrq5cvn9/f3r2/ffPbZZ69fv+66w749pJwrGhcdOomRQ4yCKnQwDJGZV6v1MPAwdMHpHQzH0614zAI4y6+hQyYHyV0UEXecH4+p0SdCjCYBTxk9dBNPpiuprG5kcyIm4Q0RnR/PsZN3vqpoSoSjHmAKF0882bV6r2PS9rpZyogIonEoUFVVU6/V5n/5+u5waKu6ef7ihuOhclBX+FvvvQuEEeHxB+84pBjjmzdvXty8+uLFs//2j/9w1WyfvvOt+z3c73Zv7gfn1+iqF69eDzyEEDj0wzDEIWj8s0Nh5qqqNptNXddCWNf1er2uVw1qQhPveTr0WFXVhlYJD0jjVhkREKj6oglRk1qOKnrDEPthGELoh9iLcOTBExKBI/TkNcMqOlg3PsYYeYgxiEQBvYWSH19vm6a6uNhcXV9cXl5uNpuqckSkOYdf3Dy/vb0VkaryDx4+3l5tHz1+sN1ur68v19stEIIG6fZCTY3E3jEgCgeOsW9bgdZ7D6qhEv8jg+4QQlHQWIqJI5OAnZHnUlytEGbWAE2n1LSL6YFofuxY1x5sJolM+LOuF1XPkaGnxAxoDBG7wJypFevYZLKR/YmIUzKD2ajfEjn2eT7DzWqOjn5hgWUrcMc2C4fQjq7sWk93nAKvREL2BuZRamU7Vn2n51NzgIh4twoD3N3udrsdOmEOh3a33/OjR6u6ciIYQ3xzc4cMTdOsVluabtEVHnczOApHnl91dRxjlvUxw08JIRaow+O9l7O0NNN4Z7suqSky5wz1eXKfZjAkPMi0WZdawCVHNEN4ErE0kGjShyRoLSmzIWNxj+Ip+bJUE5PdIY03dacvRxhmR+PGExfz1k6OLjUFZsHCbh1knGnhse1kP8nX2TG2OFfZDxPA6hBOnph2pOM9CqzFQ6KphZmIYh5AsbznBoVgJgYu69vRafqBBEn6NUxnIbIIjsS3Vk/SlKbCkkaO6zKzclSWUwvJFpF5CHqqbP+coE0bjOOKj2VyLBzsRSyVzWZ17BssnK7FwtOUiiR4xIQgoN6gwKC3FbjRepZovh19Qjrtgs51y0IMdmKkM0DOh0yTNeMAx4wjbdsmnZMiplDz7E0n0ypf13XtVxPIQn0/DEOXGInHk4QEwF3HiFDXMQzjaXwQChJ0BFZdZKfzM2ynIWf8bLVTenkKgfah3JEuu5uhV5N2s6CwZoVVcKaVvCgACLrR6UCEpQJUtc4gGmszLjSgsCAACjPGcZomJxIkSgSklBk/kkDbthIHYIkx+jpixT5yHePKZPKYzg169O7+/p6Zx61ChhpE3QbfNDjbXx0DChT/3vvITn1FdRVEcodKEEoO5IlpSz2j2Mvst5Iii8x8ihDnhdGSuNTeSedAATBMEVIgo3szOmAouF4BsvPUrKoKI8rQd7svd29evD4wh3fffffq6qrtu+vr6+99++P333mq+Ta7/qDLK4e2DbEHgM9+dq8uYgzMzEPkw6Fr+4BIwzAMA8CYcmxSnip5rHCOy6xmdf44Ch11hRbzx7kVjmp2ZtAGa686B3PFa2V/1LMw6mplmIFjuvXkiF813eeRZfZK4fQax+Q6dVVVHOFwOAjg3W5fe+cwbrf19YPL737vw//8f/E//+iDd589//Lu/v7u7u7y8vLi+urq4YMIIkO437Vfvnj++ec3z57vDh304ebQhoF1oZYhcog9CnhXu9pdXWx01tMsyuDIe980DTrSVEBpyN779XrthiO6EBFJ5QhAZZ9h+iQxEIYQui7GXvq+H0IXJQBI5CDISOAQUKrxlheHLL1AIMe+orrx63Wz2TZ1TR9/60PnyFea4FSY+27oQwi7nquqWl2uLh9dX11dXV1dXV5cba8uwVcACCGEQb3Z8Vzo0O4E9X7RCqn2nkGiRhcLkD6MBAIEPUNoBSPpfTTGhJU0u0OVrSiAUc3WaDg1RVkJp+OJmqORoe24KaF5ZmSUutu+z1RGAibZ0Fk1NAUKo7Bo6uT663lzwVbLwCh/WhxXgjRrJFNtM9XJOR5OQYWoK5j5CeAMS3ACyWUFO5DEDFJ4g5ZhwFifAOBw3fUdS/QefINEsN2uLy9Rk6oBOO/rVdNUVMWAoQ+Hwd4fqKl7FfLZOrRBQr7tdr6IiA7FyoJtIRvyZOPNMJ+RfhEAi2GLzFJk7E8WgamFbHcX5jF+2VRtCwAoq5/h6oyaZypb+h7RZfolIkC2PqHOeWmRAhFhia8SwGlcbrqvqUSslS+L5GnMQvPTfVCQtRxgItOZahOKjktsczznUqPV7JJcqh9jdNPkncmRK2hdjsViwzmnW+v6J5kNSZxWK5IJoifdS5paeqU3Mr+uQ3JTPneA4cQah0wGpeHtkPhXAw7Tfrgl7mLJZDxRLX2VSRnMRexrC5MHFtALJgCBZLw6HARFNRIJjvlFEJAgLjRSHp89Df/i+69tLB0JkWnNUWlejUkjhkkDj44ET+cL7qcLtRFIRFYXVQiBGRAcgK7BHZPAJYqrjkl3psE0xaSNmlL16WM5/HKkic9LuYZC9rPWTi2vnMLzeC+iCEyhHAQowBGYAHhMK+cQEYUJUMALCyCjICBN2fxAOGj+K4kiqJmwIB5Two5Rprqe2Kwqxwwc2/YQOJLvseqr1VA1g4tBr5XTDUDNh+HqCgDSXXB93/N0nKGuvA1OSeZvyoos81TM5bZ1JikwLWRYpQEFZ5bv069ydtEK5tPKKT4vexn1ockWbiufcgi99zClBZJpfZ+Bb25eXV6svCdygiBIfHPz+qd/9RfPngUhvL7+4urqiog2m9X19bWr/BjbWRERxSgcHEpNRN//7hPdgIoCMXDXdbtD27b9Tz/5GY03vxMCxBBZbyY8TtkpwfJxptP1k1n0mblHFwDsTuOk5SLiMYP9YG6wzKQmPdgdGiQENwZIx3jcXcRphQ8m80NLU60Akqx
YEwWJgNBpNrthCG2rNzFIU6985Th2bTcc2vY3fvO3/if/6X8KFcnhToR/+tOfvnr14vXr17e73TAMMvTbi0dV1XehYxAi5yrXiMMYNPbWAQKAQ9L8guumSvmWAEBocnrdOImkCW9MchNHg3/CsHI86jzujpctjWHWEsdzg4fD4XDYh9gLMiJE7jwhOWSQyjkiWq3qVdOE4Wa1bjab1eXV9upqe3G52mzruiJEGULb9+2h63VnmAiR8P2PPry6enB5fbW9uAZoJvsWQzcgVYQbv6o8+qQ5KwCAXphBGFCEI0PUXGcIIMCo+c2mfRGfyRKZa/dgHmukSyaJ6lbGzuxUZGrCvswasYZLxuU0Xpkq6YRM4tc43WcIJ3RNerbCn/i7ZP0MpATV3NRYMFn0cb4TNStY2MqZkrK90DwIKkGYvkhSp39mhtqxcTTPZlDMDObMW/oEcXYhbKJdtlecPmQTcpyp1znGxq6To5K9T42jsXGZmcghusq5ZuWpin1/GEJHhH0/NE3tfcVBhnYYICJUwFBVDczplU0Vi/jPVOEidY4v00TIsxU77TYbuH5nGc/iweIwW3zJMJmBbUFd/DWa2wIsOewnCedElJKLYO7VHLP8zd/nz1mFIz+ccBGnX4+wpUCABCQWRl56zn5N4mCHbJYnT5r1lmlP/Wp50v4qItN1c/kkanfMbP3IMTmrYpzzLOTefmLlIsXkZIPKpNgmWUnyWw7ZCppMV2VAIezKISmsyNpSFi1WD6RmVW/DnP1wyuWQ/iSaLcTasdgHRBSJSeeQuUz1FAVLgma4XSTTsam3WCSaFxIUPSemKRSEEOLRdBYEYAJEGdOofj2cJWynylyTn4Ncb4SHkS7HIfd9z8yI6cpsDiH0/eC9IyKHGBEBwHvPUXT3g1kAUA+pqUMI000YRMQcEURvEwlB6mpcWWC9HMwdb0AZIU//wLKTYPn5jG63cr2oS0udn8lviVIAEALShSlxiOwABQX1xjdM2l5Ac7YAMZCAoIAgIEREB8KAgZkJEAg1A63TPCJREMU5L0BKQ4lMKHVdN0RD34YwdF0HQ4SBK4aapYpBs1+orFVV5X1FlUfEw+GgyTM4jreuOeeExwvGYowis/RLWifdQGjRaNUgGXtg1Nt29eoEf2brXyXVrH5Y/FUfNOkgLK0lJbWTZpPy81SsA2wbYWYEPOb/FUASEEHEpmnIwdB24mKzaTgOL18+v32zcc7dvf6K6LlvfIyRCC6vr/b7fdPUm82mXtc4Tfcap+ecq3xTVZUghSG2/dD3QUGqPQgSMzMyITpXBR7SoCAX8Nk0MW3gxTnqGDSD0OT5iykAUDV1hkAYV3COkXR6bEF1btd2iROOGC4cbJp2zXC8etTOhqRG5lQn5TXt2rZFqNebJgyt966q3RBj13Wf/uxnH338fh9Cc3n1gx/9Gvgfx7Z99erVy5evXz376u52f3P7WkQePHhwf2DqZXu5GYaBxqOAWJHzNO6Yhb4dU/iozpmcCBDWDExRhIg0cKZt24vqUoc7jZQBgGCyY6eJmzmEEDjE3e4QQui6Q9u2bdsOoWOIiMDS197VTeUrX9f+8vLyyZNHlxcXjx993DTNetOsVhU5DrHt+v39fXdo7wUGIlivVw8ePnj8+PGDB1e0WoO7BPAAIcbeORQGEUeu9s1WhIQRxKM4QCcMIkJ+SlKIoGfaxlRFIADW4pwcQqv4cFqgTdeG6MWLiRGJCDnqGobKHACQc6grjmO6MABAonGOH4YBkUDDJiSJK4ikKEcEwBDUtva6EGK1RjJKkhbIVoITl+OUCycdEYb5PTCgyvpoAglOmXamV1b3gWg2gKM5ot0dlVGmLgFcUkZq3apcqfWTPonThTxqLSXvS2VWMSOSklUkmQeR0SSybvBkS0k6EzwhTUSEo+hiRhJRxY9zbrrrGFFjmXhMikUmSCDh1lpsMF8pMMM/4iqRBubbDimLFxjdbfXdJGA8ultV6ymGEA4duOAQPYHnwE21JSBgQhTxugM+oB+TvqmjYfET45D0Zrq6g5lBjnOP8jNk2xQilOYeEWbQ7f7kxeFkIaWxT6zOIqBnYBIFmUGXXIZhnJidI2aYekaEaBNpJuTzPBIyva+qalEcEpns9ICIemFuMu516UckMlsHcmoNmbCykNhnXTcYY2qOsB1Fz6zpEEeAKcUYokdEYYgMriJCAJossCmLgZj1CBFGhPECjBR5gkdQNPAXHRIJIvIUCHxUaJykdcyv0LYtETlygDjetE1YVSQyzNY4iFKF5P8n/iciwKN6lKOXi8wMmPTGUf84PDp+1kiyNY/MqZYiQJhy/XvvnR5yGIYxVziMp9Zw2Z+fXZWZXtoumIVo3AUaBr0X7nhDo9bUuH1cOssn031imcbQAWZLeKmOZmbnY5L6ZOKPmLEqBcDuaTvnSHk4BK7qioXHq7f1P0nikFuBdq3OYikrll5ZmUkZzKg28gwf1GJGQUHUIEF0JMzMAhD1wmERARmICHVvU7+FMSI99WXxllTiKbDBLJQcX7GDERGqYsZfh3BARCIkb+4pEQAY8+9HljgaP7XzIABDgICoi8hDLyICVOn99kQUp5VJvfMDyYtqQiAQDEGFEUJUjDnyyjmDnT1k3ErQfzwABDYh7qpVFC2ECnEiASISkgKdYcnSOpunLJ+XmLfEHXErUQCExnsWwgStYMoLOaI7AohEhztFg8I/Gh8RASsBRFVnAhBBgpAI1ethGJxDESYE51w3tG3f11dXrPvP2gOH0O0odjte97E/9Ieqatbr7WazWTWeiCpf45q8qw6HQ9sdYowxDGEgD3UIUaaQyBD7MAyRhxCHyJ3zggMjcpTh0O18rTe4EAtDkqcjLsftETeCxCzi8BihkOgCRWhuwn+mNNKHyYSza/QiMgzHrMj236Qf0ptpEqcMGKtuywXAOl4ADex6cTGCCDsMNUZa4WoF261fuxg36wa5gbjuuuu9RGJ0etc8kmtqINr1suvDbdvHV69lNO2m3t1qvE4gReFFEZH9/hBjZNE9ZnRePCJRAI5N04QQ+n68h2BkzgktUVcWEDny0PXe+2mSTVo0X/ibsKFaSOwRRDjGTCp+RIRjCMf5iEfvDsftT+KR8iNR1WA6Cg4GEWE9aTtaoUBE5AkRGSIKjvc1BGahlUePIEQx9FcPH0U+/Ot//W/+8qc//eh7324uHMQBgCAGV1dPP3j/6QcfcP+dF89vbnZ9hJe7HQrhO08ehQgAOvWw8+p/BgDw5IiulA5EEUWQhPy43Kl3ziOoByGEbl03gVvvPE4zNSK4KegMRAD0DkLuuu6w27dtu+tv7u/vEbHruq4/aGZRJIihXV1vnQuPHz/8zb/3vXffe1xVfr1u1h5FZAjdy1evbnf3fRjI176uHjz88Orho6fvvHN9fV3VK9HFRECE65FVSaJoCgxmpR2yUBLRgE4AIMgj0HtvRKbbShgAIg+IoKuWAgIQdd70SUJSQpfMnkj6cdQBzrFxwFI1ZpbpLpr0MqlR61Vqyd4kSU5TWmYQ2DVva0g5c9l6aiQ5P6
UCSk5p9qsr7u9KRklqf6Y5loJjwcwfqTWLq9R41pdt0EJblgwtdnQWh4koyZFOjrFFeNn4KepnK0AWmKwpNNY8zLW2hTObAGwj6UM0Oy1ZL9l+2pmSIT91utg7mlLiQf+AOe0yhs+4SzL7bCrZRhYdNU6e4zvrxfIkmB1yy2ZWjjKOErPjlN5rarhFOO23JU1t18nxyHCSOl0kcaKvXfGBuaRY8jGfM4iz9mFuKEyDXUhPMjXOyd6xcgQn5AWMy50qZHsX2YdJqyQCJQitlCVQdaFBK9CU2gpNVlgxQTsW5owts8GW0pcRLiOurZNxOM6SABUWViH+pek2rzlDfuIHG9is7xdjWLLFkb87BeeqNZXzoFqiZAT6VUF1vkJG6EWVkt7DXN7PdGo/SX9+LbRZnYx5LOvCnOW+tuVT3ZUq6xQM2cOiwizhybpARM3mgiaURi+TAABd/kst6J+6DVjX9XrNzND3/arpNA8hEWkm/VXfaGbFw+EgISYhDiH0QzcMw8CDphiZIgB4SiXlCDFt+ULOhEfCpX9DDCVyfoGS5kerEsuIj68ts9U9o77SdGMro9E+gEio+YEdAnGIbXd48ODj6wvXdvv1xRaaVdd1U6J+QMTJ0yMi2mxXMcZh6KY70Mfsu/u2tYaliOiCQl3XAKQrmSLILIgACK6u0LvtZv14taqqSu99UWg1Myl6pxcS6G3yM/Gc1lBAz4oRqgOBczfbCqBM9xBmHJuUqkeXhDfVSXQZp1E8Ijaz03jKZ6M7N5mQIqKuhjjvBF3btk/fefCjH/16XdfAPGbsGi+G1BKp3rzz4cW3vvX5z//mZVVVDTsAWK1W082Ho0MYI4kIAXpfEempaQEAgcgwxq6PQTqSWACJSJ2aJBEAEmVcIY1B08boXS99d2j7ob25fxlCUB8+cqzqisg7T1ePr0Psmqb68MMPv/Wtb202q344ILpnz18OY3rfoWmad9/58INvffTk6dPt9TVMISSDREFw4Bw4Tu7osdC42CcEswMIiqU45QMWzX6s9Qk94pRhFBjAEZKgeEtsq9Z10XdmpwKoYz/w8ZPZNLAUSKkO27g/O0mdlYdMoVjFakuYxzonpZ+4P2s/41HT0cwLTd0lONO40tkJmOyYjPtts6nHxbM96U2mgDKBtKg+X/QTa/3YpjLYsmgBnOIBLAwwV45ZI9kcZiHJHO8MHphoWraTFSqijnGerMKyDRTpqi0aZzx5wrtIn9hOU4VMG84+ma+0lQTNoGXO27eVLVNNMQyzfufUyW3xRNC0bZU5Bqla9q+VDi3Z8kcGZzkENNahXdzJOPDU2I8YiLl7kAlCBmrZPhqry7KfhTB9JXPHFeaMqv9PkNid7fNSmYmwTGdXFqG1X1kIk5K0R7gtbu3okkRnApKxetrHLpM6WHxmnG9xYvUqTHKaADtFi4SHjCFT+5l+NhVyDsmIm+l55uN5FdtpuSD4b7Oc4pZTUlD+tFjzPBOe6jGT6EyIzlc+Ba3VS28Duf01ccLXilXWqeWu8+PKpAzmuugte8wUqWTKd46uDA9WlaVfM5iz4SfI7Y6NiIQQuq7r+z5JPcz1m1Yg8jEKIoZ1dNNV2lr0AoX9ft913TQul2xHmU4DaQYaFSP1XpxuZ8MCpYgIgDKUIOK4P2xQt8jeFgm2ZNiw7sQ0Q+V5wr+2WOVWUiHVGSFELt8jga+o71uR6Lyv2IUQ2u72wYMHb17cIwICCaGrq6ppqkrvnhViRuccD368gL6LMW6bJiFcRFIORUU4ACA6HWgy25xz2+32nXfeaZrm+fPnyZtSanrv9dI8dfiViGUEWTTXI6FxCMsVtFORCKPoTZEgo6adpkuako2p/5ER3aI9i1ixdZxzms7Ho3PeM4cf/OAHf/AHf/Dv/rv/DnAAFEAGpHG3THe6up1rLq+vr+/v7+r64eXlAw7O+bptW0TdtjcyCHrsYkBE3ahEDSsQadvWOed95ahyzjVNXVWV93XXjjIYwhBC0Gz2IYT73W3oh67Tm1r60e2Psec2TZEOqa5rIqhqLyhE9PTp0w++9RG66rMvv7i5eVXVHoM8ffr0B7/+4w8++nB1/WgcXWAQXR2gCAKABETgJh8vkSZRbXQFEUbDHinppemQ9vSJbkETokCcbv4gVMsW5BgyanfVlNhTONlRBTvnaLx8ZzaLi3EIM/2bSeNM9pbWqu3KdybDSSvhvIjxARZnu0z+LQtm/9KU7zFbyE8F5zcgWajATHJY7MhlBY3xmgDObLtT32aNgJla7EiTyZiQULZvMW9xW2LMTlGLyEyQpIdUIaNpSVkwGiR7aZWaZaEEgMW/RakFUpYsj4wNsvqnHiw/WGjJRHXCpN0QMcaw2IWd6hK3MDO55aXoDMl2sNaAWBQxS9BsR+s8pyGmJKx546WgTS797PSpFHZVhnOZA5nVtIw6vRE4buLZBiS9mSrrt4pnnNSoIMo49ZumUod2OKWMlOydOTYJ7FItaJ20+JoNzfKtZTatn2k26+BZ0uM08WdoXNSKi3CmmT6a+wxtI1lkh1WAWWV94CnOFs0iHRg5Sn9OYORWvuV2q+5kMi+sK5iB8W+/nFEm5wXNohQLDXym/ayX843byqVIyvyidlvf6hP7yaJOPl+y1s5/WPab6L44IoIcnr9tfsi6s7KAiNkquVWei+3YZWVlbPX3uq5L0QGpC2bWdDL63HUd0RgmGkLQ0wFEJMCabKZpGh7G8D9E9N4jrgEAAgG0emAnmbO6JSAAALMzFEnkxQRbJaiymTqbqrLBwtdRx+Jq6vcbyEU2Vdk/s3WuVEREJAgzw5imkmN0TL52H33wXlW7r7764r333r26fPgn/+Yvnz9/Tm5rkZBmjRB4zAXijp55jDHCmAsjhKCLxTp9a0w+WAveaGPdhlI/UOFMeE796h5XNLeApoFTunZrQnhS7zZCh4yzd4oibG5yilPsqDU1LcMvqr5sYkrw6xvvKyKIMQJx23b7/a6qKr9qILRjgm6JcJy4o6trAHr48GHf9217+/jJg6paRWHvvdZHFABGbIjIk3OumpKoMSJquLOgbDYbAGRmjhBjbNv2cDgAwGH/Rv9s2zb2Q4xxCJ1uCXIYlGrTwCMA1FVVVVVTNwhc1/XFxRZR6qa6ut6IxKsHD7o+vHz9qq5XP/nJ737wwXtP3v044QIijLeYkotRiAiJPFQakx6BI0c/W+ec/DiAtGs6Zj5NDuBo6oCSAkEtIqfJGwRIzxPqewLyiR6JldMJt3LCKAk5m9dxxgT6rNEOWSOZcrGN2xsIbV9JRcJ89c5ODNYmyCYM08WC0YzmgJBVHInY2cDBaDrbmrZjt+OzInPL7NREe+pzfV9uflrEWhlzzulx0PRTNhWV+E/DyQZYKvSSlGWDpUawv9p/Fc60KpEomwGmZRHD4+jOYk+LnQxspKIWnqdGzB4sPGmMi6Jh0ZjNjklZw0RQCwlizpw4ugFHucj4PJNEZk5JhnBuDtr6NBVESseGregttmDfZ9SReUAmFPNWKrZCghmNSVHK9QRYLj4Zo1rYFncX7W55O
Sg7nLcpWWQBzFdkrVRa3FrwJuIeb25MTJiWXXEy1tMbu/OWoOUxZflsdQ8Kx29x7DJN7epilWd4rGhYUO1726wdaaYuSsKJUaRwguXKbe2EeYv/t6fd31IptQEUI7JSXBbLrnCCXt8UpEUxOUWvjCjnASgbOVMy0r/NJ5ZvS2hzSP6WqZ9pvLcsGdrL1lLRvR0yoTE83YqWjP7j5yIAsFqtELHvgyZi6Pv+9evXdV1vNpvNZuO91wuv9WBwH8bMMQCsXgpgA0RIwswh9F3X4WT8xBgFUFW4NcCmDa7ZonbazgKj98DsCH0txkrOtF9NfS0kE1rkEPuVGIsx/Vl+gjia10bBROSATLe3d/z+1cWmQV4DctvtY4xN0xwiCEAUHkLo+l5UTaHUdQ2QNmMFSQAREB05pa/3VdL5SlmNupDZ/uEI7eFw+Oqrr+q6bts2LQ2kBGNt26rS7rouC5SApVOyljrpDU2JJK0GACOkI/5TAqrkiRg0RsiQOVu+KecCiwF9GfuByDFzUxNK9fnnn//Jn/zxj3783QcPL2G8uIWnq5sEAAEJJFxdXa3X608/fblaP3z65DpEpJUHgBiHEPrx/hQRiTwMMcYYQs/MLIHHfAqxqipmUdHo+tD3/TDEEAJILyGOHqAEANDPJY6QO4/O64q8AwAiqeu6brzzm+12u9msQuibVX04HL7//e/+5Hf+3re//eHl1Xa73bhVAxCgB0AEInAelP1EhMH5KXeDIhPQASLJ0dM7RoemGXPc7puTYLxgFsf3jODgeMORyOgZMoIHQJ9lt1MExRh1eckylv4bQVIUXGKdESDIi50FMxlenL8z+8POQ+oo2u60ZDseqX42YZiHI+QJQqsyrL3IUyqtDE6rBy2Lw3zlMlNPpWKy720vduBnUJrBkETXUscu3mTqNYMtPViJTV/ZLJR2yMMwnNatYGsm/Ng/F4FZHHVWLS1kgtFZCf6kBC1dLG7TMMvsrKkRWDJMk5It34sxTMUsSWScY61qnDRvmuwRI9FMUnAy0zO0pVGkh0TrDMPZn4m7DH0X1miICBHQJAmQ+TyKxeGx7NfykxKkkrKluJ0aSzbYctRl47AkBXPxOddpiYpMHtPcVu6wWYATDOk5hcTDPFLATqUWRUuQH1VW4tKkBCwkmfuEcy1dyr7FFcyFWg0RWKKjvskMxDSnlMSSudWYXF8w0mTBS45rjLPAp1Oc82+nZGRKD18L1SKvps9lbp99I3hKVW9lc7HBxJypml2KXRTqktUX4TmlKM7Av/jeLqbMOzg3rrcvmVwc5f1EKPIigRbVGpp9pKwCFtE0yZFLUNF0kFg0A2wIelDQuagOYRiiRocCgH7rnQMAPWPGIeiZJW3Qe4+aik+chrf1fS9yHL7M4xES9WOMKWR0NpZJLcD8fG+KNbNMBYY/My1U2nVTXzNsg+HVRQ6081emRUsKAugtfQJTxF2yvy+2665rh6FDkmHovvvdHz5+/K0vvrx5+eUrrRAC6sUJAAA4Js9TLKKJ//e1bvMS4pGHp/U70twzYDwo9foOh8Pt7a3aKtZXVCWphz9lSv2VDaqMCSqXRK3utRZj+jVVJgZE1B0lK4M4BovOZmcAYJNUDAz/y2RjZCTThDQyrXR0Xff8+fNV00w1GCSOSZtJACC0EbGq6/rDDz988WIXQuj7joUAAiIOQ7ff7+/v7w+HQ9d1oR+SHQsALEG3amOMyZdGREKP0wkOh6wcpNB676vKeWoiD3p9hfekQjTOdyQqjwyy2aw0jVZVuQ/e/fh3/8Hf/53f+a3VxRpkAHIAMAxtVV9ZBADHoNHaotkgAcEdt6nARBzIFD6Kyc6czfKG1R1OOfkmb5AEWMCJRAAnElFT0CB6m+jSSo4Uy67agd6gYvvDqYA545TqW3nW+nRCpSbmy4ak35aGUYIQ58V+m8k/ImrKTTBzW2lGJFRwkTEiG0v5MmstW9UuMDb7yb45o7mykgTYbi/A3HWxAp8pAjuExTcWmVkj6ZMSS6WCtmO3LSRh02e1pNPSUcaZiy1YvEHhuixS0AKZgZ2RJoM/bZ5jkfBGCh1aer8yLZiljqy8EJHAMfofja1v7WPbmu16kdkyitj6xsg7YjL7MJPisk2YM3/mIoLhq4wE45+TMinFwX5uKuT3SaaxW6Kckkco5MKOcVHuzkviqb0pSwuLQywMWUtre9JDTOCWbSG9zODXh5QuPIPf9pJJE5h1Crs+WMK/KBeZ1l2U+kSpUiFb+MXkN8pE2DbLpugleJlfegqMf5vll4fBKrpfps3yq0y9lD/BnKlgTqPyTdnj+UFleu888Gd+LcEoOe0Xo8LXsnqm5WCuajKSabF+XanuLNgJS0n2E8OLiFqizDxGfzKLSFVVVUUiQhhguoRwt9sNw7DerJqmEZEQAk6A6YaSrkNFiJOhPN5KqrZv8i6soWXst4UrQ4+J5Oca2E49i0oge1OGQU3Vlidcy1S2pOsl4EQpKMisy6MI6h0iIKEAwMXlKsQ2hPaHP/yt/9X/+n8JcPnZ5y/+7P/0f5moKRwDxHE3CtvWYkzPdIkIdseFv1QBEauq0T/Vu0hIiFOx82nGMzIdGky+opWyM0aIpYVVoRaTmbCjO0qudR0ZAec5Diz1U+XM28xGAQBN0yBHImrbFthVNdzc3Oz3+9XGg0TNwgwAAFHzCfv1w7Bvv/jiSz1k+/rm5c3NPaCvfNM0TVW7vu+7/rDf7+/u7g67vV6PAwAIszz5aQMcEZ1HR2M2V+HW+8o5B8DOucp55xFgvC9+WjQ/lqb2urASQvCeNC3c9cMHv/3bv/2jH/1wdXkFEJkB4gAAVbVKDvMEhvdjHn4EOBo8kUMIkZnrutjAELMlqChFc3YJ0hnCMSM6ggcQAQKJepdsOpcrAp6m8OLEDbpTkXY8YApAUlqGGGXKMn+EY+ShHFQ0Zk3WVMncWWupgn6omit9m5RjYqn0FU7eUQ6NYcFULb0pFVCmiU7poPQ+tZOUuK1j+zIYGxcFkxinOllWvVQy8U4aJ4WKJZRqszYEN+2+ntmm4xP5GBZ3ksVc/JpQmiEk6wLnE2GCc71epxCIREEyaesthnFav7G7cGOFpTUSyxv2k4SQkl5279E+8DyWj+fnBmnKQlSiF+YqMqmADFd6Q5ctWqeua5nvOrI5KqAv7cJzuZZhJcUin3k87WDZL/t2sdi5JDXo3LGRxFFWojNs6HaotXssr2ZEAYD5AuixgpjEcTIvYBggvUQzWWZCnQn+ItipZGf2Uu/lt4sYsOApLdDMmmWu9vQhpSxEAGDUjr17Jus9A3IRLal+pg/TG6t1Yc4npbzjPBkYmQuE2EQI23GlUFULLczvWbUNKg6sFPxdK2cAO8VjMEds+hfnodRl/YUuplWXEgh75j+VlCVCIHP8MDHrKD6q8WTBCj8PZ6lbcMmUt+NanEqW3yAo5Mel9BM6TM79ODNSM8izHjNZsJ+ICOHs9F02/KyylcdklTGzZo/Uauqq1XUdYwRmFNH9jbpeqQao63q1Wt3f3zPz3d2dcy7yhRp13vvY
B21B02GItIgIDmKM5GBaYeF0D43FA5mCiHo+IydEQcZyaexUsb/SFAFk0UsmyV+qqW+sfpAlMyP7c9G+QkSOzMCCKKPzQajXTkGoPCHCerP6+c//5p/8k3/SdfX/4//5/0qAISIAqpMOAFWl8/VkWnMCLFpIps+9ZoEBq2YZiQhwDNnTmyqGYVDGsGfLtfAU55/sE/sTzk1Qmab+xI129rHbuWIM4DRvinE7YYoUlawYUlqzPwGQEUWf27YF4HVTxxARYb1eI2LbthIv0YHe7AQ4Xj7DzDAcPvnk5//0n/7z/+a/+ZevX3eu2gw9braXq9X6wYMHDx8+uLjYbrfbq6v2/u7qsNtHCJo/qeu6OATlHBUoTQzjAOu61rsKRcRRPN5iL0JOCcR1XSsKIw8yuZREQIDeE0sIPGAUEVmvm3feefI7v/vbD54+BYjdYd+sawAfYkfgGcGRI81lCjFwhCktE0ha8yUiqqsaAFlmR/CWi9BRsY3OnqR/AEf0ATgA3XLXbUMQEa8h49bIBhMRZ20sng4ihykqQ4UqLSmJcSDFJKqSaeUDJk2X7uWTye7PODixY4Ih1YTplKOON1sBStZnYu6M4eyFpImnM0Zf9DnFKCacG15aUnBU6jpBm7ya9NLiPLN+rNsGZuZICjEZQ+mnEEKqgIjpshoR6fveaPBjm3LC0S0nQh3CFOPOGcmsR5Soj3NPSY6K77iHicZAZObD4WAHlVCXyGS/jeaeSYsr0euAi37BqLzE2GUux3I+zubv1I5tFqagGv0kHSqDaXkFjK5UvKWu00CO3js4nDtv2v4wDKoXEsCW57XxmeY6sSBilXsmbpYBxumBpfK+lJrkjVhRHeeG48Vi+SJIBsZUPyZuzFCdyhzImbpgs8yZurMCknbYMhJYXrVA6sXHFkuWl8AsnCXAMjniacHegpQ+BHMUMA0q07cWq6lfKxpVVZXJY9KIUt5wyw8Zz7jiDnpdmZZpNcoiKl1CqGeSrZhYwum9iMqrPN3CSuYQqTXm2IQAWIrraqWMGw3HlTLnXF3XANB1ncZl6bweQhgX8jPHxoRY259KR2WRVxeZ1harW7J2rK6Q+cxSfniqI4sxMNxVSsf5kq0pnOrXwpyNMVMOkGlFzsMKEpcmNrYyYue7TIoXe19wZE1HGTakOGCfSqkPR5TGYklxCS3HNmXWuBXPHPJiuUTmd65k1bTo/QEi0jRNmunattUTawCg0Z6q1pqmiZoBP8a+75vG1XXtyAPAdrvt+163Tfq+3+12q9XKew9R1Jbtuu5wOGjqyxBDjDEyO+e6rqvr+uHjR33fv379CpBEjtLB0z6B914kXzbFaQEIjDiU2EATgJbwlin/NNHYFhaNijSPpHZsU+Ukpb3bGW2migHH2E1CYQImEYkckWC/37VddXnRvLm9ObTyp3/6xauXN2mhfAKS1HxIR2lKvZT06qQWxgOBMNlyHI1SwjHvtIgo6VXDZ5DzdK81T2e+LIvqr4mIJVFgunchfZKZWElgebrnM1mVQRj4eAm2No2INA0hzQs2sU0ygSw8IkLgyI3b1AFlvEiDEb0HFpABJELdxL516zUNw5dfvf7f/m/+iz/6V38aBte2MIS+6+LVEFertutaRPj4448vLy/3+4NzbrNaD9z3fd+2bdM0BKjeoOK/Ipdu4U786WjM2UtOLXnNUIre+/vdLSI2TTUMAxE5B33fV1V16PaRg/e+79s+dN/9/t/7D/7D/97Dp49BAgDUq0YnQELPHIEowvGmFiDSsFvdcEUAkOmaTwkAQDo/wlHrirEcSn6T450TypwgwgCoO4cmpy6pc+jLiSe9sQKc5nXhYwhTIqRO/DJ34ZKpZyXcqonEyqmCnIorM/o0walvyqQOqZ0Tk1/O4scflupPCIVs4ibKV6xFdMKbZXMGo7ky1k/v5/SbIUGMPWERmCHH4jlTQ9Ygs+8tcS0Ap1Z2Mz2y2NqZcaVfbdeWoe3QbP3kP2edZpCUb+yfODdZ0kjnennGeNmbcjhZg7adkvcyopQKGuaoK9tBRADMcIXG1rRdLCI/A35eZ5kZEPOzl/bzTJBL9niLfs/VXHxPgCiaGUtQgAAFNFhlvNRbj2eIiFab1QfQy6H1YeopDTZ3WjJIMn6wEnSePy1RUtKqt8SY/bZss3xO2+wyWYpg1MiIwxNhvbappFQty4m5TmNRAJN/TmbL16osMb6BlsxdT36pzC8rSsmxNBYOxsP9uh6/vOJwCp+nVMfX0uItS4btX1WzvxgY6c9CtAtXBwDm6x1v1cUce0l3Zb2kBzLhM2wiCzLAjox3cg9vwe/KxrsoxW9fMl36Nsrta1tLpdQztpFycmnbNt1Ljmn9BdFVFUy2lq7dCAEirtfrMYtM34cQNGViXderpkGzNuo9hRD6GELou/7AzFXVrNfNer1umqaum8icMp1Y+YXJNMooiIb/EwKtGjmFkDT8M7ynCmN6zj81jWini9WOHVmQjtyiV6wLgKAIgqBmhFmvm9WqBo7D0AH4PvCh7UNg8YuR8A4A9XRSRkptkkH0Xz0Jx3ZuBQISAUDC6TqAaRFtQnUyd8evph37setjyOsRNeNBrxGLeu88IIBwOKUKsgdlm4qOK4kiouG1swUdtVSmoquUIqj/JQKFYO1MnCwQaOrm0O5QYLvZVA7u73d/9Vd//Ud/9EcSuydPH66uLyH2IEBYQR9evnj5f/w//Jd/9If/5s3tnnDd9YLTmdvD4cAcXr+uNpuVc9g06+vr69Z3XTjUdaWrLSigzl5d1xB5hlsWQEFEhF5X8IlIfIzi9TBe1x9C6J1zzNT3bYiDxuLtD7v7+9vNZlPXdR+6H//GD//R/+h/+IMf/xh0Zw954lO9EpABLB4opXuRLGvokZQ5Qxc8trgTQKajMbsMAIzhppgyscNx9pW5E5UZ4pnKRuNDowndkXkcXTY3WM6z0pizlB3/PMo5M7/EOF2WfW0FC4nVTVA4S1m/U80FnwfMRfbZV8xcOswWmWwO+JUaM7Wj7JW6tvKf/s1mLDtkq+/yKRYRzUpeNrpTpZwPMsMuw5ud+O17W9+68SV7LA5/kVezaiVyDDVntrVdVswqZ8Mv35Rd2/YTMGxSyJRNJXkph2wbnwY7c/vtbpLFD5nAjLLHE4TODxvINNPEaQEoI3GSJkjppsxeRKGkjoNNWJKzc78F1VYjhOmuLIVBTRBGEkQNQEVkHBeyRciRPhikCZokOqbxcUqFJXJbyO3Lkqz6MltQSx+6+dk/NBHji0hY3PE4pbVgQnIZEHUqtNUiIVHWMlWBpVnvGSTZjhabcIYMRZZbUnc8pa9YxLbMTx8syukZnFj4T0nHL1+wUObw1v7VW7a/+H5RdZz589SokyaBguipQim8i7pr8XMs5rJMcLJmF4FcHLVV7LZT211ZThGmxPPI0gVlrQ5c/PA85IvQwtxb5umAhj7r3M3MBODrerp2iJKixungj/e+bdu2O6Q1FJJjOtD1es3cDENHQx8CtW0rEK+urh49egCERNR1nfPemkAJPE0qAwUd9doPq+r
BCEUpFyW6zhNdipkdCgHPOloUjaSfs2k0nYGECCIAETwgoqu9X9U+xKHtuqq64Njf3+/6IEyz2KK5mJC6phYMAePLyTFOJI7XEdHcs5UpdeTRgs3GJXMTF5YYPkNyiS4s5uts4SajwqiBpwQwknZ6p41B0/7MckPjOIAR+YQ9nQKE42Hf+Yu68vXz5y//5b/8f//pn/zx03cefvj++0/fefztb3/78sEV1CtPd5/89ed9z5XfOFpVVdWsL7tuiDFWlRORN7evBWKM8b33Pmialbsg2Y87fiLAYYzdA6HtdsscYoz6UmCKniWNjAsa9hLiMAxd4BhC37Z7RBTg3W7X961uhAYeLi4u9u3u0N//+//+v/cH//l/9q1vfxtkSDGZc6K4yU8zTpru3QGP8jWPbUfgKUphYSvrFyymKZ8oZAkjonEFx5J4wuPMLk8NMTPLbGeDpwPQMjlFdlbIHJKvVf22TgpKBADVdFIE/h23NOdzW6a4F1WSXb0rQ0G+Fvs4N4ASGKXUlWO3+o6mg2qpghX1UkGUFez7RK9SdZaAlTo0G0s23kVNZPWO1QhQmAWlpkvQWkjOtH/E/FwfnVeL9mU2ivMLIuVL20iCk5mnELiTtnWJXpgf8TIYmxnKaNYXkoTiNMmVPGB7LPBGNhQw4VaYhWcnRTNy2O7SG9vFKWIdue6EMFn8z2A26sgCnLlAlqyJ8Wz9RSlW5CcIreJaZDYwbJm9X3yJJucNzJ298yG+Gf+cUQspFDNjFdWTaE6YpAZPyX7GhKn9MsorM6rQzPopRwIU1MQpatp2p6FWaCxIi2eLvfGnX9QnzMa4iPZfuJxRO39LJeOHRbbJ6qfKcFpObeWsju0lEcUCU6oaMYZEqpnIbSVUykZPg2SV0lvK41uWt6Gd1cNY2BupTjndlI0ktteXVpQsctLpfRSBKT21c0eHUM8veO814WHVeg067bpuaHs9IuWc5h4cCzOvVqv1pmmaxnv/7MVXmpAGRi9lLEktxxh1BBkjQcEeMBd8W2Hxc4uxkmo24gAMnywu0JcYTp9YLWSrCWmafhZBGS84IL2gzjkUiSHEqua+C4dDR+QBjpsfp7q2I4KjKwjJ95tGSmmKSyPVe4xhOuSi7aTzKXYI+pBvHk79ZguLRzEsiGXpgnNzIhElgogIxKP8Io7JVUqDOTGY/XNRSGHMFeIAYtd1tYfLJ1ff+vDjX/+133j04OLF8y//9R//6RC66+vri4uLd9555/b29vWre+Cq8lXlt843F9ePhDEKE0E/tG3b3t7eaqfvv//h9eV1lCGEoHuVoQ9934MQjWcXATjGGFn06hcgcABhGAZdVQkh6L0sfd+SxxBC5BBCPwyDQHTBEZFfVbt29+Dh9e/93u/943/8nzx55512f8sSNpvNnB2mOXT8k7M3p5jJHI3WDDEz8cyamv4sy3IYIAD4RamTJVNeq1HlWY7nVVLQnXMOph1Cu1Jl1cQpIGDOheN4C41ZTjC4dHh9xqNF1xaeRZDS+/MAlxMbFPolmyMt/Olfu0tm31ucp2ecm3r2q2w46cPM+Es/pSOdJZzlwDPNbvuyAJ8ae8ZCi0XmRkyqb3daLBozAFIjVOyNyHznoexU5q51qUBtwaUdGyjOqBzlhWYYQ6M0y4EgIsvx+hZZMs5gTo6pl9wDPEXHssGyZmrKWgzz95iSPFle1U8WYS4xBgU53r6UGMjyYGW6IvvzFGIXf5K5ZbOoqcoAh0X9I3JMwrdYc3Gki9Sxz1ayeMofkEF4ap4ucWIZtdQMizLO8/xGdkT2GLD9NZ3hsb49IiaHMBsXTt5jioPVFUAkc4C+QF6Jq0X0nuGH/w4VnM+2v/yIMpYAwxtJcy5aLmf0/K+qZBrPqqZFSE6BdCokVQpDeWzkGyL11Dz19h/CFB2dTvKnI9Ds3P39fVVVTdNUFTqHzCx0TIWtFpoGyLVtG0LQUL1pGVHnVgYE7/1ms7m82jLzs2fPPvnkk9vb2/V6HeZBNCWK0p8l+9kK2QKrNZ8WsXFKWk+h65TaP0n3qdgeRUR/ESDQEwZAAEQwRstXVQUohD6EMPSR0BMd73m2YBP642EwMywQAUCdBhKoIqInCcuTfhal5RRjHzJRXcTk2wtmyk8DcyGSJfuB9LqM+ZSXiV7WtV1VtPrEke/71hOu11siGPr47jvv/cP/+B8+fHR58+rly1cvfvazT7788svPP3v21ZcvP//888N+uNg+XG+ummYzBCBXry82zbpp230/1E1T6VnZr776qmnWl9sLXaDUOwanxQUCgMPhICIyHqYNKFBVLnovfAghtO3hcDh0YRCJw9ANoeOO0WGMQwiBHDhXa9BuH/rvfPSd/+n/7B//R//Rf7+u6667r9cOwIn6b7q+AEeHj4zzxgggNF4lf7z5JEN4QmN6r7Jsq8Wx7byc9AMBRg/T2/k4UVFkdkgUp3UC/Rd5nODT/RtjUpmpshW2kgWzQEHLOnB6CS17maTFrgxZ+Eu5ypRFaqdsPL3HJYfT8rdt08Jml70T9spPLBIyMFKICM6jc60jl7WwiPBTc2RG7vTVqeymMLcRF/G22FeJLvt5Ru403vRhpjigwHnWI36dTizBLuGx3Ls45BL/GXKs+l5UiBYh6Sci0ozB5VeJRNl4s8PxFv4MjPJbO0Y95K1vjkGMOFv+tAxpk8GIyVmSLlEs0YXfxMBKjk0GtpvfdG8Jl0lrSkhgmeQM9+p7NvX182xJpdRLixKXNXsk8aQNLKXwtOFS6p+sU9u4GH1oRyrzEHQwLLooBWmkpcDavWLbdSbI6ds4v2G5hDPDg0WImHVlNYJnqQvewrgpEVtyVAZbSd9foPxKGvmmPZ4f6eKf9vPsISsZ8xy7S2bLnJEW+8Xi5tKEeSutJXinhmy7sB2Vn5+kyOl+ZsM0Lxdb+1oElo2f6itTazCdIdSkMqqrNT7Ne9+3LZHGdzhmQMSIjIhVVYUQNGUgy2j+6qmedK4YQOq6bpoNVVRV1TB0d7e7ttu/fv369vZW/U8NRrX6pFTsM7RI/lOm9jMN/DaEtli1mtO2n5rNCFGq7nIKyKEF7UAAiMAjoHprY+/kmLnvuAtDjM7XU3DEsTGCtNmXmfOIfOJIQsrRor8mvWeTJqbd0UxfWeYpNwCSHk5/pgIAUpwZtgg8M9lZiVbjP8PtVBMtaSwdF1tm0aQ7IAjgKEb55JOf/df/9f/9t37rx48fPfjRj3/zRz/8cbvfv3r16vPPP/9n/+yfvXq1W69ktb7YrK9YsB8jb2G1WtWN327XXdfd3Nzc3b/5/PNPgeWDj97Xrruu011BBI66vBjiMHRt2w59B8B6ttC7cMRBDFGma0UQEIWIqlozaw7e+4ury49/7Tu///u//x/+3u8ByN3+9mKzQnBDaL33KMoODOMNEA4AUDC5ciQAQpPLjTgqJ7OSC0ApZFRjcUFGFtN9BPnavcF5wbyO5ynrEUyMqD+kQ/xKRRsqACMPzALWLV3R+DCZI2TrZNsLOF/GXvwk6cfySBuc1hHpW0QkWojHg2I6Md0ta65FUU
Gz8p26WNRWFgkyN5iySdGqPyxM5PTJooAlZZH1m8aY6fey2vmZL8GTaSiLxgzbltwZfjJFr6U8faQPaccvG10YhlQ54eSUlrR4lrknkOFhERUZQbMP0aTJTjBkDAY5s0EaTQYwM9tsihnMOL8nY5EfLL2y9yLH87SlTC1S0x4sXKR+2ZcURk+J0kVk2nZ0orVucMJPFke6SNZSEjP6ZrRI9S1C4ASHQEG1xWJRbbFxBgmLgypBQsSUaA6M/V2ux+Nk38Bcoksxkfl1r7br8sOSvTPxTO2n+SW9X2ShbNQy6Uye5yY9U7JBlfBbtjz/7duUUm1abPzy5RQ8Z4TOVvja4Vh9awXQyotlA6uCYM6fi7JjGSNrXyardzacEx5bqZbL7kqxfftyUuQn5Zxx0TdllVNqxDrMFqv39/cq2uNd89O6cMr0LtI7J0REWBwjx5EDq6pCB5qcBhGdc1XlvfdCAgCHw+Hly5ch9n3f63XYIQQ0C7Kn+GGGh/lL++v5l1Dok1MimTUCJ4hbqrLs11JUiUh3WhBRBIUJwZEgQNQjVGBWLoh8ACTMtHF+e1OmXrLn9JWyfTKkz6gmy+dL7S/PrWdUUGJg274uEJd+gWXLY+PzuXVxCPbDMpwqPUeOq9UauO/aw6beelf9xZ//1f/uv/jff+vjDx4+uPr+97//rQ/e/+53v/vBBx88uH70+tWbf/Wv/6mIdN1dDG57cblZXyDiod/VdYNUI0pdexEh2h0Oh5///OeXDzbXVw8vLi6YQaLoLV9D33vv0YGId871IsPQ931/aHfbtV+tVnVdMweRSBEGEsDKexd4YAne1zEOh8Pw+Onjn/zkJ//oD37/448/Zgj90F9utgGG/rBfrWqRCEKTiPB0UBAAKIWEqX4hwXGrUOuOJwlTCQjp5dENHymCPPcJS2J/jZfoJ+ae3TFlSZ4ROMYIDm3Kfki25jzCWx+sLZ44NcUz4OTgwWS+lFdvWTWReCutoKT6di0Zxxj3Ixh4NGGXFc1JBH6dzZpVsD6A9psi2aQw08WkP05vYDLo05asTAWnZBV2PtZSjddZ5oAlxZHoZdGSmgUjwItIyEI3weDZdppatm8sNhLAMD9DZaECMyVY/rGIsniwJEh+kcVeemnnADveLG4ttZ8gP4JUYmc+fDs6IgrhmGQo41578irBjOfWIJa7y97Y0ywWYxmqzwzBPCx4U1rsvU+L0J4v58GAE+43GCzh/OqXMl5RjCeT9Jud2LKO0udJ9FIFK9QlPlO1AnvLw8lQmolVVjLPBwv7IDWVHtK1E3bX1zaSGBKKvEp2XGnBTvGmaMnuCUyqNcHJU7GAZSyEk37O1iItiS1UWt/eNgQzhjxnOGaDKuleovRMC29TMvVyqv1/y8WqPij4P9N12VfZn6XwntL5JcLtcbhsasNih1+bWBxOZmmU3Z0a+Dcq34h8pzCwqMrSdVyndEX2frfbee9jjHVdY1oUixGO18xE59h7791ReMcVHzqqNSe+ql1VVVM6X+77vu3aN2/eMAeczqfpfRXe+yEEC/6c0PnyEOJ43XeGvTQZZWNMw89U3BkkZzq25NtFHigbsWxpi9rWyp4CHidtt25WuhNWVSRSNQ2sVithCtLNRzTOTRr6npFYJI+ImWg9S/plJm5iHqzaTPjnImmZRUuJATuj2TpUYDu1nxBllXOm3MZ/5xn1jxWm1ZNTs21Z1CUm8lVV4Wgbg/f+5cvXn3/62c/++pPtdvvw4cOnT58+evToL//yL/u+X602ken+/r4feHvJl5eXq9WaSMipMqkfPHiw2Vzc3+139/d/+Zd/+e2Pv/vkyRMiAkJkjDHoVSveV01TVd4Twm7HXdeFGCS2RLTZbHR/nsUHCTFGEY5tcOS22zVz4737tV/7td///d//zve+JyIEzlfuvr1brVab9TZyj4hpVzDt6o3PAICgNwIqxkho0oOJaumEIQKAwDEvqIhtrihf5wHm1R9cX06BAceVgExPWYHhqY5SThN/pyWrnFFMLLJaJzYaPrPS9GR0iL3wuIog5jo4mBRQalyms2FWYMDwnxHy45s095RbKACZvps5nGVNiyiaUtOSY47HgM/UFDM7V5n1vHyLbI5wCSFoTAhMqeQRkRzEkFu9CYAM+BIntqYuN5abKojjNR6IYi+jDyFoAGGmhsKQO062lOppUREkUKk4IWkAGxvnKVmF/Twbrx1R5hUo8q3Rn7rL3liMwcSo+eLr1CMV0f8wTfzJPh6DK6aiDC9TePZYwYHyD88Pl9Z1LTJLSTKV3NfVku6itANnZk+NKh2OEIWZWa9uJCI27Aoysj3ZrGhmvNnklADzmdda0hrzOdgWWxOLCU9EvDsix1JZrzYlc0Giio/J1SdivC8rmzLt7RMRh0UHKd9hTutwTOeuG0lNGVCjFEJhv0rsNCqHeZKABD+bgVkE2itJxVwMa7cl03qEbgIk3QXGnbOywCZZqG3Bji7DwJEfXJ3qJ8BwbvrPUZGHpGbkU4Hq+35c8qPluZCKU7Vjs9/QMaACQksyy0W2a/trwkwmpPbX840fgRdBWVCzQMtOCwO4JZcjg8QOwYpGAkNOJz2yNbMuyl8REXjZPbOK0X6FJ9Z6SsgnFjkHVVkW4YS5SGYPCbDsk2whGCZ6Lba/SC8wNvT4yVRLQ74fP354fXWl9ut6ve667vr6OoQgjLrvV1VNVVVVVYlgXdd11Wjkm15bLyIevRCGELruMAyDSNRYOZE4xADAu93uZz/75IsvvuiHlohimOXns0jI6JJ0coa9jK/salRZ375fpHtGha8tp+pnDlX6lZ3G0zKKAEfC6BCc8He+/a3Hj66vL7aVp1Xlt5uVRN7v93/4Jz8NIdzvdvf3LTNUlWOgYQjOOQECIB45cpz9I4Le4w1z+RIRmR/6WuSWRIvMAYM5Q7KJrh/t0vnGTOoX5/ZnaiRNH3aqTVwAxuIFY2WldhbZG+et6NKDTMcZNA1SCCuQ6Ehqz7XjVYUPr9cPrq8uLjaIDsSx+CgOqXF+5V2964aUUPfi4uLy8tJXlNZNEFEQyWEIYbfb3d/fH+7vLi4unj59+vjx08r5+/v7u7u7YRjqxldVhRx1EfXV6xcvX74EkLoSItqsthcXVyKyu73ruk7ttOfPv9xery8u13e7m29/58M/+M/+4O//O/+A1R4b4zmRxuuuFkJkR8sN3FRf0w9aD3B6luMkF3MHr7x+cM7nS4GjuKx7gAQ8zUO/ZrrbPNiZ3gZ1pHJqpXny+I9wW9ZMq852dpSp2jik41pFzmcJKlzyIqycJ0a0K5SZ2kpIAKN6Tk2EMjeAFtWTxWou/JP4LW5epZKJvZisrdm4rK5MP51RoMnyy34yyVfyewgQZ/i3UFnU2ecMD+lfmmLVrAZBYyRl+AFDTf31lKNeloQfS6nUlD1bZY1gzcqY/kwgZTskdqS2lwxLFo1gqGbxMyHhmHjawszMdkHEsn2pfGUpSnBqJyAikEMSYgECRFKFo37i+AUAAFOxd5pKliU49cLzhHiWZCVpShZKL9MKOsx5Kc07G
XXSbpjdjwUTXGoZCQwdbacl+WwX2auymi22HYuQGPOd9lSnRALMaWfhsUtOJTstNp6NRUyypUygys9P6ZnsfakfkqGQ8M9noz2tBrDLHxk+KUXafzP/7ldZFlm6rHNmsP//LSWfwDy6HqcZ/1fVo8hJcllCvw1iYY7/RTHJROZ8a6cEP6tQMvmiOP/Ki85HzKMhnjTA/f09MyO4erp/omma1Wq1Xm+dc4TjbK5zfYwxSBBCzbAPkyh59CKIjpjDcdSi4QDV4ujQxE1YDGTYODVDnS+lGgTjdsISk5zCv2WqRYbPypHZRRAIAEVCFLy73XlyHtzV5cX66vLJ48cXFxdI8tH3fiNGefbs2V/8xV/8/Oef7Q6H0WabW+qaC5aI+r4TnK19mKk8Hw4umbUlZqBAe4mQkgpp2bREl8x3/GgW0DfbKkwDWdwhxKUcHPYr+6wFERPszBwYQgh9GPo+EAk59M55twb0LBhCqKpKJUL5X+VCuV1NAkcEhFVVrVYrAKgI9/v9s2fPvK+fPHrcNM0wDM65V69frFar7aqp67VzuN1uh2HourZrbxFd5QbNk+K9DyEgSdcd3nnnnX1///Lly+/94Nv/6B/9w9/8zd8EcAABgBBQQHDc+CvVnrHNsjXzWcznMV/oN5Mf297S5uEpIUAAX8qemI3+zKqz1dK8nhbmFw0FaytY7WBDPpJmYeYEf/o2+S2Wn8mcQc/MnfS5faPVkhlRmlCZAZ3KWzqEZe+LzwXrL9vKMoVKZlasTEk7rDaxGMiGn3YA7EtLFIuf4tkkF5mPbv5Mi41kjoQYfy/Blg3Zckg5HMuNsmS/2o4yaG1NMmf5LEXQhONaOC22aQqZS1suFvJUOb1HswBha+J07DbDW4lGmYJ/0r1PGUJKhSuT40rzkGxIDiEgRtVRQgiIERTOBD+Ml/tZJJTEWkYjzNlyDlUGZCY7acgyz/Jqq2XLxlZOM74q2TV7g/MtL8WSp+OCi6m/cPBjJFPhvZcw26Ys8NlPi3osq2/VhYXE8kOG6gxRGR5OZfu0n8NZfsuGmYFXrhxnAligaARGjJFhZR8mKdavoizDU45Uy9kDFt+gLKoXOCEvf8dLqZ8tgX7JEb3l56Vrcb6UUlNSZJHN3hK8srWs5t8SoTNapCHov9779WpbV6t+aEX3WIJUVb3dbi8uLlarVVU1uh84RpYKqh+IOO4akejy33zzTafsKDHGtm3bth3FXEhAMn6wwz8VZgVzbXamlNrsa/Gz+Hym/UV+XtTPiMgREJHAjSNgFnAA/Oz5q/v7/etXby636+dX189f3jx5+HC9Xq+vH682zbvvV4GhXl28ubvd7w5t27dtK4KBYxg4hCASxk69gylGRrvWOUgXWN8GYxbgckTl/GLf239TnEg285Ztpk0jREScHeFJX52Rlwzh5bxsmyICBCQUmnbIGAkAAkdk8eDJUeUcuSYGHIA1Usx73zSNRkUxC05HEkafEEkcrlYr5xxyrKqq67ovv/ySQ3z33XcfPnx4c3OD4Nq2hRgQcb1uLi8vnXOvX7867G9FQt/3wzDUda3OJ0sIYdhsrwO0zeryd3/3Jz/5yW83m1Xsd1j7yd9DAgaEaZPr65fVJivtmGD0LQoXc5rAN02FbMoYd5dN8xmBLdlonp/D2iKwxFJp98+2rCWRLWMm/VUtWhtrlAS71DjW+MiagiXnzT6U08miiZaVUgZMOwsCkPbWrcRm7RvJGW2vTLxlab02E8hsCoGJalAoETInf9K/ivb0IZtrzU4uLBR4g+K+oES77EY7i2pbP5W0l5hs96TiS9SJ5N6gVXZ27yh1lDFYBpJ1yy0HZoM9j5P0YU7NJfgtNix1LLosGIhOJ3sdq8E/AIBzJHJcSSEikX5cNkrHGQQkjs84g0EAxpXOUulbKZ6hGo/wn0KUxaE1KayAn5JBlnyH39I6Qy8zO5ffJ1lKutjNKHJZjyICcAQGjITC6ZK1gIXNnUFlO5Uli3Cx8Qx7pQSd+bZEnWIgWz8601T6tlSGx8ZFbIUl3GZfBVnYG1SuAERRtnRuttLxluU8yX6Zcr7lv71+f/myqGEssTIx/wUaf5siS9MWGA7J5P2UHs5iXuzn3whyK6f2IYvQ+dsoM9k39gMRrVarhw8feu8Ph4PugXjvNXx0tVppbhL9/O7uzntP02UGiON0RkKMAJACtUYbgxlCCG3b7na7w6FL19IyH9WRFfNTUpwh5+35J6uTRg1LlswpjC3+lHVdQmgbQaxIdO5zIAwQhYklItBuP7SH4eb17nP/cv1JfXFxsdlssPbr9Xq7uaia+uL62jfrqr71u8PVg4fMHAbu+77v9e47AYCeYzSJN0UkHaESODJ2NiNk0FpCZHNlGfOVMHlq4oMlMVnMESAiGohj2aDEZxLMbKa2nyzOVgDAEgAESRhBsSAIgiRIAhQYJEShUIGA87WrdUGQiKZIaUEc76DC41E1QAEHCOQ2m01d14fD4dWrm5cvXzZNc3V1td1u33vvvdc3L/vD/s2bN33fPHr8YLvdamrfYRj0VK1HEogCkTlcP7jc7+9r737yD377P/i9f+/i4QX3e1f55I9J8u9wPHYASyVZYZj+0fsnpp8J4OtTxeRcbXzCb6i2fengnelPRFJiFst5bFIIQKEFssr6E01HBHFeWESr2EXiyZ9Z8D9T8hgw3ImF4V7OK9knpexl81NWVMLLaUnbO9HauZWqsiM09nF6w9F4aAAA+T5e2Yj9ySJwca7NFMqi3M4RuNht+nVB0YiJTc96KR3XtzFP7XN2DYmFhOd5YhBnetm+XOQKKY5mWdQtaslMsuyfi8PP/szGKLLQCzoSESAEAZAZHwIIwrg8JVN83eSQ56COo+AiuMFk5S1Vv5ZyLGUd++EpXoV5LHExfG0E1ZqZOJYRjwfjVcrg6N7mZ5NKXWepeeaglJVWLBzCU1Jc6oGsqbIkOPGETyijAZeFcyObg3+2craVnXVtfSosFLttZ3Es6fOkqbJTJYS5/Mq0FFiO2vaVQJ0HLI1NGVFdxOICu45o/4bG/C8cknoKY39HyiL7nRHMX779kaC/xOp11tSiiij7PT+oUrL+bpaUdmG9Xl9dXXVdx8x6xbzKWgjc922Msaqq1cppiKmf8v8lMw/N0TmZ4usEZBiGEIJeXEHjVRMatCVp4diy9OKRhFR+YUaaz1+zheDzds4p2i0GIpXfHu0TdFMafxEhEIzCKEAOOTIIC9DQctt2u1ZW626AEELw3l9eXKvz0Pe9CG63WwAQwqqqvR/DbhExgAwsyXochmG/3/d9T0QsM11aDjZTkovjtUch7KScVUvtnwr6Le2ocgK1jaceMxPFHkkr0V6WAJGEIzCIRBlEcAgcWASJ0OuZzDAwS6wa7x1hHLcZUsILdQg1vFNT8oocjy2s1+sYY9Osnzzx+/vdF198cTgc3nnnncePHyPJ3Y3b7e5ubm4A+fLyUpdaACAOoe/byukoGIQ9+d1+9/HHH/z9n/z2ux+9D8AwnhnkwvdbXLXM40JlnGpwejPeTsFjutFyJxCy
Bj5qen42EYBZnM2jvNS3rrYjth8RJSwGAhhLogpMUX7JqXsoVQVWfLAwtRYFoCiYa4ZEsiTe7GzAx2KJyFb3inDLD0VXe961u+CXjWQH5nbdZKmG3NrcvPVlxXvD2Yu11ivqdBtuqJmlhu3dsK/7a1rfzpxFTX467lhj151fZFjSPrtg7RzvJyW4iIQL4GkCEvuang7g4lF5TYdsRYjqTkcpnP57OZPR8OQ6BXvZiZ50wMD4uNF8CDUYBPq61rZiUr/bIKUC0BaHR9WYaT5pyyZjcO4UmeM3xKWadMFMw9myXT6ZK+vp4vF5QFobsvOelB7iUTIcjcGcTWrgkBNnU3Apk7ubpZZlCS/PpyTvMMIATKSX/++et8vvz88wuOxxWfC26Lsl0xz+7a4riboBXLtwhvNxRuPYaWvmyxQ1o/2GI+zNP5+i2RcMkDSPM8m1nxCV1WBObuHgJHsqRlCAwqycjy7O5spoXmFG5M4EAhhboaXXAIMiLkrOwob9ydiUSYmW0q+cRA7uRGzgy4uwBEYCbigLLoN1e4UeLG4iFAHKTmOVMQ4cjCWo08Z2XJ7uLORASHTuwWQsie1ZLCzcmEwOJlaUcHZmYQzOEKzwxlMehFYMIwsJc7rhQMLJSs5Brzgv5FxEibhMPWYOgEEbkeq66et3VeW27nJSH1sgJdNQKtPVFZ0FeyKGfWzNdzcwCq1hpelU+A/vNW1uC+jOvstlqsCX5z695Wj/iLUF66KMEw1qD/pU1yR0kwWPL8VMCKVrj6Jzc8MF9SOdcWkeJOo6bmlLOtZEhVuxCJNSyx7PEASk5UeLRsoTjsGtt4y5wVLR3SShdlDVlmWSQQuXsuayGA6pFmOQj9ahhjOEYcI424SJqdWClkl1PCrBCIwAJSRBqF/9MzfhhPH0R4gECisDOZm6oex3iIHFxpztHB6jkbU4k5PGRFUiOJwXWavzKIswYlVoJxpqDEFnCypy+eL37yMAd2dWLlQHEQHoTG4MKvkfwIGhhClPMcXAIxIDBOMytxAvvx/U+vp78mmUCzsvDwFDjPRmHdePabo0Jt/ZJolSEol6FvbpusHgpa6bk1hjhc0zCgmRoDETPcmWU9LFV3D7x3t/lWJ9bAEry3idBQBRWPrCvBGBSKWyZ6bCl24adXhedo7tNTIVYCgHpy3sqTlk3odsPrtq8rH9UR1cS+aFabbZ3yv90Gu6oiq4Ld8amtCTaxskZ9vkHjDTwdnIuAXfclhRZTss3/uUSwcMCLA1DZ3iV2whKsbZk7Jmy6zkwFk1ZsCzTmqdEN3aJx4atmxBXSm1JF4k2etOtruzk5qRXqj/qqE8i191KNyzFOCUBePwTZHaqrBndH2Lt3/tvu6u+OAXdob/OVm1cPjvaV3LfFt2gpzx+cqHd4q7DtVm5PqmldqABYXN28JPSxetTWNO5EWP+R6k2PLTA3ZLyWLZ5bWm+XSSXGxahDEs9smRLDgiEYxNhcwEOOPNs8CMecwun1d4fxIKcoFojVbHI9MIcQs3u2JEbwBHcmZjARgxn57ARjnrNPs6VkREEIrGY5uTuF4EzJaTaAhmiRGYkhgeQp6HlKLyeCRvVxsmMmczgpmPwoF4BUfM1FwhzK7mNJsVtOfrA4/rEvF7f6jebt0nGXWtop8Ftnh5bRurzBtfI90WcbV9KWulqS83XDbgt/x3RtBb9Nplcbsb2YCO5eYnU2JLMQ6xZ+ZsYqA7HupBe01F31ziDcZcbysMqren6wfGUCEi63UMmNDUxGBgqKxftIDMERCQx6tWcZ6X/+/PIfbfrhmf31dRiyT7PQO8wSmEYZDyQj2SAaxL8mJiKRYr7qlOdpmuY8Dz6ShDgOIpJMppwdHCVe6OtF54u7xwNTxDxbOmGej0c6Xc4hjBfiP71efsLT6/s4OUmaDGiTmZWSPAmHsqgpgazhcFdzC1FKlsWcFSAz00v6OmVmDmG46JJpMOdgwzt2tgWNpWErRMFcbwBWA+lGh7RuYq3SKfr+OjW3nlBUIiwyZy2ZqK8TTUSAyy0lOwAwMaV5DiEuOzUgIjZ2VQ0tdRK8qtLyfWQ4CzMTlgzm5EuNBUpmIS/uusTEfM0zUerPZQ0KMC9WDjnYpSgSJSfkgikt32FZ7FYlTes2F1MgIrPVaPASp1UBJyInmGUzYwCmEgZ3I3c1Y2JbV8wKhZoTUQYsu5rBHCogENtyULzgn3FzWvig0BXmm1SHFbedaYvrHm2/rV7po2vfF5vsamNZ43K50FojnrAKBTRys/bSCs3rgG83sdqvdmXHFjPd6Lb1t3cs7ynydcj7oBZyb4V1h1t3Xw/9C/stQ2O6YowJ+yE+9vbztsK9ndD6/9UngYiIDuMhCGJgEYK4uylcvVkClUTAmqJ4lHA4BCEGmUgcgwxjEBYYyrYuCmGUm39Yjs5zSuo5e4kATgIKIbg2BuiCJWTzlFPJdlGeMy2hm5iJmQKRCES8KHJzj3EgoiXqsNeb0Q6zOae6sDczEuYgfnvd/I1lS4edSt5SYEdCW0W+VZltC/cMzV06f1Da9nfHcq/ZCk/L/ttxvQWGdmhvEVa7gD1ALzYW2C63bttpq+2KmgfwPKi2Jwr6T9r/b0fRGHDofRPYsX9As77HXfLeyQv/dlr65qxdK3wfef7/ptwb+zcR0nHB9zLjG0tVFh2ctOzG3kDSqad7mQJ3Af5eZkSxC8kN5uxLcAdjNzKY083ZPmAh8DjGEALZjSqE+zAMIYRFtoN8FZJlgboesfjqUGp5nsyyEKk72JLDOQiXBdvSaEVI8SwQUTaA2ZYGH01Wi5lWLD8oWzb/pvBp+7onV7uZeuMcPRAyu3UeyLct+XX6rh2Cbz4vP7qNttpUlLBrTT2Gc1dQ020pD1mIHXArOa7IATcGDL7YsgK2ciJBBIpDAJLlWTU5H+IQI45mlrKCTOFGBg5gBpzYmZmhZiV1Jo8hEiFYmHNyT0SEYVHuOedZTVWzqaqpw2GqBoCDuLtIVKJpTvOUUkoKVi+Hlr0xiftkUDz1lrE3XtZlA1RVG4ZaPSj3pqx9/mC6dyu0sDGjbLisnwDLIqXc+Cvrz/Xwi6joq11L3tcVRAtAWOxLYKE9W0PMrIVL2CDysjUmvppfpQk3EoE5+bIHVfpa/NKRNRbDHQIYIGvCjDW6FZxg5eYl3BkDBCAsMJXGyI1KFCwDFRc5OC+nkWLkcCpHkOYEBZaIPM6gNRqBL+FGmGY4A0xe0vK4u7uCQqFqXnxvXb3ci133qnenDQ2LrlspN7svvi7Yyq55+2pdIi6uobgVlA8UatUK3riOlK2gViuUPzcedP7gdydKHgOzC95W2naUbWuipBZUv91JvWWAa2Chq0pw4Jo65gbaIGJXDuzx2bG9XQ2AHUt0a9a08pfWbeaK6vYHiiNECBJjFAoCYrhpdlZHdjOOgAViIjVX0jwO8ePz4ekoxG5mkWkYBhGBO9HiQuzuqmaOsIYQUMs5ZwNIIjPndb7rcU0JD+CAmqnaNE0ppXKYSat/i4jAl40hERJxBsOzAywCd4XBr+EO3D2ndLlcp
qnst1HRByIha3oLqXRzvaW0Vp1XbFcFWfFcS6X/WpPXXMzthH4XbG8pXbPbLjqF3ZZ2FC2bdDK6a6p+2BLbLx5aB9g9zm2hvTdA3JoprZ9/W+2boO6OaLfrtho3L6/SbFNt9WgAAKHgfGXqtR1/uKVRmeBurY6GcZ/2tg9/wTz+GxH293b6WEe8UYNgY5u+/cNvlsdNdX01c7d8W952Owi7SPg1LFn4YxVk62E4X7mGHGRO5kHkOIzH40hElrKull8xDoJcL9q04DmJm7mpgnyNdWGqKSdyODu5m5oSBw4iTAaA3HOBhJlJbpoteKna9t6oG5zsS4BudlrRgcYAuCdgtxZU2w7uzFRtdrdOC9X26tAvo8zW0KqSvza71QL1q44vqLEkW+Drw+5tFcu7UHXatnbR/iglsAEgBUF9SRQPK54ITAAYQuQEdipEkczyPF/mec45jMw0xEhks7lEDkLCTkieCVbcylZJbst2hou62atl03mes2n1hst5xupZ46Zqi/NzGIaUkgFZ/fV0eb1Mmt3Y3K8hYbsf90rL7xUPi0NrE/+yNMVFA96ZxO2kdBq8bb/Yw1vTou7gdKOok7Wh5ys9tJ/UflvXFSIKvMo7wNaYQeA194N5yXuBss/EzK7Z4FRDp4DYUQ7tykrRb8wC8ihYvHWhgLqyw+GQxc1d4YD6KgShsq5Nl5WpwQFfT72tGx7JXC5VGomzu3AJepPViYiLq+DqWEQgcwXUjQAuRy1K5M5uWOo6ULyRPBORXQXZo1IRaqbd7Nb5wz5benXo3+XYzqro8vagEQT1k+3J4ba1FrBWlHS++A/guVdakbd928nTLcN0fzJdPStac7OyWtcLETGg13UjtfghorKTZcCagka2Y7wnOts5ul78WH35WhmK9UafM4SWWMnqULVspLDizE1wcTcgCn94Gn/76VOMJyIQeYgcYgnVhTVE2+rQ4l62utyhqsmcBUOMYNc5q2rZuFKHlg00h4LMkQ2naS4LQufQmstubuZmFCgyOy3HqyvOi2uAMIpXsPtpurxeznNSIiYRV3cDhTs08a3SzlErHzusdpOy/ap9/kDN436op2+S97Z+x1O77eySUwthJ/q3aqN7e4+5dol5C8+DrzqN0mksa+6IdvNVWKM7qt0OtoXqjWXLXF2ztJ5O3OjI2+Gs1GJV0JUWfdFB1/LgHPC21j6oLWBb5D8Y+OM5Wn7cV+3/pmWrZXCL7Qff/oK5/n4Av93sthdf7+RUnqq1mgXAbmLSu5P1K+eieGowmzOT+7I4bDbvSmIIFjpwfD4+jeMAzVNWXm2QqiuLFxk5ioOcY7EfDARXBZXoYq5WuJbKPn/pLIBgDFODw5lKoqJl4GUDWlVVYWK2LgiXPf87ZcUY3fLjdTr2Ku886UyIMuTt7bhdcVHLN+fxQanQdv2+kXQ7/n38VdtX+7yNrdAKGbebh/X/bglRxfU9T5nuGuS1l+LJVw5V3FFyTQFkRmAHnNQh5tkd7hD3IBYEoKyu2S0QXFgiuzAFhpAHwChbCfTCqmopO3mQAIFlt2zDMHiadZ6nlEiYIOQuoLlZj5VwDDAC82XOCOE8pS+vp8uUFMIc2AnW83Kr3dDYfi2ZtaumshrcjVlQly27tOp7djttFoSdy/SWHrak23671ea4JR5ardMW1HLgCSBIIPdyTwNkzgxmZuGpdGzmZgYlB7MDLA52OJb4BOWIjdzZ4eutZkc5PWZmymS8LLEW6LWmQSVzW/bAeN0GcydfNwmIFlcHAGolQ0nJRUK+Hg+QZ2Z2EvcSBIfVHU7kLkQCIhi7k0OKryDUwM7IMDUqlyeNyeBwcTIsR1helhXuO/eVHzBwvUBYl/Lb2yPbU7u2PGi/kmY3nZ3Ftm3q7VKvted27ZiN2O0hrIDtWg/U3IOqMNPmHlT7edtmgdChbU6WFVpHvTNTG19d8PcWAH38w+7PTlK3sFX8LK6nzCU5e31S64hBZDE11CgrzFmpOCM7qcFSgL87DL/58O43H55DuhDRYQgxhnLlhgUxRrdcbgWDjIlpCet8RSAtxu2av1U15zznPGtOTkqcFNl8nueUTVfX8CpBmKjaynAu3ETwZMoOYsJywRXJNKlN0zzPsyowMDO7ubrRt7KQPy5bWbZVYLgzI2jItW1kV+FRE2V3++o7n+9E962Kdktd22Ye8+bu27cw9QPCbj98i2So3VWqRjNSNPy+yORfZ9BvRUfpd1dNLhua20buNF6BvPHht+WuzrXZput77Twewr2x3Ku5bf+NaHzc/uOvtpTwTWhbsbw7ug7sN4L3y0bxNyx064TS3em6h5xfT+1dYWaim9i/i8EgJddfMcrBoBD4OEYB6Qrh+hVTiZq4nFfwkoGbHGbZlZmd2B2Wy/kGEyGbwUw1AeAYBmF31eSTkgCBjZlANWV2L1Tdl9tG95DR4rMqHSJqAzh/L67qpGwVxzf5bpdu/c7qq/twK+vqEL45lt1XbTtbADo1V0o9GOgqL4FSNjdZ2mHW4t86ANjaYKC5GOVkYGcGicMJgdlAqxS1kndC4aL5w7vnHz4+P48juRnUydQpO8wUVuKgiwtDRMB2NlU1X256F4OvREBZ0iabm6sISjKUPOWspgpVTbNOc4YRCZvD1F8u88vrNKsRorsLNUft93HblnoYSLeB37bGBhHx7YZph0m/I0Dqj10YNvK2v2HbUWDbezun7XNar5Ve3VxXYg6Bxd0NpGpOxsQSqPinZcoZy63x9WNjWwOdMpUEFmpq7sSyrAYJcBS/AmamlB3F6bTGciB2XM94fMnKUXyOzQmArnJH2oQ5xf2tnPoZfLm7rAxyN/WyT+zu5ObMLG6BnN2Cm0ADIMTHMYAkgy7ZzskuZgqwBwM7lUNLI5i70kIP0iK9mhHtZKx0u0Q0rQ8ronHn6JmX6C/esndHH1vR0FpmtYt2idgSVnng3lMwM7vbGpS/fOJAvY16MzQAW0eqFZgeti0VtoKydfSrFYpHePuwZYNKrAtOaLlF2gzzinNLWuLGFJqCLyvHFnVogtdrm57Ur9sWWO4ftqy1w+cF7SXGdL2OWw4fljix5OLEtO6ZICgRgfOcyN3yzDYdon9+d/z8fHweGNmHEEJZDELhThSY4UvDTLxu9hrg4BhEzYF5nudcdh8cQEoppTRnTdmURMnm7K/lgrZmcy8R4WwNxjsE4WU9WFbscDdBqSbFT0nhbpqzJrWUszlZOUkkFEdrNX8UqmKvtGSwJZiuWvu7EkD12t3WxO3CoJ27ezuj31u2/bbkvVu55etdvdJJ/Mc9tgzSfk4b2/17h7OFtvuzg7kdddfdg1FsSxViW2B2KpdLf+vLpZYt5ml5UqTcom3Ulup0s59adNNO5t97pp1j95DQoFuDYBf+XfTW+h0NdNTyuOW3l26WH5RdGLZEfo8Nvwntbv23APZdpaPYLZUWgX+r6L37tv65y8j0raXI41KusRjMyNwR3dVNnGmJVuKAErOQB6EYZA38t5wfFq8rIgoideVmBAHcXUHZXAhLsCWoqrpngFVd3bMaOSIrAIK5
mXsoR5bOKLYRM8cYQwgSZjEGL07W94TedVxUTJGbJMPbm94dktvZ2X1S/u82+O6JU+xR2lvmqzu3uQdnlYT36HaXPO4JbeBGgXVC/gHY1V7yze4G3c8C0kn17tXSFJTKHsN6IaeYi4GDEmoEjmWNQPTM+sP7448fPwxBXLOROZNqZg4GU005SyI4lOGJigPievUGptlSSpbn03QhDgxyIcHiqXQ9a4HCzCxbVnWwiUuYFV9P08vlYs7ObIbiirU7Lmx0X0XdrhptA6dVbVUiePqe1Gqftx21q8SWKbbg1ZbuzXidvqo63ZdglR2c2MjzSrRhCbdIjjWxBDlgXq4FCjEJMYX6zXrnwh1sCFeycwUJmEAo0U+LCIvLntbN0nz1El6tR13XVLgyADvYr6S/hpvLbgjEJeOGwymbiDuBHApilhJZPzAGppExAAMQORwCD4GfD9HBM/Bymf/19ZxzVmMlYwoOMnIsXqOLYd2ifx35dVtllXHXyWjvvNXnW/XjtyZsPb9qJ6ybuXWmeb2seNMvbqVSneZ2I7xrypvzxisB7RFNR3ndj+3bbtS3qOiBbBFb26kwt3f8iKjkQsUNld8sFdyVQc5UjpHLPdKWSQg3s1AJbnc4LS/tVsC6AhyGYQlrGUK9iExEAU7shGBk5uXaBoE45zmys6Xg+TkOP7x/+ngcxfMgHKPEgYp7JjMPZW9FpEyTqcOdCOxmZsROzOaYc0rzYoxOKaVpmlJS1XJHNymd5unl9Tyrr1zOTlyuDsqawl61SMBCZzEKLQtRR7E83M2L6/hyuWXJbFP2fNz97tHMnVIp5Drj7ew3E9H+2ZE6N0lxvtlXN7PbOt8L//ZhJ21vqWjHnt6lrnsPtwBsGQe3Wu0B9bafP5BRW0ZohRtuWbjl63t4vld267cPuwq7XXTiDsDq91G9yAmAm18DJZUPHXUL6X5Z7sm/bUA9zB2KWoC7cXVjwaLg+6Z2G/w3Lfem9QEM32SrThe8pc3vLQ9g2LJM+6plEOZ9OHdbw/fDb2YGKxEPACxBc0sj7g5jRyAEloEkSlCd3Z1BJMI18t8S879D4xqI22CW3QlqqppSyubny7RqalM3FiEKRJSUEZyJCWZmaV7qX4PvkcFB5ERO3+KcDp+PkfPAlrhn0myNik6VVNLtZNQ9kDqls6X8epestZoeyNt7qNilPXfHBraWVnfaxM3wd2HeDrAFo/3RUvLynAAm2LIFvOyyeckegOVvMqJyXITPEn/37vjD02Egs5w4UnGnGuLoCrOcptnVxI08k6MgkoUB5Gme1sB1ZsZuTnAzMLG5mZqqxEF9BpaY5A4FxAggPqf05TS9XgwhErOrEy0o3c5CSw8delu6qlxfPem6t9vZvP6/7l+3Hd14nLpjtSe7eWkI+9FGw1bm3DOcrE0y39gJwUswhsre5paVBGmay/QLM3ORI1YiN5hZUiUzhpNwuZ+nqhBnlNAvcPcSaP4J670ivSJU3Qcpz7mFVQAM63w4w3zNsmYSGK6uCea+eJk6AJ5dBmeGOTmxsDAzjAP7QfgoOAgf2J8CP41hjCFwBNPFLECniSY29eL47krMzk4GlFWH0Rp1bhetW9nh7hKvc1mr8ZqHp502rAqmPVyqbbYhGdoGaxLByu0LdkvsyNVbtSOpLYGW2ezI0dfgLlsLpitvkXdtzYaRluAf7RFN2xrdFlvsthpI5ip2AXBVTLecI8Ql8afB3cgINQw6NWfOAECC7xlOy/z1d3UPrjNOq685iMjEyd3JndThjsDKZgEqpAP0feSPT8NxJNZ8OA5jHA7DGGOMEkQksjBRCZ9dZq0MlsulXUol20XZTnGCpny5XJYrHg4jBrEqpjm/TnPWYL5cFGlxraqes5RISzSAKLIMIVzyZMWNe9nQYhESJrokEoarWlnkMhE50/fYydepb4VvRwwd8r35CrfC+kH728YffPJdZQt/fd4SakM2/eedKO9U0YN+O47GKmq+i2G37usteW+ft5LHN9ZVLdSYbu1Y3o7/7bTSHTOrmwJejaKKorplVEGqwPiiTG4OAm//2HZo9y4QbofQAdxK/m+OrsXVtv7/98su/v9WgLVGydul8d+20HqMjCZbb3E/uXflGPtGW29nv734clHFy2UHZmKwkDgJPJODmINIMBWmKFRc6YgIviiyIkuSqcDhToEYcJay6+e6mFRkagZYnuf5kvTnr68SB2Y2y6Ek9PYhCl2SWRQPJNCk+fWSXy+X89mLejErAmQNpyd3B0z1DNOvbFj+fISNDb/sMk5Ffj0U7QTUmzD/cAHfbji2omx71aUTdNtyT2ZuRW7XTlvhsfjdNlKBbC8HtpC3sO32vhQOcC/3B1fPZnYCnBYzzZXJS54TAf14jD++O348jCOcHWOIQxTLqnPKCgMhgM0JTjAimuc0DAMTm9k8p8t8AUAiYxizmeacVJlNQnCnnDOG6E45WUrJUlZVIybirLik/HKeLxkylAuEXrxCtm7ND+ikNTCuaFzzhHfTsZ2gDofbT9AQdv28eMxtYaB1Wwe3rIFbGmh1bn3awkNrjJjteMNhGIvfQADJMILKjr8OQSBLtgkiIhIDldwjYRgGZoObmboROxGZiwM5zwaHcAiBQapK64LQyNbxCxGcl0uxUsD2khPOkFMZBtWhurO7TjOASGABwcnMNQN4zwfhGEJwMIQlMgC3eRQ+Dvw88EH8gPwU8eEYn58OxIeU7et8cQtZQzaxc77kJBKTUzJXy0SQ4uQK5TXWfxVhZWKWoDubCAo557L8Kz4MhW5KkA9vSlV7KSUiCiF0omTLq+uf1MqmjuxKCxVaKvuGd0oJ4tQ+oY2J2fVega+oWDKq3wZK7eRa/YTWKKNoLNFONjWaw31lPLPsZiWEFK3O3IGg6kQUYyw7nSJCQIwRRDnn7EpEJMy23FEkX+7urVft4HTNI9exayccS4Uy6SGEEIKZzfNcvgohDMPg683mevQa45CyajaSAJC7M5zBn56H08//6vnyw+en3/3w/BQxuD09j++iDMPAzJZTdgs8qNmcsoCUmUs6yhIzwBRq2RNLzDlP02xOQjHbshjkIPkyXaZMg0zqr9OctEQVJzLJbmxGEDjnnIMsa4l5nmeBDKKKczprwACmIETkyXLWS84nR1lzEi3HKWsumOteaaeY7/nWtxS4VU4dNRJRG/UXty7++0S+sQ+2CntL7S2oO0qxKZWXWyncAU+Nsdse77fbQB26tojqoOoGVXu02yvE3VdbdbXdy2hM4R12rjCjkQmVl9vtpCo5W0R17ewOs9bfaqyuMtGSgGXpsVGdi0uPlc2KBZLlAgCWeza+Bqe9ukhsYHHXNZlqO3x0K8WlgrN77sZ7j34qwndJ8V6ewHZa6c4Gx2OKLeWey3RtthPI7Vy0fXX9dvLzAa91/IKG3d4+li0ktd+W+3Ylz/J200v3eQ0wtMtBRRmJSDHgunZ2YW75rtas3YUQAWeGl/ieIGYmIwaFEEp8fk0XFvzm88f3z082/RRCKMZICQiRUoKEEKDqMA/CIQzlUgPUkpoQVDV
PFwZJiMx8Pr/89euLubz/+IHJppyYYZ7HQZxiSsmyMdw8z3Oe5znnRV7FGI3IShqvnGLgqxffuv9VyakwYN178eWmzE3wkoqT7bxvBXhL/yU5MG4FDlZX0jp3W5apcmaXzqtk2G6ctd3VpurNl2IM1Mq7hIrbff8VP1fsmRk1hwStVL/Hv7VOK6jrJ23Xdby1x/qh+00e0bbfbC4kUhb2Vq5ukTMRGGTMYBbPk2U9Hg/vn5//899/+N0P70dPPtkwUJTBVS17yprNmAKTm2dVI1gIwQEHNPs0X17P55RnIuIgl8uFRGKMYRhUNScD0zAMr+pwvlwuBDocDul0BrGRKPH/+unradbx6ZiIkylzcFCMUR+auF20nlY7X6dyFZDU7EGoqpczM5HOGCaiEEIxzMyuicTNrORJ9sZCRuP/XJ80lNYbGO2E1mYborq23PZS8hvX6a4XnULOudyXYykXqwggEJUE0L5GGWVyJiYWYy0atSxteb2yHJmMoExQc1Noudq8sJHTEqnKGYASSXZlLFnhBAS4kTtDNAEgEi9HimVFCgshwJTcWFUYQSgOkZnf68AxLD5sgYYhihCDY/TnKM8HOgYcZXyO/O44HIZodpw0jzMkuJMmS8nU3C55Ih/JiSHMTswlYFO17NsZaqVSWwCEQbr6VRr6LSGujXx7O/9WoV6fVDpoK99Tfl2Fe+WbxkQnu1u9i0Z2bAUKNWYiNpILze07L1ptqZOkCIIQqDnYZDcFloCZZiGE56en4/FY8p2klHPOJf8sUO4Q0XqtiIHiekXAeoO1GX4nuzuElCdl2c/MsnpyVjO6Egwzl2y8IoHA2VxVhUjYR9L59ev7AT98ev93v3n/4/vjuzEcBzlGHscxhECO3MTRJyIrd5+YQYoSfBdmlaWYwYsjfcGPunkmdTNiVTtf0nlOs5a8F1R8jMrcmJkCgZZowHAyg6rzOl18zeBcXfYrmYGJvOz12s3JyhZv2BAnEeGhubzzcM+qw62WfdzONyn836LsMl0VDmgo7QY5G2Oabhefu720HNoRc/fhdo5atbE1aHbn9AHwXdmVV13Z3bbcgnR9zqt16HC6gkpEDDJ4O4rSQ9FxDCKGEDtfDcFledAIrhrdeh3X4n+Rc27PCVew+2ME7MmQLU5qeYyct5RfSd47GP43PrJ70MVbun4srn8lYMAib2qzVby3huO988MOtu7JhjLX39y77bi7wtzBTg4NREMIYwzx2hiXPU53J1o1KSiwVA0FILsJR82zu0cJ0zS9fPn6OmUDg+IpJbtMg/AglByDmXmYLQvcyRieLSXVnC3nZXXnrOSyelTt76K22n8jge6J/7vlFzDIVul4s7CsEGITSQhvEFn3rK92Tlvh2b1qrcH2/2osteihzRpgp/i1ctuL3wYU9esJ7U2D3pRdCN3YgAwiZyIigZegMjFM01ncnseBEZD10zD83W8+/eNv3j8fDwMBZq7QZd+ECHC1ZLNlZ2YCQgiBeDyOKaWUpiknM4NTNnPN6s7mZiUZXImjwGbQDHeEEInIdEFRUrs4pmwX0+ySgAw4J4DLjZlu6gtydhHaEsYVMxsV3M1pi7Q6m7RXtvK2/GgPcnHDTepLYIsKgFMJl9HIq/p/bacdyHY3wddtkUCu64awwxzwkuR0CTZTNllpuXvBzPX+X/mGQeTm7tmUiVBiyriTO7MLwZbU32wMCK0uziXOcQhMRGTmqmowuA+Wl+64GNkAiJyHGNgIrgxEQhSOgUXkcBEmYgGRS+TjMYyjxBAOgcaIp4GeohwCxoBjDCHIPFMMwhIg0XmYdZzSnLPmNKsTe4CURN7sBLOr+VW5uhVw2HB4u+tfataDRNzKl4750fDkPVLDfd2zu2PUUCawkRHYk3T3BM2uPqukT41pWxmsxVjT8g1M9beqltzrBSFlBRhCKOs6Vb1cLlbdYt0APD8/f/r06fnpaRxHVVXNMUYyn6bpZ/0KdzI3GEBuxMxYsnmuA9/MRYeZXeFeF/bFJigbk/VJbcdsyaJjOstycgERIRhbRpqCnn/z/vjvfvfpNx+eniJGoUPkGERo8XljZinmJxaxW5FDjbxQk1JTREx1AY8JhlzSdTJfsr5cpvOUU0U4CTED9UZwiZ4EYjKzlBI7QViY4+LtusQbbh1klkFBSh5ao+t1l46KHhtG31u+2QVuBGi/H7xbvyu+WQLdVcDfgvObbwtC671T936HooN/19pAg9hd1t4Fpq3cSbl24wYPh7+r2Lbw487E7Y53F/j21RUti1ABbmXjOI7WPKTVrcA1tVKLiHgJn3HdVaUKGGDlxK+sCoyIyvl7cWFYUn5uEbJ90s3jr6H/B+XtVPqgZivG8QYCeHu5Rydbldp+8rjBB82+pWw+fLQJUvR59Rar+v0BP7YnSO3/rZvPTbFy4oISVNoIbg5AQF4CNJgL4TDEIUa4iURmcjUFVJXcqreRCJfzJV8NPjNLSXOaTRNpmuZpni85Y6ZIcTy9XM75NB7C8yEcDJMiqJmDyDIBlqxRAGXb0aleGrd1q6WzYq+8tv7/N1+x97KitUm2mPd1LdQ20n6OhuZbw2a3bKXxPSnXVtg1gboWyg/DDngPim/sVaz8ZWtqgVqnJcKO6e5BSI6S0xslmQkzipw0E0aA+3xize+H8MfP7//p97/54b0wA25gMHORycSiZcs+mbItMUoENHAY+ZLmeU7F6ptzMsvMDCmUzELCgUVc3cwsqboSnJlYPRORhMgU8lkzyDgqyEkIDuKy7qjHZBUzfn+OOwNgqXhnUbCgbjPd3XnSDSU0J3i1I99srNQutnS7O33t/3U2K5BVKLUTXSgklCNLAMXaNijICeTl1NKMvKzKQTAChIO5FmW5WKVm5k7uEgQogsmgVm7f5bg6dDkgXFJOuDszMSuzm7qZmhvcQTjCmQjizMJBmAIzMy2xrgKEyQNMyBlOSAMjhBAHiTEMIz898fEpjBFDpMg+CoJgYApMDJTbq4YcWY/BPhwlfRymNCadUjLKfl78CuHLSnyN7VgupJnhVsR300xE6ssV2Hbi6/pwz/TZEUx+u51zS6DfcLXatNa65LXCeom817TT2FY7ZSvrr3BuRXBbp/y54jDjlljL7xgjiidkzqXZ4ntZf7j7MAzjOB6P4yEOT09PzGyqOedpmuZ5iixxPIQh5JzJfDkWLOBdd8IarLKUEEJ7SLvxr2jHpZrrhIYQYoxFO7p7WcG6e0qpeI26O4hyzk7OITLDU/Z5Qn75/cfDP/zw/nefnp4HCrBBaBAWuOqyoRCYl8SsIOZFMdA6Qe4lsLhT2aqzhbeLR+7qIKQURJTzlM5zSubGQhwKI1O5PMkgkAibqa/cnLOREg+BAsyIidRZczl0dTVzA5XzlTXGz4Iov15QviGd7zd/t59Qu/d1W1r525Ii9sTlrhnxoM2/VdkaLrjlkcdmx+7n3VetzNlq9F0R5L6vkNr698BrFU9b7g2k6333eas72x+t69ftABcflOZ/AAhCRqzqZIR1+4aIbF5vpbsToXjUm5lgcR+saKCy53oVxQ
SBu5sj51mGgdroMjX7CPVof1w6TfHN+o/Lls5/ZTu/uJF7FPtgjB15vBEbtdm/LcM20Dbybf2zck3LIHc+R/28g3MXYLNsJWhzoUYIM5GKiCCwQmEgpsDCbjonEWawIi9UvWiHoTZYnaLd3Y1yTkRkaX59+Qq1KCE5fj7NFI/nbHPOB3cRuWRn8qAmgxAxWbZ10zPEKDfbgksEt3W3cifdzm5Z6/xamr8nz1ulgD3RUZ/Xg7LtnHaNPAaj7WhrJW5xslVbXVP3xnhPkAIo/nqtFG23yba0151EVQxsYS6/B4i7uzmRl1QCABRm+fJ8iNHNXtJB8E+//fC///F3f/jhfcQLlch0YSDmtPhNJi3p7NxgVu7NmlE2JuSk2czU0uVyuVxOFOR4PLKTOTFQfLQdTKrJUl5adLjmnAlyOByIx3/5+rOSuAQHA4SS98DVndsTFVpXa6oKvqGTB3y6qy7rmsg2c7Qs5m5Na18Zalf/dmdI7Xy1gF27vqW0B4TXknop7ZZWKHlL1E1VSwQVEWZiywpeeINBS7onU0KAUwnaxgCxG8gJh8MhhEBc7lClcrDjul5oKZFgnNw9m5shRhEjELG5GBFRCMLMH2LBKhMH5pKchIgAVyYP8EgeYEzO7kz+7kOIMR4OYTzEwyDjgPFAY+QxBkJmL/lVKTCDCEQsHtzNcwj2JDAMl/mgRjnrzxfSi8/uashwLY7RNzlzqLLQVlKUH7IusGv98qO9Y3Brqt6VGrsSpNMrHflSs2OBZcHWt9DRzQPh8gvKtsFW7BIRkbRoaWWouzPzMAzl5K08zDkPw1CWXkOMMQozB+J5nnNKX79+fX19zdPk7h8+fBiGYQixshMxyGF0RQiuN45u7M4HCqwbC6/3kXLOOWdZi5kNw1Cu/6WULpfLNE2qyiLlrh0R5XlKl5cn1h8+PP3T33/+3fvhh2cZyYVDHMYQxIk8OwNh9UeVcuzmPAPkMDcv2R/WY2cP7O7ZTFWTqWet9zQMzswwTilN82xO5mQMI3Z3XncHRYIQG5TWTIOmMDZ3J0jppuyTYjlzobKQvMqwsiHkAN4aZfSbVNfOy42i3Zup3aZ+mY24C9iv5JHHxoo3m3ZdhdbI2DbSPWkx9tj6rHW2NlPb4y72WsAeVP4m5nfb3x1R97sOc31ysx5e4Gn+5DUxzCBBRJSut3xDCGGIWD3PSyN6o2WBknHFqETDKhdGmMt8GRGVLU6iq9/7YwxX/Gwtj3t4eEvpPqnt+/cfyHTE9ispvx3dL2DJe/W3+PylMO6Xpv2OJpf5LQutWvkxAJVfdnV6ZwAU2lmIzRkuAiEi9mUnLzATzFRzmpIlEVmSxKtl18XXwJkY7uXssFyxY7CLCBObCDvmPMM8KavBgsxGr8mM8zDk56yBLAZ+HwKTkzqM3Z2FBsdQRFaDK1q2zsmbPHjU7Dj7dZV1I5F+vemxazK9nR46a2S3tQcssLXoHtd/DHZnjWxB2hp+O+O5w7/1+TeNwI4g2/8Hogx3gFCOhx1k7JnFyaZg8/sn/NOPn//Pf//Hf/rdx+eI0aWc4zjznO2SpjkbuYqIUspafLiW9GdZKOUppVRWEJd0cffD4WAKc49MSwAbEiYqR9SmpBlCQZgYJuIxjBfj0/k8zTm7G1G5NGieyZR5IOBqIq6l7Ok3dHt3KqlkTb+dyjopIoLbA6GrNU43n+xO3T0Cvppat3YCrUuSe4LFlsRyvSl1T7mHZWPJMpZTO2bhcqeCQcUSLGqwdMDqVCMim8MswACK7hGIHEiExpEBd1XVC+Vyr0nd3D2ZpuTq4KxEKA5ygSXGMIYxhHC0jGWXlcvpQ1mWUVkBugXXQTAKgnBkef9hiFGOx/F4iEE8sAZJYyyKvqBQ4ORg4gAiAZOrOQVVFz0O/uEo0zxcJjWjOasqa13S800C62ph7NpnpajnrVcnVYeltdTD+jaMdUsHu/s63WS307xl7Af2HG4FQfvVY1m2JdM2rRBul8G79esO61aGMvM4jsfj8enpqR4Yvry81PPVy+Xy8jLP8+xZ//rXL0EKYjmI3LS2xq0hh5eboLduXUUtEVEJ4v4W5VEFR2ENM5umqeysPD09DcNAROWEcI2Ccw3iKmFgZtU0zecA/e3nd//+d59+/2H4POi7iKGkdYqDMqesh3GsVxNLHlV3QzmmMC3ArgCX3ZZyGX1ZIuZ1TZjKThzCrPk8TUnNiNW9xIVzd0PJ2wlnByAi5Uy0XLUkkrJ0Lws/BhELhNy0GinMRkRQd3Jyqpe4vkk27cO32AP3aBXfsgiJaNvBN7X11kR7A4w7vXSKfH3YMwvfRjLY9ltpr4NwdyHR3kfvWKz9qtVhLcPusu0uYC14tbVuvF1Tu5jvpnVrJ90zbnaNsKqtOwlD5ksyXABrgBkiGkIsRJvS4kpaZ2hpsITOci2XdYkIy61jK/fbAYVfM2Huenzsjv1BhY4Cv7f8Mortyq8BAPe3MDrYHpBHN7+74HUk/WsAvtdF+2ftrrUmd7/dcuie6uy5pr6i6prkzM4MVlUFjI3FyQ1qOs+W5ku6iIhZzpqg5q6DxrU7IiJfnDwJ8KIXkBPcn54OgH19vSQP49P7//Xlcsk2ZRjrOOsl2yicy1WCwGBiDkYWWYK55CW+xXVoa8jq4qLyQCC02PCy5/KLyi4htbKufXVPwuBWpBDdROzrWngAzLb9XeLfwllfbZcQlbq6P7efd011wDwW6a0iaOFs/29LLLkAijMzctkAFrZIwDQdRvynP/7h//p3f//vPn/8yC564WAI7qDZdDJNRAjs4KQ2Z53STEZwUiU4U8gJl/Pl9XQ6qWqM4enpCUxJ8xhGEZEwCAtzOVTIZjCDO4nEobiGqc45fX2dX86Xc56zwaXkyHCCl5CCNeoaVrNzwS0BGznMt+H6V018na+bpsrLO/TmezRwTztvhU/5v0YfrZCX/0vQDdxwFrn71gAqIyj7WV0vzBwqQMwsoeynOszLEc3iMm5LokAmKkFmlgNDZHYIQcg9ZXcnD8MYjsMwjrGY6WqXrJrzcg85ZZvnubgCFikVKEQJZfuWiEgCUBLxOEBGTgR2ChLFTNhGDqNgCDwIMfMwaox8ONjhYJEMlgK7kAoPzIEg7qJGbmIgOEc2ArGbuBn7IDgOeBrkeeTXyYfJZ4igWMsER6AFy5UC6pbzln8AkN/Ed2qDcLbqpPVS2JViLVFuH+JW7mytopbIHpNg18s3FXBLVZVCOqna0tnNtTNA9RqFr4Wn0FvO+Xw+z/Ps6yFY+Xy5sWBLCC92vHt3hLvqEnuldCoixN4Wcy8xsW/Hte/Wsp2LTjgSkZnWcU3TVCAPIbx7967CXBCynhzmIGzwnPMhht99/vxPf/j0dx+HT5KeaDqIDwIWGBRGzDzEw8rGleScl4OOFWMwdxBJURRXBmZ293J0mdVKCOaU0nmazBwkWTU3LrstLfEanMNLItErokxK7CiHCAHI4mImQkJ1j7wQM5UVeEcAVZ/tEtjOiq2ptp2jjtTrj85FZ/vjnp6+1/suZ+3VvNvI/U/63ZNWGnS9b
8Hu8Fl1zy4ecEvVHWzdvHRtdtB2H94baS8P7xhGbS+d/usa3xV0D+BpNetSx9yWfJnqrmZZNZVsbICRozys34YgIlJilcW4eCuoLqBOl/T6+roavmvvvu7RPlxvY0PYu/f78f2lw0CDoG/X3xZalyvdKN7ezr1536VGbCi8Pvwmw/otwr+3bNq/uyxvO+oY542a9AGR1yIiEAIvgf/IaM27y2YZDCGG2zzPl8tlPowDJXc3y6puOZnlYQo55xACRIjImapimuc5Z7N5ZsuR+Xg4JMV00emS/vrzl9N5ShCb/TyllM0OwUAppcIJHgKTGQvNubWGzcyJ6y7KZkG13b/b39r7xaVjqK3ioGYlv0V+O6FbY6kBe7/sKhq/vUqDDftvW6A7+eK7rulb2x90X3W2AOwCdo8+WzaUnARwgsEVJbGJsyAyfXz3/B9+++m//rt/+H/98OGD6yFPA/wCImgCZjUFI0S4q+rLy8/T+WJzIgiMp0uak5v5bK/FhgGZhCAxiERVBwlLFBFaEpLTlPQyz+6s2Z1MRVPSOWfzy88v5/N0mbM5sRMTg9yIlgVhh/br2G8neUskVzsQN7TUCsnqftkibXVY2zlJottJaYmhbbydi+5HKbwXH+jawaZ0kBR7lYhCIDY3LC5qRESqWVUPh0NJJLhaGIvnJOolSCt5fUEMAgKTuC2OwEEjjQEM9iGMqqoSspvBVXUKnFJ6OjybmauVbVcGkbmpZqFlFGXFVW6uEqLwwHSMfIg8CgIRI8Pc+EJMxBykhGdWIQqRhlAytQa3mLLnzOooPnQgCQgKIQILHcfheLAYchATkQjKzmQCz1jzRmCVfVhvfVgTXr/6YRPRYTws50IihbJLCIKSXqJOXmu30d5WYusFfvv2Zl63jL0rQVo5sv3ku8oNY6ylNlhLqxjK/9u9N2oW2CmlkjqiQlhOyVpEVSZhv0ELrSGbl2SMUHcnK+0QEYUQ5nUHZXcsHUi7pYxrmuZ6DFh8z+Z5LmvCeZ6r71mdRDN295STqv7w+cM//bs//t2nQ0xf3z0fjoYBs8DN1bIjcIwj1R1oXxJMiTCXqIblZM8zmkkUkfIrBM95ocycs5U0ECjrQ4WDhT2bFWekskPBTnYVIuZu5IbCxjAzdSvr67IBLIDRcmFxcWotWPErijpMPjCPHpcHFt6uJbGrRCtpbt9+Lzy/vrTM3iqALdIqeJ36b79FM5AHinzbe9d+i40Onnu2y70nnRy4h4F7pZNg9yine1s+oo3N18qossFRtuHKLd/ysLz1ZrOvOKsPw3A4HMZx5Bjev38+Ho/DMABQ9cvl8pd//YmZX19PWKh0NQJsx5rsJOQ9Od8O9heL5V104Ttb881yupLKL4CqgeJNOxTYoOhe+o3uROXXwLZbGjx0qNhRoPdmrSOD9kfredQWZvblPsx6ucvhJUibkRcLRjGnyzzFeZ4xrr2UWwNpnmNIKYUQolzNSjNLKU3TFMaje8yXeU7zOAzv37//6/TT//iX//nzq11mh5gZ5qzZlwONpDmaDEGIArFnEJGuOm6R0ubGDSpuRdbOxseWW3992VJsKzOpWX929OxN2aWob1L+VrDjlsh9b7XZCuSONmizWt4i6h7q2rXKdggtZlr50w2EmnRiLWAA2BQc1nWnazlWIh8C/9Mf/+H//I//+I/vj+N8oTQfmN4NYYIRMzmVe2YGzCmdTqeff/7ZchanQxRzn1L++evrNM1zenn//t3x+YnIY4zVO6wY0mbmSywZv1wu5/PZ9CmlBE8wPp/P2dyI53mep5yzKgTs7iVpirlZycLW3Zz0ZkG4i9h21tx3Qp1XLNUFYT14tNtb4p3W3jbU0kDtvRJtjWJYH9azvm/K7fZthwFeI1YEpbmkAMzqWRdHhZJXp4xNqASddxSfGVcQwdgIjqDkSmBypBwDHchAiaeJ6Hw8jIcY5Dh4zmRpJHVLlvJzUCFBPmmiXC4XahH9FEI4WyIiYnKfC4cxiBnP4RgCRxYhEg4xsMjBoRIQQhh5FBrHIQicyYnIIaDAxEZGyIRpYGLm1/ysKcfh8Pz+aZpf8+trdP80hvMhpYteXs86q1MUkQvCrBlcVhYOLPlXHVA3Kcc4DlIQkRCHEAKzeyh2uQgTgRkiMYQwDNZNZJ3OexNWz5qKib+wqObnD+/KAXFZJxCVbTxZ7vg6r8TG7q6aCq0wE0BmiwtJob3ypJIOEXnj6Vxp18wqc9oauLb8bvMr1sr1f1oXzI2L9tVC8mWh6IX5vVkHLpoMSvm6U6LmbgRQdg+mIQSzFENgkFs6RA5k5pScICLjqHNWA5NkJeLB3cstWXflxRzcN7KX27PMhV8LPGY2ax4iAV7OKgtg5a7g4XCAOSwTeRAR4BDZJPyF2NJpOP/8h4/yf/3x8x/ff33ir0/vBzPjwwHhXdZk6RJNB5kGnhP/IIHd3VTdNbJIcHI9jGTG2dQz1JFL8AD4WXMRkDZbnrLP8Bxdg4Nd5HVOX6d5pmHCnHSiITzPpFrcLBwEQyZRDuHIUdyRc87JHH6IwiOCjDKmbAoTAuCAD+QhMOVskSywpvkE8zAmZ8uXAy+qohWrD9cB+495TURZqaWs+bEu8+pKbylF7NL1bbGlqpRspX/dzaFmxbUVqdio0q0ZQcTANWpUQ0g3ecbqUN0V8JX8b46UOwysn/cuiBUbu/U53HguLF9RDcZZ1+7LUqFtp4OhRU4HQ4clv90Rb/HA7ZFL+wktT7Q+cANAa5qmelkXzbFna5qsvazt+5rXBxA4lInKHqWamxuF4JFJnYqLEbMDPE2JGUOIPIzH4/H9h+d3796VFWCReMdhLPeB53m+nKcvLy/pPB3HkdyTFrHsKKtKgpc7GC0W/Yry9vprJeB7C4PdGal461B6Mx0VH4Ue+pgo+xsirWWAW2LbwnALD/VHnLW7VWcuQJo7nEtG0zVjW7sR3qqhLSd2f7ZfrQpuyYvbsmrlymr49oyMjv4r+uq8NeMjlE26Difd2c6NHl93pVbJVFpWWa2qYuP6ciLoNjzBXeCSATLA3DVHh2aJEFO72OyGMPx38z99ff0veD4MIZIGBgejPKeUTi8vAokcL+cLwZ6PT/PLl8vLy+FwuKiRm7OQvLvY8NOE/3ma/0dKf4J+Hc2gI+eDkLsfgwzmkV8FbDSaRHLXdKE8He3rE53GgCQ2s2QHyMU06pz5uAq6BWUbnFyFYYvFjiw7udTivPp7V6lb9twrMVTjpFrnaKRZR0K45bKtAd3t3X+z2lY+t0Nr2+n6rWR/Q9iO5R8A8xJmFnZNt9N+uzvG0sswDK3L1ZVfyi0PWvz+FpvI4G5Likhh5nJ/B2bG9s7AmWkOyJSJ9Z3YB+h//f2P//Xzh39PeH86D4wwHJz5JYhOM4GneTqfXg0UxwFzOn99IY4KOc/28ylrouni8zxYDmMcYYFsOD6PxycZBpKQBQa2KSW1kRBNZZ78PFFOR/rpT0/D8GL4v1/PZz7m8fhl0n/N/hVnDybqrhlSfBrZKECvuwOtpVqvdK2ktoSW9BLkb73otGCelrh9
3fJvibR7S8ksYGEzYy5GePFMqcQQOtFXqbdSV9tFSy1oZGB7Qkjr8QkRlYOBpeXifWmmZsWmrRl6s5dQgjl09Fd/FHlNzQkY1h07J8C5bA8VwJhEPZlBFQpP4oOWa108TZNbJrIYWcIACaYKg5pDiCBk5ELuSiW7N5ZFzjiODOScA9PhcDgMIQSJgZlZCHFYkqqzsIgMQ4wSCpKYy8qojEXLJbdqZBzHOGHmesdxKTgcDodDHkfjlFyhqk4kEgtBdIVbTenu7uaWcwZz8iUlfZFZ5XeMUW7vudX/x3HsJEspxW2yzmXNJvTh0/vhOF4uFxE5Ho+FoE+nE4BsllLSvMxaWRAy38jTSl7tDtBtR7GFxNv4JWWkt0d/dcHZyZoSDKYs8+rylZtD81seuGruUpbdIKjQ1au5CKvShWq+HsyCYozjOA7DkFIqQT6LZVBuxAKcux0vv9m26eSyE1pQK+SGEmOLCiKA9TjO7Hw+Px2OcRhUk7uX03Z3Fzcmevfh3e9/+/Th/XMMLOwEC5HJYSmbOYEh5LBsIFmvmzLcWUDl9v56CrGuY1fnnCVGl1pbHJaSzuwp58uc05zVYSA3t8LmshhPqlqYhZnJHEskgxVRizRxpmrOGzOIWTIxFrdYKUGBIQbA8pZfHpStAute+catsf2k0nNNid5Wbuu0hNdW2wVm9+09mDvOwq2+7yq3ddpqv760HbVdV0bb/arjgntM0SLzF4K31387DYvau7NJ/00s3QC829HSyjUBdLEjP3z48MMPP/z444/DMIyHGGMs6q+Uf/mXfzmdTqfTaZrSPM9TSmYgYVNfZR3RsiR2gNm0xFtEIzArBeLNCHw7Vdyb38ct3Hu7Be8x/bwRvFpsjX3fNuh3tmNanuoKbTZ6vguY7XRse7/XL/Z47XGn/VebL67ct0YRxxqmqAT0c3c4SmIfOFR1nrOZnWIAjENZli/bKEm93L0vXc0ym647vbq4LzGF2XC5XF5eXr5+/erDcYVlvXRh5DBjU1XkbEzkbqr10gRuNmV61+dfNkEdQnArxB601gquTna1/3dKoWv815RfKcZtzQ9+j+a7561y3CrK7sPafjsjy28vm1YEGIiWP4lKEHIi4hJbudRyz4RkiUzCEAOYYR+H8Q/P7//+tz/+8P7jYRzFMsGckeFuGsehHBsMcZxyOp+ml9N5miaHVMBUXS0BzpHjIMMYh8MwjmOMxIFYvBB3Np/TbJY0S04+XTRnQ0oGzyj5C3ia0teX17/8/NKn64CY53IKgL29zu7JloS2oomai2C7H2Ld9MUdFeYrU3bC0G+jjNZZpnWhuNtUS9JbK6gdDjULzkob5VVov78qzXU90z0xs8Al6sTSXf1oGA5MCrJkmSYWmxlk6sNIIBsCBZYhMMQsK9RnNSd2YSKhkoeQDIQxjAAC89NxZOY0zRLo/dOTmY5DCIFp2dIzARNDhEvKOpBBHYzizkZEZOoGNMtaInLP8OymquaWiSjGUSmEDBloGIZhoHNSUzeYG5NjV1pcJ5jg7bGYYrsgrDtY7fyVH/M8704brzcVyxlxXZU9Pz+PT4ditRwOh+onmVLK81wOeIASY10qqWytvVoqAXRPdkaK60gr86hqm5ep6vtioJeFmYiUILS+ZmioS5dKXy1TVZYwV24S3Fdg3NdAR+XWnOpyLFzSrKvWgC7lkwLyjW7oY8dRO1JqAhC3PMbMkaMZVJXdza4IKbsYIjJNZ1MNh0PZBTgOObr84dO7f/jDpw9P4RD8GCCMECSrp2wAlchXiuzmbMbMQuzMUADmarqGur0CbKVf0pzNTFPWZJpTzjmbJsWcNQnmeb7M0yWZAhAyJ3ciATMTuKRIAUCQMvIyFSBwnWsuSe/ZTU0zuYNZRMwmd2eHUAnjZTVI6W659+qeMm6FIG6lbddUqda5lt1Tq7jlgm2P22od23Z12mre7OTR2xZR9XPb5ETecuLuh60ac1+ce+99tS1e90TvjPcx/L+mlB4D0W1gz537vR2Ej9skImpy87SFmVPSerxQOPd4PP74449//OMfp2k6nV/+/Oc/v76+vr6+TtOUUtK57i6RLuKLPEOz6bLLK7he+aDn48FWv4Mii8qGUVEEvtnReIyct5DQ7of1x/YkuavWlW/aE28sfhvP4EpIvvS+5aPdvjoDaMuh90i0Y9UOk7Sx/7Yk930DvlPuzPiONKsoIZjB4aj+lkRUwrQTCUsJ7mhzzmY8zfM4RiNks2DmIJ2TffkSQuQYRQSqrplhcKibGcFInNT0cslfX16+vL6ckmpQA9lizKwKGkZuZpnMFgedss0KEgGv8bFKcLYifDoc/nrR8c2JaE3zbja3cqNXoxuH0rbZNwLwNym2xpXYhbwj8grwtkJ9SLfXgHHLI1feUb8+q6GBVhOfwDB3hhsVcyeJZzdhGjR5np6Y/uG3n//LP/7+Hz8///B0GNhd1QI7czb1xYdCs5oRsvrr+XK6zOYw83nKl8s8XfI8Z1fEOAyDPB2P4yEej2M8RgoONl3Dqufs8zTnjKSUZpumlGY95KxqE0lmmS19nfOXr6/n8xkcASYyEAAGbtbMHe/jljA6a7ObqRal7VFZKd0TAOVEqpjxa+NcPGOIFieme7SHDfnZneVle06zLR21VJe9rr67h61Uag36ij6/rnm8hGVb4r4QFUMwSIARPJtyNp1BIhnOY4whxBiEGe5aHBaZKIZgxduKAgDKpJYAxChCg4gchpEFQSAgFn86jsMQhijuXm6IljzmKWu5OF3WvMIsTCEES3N19SQw1mMQzTMhw7Vk8YYwU2RE82QOI+Y4BHKZZ8+eNUema1TVNcnUjX5aIysuqpep/bMuC8vCbzvH91QvEZXVYFluVQ/e8/kcxkhEZvby8jJNE6372dM8z/Os2QFWVebAzO6KDYm3AqWVQYW0usqdTKmlhdxvTeH2VPl4PH78+PH9+/cppZ9//rlIvXKwhmbjqutoIUW6WUXfDKFxe3Cg3PwREb2xAuHLWnoH8y0DVy5Y2qYry5U6y6kdkbATwdz5VnDHGM3sPOdpmjTn4rYrIh8O4RDD73/z6dP7Y6R5FHoexTRHZjaQsJdtXYc6u0McJTsi2bL1inJ3b50LGBstuS5V3ZXcUE8I1U1hBk/ISXHOeUqqBpcAHsnBXFy3aoI1YZJbbwSmErZGFUt4pMDl4ohndgQikAujZEoMbKwOc6d+SfOWwv3O8s1M3TDanVfd8y3BdMryLTZKO/u7rXVgbGXrtrylQjcoIrr3Ucsvu1q/ZfNvQrXl5c4E+WZp22mBIdD2bLA+WXC418IunruySxVUhHLzvC1lbw6rphMRVX99ff1v/+2/ldOS0/ml3Gde9k0XD5HiflP0oBuc1hABzIHXe+YAYozWbBFWMJjZvgel3YS2NIzb+dr9/O2cuLUJtt9+k4rutbzLpFcxu7lbtYWq0wgtBirB3KPVe23iltjuSZLdQpuVZPtwFwkd5Pc6Yi7etSXfj/naMACg+FGDiMiFGOQMAE6v03Q8HlMU9yK6OamllIbBzpc5SlBLOaU
SgkHMJUSDqmpOfrnkeZ4diILJzFhAZO6qbupk5KBiZa3Oq2ZmJatcjIE5w51WP8bimXIbz/tNy7lvvm21wFZe3avZnt5s+2p9lVtoH7tw/9uVqoJ7U6Shn/q7rVM+3w6k/n78qoRkp3LzgVBdnwmA85LWMi95pdx9isoOuNs5xZR+8/75P3789L///vejTSPMU8qaSAYnT+7JfDqd3D2ZznM+X+bzZU7JDJxmfb1MLy8v83kCMA7Hw1Genw+H49MwDONhCJHcVS1nN8+WUlZDSpazpdmmuRx/mIBn9YvPryRfVb9Oecoa4pByGZgQGYoJ7zt+3eVJt4dbkY+9fSXaWzrukvFS0xlkfiujiISui4mdeWzhaWHuNo53CaB92Hr0e7Mebj3gWpkWuqbru7pd0Skhg91s8q2+qGlOBCsXvxhiRI4IjqoeowgJu2rObsoOsAzDoNmXqDYEFpQ4pHA9jLFcx4ss43EkN9UUJMRAQyCUkK9wJmXyIBQDRyZzJ4eUzBlu+faInJ0ExKAYyJUclB1uZMpqntRIgtqcsqk5s7BEymWf2QCQA7enRvUobAk1W02cW8au9m4rqjqi2c5i7aIGmS0GCjNfLpd4Gaq1USqU5WKpQIHcr25RWzrr/q9vWxKsf9Yn1gQWw23x1Ym/2x0Zx/FwODw/P5cMgV++fGkP7ngNGGNmReHVxptFAl0vPS3Q3jAnMwcRJ35+Pn78+HEYhtNfv1TPVXfzsrlkoNuFx5a2O8yU0IIVaXVB6JZ8pf+SpqV4TcYY53lWS2XL4zxPAA6Hw9PBPzw9fXgaRxF2j0FGkSlNoeyFiBgFd0pq5ELCITAzyRLEqSwWKay5SQxeUsMTERmZKYxdy0Vcc3cjczZlh/A052mesxlYWEbnSE6E5Dnn7DKQiEAkCHMZoCmrsCz7HotxAiMCC0xLLJ/idUGHw0FnHR2jYYBnNXMqiOyIpFPVby9bQXzPomr15a8vnSag+9bery+VDn9lC+2f9XenaXY/p6bstrlVM98s7XTseoq2HRVbxNe0ZrvY3u1687AfONHVlaXWr3KmeC7EGFNK//Iv//L169etNWBmUaKZzasLukjkIIH5MB7bE0Kse1vTNJWsRXVJedWne1G17k381hRAQ5Nb4uyb/RbTPaCZbeVONaDhOzygq73n7XU7a+6LtlcYKlQtEm6tnH2W9PuLrg6fHRofjH3bxbbyN/m3wrxCfu23VT2Fjqiktl6DFJRbeJVuy7KriFol+nqeD4cLy+HIFEOk6GLmlqfkL6fLYRhcswemcRhCAItQEObZs+rs7mGIw/EQk2syJyGwG5m6l3RvkBAcAhAtx4aazAzkx2EMnMkIWBLkkpo5ys3RXyDt7+G5xVJH2C0XVG+j7pPtFOB23jtQu5bfOLO4JbNdOnxceGOfVDPmgeJ7e9kq0EUurX+ggb/kH17yrGLZPi6rxJnz4IQ5R9U/PD395x9//A+fPn4muCVJUJrNPcHc6ZI1W4n5QGm2ry+nl9dLNmTznOw0za+vr+fzmYjeHZ8+fHh+//7d4XCQeAghSBQQuSMlywozOl3UFDnnS0pp1pyzO5HQnHw2mwwnS18u6cvlMoM5DApf7kBCyg5eGWjrMdHw3Y0SbH+3ViLurI/a1rYunUTkvnP2uy7VbrregoeGqB7P7+6Mt8fO3pTtwqTUD92Ks4W74oLWY0MArnnNLV4kO9Vr0w6GgxDUNSvNGUQaPAlxECJxNxCW46MQAsHZWUSci23tDtXMYwjM7FBiHIYBRGnKarOrWwgMgqsDrlkVXK5fFX9RuLtbmmciK7F3gHrdEeUmv81MhUCoXDVV5zm7IswZ55STknEo53MDRDAtW0aOcqd8CYdQsdzMHxH5Hq+2E9CVbiO5lnLncJ7neZ4LlZS9unIrL+ccY3x+fn737l3O+fX1NeesxfOYuFz/Xb/q98lwK8VaMgVAdD0KbmnFr155hSScCO5Wj8LrgrCTZSUkA4Byta+4Vrb91p28XSPmnuDrTrDLqiznfDqdlgyBdaQ3ezA33LuVBWgrrfihZlWMEgZpwQmIKAxxjEMJkWqeI7Mw55xKXNlIrk+DqqpJiIMImRnM2dSdhdmZsyI7SkuB0pKa0sjduFBdZUC31TdgYU+dVXUJ0DrrnNRS1mygMFzO0+s0ZxCF4BQcwUmApO7lPE8k0hKBCSEEyyYCXrIJFnnK7rnMNbGLELOEwBAaKUwZjByIAyk54MrONZnPW6TYgwp1gto6dH9tZrexvDobd6vgt8TW0UBbuW5etJDUBtvf3be79keHmSqjWzgfjLRrvxvp9+Yhb3cKsWG3reX0zQl9Y9m2I8sFln1D/1c2jhVXZQWoqvOcDodyQqjn87n6/C8n7Uuxw2Fcwq+FwFzCoEvZSfdy8cBU19Q47u4pK26shPWqghcq+ubMbkfR2RzY0O1u5e/Fm98aOl37v6x0pkVRkEVGlFAZrgaQuzGus79qHcDhey7Q3bjuVfg1kN8r25bf3teVtW+f1N9EIK5BcYDFz628XeI9L2iBL2mIJGRINlgQZ7iJE4OiejqdLqp+GIKBQYwQwSGlJJByoEfC4zjGOKm/GhxMIMrZsptb2a4koFxpMYCrByA7hmEQeWUtbnnFfRTUyLHtwH996SgTDSXYbZiZFZ83d6UeN4VGqlNj/b5FhW3p4ZeVq1V5uxP9QDe1wLdCoJUV289r/YYWy5cMkF9TXjWIIiMi8sSOg9DffXr/f/z4h//jH/7w+6fRvvw0DoRg5nmGsYomvUzLtSyDny7Tl9PlcplBUdUvc/7zn/41Z5eA9++fPn/++P7D83GIIiQxEBFKtHN3d2RFzjZd8pS0BPdSNyIKIbDI+WzJcXFckp6m+TKniYSsjGJFKYjIiURqiuo9lOLWLKxj76q1H1bk1A9LzM/2yUJFJZNd69dWsPqQWjry8+ZqTAtDB39b/5690Y6ufmVmYdt6KdeIHbfjL3+WQB1lOL7cNWIBAIapO5I6ppyycZqYWYR4pCAhMDFKYAsC3KHFEhAhYgEojkcADiWnwEQwJgqjEHsMFEUkEPuymFZVtaRKDLiX5PVmRt7khFgPvpftYdPsrsRCLiA3IBvNWf/y5fTldJlTNhqIiqHugRx54msovJtS0LqkffUrDqs06SZm+y3uuMx1k1dP0rykQT+OIjKO47t37wCUFaOqLtvYZsVZtEjCrUDcNewqWbfU09JTvYSJW5aoW7y4XQq6ezG8ajKJEELxq2yZrTa+CyRw18AtUZ6KCVc243POZfG5xEPixXG8EELbDBHxeuGtJDltuRfAdRHZ+DVdV7lrGCh3OCGEEMchTbMTeEmLjRhjICai8zR/eX3910Gehw+fn54cec7pMB7hRqpuRkJsJEYxhiEGcWNmuDmzmxUH9Epo5fpiKTnnrBkmsDVfqLvBlaEOEzrN6WWaZxNIMKWUnXi5MkBEAC9ZJSgU1bMihMlRrhcuuxVkS5qJIUQJIQRzNjJiZzjDhVngrJ5hCNgt99TkPVuhJcgtN20babmvrdnd3cUt4XUt1Bmvb1
sBuDsEb5ZwW/C2vTS7DVcuq+uETpfgob7o1H+dvhYP99DegrTFxrZx7Amxrv727Tc/QTPRyxC8l0UdPLjFD/YQfm/G3R1YYikRgVZX/LL2c9cqbEMI4zgy8zSlEIZxHEWkeIOYWU465cWt1Ay6cp+7jyW22a01tvy+h4g7pVMo9962A3+A7Xuf+2YnomKsGwLdxr67t8V5/eoOtLSauWjm6x7k93p5TFrbOlsabhlkyywP5FKHqMfAdLRaH27lRkGz0GJsdEzpqy4TYY7MS7YvxEFcghI5C5iyz1k9EDv4MiWQHA6HMAxgMUc291khyDnnObEcjsfD4XBZAVgczMxAJMxBWNynYlPfis2StQwN5Msyd0/ivWmyutJ6DOGWIHdLK6ZKaVmv1qlQdQ3Wb7f9fhfYf6vSKj5q7Cvc4fQq/DvIHyBh+aplL2elxc+neBDjGlBAmcoD/TAMf3z+/L99/vG//PZ3v39+Ovic8qSBYDRZmt1NT2mm6ZJhdNKv8zxfLmmeclZSTVPKKek8ewh49+75hx8+ff7h/fF4YIG7DhGmrprVkZPNc7pc5nmeL+d8SfM0Teq5HD2oppznZIfkeEnz18t0mucMclBSY4kAAHYvJs8SKcfRJ2SvSK4/0NDbPZmwvcu3/mAsMaRvtBIzXxeoTcwYwtXC3CWDbo+j0kY7m1thUt+2XipbMmgZs1QL7R0q3PJMHXCBoGydBvbVVje38hwALGUXESZ2IrCDDESQrDQnH5INIUgMMYprmufZxGwJrJQZgdgFBNCH5+fL5ZKzEpMw1GYhjoNM53MWRHWHuJRNDNWcUJwUyIQhEuIg5CgX7oiIKXiRpc5wJkiMEVwuq5ICc9bXi3496b/8+cuXl3lWyBglxjllBgFs6yQb1stdDrQCpVgw150HL8hzv67E3Ptj2OtffrNga2Z6kf5EbEaqWnKBhhAOa8ASrM6i5eSQX15yzm40DCGEYIvjibbzWDsq51ct9bd16u/WC7REOq1v65lJJdmaG6MIppINqZSySGvDQ9czTF5D37YibIGE4HqDlhZRqsq0BMasly2LbS0ixGwG1f7DZVzXPLCdAbf0FcNYKX8ZL5mTCubiUGFmhmVXMoQwnS8hBAJpmgEcxmEYhkD8cpm+nPMgpx8+PKuMZ4OZfnr/bnr5knOCGQcQSYREskPxD2UGnIisnDV4CRqk7q5uuRw+pjRrVtUYDyKC7M4UjTUnFrDionTJepphDApSxCsHMjcncoYXP2RH4GUeyxSUfWY1JygzK0p+QjCzEBXCM0dUDpSFOAoG90E8u/0C7fng5LxSpt+6zu8q6a083SrOzrzYftIJRzRicBfI1typv98CYW3cmz2U3S7uFW9K2wXfglErfNNDoZP8bcv+tjVh/XyHWzcfdWBXHPqdcO1dm/emuwMYm0kv4aDdvfiEz/NcWPh0OsUYh2EYx+u9eiJ6eve+7D2VGDOmbmbJdA07vs5+XYeDqElmU/VxjPHR4v5haQWUbYIP3UPIN9vE7WqwQ+CWfX4B2G2DtYty3Fre7sY2eGPxPTPoe5vajn0L/N+kPJ61Vj8Lkfui92pFhQuWsAKDhFD2MZ3d5uSejJSCCxMHCWOMomrJs7qzRJLohGRqs40kQjw7VJUDHY/j++fn4ziwXsyXQwwQnAAJFALAblSyZ4Cp7BvW7dRlQkG07iniVvD+AslWSl3bVPJo5Vg3ZR0BvJGi6NbOxmp/d4zwuJHdAf6CUe8C077F7Y5MBydt0q5sv91Fi7dbIYsPI+BqZAwmcoKRO8gFYObo+O1h/I+///E//fj7H4YDzWf1FIdwzq8QOnmek5qHPJlewJCTTS9fX+c5Mwd4eDmdT6eLqj49jYfD8PHTu0+fP7x//xwiSpjTgTkhZU2aPCed5zRdzufLnLPN86yqIhyHQIxpmi6XC8nzpHqa8tfLPLkhjsRhOeq5KesVXHYiVNSaWTkOXfTjcgBBWOx6X8+ubvBJ60lg6xa3QS+vp0Xui3sIHBkNUZkZ3xqZ2wldpmmtUF3rt6q/n9mNDVAN2tpCKz/L29CBUiNGppTKAmDdTyUqdjb6NSVAzJwJvESaAcyJAGcwFOwm05xFeBhCVnfDOBxDCEMM7m45u6YYhYKA+HI5uXsIzOSAm6uaIukwBmawwDxPr7OIDMMQQhjGBWAmYljOmUHMATBTaJ7d4ShBQdiI3YRFkkl2V+A8X/7Xn778j7++zM4yPB8CEoIaogSPMmueZ2ciZyprwsWJxReXTqxJ/K6GV5ObsmK53pHoCAuAqbZ7AHV1ZGYxRqx5J7GayMw8juP5fC65knPOx+NxmqYSd1REfBUKzNcMJ5UIqofYOI62RihdZnYlgwozNuK43SGuT1rbvZBQQU5LwViXHFsadXfgmgemViAiYPFcrdqlEvEyRuFpmshRsneUVS4zp5TcjFkClRwSYmvvZfEc1sQkZsslwBseK5hcMdO6UYkIQVQ1mzLL8XiMMeacX88nVxPGOMZDHErCejLXwCz8Mvt///PLGId/+t2n4+Hwr19fovn753cMT5dU3JcFiGSZqLi8hrAEEwKsLHerK6w7JVMiOh6PnmDCcXyWNP/8+jWDlDC5/fmnn18vszEykKfZfOQg2Wq+emZmd3Ja0r0QscTBzZPm5bibaE45zjpIeDoehyHOl4u6RY5weFYij0JiJEaMXNavYNnKqXsqtpVTrVSphFHv0NY6leqo2TzbKuCWgMuH1TTvPuyIqrbQboW0NNwOyjc7yu3nLZ1XMG73iW6Waq10bsQ933LKzRg79LrfLMpbaNty0876c4v/7dyh4dA6qA5jLUqZubj3wHswdqBS870Tzlb6tWB0ZNY1WOTnPKXa1DAMpbUQQiWGRRqEUG8CF1FQ/BpUdXp5MbOSZKL0W3Zp1jtLaziZNUSNp8x8BaaIx5bSvjkjb3y7S/MtZlp+wWZ+W0g67vONdVJXoe3Day/YL52yKJgvuYjK9YeCZKxR77ZCAHtM0eFwO8BdOLccWuV5104t1SCrkwhA6EYtbrvrO7XrhmkdoK2eCymlQjarZWmmS9RxQ0mnK2VHVSQE4qWewxZnoASml/Ocpkugp+f4/PTufT7JdH6VOB4kCvnpciGid8cI5wwlcwAl6LdSmIqLBzm5aZ4lDJFlSV+llsjCyMzBnNXcvUhOTnkOIR6PT3Y6Z89xiCsmr5PYzlR1uW9ns6vWTVPHMnU6Wrndclm3rVxbaBeQW6Lq6KTMSKuPqhi/R+ZVXnVWUDfeDi0dMN2g2sotF7d8XRcGtWbRla0nVzs0Iir2HjXxEVQ1w0QEIFWFZxEhwNwPx0HnSXMWcjJjIEQ5xPE//v43f//b3/2H3/z4w+EYVUMQhieelcM5TxdTNWhSu5ieHTm98kwygnGZc07pp5++TBOen8PT0+GH33z+8cfP794fQiRmW4IWpBQjmcl0ejmfZ1UnohjCNL2yW4wSIjswTZNZPhyGkw9fX7/8fJmUGCLZPWsmiRzE3cm87NVZWeI1VnSLPVmjgrVMXerkZh470YrVNmhn+YbND
e4oGv9yufAam7BIlIUv9GrJtOQRQqhTXK3TNltBJ5C35LSGHbkh15Y76udlpV0ONsKuH2DbbkvQvoTaVyIyghc0r2lVS2QVq2sHh2fMcA6qANEUo5iD4cweaTEcywjNM1IR0LG4PwgxyIJzCDJEcdcoVFIXVCELwCwzc+AgUpILFkQoUbnvTg4uMWWcBC7JhGXMzq/n+S8/5T//dfpypuwHdSR3NTfA2YtLRgRrWfi5Gxwtm9lNhHcKXFwmyk3RTrtQc57WzU09dussHr76JV5lRF0GF8KqcQtoswqt7bTh+Kkp3gQa6rYiWmKoNFBN87aa3+4xtG87DLTmePcJEQF3t2mJboFZ7in4+XJ+enqKMXjOUcIwDDnnl5eXQtyL7iqOzUzMoeTiJPYldiuhoq6q5KssJmrj37QjUlVhDiEMcHcys9PpVFmXec0YWa5/kDsPYXzCGH+6pP/2f//pcpr+6Q+f/vDuHdukBJsv5aKsSMzq6Xymp8OaQ1bdCsEvPqLlbl/O2ZkOh0PKWdXJl0uNBlJiNZuSfjlPl+yzwQgggUu590iuxFKc83NWg8E1SUjZMBIROZMZCG4lepS7GpxJwhCELStMk2bN5r4EuiEHo5zGOzv1G3Ob0lLaFeH3qzWUsNBqNU/bnZQt8bRzt222VQCd3Ove3iHLG/3RNrvl/ds/b9Jj1GJWUtpSbRL3vKX3RnfttOmtBXJrkC0/rB/jKlf3o5m1w9ziqu1i+eT+ILqAKm8a7ZsLr6UK3oWd6YaWyjqQmT98+FCYa57nnLPqslmmdP2WiIiJr04+UluuKWjI3PwGD7vivUfFnRvU29JWe0Dz3yxbif3L2ukFfdN+K+1bKi3u/UWmbWFoiYqZdc81y2/vnLd/fnO838RwCw/t3f7YbaSttitwti1Qc0PMlsQkFmWxUEcRjiHIUOjTzNjJ3C1nqJmZq7n7KTtpcsGcVN1AMQwRNuY5ufuUFZiiSB7jOIbAo06XYrXJcnXTI2EM8jREd4ZQQAhmxOxMLqzqRgQRdWhWMwZYJOZpLiZEiRJfB7uL3hYz3Xzdm7W2nVZTbDlld4K2k9j+Wb/qesEauwG3a8uOuh4rrMcEuf12d1AdVFs6rKh+oHp2e+8wTyIoi96KTzLAptNFmEZBAIQwRP708d3Hjx//82/ffX5+/3EQ0ZRzdjMSU/jXNGf2yXB6vUwvSU+GCWI8v/M0z6fTeZpmTRnAp0/jbz5//t3vfnx+Pr57fxhGIVKHlqs/TCWUHuIQYtZ8mlLK85zMTEQoiARSK4tYU/hLyifNs2MmZGKDOxEI2RTm9aaQkcGWCJAdbgtiu7RVtUgTQXpp6nbT4d50N8QjRJ3dK0vgqEaSdwqi7JS181Wq1cVhfVIJoyXXquBqhdp7JzzLOrD8iDGG9vSjtNI6Ed1jb3d3ukJmpkR8HVIxPkHJfTLyZNlUnSSweRgjD7jqyMAAyHyJNmmUWQBwSfpNbr54BV4HieDMxdPP5/l0vXcrwkQOYfZ5ymAnMEBOTI6scFjyER4uM//15fIv/zr/6af0Zabk45zdXNWt5O52UAAJi0lQ1dky3IlvTtjLAviK/WLP0X6QnhIMpiPEgqo6tkoftA64PQkpE1ZOAsuWagk5UxbJRbvEGAmyhFciMbPmBvuNrVZ3Slo5WyHcmi8dFXavWmrrBril2q3a6IRd+xy33qQldjARffz48XK5/PWvr5Hx6bcfyzEdMy8XCK+n+deMIwBYsGy4Uj3bDO2m2mI+3u7btSJgpTSOVJIyoPhwjuMI8xC4tO8wUnJ3PhxIwiXZeU4TY4zz+/d6jPIEgZiQHDgMwyGEwNnc6WIWQyCilIqDBLnhMk/zPIuImilIhIdxpKyn0+kwxvM8TTlNKWejDEnul4TXSS8Z2QBhR8lu7FBjZvagxaaAuUNVk6kRwOQkYMua3JA9u/uTi5b7giQOVsumOWdzKSlMjdiJEYSYWR6bvRs6xJ3w3221rRRqVWD9v13wtMTTPcTGuLz3qjbSqeq203uN7FoS1IiObalSe1fkdjW3f1572ZxYtvjBrRAArneT7rXciab2+RYD7Sf3mPqbo9sC8wvawQYJ1DjodqNw93L8nlIuB4NXSci7A+EazruVmUQUWcohfLuF342o7Zr2bEH6lm13j7bfXrby3Nf92V/WYFd2ISyYrLeg2yPEtg41Jeed+/sd6rBHkw8A63j5gepp1dbyY7Nn9IDOadmVht+6EtDthqyvSxGRMBTPNSkH1wOJYNWAaqrZTLUElC6KPauTmzgumqak74YhhkFGqDqpu5c4buUqR+QgsCAsgRhsZA7md+P4w7unv075ktRUy1ZmdsvqGjDNxq5wS9nTnF1zweB5SlkXPetMS3IrumIVt3TVDvzB7OxWaKXiI1TvKYuOtHZtmHZCq7Ts7rDca7NtAbeUuf1qy+O7An9Xiz0eflfavlq7qyx3W6XmZOpLVgnQElZf3IYxIGdSFcfTiB8/Pf/x73//+x9//KPYEMYBrnlOKS9pksm+XjIizcm+fL28/HTCzBExuJxO+XQ6pXnOObvi+Xn8+z/8/vPnj58+fxiCDEMgdiIu17rM5HSaUkrTlM5TOp/nl9fz5TJndQohhnENOZMZUR2a05fL5ZTyxUxFMkHBRmCCqVGJULKk8HIAbi7c4/abemq7l9rpjlaGtwRWH5Ywgc2fjPXaUdVKdepLWT1QiG5NCLvdIKt6trzqJOquPCwPy0l4V8fMgjeXWCoz1GRN7aiWnurJXrP3WQ7l2gEzczFCkzIr1N3dTmNmhkh0sIEdJaNgIIIbMfka4QNEcHfV5CVmkUsgymaSc5ABYDgTMa0YUdUMcvfIxedH5mRwBrGTEMicoG6mMx3nZD99vfzpL+mnF7ycw+RDZs6ejJTFLKesyk7CQYSTibuLUsZVJzj2jFS6ZpmsE9maIK2uvZLgbUqQbjuZ1hW8ry43RcYXnM/znFKapklVy0IxhMAUVjezssixHs71h99uSxRoS1TSWlqBtez4uxNVgkMNXLwl0C0htuTUKdROUq9s4CW6WiVRWXH18vIyDMPz83EQGcdxnud5vhQjbNnUIQJo9QRfcrvjCjmV+KjFU6mdqaIKCqr91jphZgbXBCQl7UQ5ZwghkEOEQhBmritINs05k0ONHf7Xi/9//vx6fjn//acjfxg/H0eGqwR1UAhDHC+XizUIrPkn1YyYKcghDsRsBHGEIeZZpzSd5jwpzEnGp8g+ajz96SUlqAEEK0FMCcSrCgc5la0Wd0I2nXNidWZycMljaUbqmjTMSeeUA7E6zGCAwl21eicKiBlBWDee+1sa2KXDSgktFXU10YgjX3cKthV2lfcOt96SZX3Y6oatnuhkKH3Lvtl2fa9+S//bXrYfvvHE6XFf3/xqq/N2cdLNYBViZmZMBHzXIqObxG9ieLcUFdaqzwJVmudq7ZUfZXFyOp3RzO8VgGWXo85Ogce6E9QbzzTsCMAqSdrhEC03zruHbxzj22tuy73Z/AXt3GuiBe5qP6hVVDIIJamCu1AT3dS93IWl+2Ok
O1cf79Xfyord+p3Y6dQT0U6Qs119d/2Tbh6uYFyX36VmCWI0DAF5NoOtii+vobmXMZi5GgB2EBGDlImNFJZmvcxpPsQgTCIhBJi7a4mxnnO+pBlADMLMTMIGzmCi5yi/eX73P356/XqZNXvZoVe3SXPyUGhf0zwl1zlBL46c3KYpqaqhhMGu5KQdElrNjkYa74r37qtdUdw+vzVOeuu2VGtdkd9CKtWVqW2tA2a3bDVXW2p3W05vh7PVgPfg3FJdp0HuKa86RmouTBIc7AImWCAwWXSHqzjej/j9Dx/+8Q+/+7sfP3/88HT4+ldKph5KMK2cbJpytnROmYTP8/RySklZQsw8ZqOvX386ny8h8OFwCEy//eHzjz/+5uk4BnEJANQVTlZckdyRjV/P6eX0Ok3zZUrTnDIgQyQWRFGCmTuJM6tjmtJLShezRKTkSnAiKzKJrDBIydzCRF5WIXt5kB4I0ro5WP3IquLoMN/ivCWhonyIvOqddiKqnVlPaOr5VndKURoqUfrrGUZtqgJWu+766obZMkXlFABXaNrjxZYh20ZbPiGisvgGzNgJWGIWlm4IxSXIiLILNAN+mZIIicgYcgiR3OHldqZi+dBYihclgUhczLHa3CHnbAqFqzrDRBigMEResrWRuyfTZM6eicg5AMIiBHHiQnFfFV+/pj/95fSXL3qaY+bBDNmhxCzOlIv0NDWJPHAQYivYL0Eli0fwJhZ2QRejp6yKyZpuofuwntBVbOP2zkYZfvXYNLMStLOuM4vIKzEPcs7CtHZrZiZyIyg7MXFz+HZLWC3d0O25cffJluC8iUHX0g9tfN/bIWMrgsmEbn3i13QL5bZPTtOsy226lKavX78SheIH5oA7+ZI3cyF9s3UihKsrTkvSBQMEzPNcEVLZT1jKFaJ1CFxUegle70v07ZKidxEil9PXw3ikYSSSc8r/8tPX0+n0rxEh/N3hcHh6OkqgTLCcTW0mK00BJiEafJ6TuYU4PodgBJHIIrPm8/kyzZO7v7z8fJmSEcXhicIY+DBdEs42J8sGdYaqASTMDGHTLE4OAkiEF3LW7KfzFAgDszABAW7qUOeUbUo+Jz8MxGEgh1suZykrkTjgzOAt9d8hj06NbaXNrjLbPrS9ABv3iPMeSLXHlgHv0XYHw1ZCtg+3NbfPd4FvAbtXpx1792PbxbapOl7GvmTY1n8ANprMHK2acV9Yz4C3LAvJb5cRv6K0wnYztKu+LNzauHyvosZ5kXiyhCC4weE6OrotAHJK3jizVMHiq1XxmMzeMq7d39+coK5sF0h/q7PBb/bbIodW5/xWX6AZTvf8F5e3oLrtomXYb3bd6bib7qifa3cnoMTcjjEurivMwzDEKFkVUFebcy5qd14XhOVGzHLB20FEBriMEHe3Oad5nlOKzoMwD8NADnf1rDnnOadRxcSUiNwGAsxhzqSj8Mfj8cPh8NPpomZCJCwGOMGESMWyXpJNydQSqVmeTppS4++rbkywuzkpH6FrF7d0G8+2/XDLd9tvdwXg9kdtp61Z+aI90d1qpXtgf1O831MElWzuDepBs10X2BMvnSCqUyCF/LxYSMoGIRuIfEqfDvybD+/+8Pn9H3748MP756fA4fIayJJq1lktJMU052lKU0pZZzVLKWVjiiFzfJ3z63mav56gPgzD89P7d8/j73784dPHZ5Adh2EYgwjlnOc5l3j0pjjP+fUyfznN8zxndXVAgscwDEcDzdnUDCQAzppfk89qJW6CEoHJyp6d2XomQIRlCwZOWwG3i+3H6L0nutcJlbLPvumCSnTASnj3GvfbvQNvjkPsNkT/diydtCx+tn5rz5dvy7JzdaNbPqESVKYOr4JCzSZNx71XJ0Yr6RbN3cmcY4DA3Ut60nJUQETqgQBygeolO50nuDG5MAeWWJa8nlnAAph7ic9JxAwPwk5UAoAIk5KzuJNmaEmXCTHVciQUQiASV1O1rM4cinVPELDAKLtp9r98TX/96fVff55ezzJjdIkOqGVmds4o0RSZ3VTcycHMEcHdTQFVww411LmxchZ5a4s0tlFLH2gR3s66r5dHK/eaWfGHLH8Wr8gyx+U0TETO53M5TSoMYGbA9bbVLt23xN1cZdQW4E6AbgmxtLrtqKOoWlon1fpkl75LyTmXez5YzqKXuz3Px/H19XWI8uPvfvfx48fiPTsMwyVZOa8zdzMELjlKOBSXXbqqmVLa3utDXXeAWiOpgKxmQNkiYAN7zT9W9CMT05I4orgQH0g9n7MZwjEOB2aePP98vvzzX77CNef823fHT++OLOOss81+FE05sVuxsFWdCCJCIeSczT1rPk+Xl5eXaZ6ZmQTDcQjjEcPzxcOXk359ufyvf/2SlcylnAm6mcCEiMhUASZnd4eRg6CG2TMuOgojhMACuDIcTE5Jfc6akrqTSCiR9tWQtd2OKuZdJhiw74vfGa8tC1QivEcDW0LqKm+/bUXZfdl9d+3kGwvv3uet7N7W6ST7bmtbSFqE7Iqae4hyv4Y73vDp/uKha6gVU/fa2YoyWndSOsiZeTdhD9BfICS/AeUBYt9YykVrvb1D6FdTcvmxCj2/cc3wklwNzJxuLz9foWrolpZ93OzuMUZrZqx1D/FGgW6nezv2x+W77Jg3NvWYB7+7zdsxthqh5crHHEr8iL8eEHlXtsvdLdq/OaJ2UK3KeNA7NWsbum480TAMwzCU1CbljLqkHPaUFpcr3GBs3bMoncLL/+5GURhuNCOfpymlwYcIoiFEV5tn5JzdXFNYlB2h5DIK7OTKoKMMn47h09PTn76+TvnCCO6aUjrn+ajxCEmKpMjmrkxOySylTBSo+F1hSVQAlDXhlSk6Ed0i+THS7s1Iq6k7gdYpkbYmbim8g61rqu5NVMnW9fhGJdVJ/k6YPya2t7BhJ3txQ11XeHCL4R3RbQ5zJ4OD1NhxGOU5xh9/+/n3n97//Q8fPh7j6DlaHlIWQhg4qSXNl5SmhMtk8yV5dk2ec1I3jqJDfNH859PXv768/JgwjsPx8PT8/Pz58/tPnz8cjoPrzGLCxsQAVFVnnZKa2b9+vZzO+ZJcjSmwDAOBjZjGUdVUU4I4OCc/TX6ekdxKLB1ncnfwcjZlRGRwWu7kk8OWfC43Fh32aLUtdV+AmhsH7jdBENuyzjIT9Scou4eT3Zxi1RGdvKqTWO86tlIF6/lePb2okJdo2Bvwdk7Cy/+hvdjdSa6qStuBmfkSoLWcGuF6Llkwu+SGQyYiJlbiQGIwgOacCQbPYsaO90/HEALc4STCQWCWAdiSS76AsSyHAJgqkQgXN2goQ2iJTuYBWO+KFW3NzA4BYCDPPuU0Tzml9NPP+vKql8RGB6ID2QB2RjIQHFknd4osCMwgT4kDE9HAZKlMlV572ZvO1vcXGw7cKr9av22nEys10Ovi9ZSSrffyy3wvsXauc3dtp7pMbLuuE9cewdUj8k5+dSeHLY36WuoF3G9K+U68tiTU4dTcKh+amTuVIHWn16+fPn36z//bf/rxhx/++f/+7//8z/9slg+Hw5TnsiDUxdG
LSRhYDpCLN3kpZWeaKLbjKr9LBBper262qPPGpHN4jesTVpKo52blltG7gc+zTnohinx4jnH0NKWc/9fPL56m/4e7/1pyZEkSBUElZu4gEZHssCJdTWR2dvZp//8TVlZkP2BuyzS71VV1aGZEAHA3M1XdB3U3GNyByMhT1XfujMmRPAjA3aiacjI8P32833774d1+u5sGygMACKhJMXH3ADRCFctFh5xyzlmKeVQq4b7bZ7WCcRR9Phx/ehx/+vnxl18+iZERMDEQihQid9TXuXyoqZqXzTAwAUtFQRFMI/EUi01ISJb0NKbH52MgjAikRSXnnEXOOwloWA25r1YSX57zhUQB14hZ/X5B4VpAWhO/zzKC0AAhXN6F1zy/WMItunL1lbYtbEfrla7neX2GlyO0aGH9sLky89qsrn6J1ySZSizq1tXviUi0oS+vMBJeZbw+yx5dnWptdtkqLsRGpep00MxcGmx8cpbTq5PEZiCYGXRmBjjzDXXmVSCky4zTVwH+Netdn+wXtav2wF+xz7eaIxa4LTJVrA5NltH1M/5q7aR9pp3tZzdhQW5q/3gD/hcdngkoXumqfrO+ZTSbJRZz9v13yu5R6KpqJigzHwlTgjo9w5t6j76zXuPBY/4ErOYUyF2kwDFEIgpIhcxqKGFJoe8VdOaCAcy6wLs+vNnvt/HT4zCAlmJ6GofD8bgJFIzEi29ZMCgIiMDEkTkwAWIxc4HQiM4ZBW+xAV8Eq2tkvmCT4BKKcKVBewGe1+yQP18d9tbQtRhlvdKr6/0sdF2lI3gp2a5B69ZOLsBswbpAo5GZHlAxM1JAAEbcxfDh4eGr+7t/+u037/ebd5vI+SjHE6XEASLToZRjSsfRkoRUeDhJPgkWQNE0iliR3o5JfszHn4bDAfR33fb9+3cf3t2/edi/fbvfbHpEJUYwyVnNLKWcBhnGPI4p53xKoBgpGnfYbTf9ZkNEWeVwGoqZACvRmORwGJ6fh+NYMgUF85pshqg4UUBf4ow2FGbVEiLBlyO6qi5cbP6Lp8C1fvlVdx64VKEuCHpLthY8SUtZFtCiM+O6UGq0o9ulTNhKeYgYqk8qXILRYk6XOHQOhgaZ84mrZ/s8z9gT2xOZkddt8HImpWhSGRAIdBPDJkQfLsbYRcoZKAQAQwIiMDM1VZVSNOcMigiRIniMHAIj0abf+GQmv14jmHaKAZxTkZJ1SOl0HFMqxyEkReKesRfrVNkQuSMRKWUoWRA0MiMBipZSMHp1e+x02itAZKSspd3l2qpv54LY1A+t7GFmpMbMDqeIkwOxzek5zAwNVM3NPR0Hdxl17G9m7nOic2vPakH8FrC7UDDoWRG+BCNsmHW81HciopvHWrX3Ggm2f65dlcwMQNu6F+efALz2hpM0M+vi5v7+frfbffW//i/v37/fbfv//m//9q//8i+bPvb99i9/+X5798Czy6iq4uyCNgXRzgJh5VO5uZPMTExm5m7orSjou2rTPkz5XXUuCeAOP2aGpjQpcU1AUVFl7Lnn0I9Az8/PRQ6BcUeQlY6pQB6Oh+fT6fTu4c3d3d3d3YOHgwJASklVY4yxYzMbx7GIpJRO48DMd3d33EUR0ePjKY/Pw/jxOf/4nD+dZDiJAakZIZEHYYMRA6KhKTMbgoGpioLS7OipqgqYS1FSRKPAhJ7mHErOT09PKKVniAiBSaSoXhX9bga2Xb0LcHl3rgLPLbitgL5Wp62HhktyuPjQ/uuRpYsh1hNb/LRYVHsRWnr8Ah2aMOoNNdO6VYy9mD/Rdc3l1S/Xi7o1sTqTVphpd3IudXNRRffW5JdD/M3EkHNrNaAwU1wz0zkotyWo7TMI5xTkZsZTyl9tHX4qnNiZu5r0euM4ahMhVgtalNkD8OqeXJLXlzZtcXfgxql9trVg2X545ZG9fojFKXiSV2hYed+3Rbr/2iodrB0uVl137OXdWFyTq7io/eYW9K6RxvrdyxeuzgSdjjuSn1WTSDQHtePEVKmqmAFAiFSRKyISAwMiYjEiBFRUUZctSynBVIkRMYQQJJiImaSUBoR+G4KqGKigaAYzJGGOd7t9HwMjAqIKeLrdIaetsXFEiEDqNwA5BLTAgYIiipMB55HQrgPwLeR2q7UP4zUBrN1MB6RzZsEGsF8YtFpasBG9cJV2pZ38C/j5lbdmvTmLb9b9tJNZTGzxbsU5i+mtr2E7EBF5PGoADERv7vbffvXhd19/iGRsajlhlmjGhFSyJPlx/DRmzRYUN0lCzpBS4Wwgmg7DqHkc8RHLT3l8ZrANb2H3/v1XX314s9uG+4dNx6qSADUEdqvGOI7jKMOQjsdxGIanHFyhhgwRqNtuQhej2C+Ph1RMkQ0wFX0+jk+nsRSVXj37uto5QZq2Ovy5EDdcKp0/S/i8VWe9dj8XYHb1RUSEOSX+1fNtL8X6HCcOc/bHXDgwe8N5Re0Ry1zZ9YVraGZd19W0XtZoVUKZEAdFYjXP/VUYyIuJl1IIESm4JxgRhTBdoQI6x0yZGQGAlOlGcQjOi5MR4ifIKAgGVEogCh2FJJQsQEE1u+tDgCI6lGSMFpRcQEolq6qhFpXjqCGE3W43iB0PQ6SoYkXymze7HSAgEVJRHUUNCGOP/fY52Vi4lFAspEzPh3I4jimVZ/hNItWoBmSGTNLngio8lDBKMIbYY7DMOZsUM+Z3OWe1UgyLjAAU2EwTWak15RGRKEwuy0wioqLM3HFEREfTYRZ4uAbtAACisM3ur4CAOivvHNGqKSEhEzApwljykEtKoqp9v/N4M0Q0IzMCxUDR3GAVgkhhpgLgVY7MTFWqPGaTTxc6PMzgcsGdIZyZzplyT3F3TtUA0DtvlQ21fGV7i2onMfaXeM05JDIDMyBCZnRxFwBCCIgFEbu+e/PNh+++++79+/ellKenJwD4zz/9xy+//PL09JRLHkF6sPuvv85mwihaRExNEU2LM4EKMGV/AQQx9v6LipscKz41My3FpbJ6V6ebJhrCZt6Kybm33kmveOG+QACQSxmyDHhHRr3FwLC1VCRTMYpx1PhTCU9hFwT+4yeJj0/v39Lbt93/Y8dcIDLHsN11wQieh+fD4fk0HAAgdN3DV19x3xexT8fD6TT+rL9/Hg6fnp6fDuMwluc0PqXTUURADTAnURUEk+xbTR5TiUiBCN0SKGagSP1oOhQDkEjMipgR0ZAUiZ8NH08lcuhDjDEy7bJmJBK2ZHksuUgxECIImoozx8A2VelxH2yzSU0zxV3iJGBkaJiwdsOvIlxHjACGCHTOGFaz6d5kGur5rgnkBX7HJdqdEbQiTqqwtl+EszatxeAtFW8xcv2pHb1daaU3/mfRs4XfZl2bmak224Lg1M5DumnFpiCiqhIgrPazpRztzl9o95qFrC2o87mccUW7hC6EWVVnAOcszVWXCQDigoF7eMCSNMKKQWx/arb6fCvrrz60I6hSSoyROapmc78isFQcys2Rt9J5m903AUdwWyGgEzUABCSMIU62nSSqeVbLRjGJYcoFPdl/VFVgHBNWvxtF8thEQ6TrbOICNuqXNS
akbjU1BYquQtTioBcbeAYif0uvODYhoNfLWtwdRCRcllPCRsM4TQ8ADIhm4odmYIBes9dUxdQ8jMma2k5Tn+pU5yylq0xlrurcJtWUP3+DM7+Yc2vK1hVD5t3a1JPbBGeAWOqe6lm0iiRoUYoeGIGQwIoZEpgxInK1SKsqqjCiMwgSu5JVBYgCAqEZqSAoq5okROm6Lkxepmpmb+/EpCCUd+/3v/3wgbUc0gHDpuuiQhIWRStgKgFzp9TfnUoMZJqTGQZQMqEiWDYdvdvdffx0PA4ZicZCH0fqYdNtKIBGPQR5NhvMgDf3tv3u+4R0OuTxxxCIAVMeIIAQUjm7Xrd4tYXJCofYlCle7tuS8cDKtLQ/tciqVeA6Hm4FxVvTgAZbVgiBS5xT4a1VYdSB4MaFvfpN62N19bFW4b52A6nf1JLLLbFoKVe7TFwJHvWB3N+B5nJ4etPhP/z2u9999ebdht/snllSMJMCIGZF4JQtFRMIEkRElAYrB7PHbEfCtAkfnw8pWM7CWUBLR/BPm+3bh/u/f7i7v7N9n3dBccwQqSNFAxIbT+Onp9PT82koWASOAw4n7k6YrIQthRjHMj49f9xv+2AMT596iYbd0cLxVH7M8Im3tgmaT4SEiBHRSgWtcAYhJABPpw4AoFIh8+wqBZfJh6aTnQgWKEyOgTAhSK1GSCIiPmdQMzNCnoZFqjDjrnxE5MURAMDhCIDMzlnxsUn+74alBUXDxh7TSomVzLXw4E96IUecayxV8PBQLK994GP5EFNFO4bzRrT74nUqW5heF3Jdk+HFNRCYqCsiAqCqicg46inAhi0oYxBiIzRDU8sRwQxVtaggWgghBMhjUSlkFAL3MZqAiEoehQMoZANRNGCFABpAmDmSYBE7jnI8leOpjElViSIF0gKkYCpFJOcygCixxm0gMSEREBBUI7XQoRZJAMJkTJBzRiJEJ8xn9+LF3SailrNZuB1Ci+z4wk1OZw9SbeJHsWl+kH3fu0lKL5utWgXKOpP2gBYIAvGmsn6e5JKs1um1kLNA3As4uYSQ6RVP+05EXReIaLvddl233W6//fD+3bt3Dw8Pqvr4+Pjf/+OPP/7446dPn7xwp6tDQgh9t9n0m67rcBYwqqTqY8QYzaaUrUSkPKEAV+EvjsbVtz7hWjV4AnLI0CBrm7Oo+wNzEWE+b7V4fqP2OgCJpEwiIrkQgc1pgR6fD7gZHu729w/7DYfnwwksg6kZht3DbrcLsT+Nw88/Pz8Pg4ddf//8/ZjTcSjDmIexjEPOOU+RUOh82GQzv8orWePYPh9WdaZFM+NgiMhIZX6mivpEJGAyaxOYOZhhFgQ0ndLyVzBYj96ii1vfvL69QIkXBBJW6v92XFtZIGtvtUF76w1a8/gteenqDNdtgUzMDBvIeWUP1mx7s//X26LPF55ccCSv6ae971fnb5dSyuIZvOTJFo+tuZ/60wLPtMeNt1vtzRqJa9Nv60DtA+M4TiJgjC16d0WSy4rVasrMm83G5gQ2rWr2lgTzAjz/T9KmJV+LdG3PdLHzX1puckGhFl/+l7YFyMEKSl0Z4SL6wvJwfstVIEAA5NoqD2rPSZBsciMGcwsbIRpZiKAKUNTMALGLzEyMKiSmKmVUxD5u3jzsd7v9dmPH4zGAffvVu2/evMnH5+FxEloCd9ijmcGQEMgQDEEAAhAwmQjMPr2AGgOplZxHRCamIuU4DKch3YXIBES03e12b+5DiAW7g3WchkAQGBUMTAISgarcPN1WcLImpg4vhZmrHxaPLZ6vD1feqb3Ci67a59fIFi49UNpzb5mrdla31nu1raexxnvYMP3tiwtMXhUQtsoVf3XcuufLCZQUQN7c7X/79v53b99/s91syfpiZEwiIkWK2ZA1iWTTYgPzqDAWGwAORR5P6eOQhiynXLTkQLDddPv97r7v3j/s3z483LPtutAzBUae1ami9nx4fj4Nnx6Px1MaC6ViY9KciwgJWKDQxQhsIHZ8OuUhJVFCSyIfh/Q8FACIHIoCXnqW3VrmYsPXB7fwsTx/v8pEsziINfW5NejVVutergnrAsDWf9YJaBOvAc0tAwCzsyLvKsRio3dAxBDwjMiswdaO42hWwtdlX9Vwt9d7MWQIoSiICqILtVN2kCyWEgwDdMZxg0pB0cBjUhkBoIgz5RbMQgiBENRrpUIMBExaiqqcsgEyIiJHZAYMBlw0jomOgx1OdhxhGGwYSYUNY0lZRARMQbOUrKckJy3SBeLAFIKJFVWzyBBANcB4yicpxVAZDAgB0MAIWRzzgFN1dFUl0lzyDs4KpHZbFNtAtikge31jF6izdiIiLnUgoptwbY5Lma2FC4vKss5k7Xxxi5aAAtw+2aLLNUZe3ElrkrW0YyGiGVx+M93D/X7vYN33ses6T8O93W7jpvv49PjHP//p06dPwzBMi4oBiELgWAGdWREEzMy8pmcXN25BJaIYvT4huoUwhKAyxW+ELuIlT1PVLe20/S7YbDdwRtCPw8M/0Gt+zAJnfauMOjkFoaJOZawEzFIiwBJKmHz8SinldDplPD7c3T88DZtIZMqkXQwh0N3d3XjCcjg9Ph0Ow5BUhiE9Hw6PQypFU5YhpSFpypKKGoCZq7bZDYN+Dossju0J6pwZFYAUjSpP7HGGSGhmqAIlIZJNEYxezEXAjBCBmJkVRMzOigyaYfsC0mwlCcAlkv1SWrt+fu7N7PJGvEAt4Bqiv05HL6/q+n4trtit5dyiTy8v7bPt5TUuOm+ROTQiU3v3P9vhgkzWadjqrCujZg1f2HZydRVXKdkL37Q2tAWlXCyzXSzMjH69wjUEYNE8HUjNxWUXcc5nW7EjnBaZWBO24RN5YT/XrS6qPvb6s170c33cG8+voWL68/LoF9f5V4BuO6Jd02j86j6/aK9uOfe2kNyeQovWLu7LlLePQc88GXvmLUUDBTUzKJqJyACP+RhCiNwZgimQIQCpaS5DiNZ3YbPZbLfbN3f3+/39ZrP78ac/SS7b3eb+/n673WJJA/Hx8Lzr+kCMzEzBIOWSbSQzG7c9s0VgIFDxLHrAzF1HBAZohICBJeUhjY+nwzdvv2bGyNJz2PfdZrM5JD0cJTBtYuiZcsmkbGiEqHoTgFqeoZ6srUSyBdatYNaey9X+28LiON/iq9O4BQNXuZr2rZafWT+zaAu4vTro4t2rKPQWSqxp9trHcMVAXv2m7XZrhc2+e7j/p6++/m5/dwcWc8JTYrNSxP/TbCJYClqxx21/SMdPh/E4lpPYKZckVlSZuY9hv4nv9ts3++3Dtnuz397tt+8odx330WLAyIigWU1ED2N5fB4eD8OYIBUZk+YiihQ7QrI+hg1HA5Ocj+P4eHgeFQz1KQ0/H8ZDUuO+I8SiJdB6qxc3vQWh1xDi9hm6VBQunlmTzvVJwSUg3TrTlnwsIH9NyNoJOK1pFQSLdbWguzAI1fnQnC9j6mVCf2jg+TfOLoXk09FV7pN2+PYM2s+qisQAnoiF3fVG1NPCUsk6pjyQ9bETg4AkasgkCKZaTA3AVCUXUNvEjn2Cq
qWkEDqKBGCPA4cudl3P3ClSEUzFSsFTwmHE4wlTIhFSg2JoQmK5mBiqsQFm4wKdEotnFAUAhhCB2bAgClmQP20wZypZhEw4sjKOSW2WDCfH5XOhqim9ZAtGdMld1U1cH/B1NqXZahdLPPzAS014jIpro6/T7AaU1zhoiQfR/1nmikRET3xdcVD1joNLfGQzwKzXWK9Ps64LRYPPUFXHcUwpnU6np8eP/r37DJvZ09PTp0+Ht2/vKivmPbiE5rKxmfUd1hovRMHTtDb20snoVzU059NBxClpLWJjIZz2H6kuvNoDq3m27cQ8b42hGqoqoRIRc/TQezE1Uy0ITBwQkIvomJOgPeYDPx67yLsu7PoudiUQ8mMS05TKWLIgAfIw5MOxMGlK6TiM45hTNjEwJCLKUqaqmJMQqDiJ4ktKbAjTTwgzkPg6zczEJUsAEFNQJgIwAjRRBDIAzypgDh4EzIRiNZy6gkQLd2aGqG5gX1yBNcX9FW19a9YPLICnfm6fbqnI8pqs5lyvW6uZfnlu61GguaQTDllxFV+0Py1sv+bJlg7hlwsb7W7U/bTbgoFdyoqIaHoFAK6eF66o73ncGQtdXeOtmfv/4BIJpzRWB4H5J8LJ48BUtUhpT3zSABJ2cVLb+bjjOOLsyaMmBjoTjpfCX9frvXqgf/2V+WxbgmWFltkPeQHGrdjfEiC+kvv9/Na6q8XV+yshs/1mcfcX2wstOK2W366rpcuwOgvDqAYIyDapxFy7GKbceEIIasUEUYEI9xsMgQmhFNW5ZyZ9eLvfbcL+bnN3t3v39u3d/gEAUir/9u8H1bLd9vvNNnaB93vL6WMaT+O4jR0jGU3xiiKWRY47JvSiuUhICF5OCVFsu4m7bRwEBBQIC+BhTCnLttvELqoOqWRIWIqplE3XbSP2jMckKkpkDFQMcFX4e33KV28urHBUi4ug0bksIO3W/rfAc+VcrpmSFjB2a/IvILTXt8Uq1jNc/AqXUFotDdC4peCswGpXfXUhdWM3mCznDd7fEYbkVQWznk4xxrHkUy7FUJCKci6QRf9kcjzo0yEdhySmiBwjb/t+v4m7QG92m3e7zV0fdoH3PW8ZH3oiUoSCogKkqqloKnLKckx6TJaLpWynlEQpRurvOkbadn0EymMWMUmWEgxIR9NPeXyUlIxYhYGCQquuq6QHLo/ygtu5xhtYI24t8EBrUIEVbKzxM8J1fNXu/2IO7TRaVLkA4DqKP7PQjKyB3/9tudOWTi3KUdRXwtzLtCOM5DauqVwbAEwszsXAeBl9u15S9YUds/vKT1uiCm57jqEvAmnUAaWjHAnJAiGLFhMFFQJ3WGU0RTUCJmAOwczGVBwTmjYAAQAASURBVNQQQzSzE2w62Bh0IJyLjVmkBIMoEIdShlFSYbNOjHJWEaVOApKyIYEGDIQYGJEDoIlZIQAAgyIYDIIRy9PDw67bPHw8Dj9/eswGBWn0O2kI4P+pAU3h8qpA2O5GC3PTZ0IzIC+p15xH25avXAotHq7m5kGbE65AgwQXyHTdKkJc4kS9DLE4D3rB+MLq5rTTbp9sUZiZQROXCACtj+ps4RQX6lzuur+/r97VRVRVu3771debcRwNgAjNJutWESOxzWZT86/iLJW5bZA5VnF6znl01u7XVVMtNN/8WffKYMq2ugiS9ItXa2pNE/bsuOYSIBgAMpoRqAJ6BU9VTcECImqRnLNtNyrFksYEx0J9ER40EIhIFlNVQE6ShjEXFea4jzZkGZOOAooISGZo0OARs7OYc80j2Mw8taJZfYAAUD3hHRIYFAM1VSKbEoeQAJKhIqihmAGBAtQI2HlbaKo/A9WfYaWB+xvVGYMVnp0/fhnlXlyZ+uctzTQiVs9kmAFbP5fn5mo/dikdTePahQvTAuO/3Nbk6uVnrn5fEcviw+Kxl/u3SzZugdmu4pOXp7deV5VMzKaQ7FszWZOt2o+u8KEHGDuxczSLiCGEYRgqlcXZg8CVRBcdzjnAXr9ji9muv19vl305h3prY/F1FssFcWnBYzH/X3GvF8vBy6p0X3qzFtNeDAQ3RMHFTOzyscXtgBV+qGeE1iEogCkiGXiYKgFGYpGMSEhmwkIOVzxky2UkAgTuOXRdt9/vtpvw8Ga/23ddAFXVMpyOUpIcDoc8Dnd3d28f3njiohDCdrt99sBdMCZEJooBiogaiAxJAmsItiGvbq8IYpDTMO778P7h7sen4zFlRDCEYcw/fvzUd+HNboemYzpqyoDc931XqAsUQNhkihkHILydVex2W1zDdp9bcFozFfUVa0wr7bvWKJLWPNUCHhY46oUL9cq79gLkr3vQVSLKRfTBYgnaZLNfsVhXRO4FZTm/KCMaoCZT0TGXU9ac8yBS6FD0mGEASQCj6SmXlOVfSlJV0ACRQQqhxhj3fXy72zxs44f99t222zD0CNvIfQDGoqIqJEQFMBUdcknFnof86Xk8nFJWLKJZFIiAQ+hjx2EbexMdxzLmMQskCEe1n9PwKeXEiEiWBcQiRW1Cq9ao9SpiXF/e9uElnX3F2b2mXaXFiDhp1S59Vl3+Wsyzwv/M0056EJzrYC/6b7el3Zn1drXLD+QijWcLITS0Oc/HlZW0Q+INUbV9UVUdeRMFRILJRkFghMhmlkWHBD3TGCEECoRkSUxMtXOLooCJZ7citRC5z2JDGZMBmWUpT+UhQgwawLAIAnQcegrb46fhOOjTWFQACU2ZuDNSguTxor69CoCBiajjmIZBkpKRaJFREDnG7v2evv727ZsPX/38dPznf9MfH59ECqHNNbkJJqBBg2U4fsOsXJHdrSnUtL6oVTNxJi3NnkMjk0OT+m9hEUZEfZFO3wJuvGT7EFn1XLu5lRgraLbzvAondo1Rrgus9VJqqKv/NI6567oY3RDnubkRkbpuM2UTNfMCjKWoWd5sO5fffJ7e7aSUDRERc86lFLA5xFYuaHll+BbbXk8EaWJQ7FIOh9nXtC7c/yxZAcBA1K3iRACqLq+qqZZsk1O0q2qPisybEFEInqQ8Po+RvWjV5nl4HrPEjo26QWDMRoSSTyWrmDuRsBlKlibVrdRQ6PlYz4gPsbndhKhg5ne0AgkAopqhZz8XMzQGUAISIzMglCn/AoKZzKqlNtcloueuOwuEc7soQ4CNnPDrmMirn9v+4TYxOD+w+ublbqtZFVZ3/Oq7uNL5XR1oTZnW/VyZzJe3q9zYrV/rh1sHtGY+8MZWV7TQ8jTwinW9jLjWa2kp4poK1jYl+TgXSa10ejL0ua7H9aRVXeXuA+3zNmuL6kD+p/svwAoCPwvoi/UuMPniy/8B7SrtaBfVnv7iw+vnabZUGdRB7RqT9ytWcXUs+ByALTxf6ulfZcgQ0dRjBhU9pBuACAICWAbNasZgYIYiRMQGdxsIAbeb+81ma4qSJAToI20Cvr3b9ptweHo+HA7Hp3w8Dp8+PhJv3r99eHh4MJBxzIqYcnb+JhUBQzVDCsTF471PSQNLZEWGAEZogFmKyZj2m/jVw13KJU+1lCCl/PH5eH9///b+LsSNqYkn7iCC
rZK/1eKl4ANGLpSoMNa4ZNzlY/+52BOOHhJ+6bqGv4uJX11uE7Br1RqF49b+YFlqx7jSjYwsa6XEImr1z1fPtrwSx1xyPnJL/GHAhxAZXyWHal8KSnLWcEgiARIiCSiGiMx7u7u9vbWwqojnxqDzwejy8vT/UyYTXEhcTMGQqIBCRABIJ5q02EQR1AzrtAAnW10oGUiKHrQ9ftfh++BATB2SkGIMgs2NpZaBYCLrfCIv+Vdwjhuymq8hNRX3llmAHKbn+DXBBAM/J1XUKgl8M05YEnZsGYgGICgIk3aM/PJrq9sqdVncXzWwitHfBbI1qvowWSRUSdp1BpVQAE1dGDhfOJgCnKOJ6+fP6IOA7TeDyO3fPTD9PY7zskQiIRiCEBAAYKHfb79PXx49enR5D+3bt3d/tXKvI0SEyoeYDnvtAceZKIdF33fHg5DqdhGi22maVzqPs6vLu7u7u76293//Pf/+P952dNHwV6dFipd0k8qp//MQZiNvD1TG3W9wTs1/sfVeyvl2ZpbG6Ur7y7XmIGp0sP8I1mbcsOSzw3PK2RCOAYXdP1pQF62WFszct6X+fCEsNGitX26wHogk7OLTTNeoRsNrguBKhpe1AApWaZ4nPAEhHRkGAIiNJeJdChsXORtf2w7pOz23V7/FzySGr4zCbym7KeBd+afjjLrFVy+VKDJp5NFA512xbRWqDu2aRiQFMDrgfld57+p2g7+5zVajTnjlPhihAAZwsMM1vc5EaumMWG6m0uLaUUqPeyjC5ts97cPYN5Ux61HeVEZc5PKwxSGEphRui6LvUdqN9jh5NexAIQUnuKRHWKkBIgpC6UQtMwIMWu6zocI2HOOQ8nRAxqWZwK5zGEECgRMCLGlBARmCVEYRimMkwveokRcc7UWC8FihACBRaZpqnGPgkMFrtyHqPycKNa1Rz6PiGiMl+RmXbV7zfEOWY389kNNaXeUO3JZRznSJ7eyqcM6xz4dGKREQC0WYVhHE8W3KWUor6KzKwqkPmXWoGq+9lqKaXMKXSnWYMlItXEdB8gbr+LNdIMAAgUM9YBwDiOMRIAH4+DSi/ddZUiiNh1nVS1UCOy9H0PtTUFI+dMgHp3QtGrqmzsksVVsyzwaqyvbmOBiDQCqtGqwZnzaNgwJdDHF9W1pzobOLZri0uqu6y+YlqWvWuMaRxHdfFFRBBk4WmaptPAzCmlSCmEEACtEQ2HAYjCogHEcVYmt50G7TYpOOuWcfCFai0iInrEYPVRL+2KkJp2Ya6JNceGmjE8+2MfAmfFg7zC2fzkuYoxu/XebqbtZRBXw4MpGI0E8h3JSu56UNcizf9qXtBW06bSXvTCxhi6/Wo9kqVLck68tPKwaJDp4bR+cbnhaPr6ZkFEC9u7iSX7YLPj0eKLuAJL0XWp6+aDH/t6XvjsWZc0YxLX+Maa+VMfqMwtpaCQ7idm1wnAnDNLTgEzi8aR0jO+m5ubrut++uFnNQAOw3A8DafTSV1DZY5dPKFAIAqkabhm9wREjBREJZQIEWIgzqXrdiIiOCcO3e/3yIcYpUtd13W3t7f3969u9ncU0//j//h/vv/9C1CIkViCEI1TJoLqpr2Yd5txW6pn/IhQ/cnLa2a2PIRXpmM9O80qQEJbkvqIhVmKXuQLhEioV8EB4Pb2dsyZBJgzQokpIkrJGYXv9jeIX1V7PB6PSZNoLZmDLyGE5snMbUAaJwWpGrgfbB0GrPkVLFeTPZnpfBIAFAAKIKD8kBACFDidTpHCPnXHl0cQ+N9/+m8//fA6HL5M+TbnMXV4PB5jJLXvlVII2Y7hHh4e+l2IkZ4+H0v+nZl//PHH29vb0+lUjxelHtrPl7V0k5Z5GqYTIvZ9tUhnjbXWhRBElIsWRIR6wKr3Ryjq4XUquR4XiujpvxKZbsV1SXkC2KQZWanQDcGYo9C6hYZt+heNqm0K2DnRGecxCNciwB9Jozu4aWbcgwGevJdOoV5zBscSjRc1Txr5JTVouXFX7yPmAfMir5HjHp/rifCuPfqrHhCwC6MQXNARPxYTuHr7qcEMzucRWlNRqruXc2t2sr82SDaCAC4oTuj2QoaHTf5WgTsDeR5XYb9+DSelFHAdeQyzy2PpWWhDnAbtJuQAUBye/evN6BSqgFiWxyXmW6eYbPg5OqdQI2l/gcjozb119sjzyId6U0k93RBR9/BRrWqN3rnghoJ2ux0RAdXSojhiEcv+Srp6hBmQ9K78OQb0EiPqa9dMDLj9KDOzJjMU9S8BYUBEjIFEijCVgjFQiiPnIoWBEQlkjn+RYQDNgwSCpe9STF1PRCmFKWeKfaQ0TyozMaKe9FAECgwIgnOmRpFRaLaL6DkugN6ExAAkarpjwCBAggVDYABhprrh8PQUQxcDhBSxujIiWkSWOXHAjBD1X7WLkcCe0xllOIWQum6OB2OUJCIg1KU07yn1aLOoN2aZxrFMWaRYmJa6EmaFsKgFj0XT+KaUAhJQEBGCmtRYkwrmUjCjgBQG0Isv9RimsEqUGQ/MYx7EaYNGqRppDWA2ReqGSXmJ+grPbsGsgQAZBco0GydZyikfRSTSOcsn1KMXoHPsY102DIAh7FKKMXbnpIW2ZFQJzHw2SM4LTXmOUak48aar0bHUupRgDiuoxMkFQggInGKq7Fh5lB4KYEizsl1K4SKMZyliDMs08xgjLuSIdbqwCoJjW14YNAzaV7OvJlD1u8xuA9UWD+cjw3nsy6bWcmUmjAtn/7K1Y1hUvnSy6CQHLOWrXHjl+k8NYN9TLrXmB8WruwdXOhKnCJkQWvdiD6835T/4Vy7NxZWynspLJ/rrwZp0/86+fDse7AYGUeMeIoUkUmbJWuZ39Xi46zrOyAwBS4GitJ1ixJ28vLzs9rv7+/u724ebm5tuN8eJ+fjh8+l0en5+PhwO0zRCpTTVcBAXLpE550xoS5UAKQAwEFEX4lDy4eV5mvLtbtf3PQkEQGD4y7vXP/30U9d1CCGlFFKXGVPESDJJEc7qDztTDsIae83uxGTu5oSea/6xGfhjxZOuisJ6mhYJWAqihEYjlfnOmtAsA9vgyX4If9SCZOTi/zZwfnNEAGDBFEAIkLkewBORVBFDAe5u929e3d/f3T686nPOOZ8AOaWYUkAMIoAQqoQBPaWM6ZYIxsOXUsrvHz6NQ37z9tV+v08p5ZynadRYZepXIlBCCCFSHrcPZaZpAqVDnO2JWmEag5ErMwjWc3xw6aMavK2cGOECda1Zyh8t199tZesF7bSBZ91CwwzXADQte26zydBgJdE2hd03y5oarwjK7xwCLA8xNx82w5zDGtcKTvosdOB6j4zWNb850ktyn5cWPKtsEFJ1XdGiQQF9IwbJZr+6QZFq1xFN/QoiCFyvDIjxCASqqIAVzje7aJbA/FXO4700F36kazRaa2vO70/NPKKa7vxClrqlNJsHzhm/p2gprTfRBzCjqTBg1QbtgMowawWWq9TPnx+D3754VSeEMJWsdRiQQUBIZwuAYoiInLkw88QlCYUQMo+CgEGzS9F8HsaUUpJpzDlHwKLjx5wLn
ZAAAMOsEDIyhkAhhJCJom2XoYimAA6lqA5gGdixRhvnc/oHtYF0AFB4vvBtzoSllDyxotn2+iuFcHnesHQYqPFOKITQ9zuPW0QUmbcgnhw1UGopBSFwkcLnaJa6b6kWwjlEja5rACh5nsfiIqb4iYYq9vyka+XiYsbYcx9qT0TKasEbI1DCqOf9YL6p2rNVsNt0mhFV3WVLvRdax1g9YQilHsjpu6VmElNDXKiX91TQmoXBzO6KWDqHiil6piH1YDKEcM5ob5niz4Gagh3h6HOzkNtkmTIjIiHOJtlpmoSB63HLbJUVKGr6rBZLqkpsw0GagzQ/L+KK1VlzgPl5ma1y+r7MO4YaQXiZY8YggSXHF+eS4TvdLEuRg77Z9Vv2xHC4bupKL+vP3wTsyq/X313jebP+Jiu3Fvxf/5ah9/pG2Xpfw4NbgnzdUTN9CuT6PHH9uSHFS7i6juSt9oMuRhEBQaTZVUY9TcRVUx4+5jkOE1QVq+/TLvR/+ctfuq6LfUdE0zQ9fvl6OB1zzl8/P5UaDRhcNiB1+dYApzZTMUaKcZaMeSogIhIQiSHzaZ9iutmnQK9fvybAz58/I+KPP736+99/fvv27TTlaZpiikQ0THLbdylSnrhwEUGeY8+0/j+XqO7MEP74ucZ/SVG2BrPPy5xRfZrG3S7IHHlFEAGQNXNHjEQEZT53qqRyIZngFSK5QFfXTkyut9b8qomwEADQa4akJ4Wcp8I5IN3f3N7f3ex2u/HlExH0fY8kAFKKWvAgUF95WkDMiBhit9vJDz/E4/H49PT04cOHYRh++OGH27t9CEEkmWsZEVGYjQkma2wgKnWGYaClYMV6LOIVQiDRbf0lHNDSdfx7lucVltIIBbjAKwAuSqVLNRsAvsnGvznpDeO61OB6RFcqr8W0r+8Ba1i9Z5i+QiNN7KEfQtOd/drgYV3NV5h/PT9s2bjvzuuNfj/QSPM1cgAW+eKvz6Mf+KVRbBZf2coCBt+I2zUZ/F7e+cpSz7RaqHix//GU30wrrORjM8ub9MAuuyA6gMFRS9Og6S9gnl/MpZT5JncALMujRyLSKILqbrEW+X7BNP4bCy2f5lilxrZAQFgYNCZXVdn1RZiRXs38ACJICJoxJxAiARdG7aMgAAPWSyOkQQ6NaQZKTMw1DStLJsChuxER1egAaNY1BUPYCRHj7IEZAgTVm6cTIsY4298UPyGEzLNK0Fh1iUM9w6PCwCKlyFRq/vSpwAScZ49cCxJdh1CpQdM5klRtRJ/PXYPTImSOVAT+0pcw2udTOc2qjmSTBBqwVNE8m4Cg4PwWrgstHbVNt1HMoNOpsFqATQtS1cU0h0srvJQJqu1LlZ9Syul0Ymao6Shs9Wp3qiWBcwTVG5I6HUXN14K2YADOeoPBMzveuGjyZlvzOoYphNN09kawdQgukpW1YHizz1zd24jIrnHqpVCT6GM+pywDQQhERLF6FjFznqbi4usYC20YR4Nt+2AGZL+Q8Xww0ZZ5FeJiyYOnBOOAS48aXEmjTWbdVPN1NnnoJSGxyUMv1WwGeOXrpeLZN2xJ+nXXDWtuOvL1tU4zoZeaha1Z/hOlGdEa7HWnxhA2320+X5mRPwSefa4yBQECqLP0/ON8asHM4lwD9PagBupy7XCMMXap62LO49PhRZ1CT8dxHMdS9CyRQghd1yGCrU2a3XLmdcTVsyvFQKgwFESMREQSCd69/eHV3d2bN29iCKVMH379rRyf9vv9//avf9/vdyWfSp4QMJIIT1LK3e2uSziMeiDLAG3w3iuE2vy6SZb4n6GV7yjKjYVBI2Trw6pXcxBBDYUlAsghUt/3MVLO59VRSgEWoI0NN1wmpHOYlxU86D77Nq9joqkMAHpS3ZSJJcEsU2Kkm9vdfr8LKBCw7/uuixQg52maplIYhIZhQkRhTX3JMaZAsVB59eq27/eI4cuXT1++fCml/FB+ePX6Xl339YQ0hJBS9SJBYITottoKZNd1wbkdcr2qA8bzoW4pXJHFpvxMKn5z6Vnx+tc10i49+R6Ew9UJWnPdZiDW2iY33mx23Voz6ubvJg+8Iv6+k6XLUm1bV1u3vxDQK6GwJmOjDf98vam4gIpW2i67a1//ztkXaaN02rYHHYX7TpE25vd7uvMwywXlGdy9FT9YXU3rNhFRQDZQvaq2MfCrwt3+ypbWd4lU/JbDBtjgttk4aSo8ZuawjGaDYQ5DjHWLr0EagaXZ5moxZcCfSKls9sYKdKYeD5Z7S84IFBJC0/HUx6EIElGoQcmkiHJYISFQ9Qkh0DhmBgyxE4BJFUwARMisTpglxhiC5rBnkRxjqnibUwKGEIgQpACgCPq9fhE2F0GuXoi6v+/irEWoGubnuKox5/MSOC/Ic25A0gFUhTCEgPMOo93EI55zFdo0TWMxJWeaJoJZQSWiEGea0BTqlr8LSTSbAgCIzIRO1c9EYbAJcnZRLcFQUWrcVDOveSnVrBNrzVRTItIdw+3tbdd1ADwMx+YV/7VGaRMi6vuemTUBPQAIanrgxXUarDZb/TrfeKwzKMujFyNs37WfUwPGqmE98qg2ihBCGE5H5oCoOr8gAhGGQLtdb7jFqpqKyOl0MnQRkcwXAucVpwqzOGNpkI2DLriad67hekZ4m3Vm1DVnQIBEpDkS1YNU6weiS8Hsm/X+PYBdqXaFpa5R0VQAx0mv9+K/rrn8lfavNLXWcptiROgH4qXCd8K/bnbzSWNJvjQQcSJz/ZMs41LAkts3leG7J3r94rrr+jAgsmaVF83dEufooIQRaD75ms++jOarTz5O8uHD0zAMx+NxrJldapnv0OacS8n2rnpYxBj8TQwAkPGYYkhd6tOcm0KzTjzc7W738WEXSsm/f/jw6bd/9Mj/+9//su9Q8lAAuhhTTAB5Eogo97f9vkuHYylFL/Njvf644S8KywUrzV5nOYPwp+jnj5a5O3fFK4RAEhHn/MJI80QxE2JQn1LIjN+xiL5ZrqiFi69wDoK17gIv6NVn90p9joAA0zSFkBAgAKYQU4jIMp6OOE3M+TRIztMwDNM0IRJhdzwOXKQUmaYhdfTTX94+PNyXLJGo67qHhwci+vLly+Pjo5Lobt/FGEOg+Q58gcJTzpkoEkWhwMxK+yIIQH2/B9bkVQBzFisIcLYgERLKfPlcRNpN6xLzl5jVJtIuNvQdr/t21p0a8P7r9U79WbbxVVixKYPkEj9sBWKVhs26+3NIWLcAS3XXFrXHSTMc3+l6XH7tN/jx+Gw+NEgAqMHnhDwGGqGwOfymwiVPlsaT6HtQ2kyWgrUJEl6gt2Y2PTKN+P0eqcH2oqnVSO1hg+f5yRaHaSbLtgSwXIy+Ha+4rvcbfkehz3Uf66+w6tcIyAIFUBCj/83DVIS5sE9P31gYYKnEc733rBWUhqwmuTAJUGnd9I0Y/bVgjSQECJQzMwJDkRovCAMwM0LSTIUEsfYYBYNIoRgxgIjkPOac1dMjQgFhEQlCQRCBpWhquWISRJ0oZte+PCBiDgPWvBqIWDOeIzPn6nthnkjiEht4ixBUnQHnDLzz
STUi1rQHUQthDCEI2JzNWhMiptQ73axeimPW9rnA2USmjqnlPPeO6ytzmSlGXcH1rVxEVmlYbY68GU1mHWnyhkGzqumvpkya1qTavuFHRFQbtEknUmMs5FxyznbMacA0pjbTTlUh7HY90HmZLbS1GM8RZaqPKNbJMncarOqZrTT2Fu+lUmHjtVQfquCZPskp1cCwiIh936up2XMNBUZdZC1sT4xRo4z6Rd4IDGYmNsgWnM5WWfOuZw22Wo1xNDUBwK4Bg00eYhCUQFBKc1SOuAhc7sulJ2sWZk2twfbv2pMGP57rbfbesHL81ua4EahNZdzaNTag+vq2QfEDxOVmohEAHuwGeI+NdV/fWS61uTmQzZm61NTmXFzp9JtAejLYhkdIpFQlCBFBHa2Na4WQmDPWAzhmHobjMMBpHKrzPBDNYRiKBjFzoduwMuu+70sp6n6vLCKl1Pf9DZ32+/3t7c3d/qbrukAghTnnL18+fXr8WA5fOJcP79/LwD///PB//m8/jfBl1pewlOlUigjSrtvf7PsuRQII6ltJGtISNdPdNym8qbCmf7+c/hzZXC8zY3ESMISAmBA1dcPMsUsuBaKEGIJqK+ewWCggJLIikz8HLTWrvrZxfe18iznUzSvODpwkCAI559Pp8Pj0JY9DOXwSkcJTzurkH3f9TddhoHh4eXn8+jzl4fZu9+rVPd8JYui6HeIUY7fb7VJKHz58+Pr168vLy5s3bx5e3T08PKQ0XyMsRUDO54kG8AxzzVQ0azgCUrhAKeZ9pef7iDIj4YzbynGuYVuWG25bjM2G/krZkDVXN/3r+msw1l1v8mfjwL4pX/N7OKqsTlius6bN9hsRtqa3BozNX+EqD9+sb4Xc/srtGTbAsN2XiAgvSG7VcsttrgiCppf1dKw3XV4m2hYafFAfOOPzO3tsMN8g1n6yzeelmQIH2/mnrQ2MDmOzUxuy3/xosdtPm9snqRrKWUGrjTebWHH3obxoO2c/pMAIZ83N/DwRkZDcrrGdJKnXurwCbTtju8RpxXiWR/pyVhxNaJIfkGJua4ZQzdoLHVJU2FDvEDIUlJR6ioQoY8mZKBMwIwPEMgUQJIxYUES4EBRERL0HCJohIIMAaBAvmQBA3SlLKdkyDcTArPf9RW8WCAII4Hi2f3oKKDVnHRFp8BXVfELUNAMqMlPVBGIIofBUccazp00NEZlzznl2XtJ9jAaABSFzC1YACs8iNoRABFVNVS194XZYSe683qTeWOWaws7bBmeRD/PprynAlsldP5j1GGd9b3E6oNiz9hFxt9vtdt04jqfTSfGGS0bALq+OcTGLPhpqANsFwkW48rJSLQWKvRR0dGcTnxFnqOki5oeMLBzTgsd50sWzYfkc/UyjAhgf6fve7qmaUmoRXM38OKMRqBSNoIs65BgjiJRyjmZmzFfNHYIL8DwLgAvcx3MoWBasKXSMeRly3FkGSg2oKyKI7XHPZl+w4mXr4teR55WbXH7Bf92M+LE3XL6BxP8kW7L5EqieODdHuv7sG6dlZDM4r8QFe2xgbsqlA9cr8Hvq9YD5ypew3TR1HTZYosWTx39VUSap4a/I8ZamFyICIWZzBJovw+ecnw8vRBAohkAAeuAKAHA8HkONKqxNalNlTkUzR6/R1BQPDw8/3eS+7/e7XYwUkRBgmoY8QIK733759fnTKU9DPsq7O3x3v8+Hx/QAIpgnHk/HUoQoxtQjcJ9ipFnhCIDzFX68GAnGAGtX3P8CZe87SykFhKky0pyz5FFjWJJw0Nv1mZlISGKMBGfihwsU8k2yWVDa2Ui4YAXrBi+tAlhtTOHCBMQYUersIJdSNNTh7e2tQAHYI0KMsev6Lu1i3H398vz0eIwxPjw8vH5zd3//oBH+oDKEEML9PWisgcPh+enpSfMz7ff7lKJu5sw3p26tgOpWuPAs5pQ+GWZBI9Vrh5kB9CYCejnyPYjFyxaeSxyPlg5omyLJ9+KZqm1m2F1o8aKh4fMegDW3adjCJu9a89s1YzQgTWSbQnKpeLzJBY3UCyAb++bQNuE3w0zDctdrav3Et7k5asOeet1XLK1lUyvprKxX91ooNxiD5ek2LJekLHVO8Scal88LvEilVShymxepd+14GaJPnElsDfzGTmNl1jY412TQkNZaisEWfdLSHc+Beu7Ug+QXDjo/jmhecMgC4UxMbLksAqG/QnoWNAsS8fON1bxGRMM0YjUfmd5INXCIn5UKWamjaCcSiQgDQBHhzLNtJ3FXSVL3VVCEkYViKAUApQgzksSIAAgFhiMhBgpBCIS55CCCGESYkHQzP28INAu3QKmZvkspRbJCqzEJdBuMWD1shUrdNCCKKQbqSzlfdRPIIoDcdbubm5s8x+ZRJMzXyTS0UamxZ5hVVZAY4+Pjc1UkzlDpdBLGantjj1ioFicAENS0EPOBSimFJdsCW1uujMeZ9gLuviIzpzRHa2DzC6rsWw1uzWnKml/a2tabOX3fxxiPx+PhcCCikjO6MwijN+2RXAQaROz7Holmv0oRxTnqsUrlxWqIAztNqbGCDFRjBMHlh0EIEKCUEuKZoxlyRCSlBNUiqkqpgrHf9aYYm7qokHtDpXEBWh4ZKg0UmVXEEAKI8JRtvsDd/cO61mF1N8A+l2WahPVYwCnGuuAXDGbZJs4WbHeZNrQnxGt+19CYr9bUbHj3dbVnzVKpBt29/kojbDZrrpmyFx7+Ia7k8fr1RvCgu8PpxwtL1g9LJXnd+Kb883U2xap/svni9TGucQirS6pNL2tkfrPwKjqr5wO6kjQtFda7u2qhYtZIvfUgsjp+y/nQZ0bq/CLLxFnZaUoJwJ/vnE9SD4dDSunVq1dv3rxRF/fb29u7u7v+9M/ZDSEXkEIIHVGIdPf61cdf/vHyPI0jRISf//Lu5x9/4JKHYSIiFgYpXUz9fo8YDsMUKdgNLw3grSNe5zg2bMDWIoKt/cRc4X+lrqiXwIUL4bzlGoahjMcQAAEiQpwPN0WwGHNDzBqlWkT0X3T8xNPw5h2e68UIdR2Z+U8QZFNKKShIwl0Mferv7u7evn0bg+D0UhN36WV7mcZyOp3ev3//228fQkg//vjjmzdvQoCX52PhKaGEELAe3d7d3SHibte9vLx8/fL09PR0d3f39u2b+/v7WWKG5jBrHmMMcz7blBIKTDQNw+DuO9RNLc0vXh+/X3qXOPkamd+D1YaZXOFgHoA1q7/En21r6vvysske+t431pFr1tOh7XZkuem3mo2cvTKoZkT6lZb+dPbTmh962NZ4WHcnToH0lbGeDqwnXWrMBbUQIgItPcLWPSpIZ0t1Lc1En8erb7lZ9u2LeQzULniJ7dk+diGvICLKhRwqzRCarr3YFR+T4gJW/dc1uVqDtu7WkrQhmHN9SzBYNUB2zqKwJHhcHdwYMM0BDdQd+ByKQ78TC2oaH5bAgEgoiFkAIGIUEZlkAsblmpknsp5zY/W1nTXAXC1USCGSiMzRPhCx+uMZyAhAIXGdb72FAAACEJEAUZBJQCBEpBCCBBjHLCLCQkAhBIyMpWQ
uHYAUCSEkFJQSQiSi0zQeIRBSpKh+QZICFxCRmLqxFJTQhS6EUPIMV55e9ACOiCgFlAQayaYIIsU6KcgSQtCkDDYZiARA+lFvm+gsplCz2FHYYbAbiSDIzFMpzMNut2MAZp6moabX4kGGYTwHtPTICzR7zFINVDMTDaqeRiEAM/NYpjzxyF3XFYEikCdBMytlJgiIJAI5q4agDq6QMxOFvu+odfKeFJKu6+7v7xHDNE2ljH2/qxRyNruVUljzOAMQUCAsgpkhy8Qiu67f7XZdTKfDcDoOmkkkCmomdFiyrZIzhYQuoGjXdV3XHQ4nBOrTTgk7UCilACOIcClElDDGGEoprAlS6JwI3gg7RmLmEDQ/JzPLrL7JELiTqkHNpxshpJBCCJaZUJlm6jvaE8tkpstQkzqaifJ4PB6PR0RUB6Fpmna7vf6UpwIz2dM4jbvdLhAFxGnMw3AUzhQgl1FDH2HV8EU0kHiw9T8zJp75WtSEacwwB14iABCQGIN4tZBFE1rrQp1nEM/zrmclkllD64sgASFgYZg3JectKYAIC2soYC+Sja95PqjMgeu9bb0Kq/vFEELhBbv0LLgRmeKiKsPqqJWd44eX3EUcA53pFwGAy0oXxbnZNQDnKltsHZbMs8EGLnMK2a/WIK2ce8ExejMXeL7fwLNGl5fT+lNzKGs1pd6msPq+x0b4Ne00Y9ksBI5sfJF5qwuVWSkFBmECEYRCQeMkM3DhIhQSATNnyQGFCEVKzmOAXlAEBYmYYCoMRBK61AkDDoVZLyUAIkYQit2EiMCSc0GWiGpklLf3d3/58d1ffvwxRUApN/suRjk+/ppioBxEJGICgGHIpUDX3Z1ynuLtKTzmjm9vU757eIophND3QylFIHb7HWEsTEUQqROgm7vX+PGFJ2AoE58KIQSgcV5g5hY1T42d02vOPYtOLJp7XEII82muXv24EMDT6MFPqx2ir6kRSwZgBFLTLIsIEiANPFGALgFyYZk+f/74/PnXPtFuRyFgQGFmYAkhhbAXPI2P043EUXgaEYtIiCVAYe7deZRtVK5QUZDl0pgZILvz5bMbBUAhd9PQk26zh7NfR0RAJkEURACcc8Fi13XDcIwJcj7e77sf3972YQySjzAJy/3dAyIeDqebm7uY4N/+7d9+ef9bZrjZ5fef//H+8z+6Lu52u5zH+/1Nzhkh3N3dPTy8vrm5CSGk1L99+9Nvv/3y8ePH42ECIZCw23cpdYJqaM0EEClgjCQBAFII4+kUQsDY6fVFIup36R+fyodPR4ZOqOv73WEYmctu15WcRQpI0RgKICQSRYTovN/wCJGl7mRbT+NOl5izIbOZU2tT/85JpwBgeQ4YXB5dWF4JMSCbM2jjmfaK1W9Oz+HywdNaAbCBe6E2v67uuBo9USOvKQyB4tIbE2F2Ai91n4xLqcFY1zIsCBLd3UgDg5CAF1Y4KwZ/MzoNmus2dbDWBs9dQEQMSPOZSp21Aivjzbn1qhGQuyuBTulYtI/IFWzdRIHaGBB1vKXuT85n63myLsx9mlyePD8vWuwKlRk8QghqjTC/M1NfJ86Ac7ZS3fLoV6+yzq8ICwuh9gtVgwYNGwVid/kgEM4nMqJWnDOZGTYWRgLncRbr3XgbC1Ri9lNs8x5qy6FG0NBRTzlTmFELgYScyygsl1PTuqM5ms1wy/pWLn09rxkEBAw18KPWKcvTGvXBdKxBxZtaBwMou0FUQw3P+etmitfrdkREEuql/6gmN8Vp4GIefdapmcsQMcZqFyLQcNggsbgMBEbT3nffWBtp/sD6VVM9Kq/c7XbBpR843ysrOIfoYC7MwiUzi8jLy0vOuUxTziPnmuRApHCbelU/qIXKqPnMYemMbTtLMA9PP1nzsqlhURp+GioeDYGV9Uc/fKKA1T5cmfJiX+uJ1cxKRuKlFHX+tJoxhAKL3clMuDHCnBcVjeP4kxvrwj5ATWONNZu8Z09GJABqV2CLJWgb9Hl/eb4Sg6SJ7/XmZxVOWuau6wGK+piB87ylGmUUnUV9mqaZ01WeqNZOO3Ap56wb5yWIuBAz7bqTc57AUtjGi04omsXe6HnxukplOotArpyo6cu30AIjoIqrb39dcz0EWhqucSWrrrdw6a1tfnWh8c0Twc2+NsGr3Owi5Gt91Qj+Ooo2YfBNNR+arwazffie7oy9bD7frOOXv//awnyhcw/h9xCAyj8pwMwEbMZDlnPc0TlPWwjmJhMpUiREYuY8lWEYCgwpRKIYADHRvt/d7m92XUwh7nY7ZhYJgSKCMpNwPL7sdje73Q4RNX+9svrn5+eXl5dx5Lv77scff3jz6nVIgZkLgzCWUnhixEKUBAMLZuaUUgiYj6Je2MIMYfvEYY3nZn7Xxc/UetKbX7+J7fVbyppKKVAYALqu24WYApRyAkDAOYioCDJPMgsFzjkXFGYskiGps8k1C/9m8cNxG4kNI8aVoV1aBbg0plWa1AxPGjjnLPuMb+ecVV1i5sfH50+fPh0OcHsLNzc3+5teQ92mFGKkcRwRkbl8+fLl69en3W53f39/d3fz8vJyd3engjvn/OXLl/3Y7/f7uO9E9zy2g5IsIi/joDJlGAaVek9PT+9///Dh6fbr168iknMWOYkIIvmDMw1pi4gCjLBgvNepAuuBFGyR3xrhfu3b7Fxh19bOWeytWt5s81L5Q3zVg71eLH574ykEl0VWCLG/V2THFeAbqSQyh5jz3NIU4AZUj7crsmmFpQ0aaOpcoplLpenaFuxaljXkcV0+bojCJZL9thDcKca6x29uHlyd7wJm88mV0tD8ZvtnMvOF52yndsQMAGq/gaWyoHvCeAHvG8fDVowK/VZ+E/ozX8bzIQLW/TEvGYfNrljgRESA84JBnC0hQKQp4b39V+ZT//k0RRVCO+/RHXwHYqtVOxARJL0jyyFo/A/U3ihiCCHFvYZpMQuPnYQ1A0d3wA8AKaXdbqf6aqk5G6WeVdhDYmKYrwVOOecyZhdDskxTKZPeORQRBKDQ2/Q32F6vFhHxwTZtAdiJb3BRhgwwU2ysspq/bIxUHSMRkXnhYy3VUdNm86z5l5JzxoqE4pL+zToxkYio1qQAFJ6QoiU+Mdj0LEfgHARV+9JJN6K39g1C/dX8WkVEcA4nE9zVPn1LFcIzuTIrgrWFRpEzf1rfo4ggsTZuTwzVerdQjYqmjk7THMmQmY3wtAURyVOeas6JmeDlLIbhMpdxgqc9c5Kq7631Ln/cq7Tk+c78q1PbPNsiwCYgREOfazGwCTmdncmv7eE8v2o6+mbx9fn7pEvTb4O3pgJ+Sxv0b52RXQ8vr8jF67B9s7tN7v2dL8pqH7b5fC1HrAX/9Vx/bYmt1Zq+rkNuPzHzfM6OKCIpRhFJKamrfIwUAk1S+hRyznk8wJwWL+5ixNTHfpdSCiFwLqWUFOKui33qUMrh+fn08hIj9SnEqAZKDpIfH5+7OcJojKkHgOfn548fPwzD1O/w7du3P/zww+3t7VQmIiIYIQSeU0JRSoKEIAAsd3d3Nzc3n59f9I5DLrlRkPzwPZ4v4d
z2Cpfwv373e2gDceNmIxGFoJFEoe/7169f33YYSQBGtRCqQkgUkZLADvG03+/p+YSAIWJhRqSQAkxnV+pvLqIGKsf0mnuArsKF9pq+zvgBCIBqn0X9TggAOWc1r8QYd7td33WJEgGx6GkvlFJi7MYhf/z48dOn5xDg3bs3P/3lh91uFyPNp9BE+TSosHh+Onz58uXLly/H43G/31togJzHl5djLqMGMXp49zqltN/vd/0uUmTmPGpuYdkBMYrmcxqG4df37//H//j6NH0FSjFSEWER29IgnHOoOPx8Yyvc4BOX6Ys8abG729Yg9jrpXurdbzBMJBkYxkX/KG/0jBEce2++NiD5atY1rFYoi6jzzpoaG/NOg5kGHnCs257P+40l/rwQ/87hX6pZn187BIQlf143e4nPnB9eEG0Nv9p8svniuv1GhJ1Rt9KZPdhNv7Ry5Z2rOUPuWlT5DdI8NbLtYeQfWkceNv/XKMGUfzErejX/wDJOgR+7fxgpRQ+EiIaUBW7V8YtbJVjRrm+QNdEgL3wwWF3S3OsAAFTt0Ob1MfsJASJmYSrVhwuAhMA12GCNYb4MOo6jaoCIGGsIT6vPzKVIzlmghJCIZr2lDiGISN91WB3YzKAMlcGtNyV930sVDLaZQ2dRRKfDMPN4GgVBFcJhHKc85DpDzCwWN0k38QAx9nWa/JmElGyen+ofqHgCxA1GbNqsPTQNjWrEdRupftW7Otq11xy8klNKIWJTkHDWe0UD8c05BnExZUYtwWmhWDXVXIR5ThuGVWuyWKAWy7N2pMf8wdph5/6B7lzAzLaeEgznahGEun500isJBZohbc9E1LDp1+fcsky+F/3JlEnDP9VbT3aiPI5jzjnG2Pd913Wl4tAUQqhhxBeLqK4ILycAgHl+mOp6Nw2W6hXQhm01EsgXG6Z2DPWIYSZGW5VrvsyyDhvYCCFPWrZkbHU3bzdspxFCm2AbfvyKsDUCf1AhhCX+Lwm8i69/d4XvfKsBwMBoLJBWueFgazRutr/GpH1uGl/TjyHqygRd6rfp/TrSHIvR1ABz/RBCiIgTomimisxlFJ4QOJAQcNel29ub29vbvu/3t30KEYCGYTi9HKYpS+E8nhDhdDgOpxOidJFEJKVwf3t3f5M0sXhKKXW7/X7PzF+/fv3y5fN+3//0lx/evHnTdd00jcycUmLQ8zVg5Lpdmcd4t7/Z7/cBXhAxIo11K+lR0fC3ZvibNS9Vg8vU+615WZM6CnPQA7tA+/3+1atXdz0hT7kcTSGUwqoQsnRv3vavPz2+//zEgEgpT/Otb9raR37PapKlHthwku9fjw2XQBERwhpmEVUZJgwkzAgFAKWLSTdhCMzqEFgYMSCEx8fPHz9+FIHbW3zz9tXDw0MI2HWRiArnEEKsmliMcb/fD8MwTcXO+HMeRaTrI2XIeXp6Gk7l1HXdzc3N/c39zW5v7irjmE/TKKP0fT9N0y+/vn//21cQuL29zSxQILOIkKjckQwiCICkyq6YXwjANj/xtLGJq018bsqUZifQ4P/KvDQwyFL9W7fpB+Ib9IvuUjXf6Sb84IdvvqPuFdHEU8tjUyu8clWF5eLaxM83x7he0SuGX2C5rDwm/SLyTxpU+A2D78I3tYm6zbIpntbLdu6Uts+JNumnbknaTcKaUSy6WG4StDgPuIUQ9LzQvyuu2LiIiMsGZjxF+a8iojs5JNItPsFsSpnfdMFF55R0IemL3gcVEdUTYekWJ0UT02NVCUSEC+iouTrmKuRuNOfDHjBFBRW8FqGwpMiyeUqH857SwOAa4ZRwsbXVOCqCGAFCNJxWLr28DjuvwBoGc77QCJKSGdBnLWIcKeccAnEBZbg4+yJyzhIwqc+eodLg94Rr6PEoNvdRWIb3tQkupQzDoKm9570+F64nEPUvIc6+8oFIvXyb2fWobtawzbpZL6UqMDZ3Nh1EFCmZPyE752wwHdUZ/XSMNijFtr5bSpmtfEXsyhyz5uJDXBonFVCNl4AAhEiIDAAsBYrULCDeMomISGalZLMBeoOkuLQQ3rRo2lcpxcKvLVnnHNDVVq9CGyNBjTdrF/S1grN2njEmIlM1Wlo75mvarDKb1jJnpZdQ7yUS0cSsCiGU82kWLq8CeQJT07TTUR3XWFKvvdusXP+60oaPGHzuzr3etiln8PwiXdxFdJA3vRsy0Z144eXrCrDcGfiF0AzZfvIDWeOkaXnz+Xlf+B1vrVHUQG7VYHnXZT3ABnLr6xIkl8BYj/16O7S684mOe/su1hTVtOO7s790YX69IPiekeJ8800ZC4U56UwcxpGZMxfmknOZpiGmgMKB+H6/u7+/v7m52XV913WaIQYCEwYQGYc49LGMpZRSJt7tdo/p8VMeh2GQjIWnEO5u7x9ud3R7e6tnkYfj8fn5eRzHYRiI8Meffvr73/+KeA4vTAF4TrpDRPPdB8iFy6wS9DGBBixRdpS/oTCv0dXMyBUKv4Tw76zcglGYCQlFo4ymGnoq5wwQAaRwkZKZGQlYaLe7vb3dIwln0awfIpCn0uMfDh7TwLNgVmuvswsuXvbWRlP1DZL5tBsgpJTUNBcDh5AixjIWhiIRSxEuQITH8fjrr79+/vzc9/C3v/3t7du3fZ9yHplZgKdpmCagMl9BDyHe3O7u7+9VHNhpIKDeI+JhGIZheDo9TtP0+++/f4aPN7vb169fv3n15vb2VgRLKXniPPHT8+nDh8/HI7x9u7t596/vf/94+PS5MEBAAShcRITQQqQKoq5NBjhvbGC5VA2fsFyba3uUuP2MPbE2wx8MDoS4rQCY3G+8Wq5w5usV/nRZLzFPRX7vBG692GWwhnNuQij1bra1dt6KrGpeHyOutJq1XMbFpncBdtOXpwpfxz+HCzx8BqPODGzJpktDaIBcv4VVUW84AF3QzzeLuAJLMoblTK1xbuYH63o99uazb62hn7UKZl3YyX7d+gpU/y9tx+/5fX39erYMiIhmaV+So/5tkwuzc8DblDT2RBe8urjhUl0BqiatLdRUUgbE+VKtPuH6tyqpC9w17dhuUkRyzmPJiLOHXowxhKSYyjmWUgC4cClFiAQxaJa/I2fz3dJcf1AzRjRUa7tn80j0GoKfNpvUUko+aeoKYebCzJKlTqeIoAhiUH+nEEIMgSiZ8c2bqmzUZvnRYu6F6NR+9IlKnKREDUpWi21JAeb0hry6OG591dXBpWaqqDzrzCCICLjYpNhKhkpCttLOHs917OacSTVuKlL04kdh1mwcntC1F7vjp1CpuZKZGc7XJ1z9cxgYfaJ2QiIws+KyPqSU/HQbYBTmxIPahb9d6b26jTDGcVL8dF2H1Xanc9ossQDnwxRwLMZPtz3UCPtENI0nWt6ilBoFt+FEsjrmXKzQc3eLUOC6mTg3JaC5CheKhMx+VrLenzkYvOzc5JJwuRifWVfTJ0b/sKRnBew/UxoO1oD9TZitvl+Y1wfrUbfmww1U3/P1e4pv2YjE2AhcHrJxZt+pZ+VXemzm9ApWxV1RRhQVH6C3uSQz8yxZhLvYdbv0cP/jfr97d
Xe/23eh3phAxMyMKCyQqIQ+hv1Onb5S6Ajl+enrMAyMgBAI466/2d9QjDHn3E1TTP3Hjx+fn5+Z+V/+5W8/vnuz3+9PpwMRxhhzGceRY5pz7cUYpe6GiEJAhAAppZRgEgChSIshN4Qhyy2IZ01rLHnMN0sb66nfpfrrWQG1wIp/hucr1hiDBj7hzGUKFEMIAQRJBAExAAYNZazOk5DLLClCyC6sq5G3/7oujVfIGi2N1L5UfHf+lTk3BgAptUoRDMBSpsycCQRj2Hd913WROGHiPuWcSx6GYfz8+fP797+fTnB/v/vhx7d9P4taAQ51RxEjgnS1O1RfFSLq+x4RBeZDVQC4vb0tPL3mV4fD4fHxcTgMuYyPj4/TMMaYXr9+m2JPWE7jdHg5jkOhALd3r/76t5+fDi/lQykMMfV6FhsCCmcEQBBEknpPR4AtSN7molvLBZMjzVtrRRFWrGON/83SSAotnp+vodqQX6sG7fOVCpekiZe8IgK4cMLyzVZ/J2t6bp+LIFZH5OU5b0PDlwY+U/vyQM1modGT7SfzmLu+IuxXWspHG53HPyxn3+0Nvn0gpcB4/aKBfw3YFXHQzI6v5mFrNsO+cYNh3Wkjv/QDLTnVJr+yMRJR1bCkaWcN6vzEyIdnaQGANdQwsp6oChAiARJSPueuO5/LiIh6oWuAQ3DBmWKpycSFEar1pjqALTA1F8IqZRcb34uDWTJlG2HxThcrtBbPKSz/ICIKU8WvHqT5KPrNrNeHM/Z1Vz2e5hBDKIIMRBSRkAQZMKZEIQeN4RGg3hyDSvFSt8jBhYcx+uB6yc2bm6AqpaZoGTXoh5yrX6SIAFCYR6viAUWYcc6vNReNK2n2wxnIqn1JKVycESmXOSSJF/aeyg1j3oPRa1P6iiqEUg8YKvKL3mkxAhCZB0g1LY9S0Jk/0jkIrXEiRIwxegVJybTrOigsdI74YmsspcRyXpbnuxBb1tpmOLoMGqZzJnAiVQj17pAhBwBEyrwRqPXtuMX8bD2RI2JMvSnA5PIiqvXYRmTHwOisiIg1Sn7O4lRNQiwrxQ+WzNEU9VkXdVcNmzMkw2FzfqHL3FtyxI3Ou5T7Bd4wBBHRy4S4ZNwqt7i6AKDjDzZ3Bhh+axsnW/JsLTz8xPnGmym79Nbmc/iWn5Is5dD1cgnmpo5v6nuaXRd03PIK9/4mhGuAG8ay5jNGbFca2QTYin9y5RU5Xx5mjeNfSgldKqOKgDk0766jVw/3//K3nwk0RhwjCmpwDhZCIQiBAIMUkEASI4UQDy+ncTxpxI6bm5uc85CnL0+PEbvdbqdZcG5vbx8fH0Wk79OrV6/6vhe9iJgiBZ1HQQglF0RMdpNQIHYxQDhlCSH0KU1jRpIO41QuIv+bODTsbS6lS1SK37GH2ywxRlA9J5J53UekIkUYgNr643ja7brdbvcyvYgUkRJDR4XsXhtezn23Hojn6rCiPbi60q27zc8AJFJAUKCKDxFByDlHQtIYnRjKxANMGGmY8jiOLy8vz8+Pnz9/LiO8eUU//fA2URgOxxAREaYpS4wxEZdSIMSIKfYqQEthJhbBcTzVg8U5OTAghxBev9rv9/u7m5thGKYhn06np6en4/EEQA/3r0NIhCGlPnU7GXOKe73dGkIALiJFvWyIoEhBgJlPA6o7l6iTxwoPNhd+dfvD4gbbcIE+19Tlu9icl03+4/+yuwzfnDpdaaoRT146NCS0ftEqnJn/klOda9bzrjVBGswtw6z112Bo5cVpJsxX+sFR/nfKlEtPGqgAWjBWFRa/Gj2g05RgawEa6tZyX1aHX/AtUrHpWGyQHMx+vppTJN+On46GzMDtJawCBcQtnmn7Gas87/n52weg/gktV5ysrDue8LQ7A9VojHn2gLMFYliKhQE0Xq1ag2B2eJOcmYXdOg909iT0s2JdNiPRz7zEo2GnrIIHrGlX6vEJIoIFw0VUH0vzP2m0BX1RWyulMJ/32Yg4TSPUK09dOlveiSiliBhMaQaAEDDnM5y6ZVcwLCaK/apfNfScV5i1juatojkbb7bBxhilzNt9JAoRNZLqHDWUuRSwRPamZRkdlFKmaZqmSe8uzp6xq7A3DTXj2aA8Y4xqYBXO4uFH55DpF6SoKluKhuF0czfjs+s6zyGN5kK9hyn57JFIVQGGUjU6FiLqYmRixsVNWf0QYxyn4h0tYBYG4dLyQKd66TkIaZrNWhxTlhCCCs5SUy+KSM4cgoUP9sGaxWPbHkINYG8AWBelxj5WWrKbgX3qQzo7taqBVSEnQKmkLjKfDxEuwknbdE/TbGlsjgZV8bZBmXuwmVUbwvCssx7NIOL2XTtZnkT6H5o2FU4SKH8kKhfiRviKNfNteOjmBxtXe6q1Cc13Fxu4x9t6LN9sZA3/pQqXfvL04D80jXu2cKXZza7trWZa7ddG5HiqWPd1HUu4LGLnCBdgnkNbFaJ63UIbiSHmTAX0ZESkTIR40yXSEMJSkobnTpCzqF6XuhBCmjCPnIWzFGSWl6evh8NhmoYY4/39/TRNLy8vX79+pRx+/PFHAGBm1QRKkTdv3qipP+cRuKAEnjKUvOtvJp7mMFpdx8zTMCLFlIoQ5lyEMxJwFiIGCqWUGBa61CY+DeHX8XllLv6TBQU0s2IBwRQDIrAgQYyR9TYdiuodiLN/4jRNMXYpRUSccp6mifokUgD+mEthQ//2+SKFX0CSd2Vc4JbPcTvUJR4RRDSVIgUEkXw8Hj9/+gLTcdfR5+fjNE2Ho8aYhf0e//rXv/71rz93XRynU4wBEU+nMZcRKeWcb3a9iJyGA8ycafY28gpDjJECAAQVHCx5lt1diTHmiacpPz8/I4Td/m6/v727p5Q+TJNgTEpsIaKMKgVUGFV1ZW1fqg+sd89STNyIO1L0Ty7RWMOO1r9eot5LlqiG8/gtwbqpP8GTmx79V9sG2AZ9s2bDkBtm2NDb+derirGfAjMqwIrt+75WuN1e+5cs7evRXXpu/H+zfSvNRDRoMfibgfvX1zMCS33vvHYAABZXD7Tw0pusgU2WRX+ybVhDeJ6h+F9DOKf1WrQm2+lAPFb9AP0Zh0HeHOUvfqW613K3pUwHwZotHCr1RvNJM32jlMI12Ze/owWV+g0gUxX01NO7SppGwVygAmTIZWYCBDcAw3UIoQgEfWiYJRyOJySiRCnG2CWo0fw1FRyebXQFAIDmuB2azQZrPgMIlGsQS2GYU73FqEeYwzBYiDmsZqUYO66XuzSY+DRN4/j/4+3f2iXHdQVBDAApKSLWNS+Vtav27p4eu/1kP/j//xJ7uj3Tp885dcnKrMx1iwhJJOAHiBBEKVZm7eMxv/xWRigoEgRBECBAYKhc7LA4eVo+k7UopsBwCa+ir0eIGKZQkwKAJFhylzdNg0QaJUyx1/e9rQGFypLa9f1EYVgSaZTI6lP9XMqmAADiAUlEQVT2duU4BrbixKLF6EQPwxAw+iXkaVfhb5pGQ54oFytmwIULsZewp6GJFHIS06udfgvDMCCLYomINC8fohyPxwwTbcQY9YCzaZrj8QgYjOmb
hU09GD2WoOwimpLec21mbttGAdPr+KXa0LaLjIvlyKBJw+yO6xdhKIFVbckRUdu2uZjsAZAoxNgw8zgOTTP5BTHzOKZhGBFRby6llIY0WL9QhACUKSlfSikAAqlaCB5IW6SVgiqzGp+xeMAaQ7EheLpV+BfadWEu66N6Yy7TNciyJVtrnLKl/xJ/hFQe+lMMWaqmsHXWWx1Fk3NJxaX7sWJD3IlaKHG3KqEhhGD+27DcO3npam+LCJwAZA9p5Q+5+dkXvyc5bnZxQ11vwIaWauvSBv0Gb3Cud1O/LW3utZfg989tAeLlxLgVJPOvAnb7t6pgXVBJBiMiYXHKMO8v4zg2xWuRAAKWfDkhKOPK4xAIDrt9QxQDImcZ+27fXu12w3A+v5ybNlwf2qeXE2dEAALs2sgM574/n17++OOPX3/9tL/a/eMf/xiG4Xh8jjEOw5C65uHhARGPx+PT08vLyykEOB6Pp/MLS+q6hgL2/UnP8Y7H4/PpGCNFoi8p5XGcDuAhJobH4/Dxy1MaeH9oRqaU+dDtOI8V6vxsbs5LtfN6BrgmaVlKtOKOctZTXAAAmxdEFETOzCC7rk3jSXJWmaPvj1+fHhJgiDj2L+fzSxOpbVsByhLfvPlp7IcY4/k8hra9vr5+Gc7iFpfv1wCrBovukI6duzuuLFrzu7hYzvY8e9cjjyIBBEIUQkLRo2cWQSJk5mHoQ9ecz+dffnkej89NkOOYbm5umti9edPe3l7f39/v9l0I1Pd9bDT62hhjBJyksb7vJ14aWirFL1U9kdTcEjHGBOPV1RUyHo/HxHJ1dRVDe7W/ejmef//jYwwPN7dv7+/f3r55+/nL/3h6PB7uj8ZamZmCO2kV0YALGp2QKCDSOmO9zYWfFxMYqvW+yb7WXLQ6RvR8b02im/Po58sDuf665s9+jzOy8bu/h9w3KO6uPvg8mVhztuk5zOdX1Soz/i9ObUBEcXkFfX2rvGCeiNV8VevaE1LhA5PUXSF5+2AXAKBekta+r2Q49AKG5yR++AuEc72uVXz116D8rOWxVvix3FeqMAwAuMxjaSfmKg94OcGasn49dYE7+7B2TOux/csfhuIqs6s4MaOiB/tgVO3ZmlU2KU4leXXis4j0E+Zd416Ps+FYBA3VEWb5zxCndGsImig+g4hwlimvg4NS3D3FzfMbLJMBJbjoRMoCoE6vjnHnEsde3KYlajFDBLLjCtDwI8q9HN6JiFQhLLS4TGxKUUSwRJSRoi5Px3ElJg+VfAApzVFSbKmY4OtVc78SqtUCS9aARX8AgABBjVRSGAN4TUB9C3OBIaWU6g3bK6K+9+lrmFlYGWMgIkv3Z1Si1aq7asatZg7lOB0imkVu/RZOOsYUF0E14cNhNy0n5y4YAmE53mrbdtd2eg/nfD4xM8VGtTXlL+fz+Xg8xhiZ5ymw9em1CI9nrwqSK+qaSYDAoq65KIACeUygBxw8ZWgHEDXRGf9CV7ymbbwgpTSMvUanUK31fD7rr6bBUslkKF6TYTErt2ESEUlmAlOS87zQ0dgcEAjdhgF6wr08fBIX3RRWnP1SMeYOSx1MA1DpgQ4vniNOMRi+q3hiW9AVL8jPgFmDXa3E7+xUYBFK59KQv6d4IGdWduF1zwP9Enu9cd+a58Brrv46ABW/BcdRv3Ow5O4bW7+bEIrbAqym/fR60CDfshZ3smDAYwW2TquWxDnnLDmLQEBAEmFOKXEemZnH1OMpjT3ncRxzSmMkysMoJLFrm9CdTv3XP798/PT56fHlw4c39+/ejuPw+fPnEMKPP/5wdXXF4zMAPD4+fn147PuBENQK9OXLFxFBVJ8iAWAkIaJ+zDmPKBJjaGNTbhLmzAzAEVEEJOmlcchpWGPD2N082NWqqXaf9cPNmvAqAczbEII3M+iNdx4TN6GhEEJ4fn7+3//70+nhU38+DQw5QxPg5ja8ub9tGmSBc3/+9PnjaaCc834fR5mOBWMIOG6vF39ecGngrwxkc/iv19eHLMKadBK8IQLUqUQmhwsSxjGl1PPN2/sPHz7c3l4jSdOEtmtCwBhpmARZO1IMRSdsAGDatt3xYqWgogTADAA5ScbpjC8gIYamadq2u73Df//337/8+fXlOADEXbe/vr59OZ6fn5/P57PeTCyUgNM9cJgshNNVNkTDQrWWN/EsSyn/0sPXmbbHf9WdrDzRvqds9vKXNoXXmbZRXUWKl1rzCHmd1X+T926ua9hawp7BGmyFuuZ8wq+sEbd9bAOz3jX8X5s4q7DJfAA2uP/rk3UJdZd2f09Cs2Sy9HPcnNN1F9Wv6IJuVANcT7T7unGqBStMuomoJ9HK5hbsYzqYTMXFZdSDrRUiEIJmigcBAI0AoufeKmgrt8Igoi6kS6nXo9UArebD/zUherALeDxzIADnQqbmiHKHlfSSGHAWlgyIYm35mSNCIqI4mWvUJDUfORNR01llZs45AQgzEImFv0FEVbVVBbcoIObqSS5NX0UKm1xMlidAVhARcK4peoRULAM5Z1DjZF4obJ5uPPFNXlIl0IsafCBkIwWbtXlbEREX3YSZgWvPLtUYqzMkU65sQIYHdOc93mhsrEG3t5QS5ywiTdO0IapeFEIAFrNAxhhj06jZ2wdWUVBZFo6764WB5TYsOlsxOoXQFpoUiysW7jkMg521YPE1FcgaJrjSBrHY3PzJk/rzeLzlKZkhAoAGKLIwOXajcuYL7vYGIgIXgp/kZgDN9QcLRmNvrLT0+WylWqFG0vbZUxdcKHMFWVA7lqi/AEDOY3weFM6vA1yM8geOf/mHvHQBwiWH/eZz+JYgUo1ujQF0+6uH0/dVsezqwys9+s/fBNL6guV4PfCe1fg5FbcRbsJ5CZ+XRlEdKGwC70Gt+OTrjV9qEJazWQa4tI2rMgjzqVZ5DiKALJI5D2MIgUAIhFMWkRiDXgwTJo30y4xfHr/88cfnL38+HM9913Xv3r3FGH/77bdzf/zHP/7x4cf3RIQ5KvsK4QURMEDXdVdXV7GhYThnTogQAhI1IWLTNO+vb8exV4UwBr2PFkLTnId07LndH/HTw+PLwBkIcUwplIDPl+bF7yYeD5srenMWNqfj0tSs6UdEAGZ/mWHITw+PZxgonQ9d2x+HnOGHd1f/9f/2X969eRMbSlmej/2//Mvvnz9/eX4+EUUe8phHCVEIL8V4smXo6U0uq3lrqp4+/0Wxez12nrISgiYQjk1zf3+/C/JV8svD17Ztd7vd4XBAEgDOOff9gCj7Q4cIIrOZAnT7EAEhLqZc1S2HIRERIFJEPfQnIhEUyIghJ6EgIQSkBgACNU0EwjiOghQ+f3r45fff9rvrbnfIOX/58uXl5UX1bcagVzxExA2Ryt9ZpPkmnj3V+SevMFu/O29O1rplf3D5yivV8+9h+P+RYqM2RAlcpBlYYe/SFnPpV3Breb2oL+2nfi8Ad5wt1fa9pcb4OUUMr8zsmtusCeM7y+YGtLFZLIGs9r71W0Zv4m/DaZKYFbTflH8qeBBRJPuxSzHDmIy62pE3FPV1R25oUEFoU7lWCMHZe7y
GspxQNEISkejhA0HUhNLFq0qFcCIxtqVhr/xOY19937PYAWChZBFRioEx2Did2XCBBax7EREWBrVeRiSZXSm8SqCVh2EwoV83J1UIG5pdPXPJF281VVHRX/VhjG0VD0Y7suggHpuwRfq4PCOB5QLLkFVGRnUZdXYh0bM7ZjB9EqDtolkw0Clgbbcz/TxnlCmECQrO2pEUZyouQWg8bJO64nyajQurSbo0PieUBwAN+2o4CWFKElgZUQ3JwzDofTkjFXUERU3UDtiPvd5zaEMMMQycM08hbQAgxqh64ziOIhtLV9xJjyHHiN6vGX1xivq12sA867R2Muc5CJPTsf1Xs/vpwNuuVaum2WPNYfhwOBwOhxCC4kRRlKeUFhmm05mpUJjzhU4rS2ZoK3oTWTOshRpQ1beH3jj2Cje0pnxlz1xm7JX4N3N3Csk3xLBtbRAcX6sGuAn2utp6UPVACP26ttdlyfrXgFXPeXkHY93I+t018JcqG+RrwCqGWT03CC/h4Tthu1THkLmu/J3kJMtDijVgm9xVP8GrhwsTL0JBDEiCMAiDblAo0B9PApwQAFggN10LIsMwdPvbDPD0cvry5evvv//+8pIPV91PP/3UdV2g8OnPz8fjy/39/Q8/vG0aOp/Ph7bpun0ITYzt58+fX16eIND19fXd/c0wnAGlbWNAYEkAEGOIu4NIjjRFAD6fj23bXV/dCmKf6HB1HDI8nz9JygFCxr9w98lW4noJ2Ar1FOsZ4JrCX19HiAhivyLoURdgYk4Dj2O82sebmzfv7m/l42PK57///T/9L//5f20ink4ngdQ03du3b//tt8/Pz2dpIqsXA+LQ97vQrHt8ZQF+Eyf1eP+iRRopIjAgMEgAySAIJIhZOGLAQCGE0HTX+7b5Qe7vbjLIOPYPDw+AvNu13a7d7/cx0pj6kjGLRKb8YczQNR0ApJQ0MpzwPFNEhEAibDxbBLuIOWdhAJwS2Aqj7inX14e222UJv/zysR/yzfXdbnf47c9Pp3PPrDc5sRzYkTADiADaX5WMpCSH/h48r7mxX7xrCrz0FiyZ23fSvGdBa1Bf4b3rRjbIezUE/2sF52sA63We8k28JoMAqJ5I4INdr4Ex3lj9tAbPYPMHxGt2ut4pql8dVut9UIqYVE1fJR680uOiwnKa1tVe50vTEKCmrunv6iS0gnn94ZU9sWqkPAFY+S9Ue6LhExEBtvmtf+gJGwOByPRPRzSFeZ5cJkV1TET11VJJW0R8VHmDwRgLFMl5VgiFp5CyCmipVJubxWnYm5iqUIkyn1oog1GXPN2TJ2LS1iZclmkukQkZJ0FVYxJquBHDu+ZF1EJECn4lbs7wMwM1ACbtBY0hTsULNOfM5dKXyuhdJ1C8dWl5oROXoVks2mdFMegc020+ZgSywxWiVgG7h6nqDYuyAARou8A8uSObriIiTROKNC8h2MFDxmVMES4xNlXhtztjphMSBFwa2fRFIyw/OiKCEmwtlDyBVC7By6SbLVQsc2WMMQJPrrkKxtgPGsXh0O1ijOM4nk4vp3GQ6Qrk1LIqVADAgtaRdYHuHpHNDiyPUvwsgEtc6V6ZzJtmwZsUPMhE9cLWEZ1OJ31uwUKnQ19Jds/TL+yu69SVVAo/1SkYhsEUQqUO0FjnIc7d8QYXgwVzxDVn1xKW3t2ehGDJcD3LWxd/wFFWE9oX3y9pIFr3BPG73Ec9N7SOaLl/iOPIVVlvQpvbADhmVb27yeg8J12/a5+thQrUdZv2fBOGS/hf0vBG+5ea3Wyq+vA9lTefr6mxQsgrTU1fL2iS39xuWARET3nV7yAiri9AlbuaU+hdQAG9P7zbHRAyouQ8ZpiuoA8pC+PXrw8fP358eDhnhtvb/Y8//vTu3bvj8fjx06enp4e7u5u//e1D04aUh7aLICQiu93u/fv3ItL3Jzsh6rouNqFpAgqPqWfOiNj3JwCgtokBQ8A2Nm0MbRfHxDHSbt92bZPSOIzQdiEiWZSPCuF+9j3tOcb4vZrkJv7tc9WgiHiVSjkwTtd4QoyxCbLf76+v2440BFru+3R86V9eTl1LOecQYtfFrhtRM+6GQMwEAWMcM1cHK9VqWgP5PTT8V0e9eIiISAiMgKLH5yWdVmgiMebMX79+zee4J7i/e/vSPxFRP5yG4fxyxLaNbRu7XRNj1L1l2ugRi7O9Xv2gpmliaGOMzBAjlAqcEyNJjFF3Ogoxu9izzCyMRCHnzIBE4e7u7uHhpR9yjBFCOJ/P45iYNXF10EuQgM4Vc/baXrhBVWhZW/a+ifk1+12zaFhKHVXNS+bBzab+aYJfA/xKj5tc7hW3l0p48Ix63emaeaI7ePXNWjtUQVIaNw81e3e9g2zC+Z3l9Y0Yl7nybAOteAsiaoq7NZOpMfwtYlvDI14wenW3/Z6xV7rAd8Jziats8ucKeK3mcwFov2YMgyWdYNFKrAuz0HiLhW8cAGLW/VNAUPTGNAAQYRIGJGqmvlkjIAuHJbiGCw/WGgXIE5tZZCuSYi4QkXn8iIgyu6+XWPkiQhtj1m+GGtNDVIVDNNVIE/1J4AgwxVlBgKCiP4U2NglQNdUy3mw338BpFOC4pNGWEbqnYE926PQQzxQ0jsVUoTw0bot6UxmnCuTooKLsyuxmrJNodrb0K1MHtTWJEyVVhi/vrqnthyn4zZybUYu25npc4BMnAlM1dXFqpToSIgbAlJLeFWRCgEWUGragCLgRmCvG+aFfJIY0PwuICC5zugNmAtiPK+ec8uijzrpGFp9tyTVNc3p6sWAtueTS2O/3eqvweDyqn2oJQpgcDGVN40STyJJdtgw/qAqk6ZjMYQZsk1/dvUFEu7SNONvH4FtFVroQbongALDZmmb04lejjPoBVizPg/EKkJt8f13mRmZDx3yqutlF9dDTjyyjxq0r4AXV9PuLvVixXNuoqk3UM/3NX2FFFb7ZbwK8Fhw356v6WkH1+ng3506KD0IWQVIIL96KQUT1CAXd6whCCBEJgJgZmAHlPAzH4zMjAPAwDH0fhmEYxnxze3V7e/vmzbv9fo+ITy8vnz59Sin//Pe/vX13L5J10wkQx3EElhjj3d3d8/Pjy8vL0/PD/tB1XUPU+Kg5iHh9dZVzjoS7Xds0V6AOOCFkZhCOkdo2EkwSewBMWxhYfzVWf2nG15jcpJb1fK0+l5legcHMKNB13bv7N+/v95GHhuTulp5fwvF4/Pjx0/3tVbdrApEI/Prrr8Mw7HYNx+Y0DJlzbNoYI6RUNVutowr+12npEoV/s77R8xTWGXG+OolIiAyYUhJO+yYKwsvL6fl8fH56jA0gQNvGJsQxDV+eHs/nIyAzZ7tNEGOjkdLseLTrul13aNvJcoiIKaVyH0STWiUql0qYBUCIIgIGaoD0jAMZUJiurq72h8Px9PU8DnHyUA2IhbNN46RJ5hI9iCcQAsQpx8ZlzNgqWzPDaoJeWeaeRMHNY7W7vc4o1rP/PVzrlaYq4eFStc0n3zPYS2DDFlW/3u8rr6+BqdBLK6H6FRgQ0QgSlhN3CSpD46Xte3N0691nXW
1zLduHzf0IAagExvOVPalgKd/c73y1zV/9EOz5mnFVkKwb9E+yXyP2TwQL5MyM5VxAyqUkKZKzleDyhXpclRwAUv+Qcyaa7lCJU0O9i4XXi7R1cvaoaU9iYLczIaLGktGwGUYomgx1mntZIIilKISAdkLkQFrE85wbLJoh4mTKU0SMeUTEEBBAiWACnqeYooaQKX6U5sZVy5VeKtO+yN25slnc5GUGqgJgcE6IRRdexbVpdjZENYBP6GVOmsEeERGDb9neze6eWxY2i5NX2OxFrNKYwOwpSiX4ZBXlSTWcohAu1rzIrDoWOp5aKw8XgXehKCEhhDY2RDSe++PxeD6f1XG0aRrG6ToKujuQXHw9rZ012KYYe84Fy8MLm6blUfTkpKraGpk7cR40utJiRSAiYtu2umd3XafxBm0eLYMFT/F4J4uoDsrC0qpC2GLIzphp+IGiVHvgeeWXWL4uojtiORapKnuOUDHibzLudZ1pItydWKtAS0b2PQW3XNf81zUvXsPznX29Uox0K/4u3xoOLe9kbkK47uuVr+tiFOKhMsDWG5Lh0y/PSoG8NLRqBW0Cs4YfV0qFn75qAb7SvgfsVWzoB6qfA1rCFCICSJbZFTFojP4vn39HlBAppeH5+ZwBYgsA0Mabq6vrH374sev23X7ftu0wDE+Pj58/fx6GfHd/uLu706S2IQTmRKixrzIzt217c3PT9/3T01PXdW/f3sd4RZBZpAmNLvG2bQElIrRNiJFAIKU0DOem2fVjj4j7/X5/6E5DTyAjZ3BXHmyAcCG8flX8luQ5gG9n3fK6kWWlubKf3BhjSiNx3u/3b9++fXu/p3TeNYQdHU9P4/nl4evj6eURSc79+HIaP31+HMexaXYjRIBBypHi+mDl0tJ7nTzWv2KRmf5SQcQsQHqmUCL/CQYEzCmDcAiBMKZ8Zs5XtAsBUhpEctOE+/v7u7ub0+kl8/j4+DCO4zAkxEkh7No9Ij49PzBzE7vd7tQ0nVKI5rJSjVENkiBTQPg8AiATUUqJijMO85iTNN0uyyQR9n0P9NI0qet2WYZRfV1n9mypIV+L2uJx6OfaPqwJzCq8wk82Obm5zKy7/mbxvf8lDvz9xQbl2/8mk4RV8NvXoXoFWpVMYIl/gIsEbd2tBIOF2gZus4CtWZYLKrqvsO692nRgFYTMfg0ux9gaFeupXAOwWW3CgJsyz0ZkeXawxsa6ZMtbVp9P1WnuTb6qhMbpreVFG8PVJf5WKbrWkbjQntaCiADOSplJ5irPVwPU16OIAKhn4oYPgF524hJUN4SAeVvBZRe2tZon0i4ALAej4QjCa6jXEDYiIsKZGTGIwKSx8NRLpMkSaJYc3fZNFUZcmFltSpZGJxzHTDTnN7c6PM7hRrzHgpTLdewcKW0DE6ddFzAWvMMQOI6j+s6pxdKEGe2RNZGdowYqG6ThkMsdPyoJ39XQpPM19L1JfujSstPs0rnIVB5w0lv0LVX8cJnE3KPacKuFy703C29jE6EgMc+rDouypHcIh2E4Ho/98WQR1aa0FjRPro2diAQWe0YBYOrIYOOSVcUqLNXyOZ2AI8LZNGRLfcIzNeLUTgMshKBob9vW7IEhhPure62sY1Tka9qSEILmsNaHXLx2vQHBSCWEwE6vNtKyIBPL7WFDtiurLFfjtRMjX3mJjY0yN1LtHxcEr/XzbxZ0EoaBtGZTuNI61kVWWsclhks4O9CCO7tZ14RyMvd6v56EvgnnuovXi99ajEXYYvQwbA4Ets5iq53ye8BYQy5LmWzdHS7Fx7/ai29wPm0kPUfbFmq1r6ZpUhqn5EREAHA+n8fhhJxDlKv2cHV11ey6GOn69ma32wkcmtiJiB3x5Cx93z88PF1d7f7Tf/pPd3c35/MZgK+urljy8fHUdV1ookYCUyH+eB7++OOPrmtubg8U1AIPAMiSTqfTbt+FGJn5fB44jznnlKUBPJ/PIwdAjSTegx4ww3weVH2wlbKgzyWGKwqspvv1sjlNUg6nYaZzAJU50kBE19fXNzc3RDwMAzI2ze3b3bux378cvwiPYz+eTqe+zymlGCNgGDOEEBoKjJhHoTb43r34IhXnubCu/etrvP3V4pGwXNfQdG1DCACPj48wnv7+4d3/8//xfw+R//jj93/7t39LafgQ37179+bN2xsReXi4fn5+fnh4SCk3TdM2Oz0TfP/+/cvLy9Cn0+n0/HzMOe/3+9vbe705XzYdJJoCz+QsRMTAOQsWDsAs/XlshvE8ZOEp28owDKdTv7++jolxZBAQQZjlb1ZWMR3OiwCjoFzC8xqf1TJ/pcKlaZKlskHLrMuvEyo6efrSfK0rb/70l+pXnWrNV4LKGH+GsmZxpad9T1m3YwDBFtp9ugVZKCG1Ucf2kWr4pcJr+F+P2o/OY6mSpe2venj53eo7l+orW8mMJdejb7ySf+DVGdeSlxmwHT6nxk1+g4JwXpkHTX7wrFuL6REeReBsjFLurJlgX+34ZbrZX+PSkRKRmlsqVCBiDKElEo2ICCUZqsh8zoqIdtOs2n5kEW0Slaer9qlQppSEaAqESLNSwcxAxCKQWSVjAEDknHOQyHpsQYggjMhZkuTusMs5p5z1DmHEiEKSIfFkOCq6B4hIGiYHnrZtYyQA0OztY04RJYQwpD7x2LYtAIx9RsTdbjcmFtGkf7HMTdL9LlCDiJzhfBrmBYPz2A0VnCbzXdM0eoGMmQEIEANGImHmQEHvyJ3Pg1CaTG2QCUSzy3FKsVwyEBEg1LR1OWdBDE0IxV4kLABIsRGYTKyJBTSbLWJiUbkEAERYr6kLMwpoHkhOGYrfrGRGxGGcg/FAWbpGUlgMhk49m7M+iEjxDM2n08lVyIigwfpkpL7vmxCZuR/7pm27pun78fn5eZK3EPPMCyIHAoDMgsLIYsCI7nyaFkrjw6pLKoEgMwAhJWYLm4tELCVMLgEiMmRh1ihxwpN2bRFlEREIecq6wwg45nTux66ZDmM0DYbOviquqgoqlvb7/bRMaL7Om0uxZWlgW++jJAiaYRBYWDKHECLGhpqeMzAGCUhIFEUkUADOiJMNBAB0ICLZbPtl2U5RTDXtlV5ByZkBBABDCCkNbntynEIzVUG930PR/eJ0QMPMjAAMGYAQJYRyXI2TsK4uvojTFV8lV2SYguhqsyxKkBN/L1mPJq8BmViPh1C/esHFCiyvnfgDhY2zOh2a/i8zF54uPCvvFkFEAw9Kmpb15pGX+Yvm9mGOdemxjbBUdIuhNUstN4hIFgaAQAEIFY0srE3QlMjO3WNBAMT6iX4VyeNs+TdM2gZDzuNDVn412wh0x6K8cN2f6crjxz9UsNzmKuv2/SvMnJGntSKC0/VARMQ0cAqIasURQhACDEjjeIwgp5SIImFz7tOuaw5X7d1Nd3XVXO93IaKIIGuUReIsmHtEPByaGOPpdPzy+y//+stHZPjPf//h3Zsb4NQ1IWc4Ho9d1x32gXngDAhht9s9Pobn53OgbhyycEBos7BADAGJcEypwSH3+WUsWn1mTqOIRMDx+Ng0h7uuuYrhs
wADMkZcomuieQQslKDzqyyBEVFdoD3+ARAxVwJoWTt0OciKzbg/AqckMvm5ZAmAiFlE/XkCRMwSsRXB49ALJUFp4BkA246wuVX/iMPVPSG8v38+n/pR6OtLP5wfx4Gp2ccQKhd9K0QIk9cD2F8AmHyJVnTiR+GpN3G9TrUYb/f0DADIgggYkQmnEyQIAJEStM1ufH6U6/TD3+93Ld0c+i/n/9fb6789vfz+8PQHABxPzc24v+6uCeOb+/dN3N1c36eUHh+fv359LMHVwpv7H5qmeXp6enx8bNs25/w//sf/DgA3NzeHw2G/319dXSnXIWqaAOfz8fHx8fn5Wb11mHkYUhNbZshZcpK+HzuZIsaLQACEzJwY2hYFE+cQAggTBsZpYVIgwTExdGHmdeBYgY92awzNL1jPpe2n9VcvN89TuDKjyVLZcPvahnHCt2OOOb4vESRa+L7pi8JkTtkVFRlt4DIYgW3chgc9El+AsRK7K+RU7fvGqSgy1fB3bTezZUDzuzLPFP8KIuZlnsYKXf7hpQoFdfUpp0G7+brfR3h1r8pesV7ytG+oRyCgXkLhegnr34A4FlON+ONyxKZp7GDdwMAQ1KVNs4Yq6lSUzTkTIJcdfwpQMe2cvijCoWujiAinOSImIjBa7gMzj4kTNnQ39KhoYiMl3yDN97DYG5Zsydi6s9e1NaLZh65aMplRGAgACRhBCBmBQSAGwUUjCTJIjuM4mqkHXJ4A0yDXKxPc+nTgTrNY6cH+Ap6fUVuiNlR7nXES/hCRUHU/V3TfzFn3MFXqjD2pvcqcUaXcqtQ6DaGkWoXTsWgImTJzi9C06nRaLSdmFpj3EjFLC5JahHTgRdYH34IU02uMsQT1nKLfQrFumXZReWB6PuWnpu97b/NcMzubAn/wZlNjs+P1PaNRNX9hCQajupAUG6n15b8qDBZpxuY6NE2wq1mIDKAusGNJ+cAAYtHPRALO78KKLVYDtFsZ+qvfbDxDsUkXEaIFg/Pk7cnDDHqqQnuS1l6urq7AUn0svFJFXJbIsAzWiiWdBpdwRLqBQTl0wGJ+7Pteg5FWcyeyIAmRjVM6X0wLRWdaeeV4kpyztye8vEoUW6gRUJCLF7Rn+lIghC0Rv5pZ/6GquVlsCAbt63i4VKoePfybFS71gqtttbyyPRavrPr22VnvrdmwTP75Pfi5VGx+AeYp+54Gxe3rFXKsrJ9X24evue7U1zEhzAMgIpqXQC/Cg9Oxp8pap6gxelIJJIUaiYiaptnv9zc3V22r2x8gIklgJluYtk7VlV0E3r8/MPPDwwOXGG6haQCgfzkS0W53iHGSOImIUx7H9PT0dDze3tweQmhzOg9DYuYQIkDmLJw160AGAAJNFdXoYGKMRJCYAwXZVNhWz6RwrvAt+qw3lAsz7xjOSlD27xSTec55F5socD6ff/vtt7H/Gui830XKDYNwliRMFLuuOXS7EOPd3d3NLWDodo/HP4/DcXwGon27T+dTBed/hNrXLXj6t69LBltFv5ANdANMfh9Eb9++/fvff4rwMvRfXl5env/81+PxdHt7dX19/e7du8PhkHPux1HVP71JHkIzDEPf98fjse/7cRzfvHlzf39/e3srIsMwHA6Hl5cXZv78+bOIHA6HruuU8FJKehHdJMXD4fDDDz+cz2cEIoqIYejT09PL8/PLMPQ8NLoKmibkgJlLYu7pev88Uh30ep1+J3pxi8nDFglt8lLD/CtdGL9Cp8nD1lbiJSVE1IUvyzwWiAhCfIG4PFrAKcZVF9s1t0a0idhqK7cPa6RVytUmotbY+J6ynh3r3RbCN6fGj6sa2iXCsNfXGxCWPM++AEzeruuRbu5imy3rEy9GbiNlq1Tkiri47Fb9ZDiswGDn88XOkbCSr6q9r6LwSrjd7N0LHtOu5EJ4WM04jqPK91XTTqJdssULlOfXhrWumreH2y8n0zrsdSIyi+LcCJJoELkJnsWJnZFm8WycXrRzbsMyIgYKdlIiRYdhnnSk+WQCEqK3INc3rKYhuOiJ1ppFgzQ0igi4EZntHhG7rhWaXA3LDYFZ/TBWZTphZWf3tJVdHEtwhG4HHP5YC51A7w2A1vvSQWVKI6H17SKlSkuaT0+WyqGO0VpGr34X4UyKwuZPEJQk/Fw4gl4M3LMVzw5847CUsysSlaL1+e3E+rLnJpGrJpxw1AEqiozO1SPUwz+NKARDix/I5nrR+gDA2Qu408HKOlopuLF4JHg6qYo/NUB3txaXjF6WHHazeNbjeHH5s0L1/CbPrpZVFx4h68ZfKexcJvwHvxiti+pJ1c43+9oc+He+skasTdkmYDYdniH754u3/roO7KcbLuD/0pNXRmespqpWEerrUFVPPBM2iiKqHZmknOLDAtsiusvG+Y4KMwNoiNFd13WIY86ZRWJoocTsNl8AhvlW225Hz89HZr4X3u/3k5f+OJ5Opwbp69evu9355vpOnQARJeUREV5eXh4eHnb7pusaxcPkvyqSs6Scc86EGAJGCkOfmqYBDCHA1dVV1z33Lwlj3NRGzJ5cccXMHGLcqL+F6lcWxbqIk0RRpQIkQL3KISDCY8KmReTz8fjx4zHg+cMPN2/e3AZuj6fz+Xweh/Hr16/DMLy7v/vpx7+FSIFCaLr9gQ/djvlxOB+DQNw6J/onyuYwRURTFsNqUa8JdcItgIvJWSK3CYS2IQFE2e/3t1fXCBBDz/kcm6vdbod6F3S/150xBtDL5NpvjFFk//T08vz8PAzDy8vL8/Pz3d2dMWc9PlaSPh6PT09HRXnbNoh4fX19fX2t+5GeJ6aUrq+vU9IACNK04XDYEcE47v48pSmMWYi8oe6+plq8XkcLr1z61y1UPGezzpoj2buyuvElrhhZrnt0skTNRWfe8i2F0EPoe5xZHwAsDxrMi51oMiuvR+1NI4vBcn0Tu8JMJSd8D54vFWtwzbcdokiWCuq6F/+58jFZ09vrZZqXFZyyVAh9Zf1QycPgRHRHBtvCwPeAh05JXjwsfYHDAxYnMqdiTFPm9ZHNmao2PnbGc08Jfr9boG5JGB6ffikZTqIzoy9S93ogrFcovsgelTgpOYjOTmqgVHeczAyoCeu8KXmqjJqEQhgmayGDZGZNzEJE7Mx1sKW42gKT4kQ7I6iYvIxiRESAERsDsuCUASxj7EL2MrL21zmqmbDj/KJTRZt77V1NRm3T5RLto+s6syuqo0hFH2baVgDMIufPGDbISGbi8HCSc4kx7BERAKmZyxQ/fUVN8NYjM+vlt3HsPf1giebi28SipOWc+2G2cfk7flxcC7xZz3MQdKqjHexVNdesCpe7RbUAAGDKebU6dDTC9lMZQoA468z+J7W0281J85qG4oatIJnXqEkDfkaIyBjKNFgWAMiQZ+Xcx3lioVUQF9PA4XKpeNB6mVvxBzqbhLTdvgAYGllYE70IWKYZ9QmHFV+zFuxwpFpZlxQ2dJvB+sPr0PqyZqmv9PXNdjYW5oUGjcKrt/zJKNg5BS6eryfx+8sm6wC3EF5v1nftidmPyLdQtfZNclqzAv8VAOxO
smtBDPkGnp1QCLNVtIMbmiJY5JwTkggjRpq4EZp7vABA13XX19cjZ3Xne//+/Y8//qgXLgQx5zwcTx8/fnx+PiKE/X7f933OOWe4vb1ChOfnx+ZPOhxaQgmBANRjHREZUTPCSIxNEwhYdrudSDNIur6+3ncPD89PhAt5cl44Zb78PggXiO0S5qcnr66UappERAjRYodb4E0ACijCKQ+nUw5Ab+7aH3/88e8/f4CBhpRjjEPi//bf/tv/9r/9Nx7T3d3d2/s3DALIbdvev7m9ez59eTmCZIHtaL3/weJ5jqx8SmGLJb7aLxNFyCk21DUBkIVl13ax6Rq50o2SmY/HY6DG9tYqQMNu197eXn/58vTw8Pj01B+PZyIahgFLNLumaYhi03RNA2rZ7rrub3/72ziOz8/PDw+fRESzZ/V93zTNOI4pMci0rSNiiM3t7W1+eDgfB5Gg1wswTMmlRURgoWsBzHuEZzWvsAXDlazEcaNPq7Zux+al4vP23N/p8Dx2yQE24Fm2M5sTfCPfk5Ty0qjn7UYA3DLUlglRttigx4OnSdug13153Mqr+tU3ubevaR8qlg5Lyq92h/Xz7+kFsY68sllsJeLW5gJO065egdXBBBZvwbV4ANX0bR0ovA7hZjEZlbacou11XhohbEIvnemvPYlsZ1xQ8nJ0ImJMeubzuHETO5phyqvOa25ovbYxmBJiphgiDbu1IFNxp9pefxXnhw129QtxMlRKGQOIqPPwtHptwhaJ/tZd6LuVwGQjMhF/cVSAjBDq3RQBcWFRsX6tTV8mbMBCIyoaToDiPavMXQ1ugQJAkGJx8sMx+qi6tl1EZgsnc7mrY/NohBhdVDqbHSm6vfmZIKJCJbLhF8olnbpubFLsgTlno1ujZuua51QTk5dszrk/n7VZ3dJUB+bi5e/RW8riYXXAs/DdZ+bie03F09VoWNuvVouba6w+K0FWqwO2jG/auy2KyuamoUe9omhTXDE4v+5sRKoQJknWb6GNskRwsUhf4VDWsimo2aWaWKsiFYvf5I+vME3ve09TLF9YuJepdw5tA+z51Hd1t3WC+8qv3yzVK5t7Bqzuwr1SCjz1vTibes8/KzA2sQGVzrO67vD9UPl14blEhQRcbpObAF8YdX3S+Urjvpr95I3Yi+FPgsU3hj5zaUF00a2NbwBAoAYRBKYjW2UhwzCwgDALAgXqdu21XDFK2+rdj5Rzappmt+tC2+Sc/+eXr6fTiSjGhlSgH4bURHz37g1LYk5fv359foaujSEQIt5cHwQI0axOmJMAZx6TYJNyOp1G1nXKEkAYtpCwHCk4hvk6WtbF5mWT8HBDCpzi0JcnQQQRoGmjSEbJIcQYCRFPL8c/fvv90N0DYRr569Pzw8MTAOyvrw6HA8UQEBmJSHa73WHXPp/7DMCpVtX+uYIrCa8aWkWQ1b7vFhqoKEJCGTKUS5f9cArC+7bd7bqmaSTHAA1BGvoEgAhBrai2C5jPJyLqphBjfPPmzZ9/PiJC24a7u7u2bfu+158Oh4OIqDfpzc2NBrIex3Hox3//5d+enp4Ph30I+PT01DTh7du3IhLCVdN0qkbK5CEVn9PhPAxPx0GJ3kQXnMIizCuaRQBrbfw/PhGvlIqqfV/f06PVXDMW+8wrdlHT9oV+KsPOJvDzBiEbhzFUdbx6faMdnNJxwwoDa4+Sf269XyqejWwy5EowWNeHV/cFD7bv8XV46ocFzbK6bb65N4kT2isALoo3F4C6VF/KHctKlqtiCrjP9c64OdgK+bCa7kvYmxkXLcaec0YUIrJE5ZMeIU6vAJeZwAelWLLF+frm+lzNIDD5kl0gRK3gWa11jcW9M2XOGkkCZ+QRUS7RkDQskjcQeTAmmBe+7zO6hTQQ3aSQLMz0mBECuSsExivtGKLuFGej1twOizpv2Cs6AntRFcIJpUICZJE8DduqRClC/EMR6fvez3eln7h9ayqa+twvWiNQg1xHoekQysBBM2uZUmdpGDxIANA0nacTm3fVMUpMHbC5FufjEUq8o5yz6p/VNozlMqERlQ3EswBrWX+l4uXlEVL9dWuDiilYrxQGokmfjzE2jaYG5nJ5b/L4rdanN8r5xaIYwOWlaigXBW1EUo5IRISqaJ9F3zOFnMqJCUyRaf9adDKjDcOqXbyUJY/zGF6AtKyw8VaZIILpMuE8mxr0CeemKqa2ZDUbnPoS46PlXTj7S85hwdPeJeRcOpnzPMHAeGUPu8igi4eDh4GWFzXJ3fLVNC3W5vRuoQp7a1rIAPAXZYJNVOByX39lRB42D6T96sd1qRHfIy4f+r9ratRK+t9mm2AcrxwSCQCwMv9oOalk8m+HgMCCIGZho3l1gGDxnGeAt2/fPj09jeP4+Ph4c3PTdd1wOg3D8N//+39/eUkfPtxcXV0hBOZEBLe31+/evQPkvj+9HJ9TOvd9Zs7DMByfXxARiDAUlw0BFDmdTl23SxnPCY4jpH5AF+9lzW3WmEHNW3uBHtbz/sqiADetF6eyqEaIRfNJY9eGd+/f3OxI0um3337/P/4/jx/e/ePq+npk+eOPP15Ox5/+/veffvxwfX2T0tC2bUr55eX55eWp708pDxRaEaq6e2XRvV4qhlPxcHSH1P7X19jFBBsBQCSUNOYE/XDqT8euyQSU8iQh2I5Py7v3VC7Ym0NQ3/dEcHV1dX9/f319bWe1ANC2rclLT09Pf/zxx8vLy9PjkQjevnvz888/Hw67YRhCwKurq68Pf4YQ2rZtYgfTmW8IIez45pfffgN4AGSQyeskpYzTre9p7FPEjVAf3xha+LKnxiZv3NxiKl7huWLFh6t9Z3Mel9v69rFaeXjxLt+lYse41eh8L4vRsYjb+zb3VtjalWYmv+Scr2AD3W5eUfX3DA2gnuX1h6p8J/dY04DN3ebGAavhWyGnt8yNwHwU5dHlMWlUcQmfuFIiDEK5rBB+s3hS8cPB5d7qw1542HyEmEvtbHa6fjihq1yGN+zZYrd3iShySTwAThucLt871Fi73sRxSQirMLIezLomFJ0n5ymoDOrt/vLb7HTH8+t+JRQpikUEwxzeh3mKn0FEEAiyKnhs8DvnzPlWGzgCBVigbB4LzkibEcWimpW3mIkAlzxCsFhvgEXIsNwDUu46q6ak3Xm1WYHx6hy6qIA22ErKxKXu6meWihutQ9okj+r1GP1bMUHDFS/z4+lZSAjBnEvBrXMsN0s95SDibreb5tcdpejvnmY8018vfnJJ1f3M4vJSsm/QXoRZLpzPBXhZEFEdvTz/Nazy0qKrAPj93pOENWITasAZMqk4qhmeuSwOm9BK4MMVJ62Kp1XDYXWkAks2UbXvO/KrXn8KAHq1mglFBEUQgVEIcDalsQAt3H5gsdxmbIBjFJvM7pWyiYrXkQOXg7t4wJZc9bugWveLyy3KmrJlu1oI2w2iF2T/ojZYjWsT1IoevtmaX5hVm7iUe74Hqs2WF88vzFdhgJBVjCjHTwQgwsLTzUN1DJnWFwKRCKPCrocX5jk/5qTAa3Cp0+kkIqfzue/7l5cXAHg6vvR
93/fDmzfXP/zwQ9M0aeQQQtOE/X4fG4oxdrvYdiGlPUgex7Fp+jwmZgERFAyBAJE5ccrDMDALYIPUEmHbtl2TmS2IzsKh14e5Z+8r9R2ztrlGbBWsd8NqT5nyPykA5rwDwpyA0253+PD+3e1V+/zw6fnrH4+PSdLvP8f2/t37w+EaEK+vrwPB6fSyaxsAeHp+/vPPP19eXpCgDSSBcvqP6oFVqdrx62j9U7VfICKIOiuZukIkAMhNEwQjQHp6/PPX3/qrFm6uWpCxCZ3iUl17it/KIlgDIsaGTqfTl69fzucBESyuu2a1RcSnpyc1OH/58uXr169fv349n5kI7u9u7u7urq4P6il6fX2IMaY83N7eTo6gkBCJAoQAMWKTGxFmSUgdCE/B3J3gu952K0S9XjzlwIp/rvcLz/2qHWE9X7hyqveN2F/fiBcY3NK4RFQXnSw8q/dv2SkNLzHG5ZaTb8EaqQYFbgveZKFr5Hhh4xJs/sn3TN8rjcA8a4vhrH6tz4y8mLGJijUMVX0RjYe9cTpsXYiT+sT5kVkLm81WL243u4mpi4Q9j8DEQi3eBgCFx2rFigWJK7hVPPLXaMel1u2oS6reYRnwXLe56C0ealdRzuUb9eNX9VyHO4n76ghLqOwFEEEQ9fIFAoNgIA2PzlNUfAaA+USUiKZdDfJyVeh/iubZOXB1sVJhUyWKJ7c9r6gk0+KQqOReEACxpPYTUmi6N+gJmigA1LvgBI+Xy8vpBRJ6V8zyfNLxmqYxo1m1bFT9MwLSqGWqUQSXQ8Z0Hk8cBgYVr2VDlykhDtkIAJphWWOrmvaooWVV/eNiG7S8hebp6jtVsO0Kr4mzlXZkEKZhVLsZLOfRxijO/RIAVGCrVmO1MAwzWFxqpVjnFO3FG3a2yHlG4BebokIveOigdFIUM0SU3V5exd0xpdEvnP2uM0x6nNgdwmqp27mAPcGiJ0xzVzBasSRf5LKiYocg4BgQLx3u18X6Erel4epGHyKWK7EYAJhsAxNYevxPV1vBohJ+Y1d7/TmsGPo3618qm69UjXvCvtSFx9j3dOqOABacvbqzrZPFMOcHM7oVmRJjfO9QAcC5HuFSivpLjQAsdiZ74ufUI01cwnGsREB0Loj+7zQwt/BBPUEWh5KKISg8kNVXWVUuE61Kzh5G1ysDI2dZhKdn5oaiso4xTw7zinANRRNPzel0OqtaeD4R0X/5L//rmzdvrq6u+r7PPIYQkCTl4Xw+t21su+bq6gpgD6InRFk0zrogBk0GgykNnMc0jEQUm72E7umYMvx5HvLjyxlDs8a88QrPYRCRRS5FGd2cYimKgZeDpDzRHXlKaSJSvuqeNd20V6MZooQGWaRp8er6cH3VStrLeHP9/HLYX+v9iKvrG91cRsghhD6Nw3H47bfffvn9U8aAEgFgHM5EV5vw/weLkWXmRWQ4kyho6ZI3L8mZ/5bTAwAUOJ+P+ybsusicv3z+9IRpvL+KhIfDXgOGE0XNCWazDAApJZakCEkpffny5XyGwwF2u91ut2vbVpf/MAxt2768vPz7v//7b799Zobr6+Z/+V9+fPfuXaCYUhrGPueRmVJKgFPCWmXyGqIWEZnzOHLipN4uejdTIIuoBEiACGDsHdRbUZYFvoOp+gr16r7MqA356IS6dR1ZShTV3/VXe2h8EgAAyG6yWGWR+QrG64PyYyGTIvwNeX3CiwsRFQKrPdpTWvUcV9qg+BQLq0i5r8P8zeI5Niznaz2bm9X8GP0mZTU3MfB6sSs/XvwImuSmgFF1CmVj9SSx2iy2SXoG9QJB+O4WD0u/lUnGNAJxsW0REd013eqw3sNpXaxjT/hSzR0AYCDOWYdhb8m0/yKygKglqQSPzGpZUgMRwKhyMADO2X7n3VFcsgsF2vZ1LqnVjJnmEqJN3zKIvRkH3AKopoeZBYBxyuviToam62Ft08QYz+ezKd9S7IQ5z1bB7HIQ55y7tnX2Tw4B2/IES9YKSGTXGkWQSxYULLetlM40ymh5saSVG1MuGTUsspBulqrgeWkDhNquTSlp1GkuVl1m1m1Dz6e9UBhcXh0DCVziDXT5TBAxj2kcx7Zt9dKCodduA9qMDMOgENr8skVUDyHGOLksLuMAbSyhMjrjwkYhzNw0jQYm1Vvv2qOU2/CVViwiRPMtQVvY7PILeV1UuzMtxZQoKnfkPGPyCLS/kzVgHG0xm2QwacVNRFe0ct/34vx/pNyQ1JseikBVUG1R5Cn2d1OOLdhzECinCU2IRBQxWjTXlFSXm5ONwpK7bTJoG3tlDBTnUOEHJXOL0/PsQiKJM2xaXxPtBVIpSUSQpzioBCgiLKIWT39ZmbduVXg+4OHhKZWWO4grAG9yZ4B6f13TgLfQGnUZP0HH2X3BJefd7MXDMzO3sp17SPTcxz54V+rqYGUarPsMPvePHvhvFd+jfzdP5FTjMLiAUr5cCmZTsQKbGrvHv56gTcAI6zvbfuD+pyWhznYDfa7Khrq9q6ynCxCbZhwmC/x0wxkCM3ddl9MpZ8GSjJt5uqZgR3j+TrL672lsYWa+urp6G0hEOupCCA8PD7vd7urq6nT+l/OZr/52td93sQkAMAy9SG6bcpYEQruWQsgC5jwf2066xAyx6cYkt7fXQ8JfPn6KAU85wxLbiAhY7572k8dShbfNeamWFSyF9eoAyDWIU1xu1QqFIeecORCIjIf97c3up8+B+tPxdBxOp1Pf94B0Pp8fX55Pz89Df2rb+Pzy+PGPx6aLf/v735/7dPz3X1NiWQbNmvnGhUjruMqUAFvL04qyYtuX7Ux5vX+VwuoGgYhEkUREskjetc3L4/Fv79/99OM7GE8kfX/uv7w8Df3jfr/f7TvNM7Hb7TQx8tPTg8p2KQ+6zI/H4zAMIkBEehysXjMKj2bv+PXXz+MIP/9891//63/98OFD0zTPz8+IOI7jv/7rv/zyy7/97acP//jHP8Zx0O5UtMg5t22rLOXh6QsAE5EGpkpj4nFs25bziDhpgNPokLgkKNpkdxVR0TKYWbUfVVzCGIi3ovhqFT8x2gthca3Rk73Nmj9W84fpvpeqXzAyXobZqAYr5bRaubTmmpbiAwwiIJOpcGpfAMqRyrrTJd3OmMmr2CfrzSW42HJ+UkyQ8GsEnaBejY5oscx9hWrSi71h2/uxAtjjH1b8R5ZX0gwzfo0viAfqJ1VHFRPzcpTvSMUwfyau/VoUX5MhbciI9UyRpkBdpiXzSDBdw3iIiCUkXwilaB4WK4XIv+u3SAteaABU8gktrUG5WERAFqLyOI4RJ/8as2QQUTSvNv+DXqY3SrJxigjKYv+eNbFQlBwnY3mDTMFvmVdeSPl+/qaD2yUfrlhPKEXvyFlHPGlgC6sxzmVzdxEDTL9TEJlOOpETI0J0nr66+GOMMDXFrCF8SGC6A7C4Fqijt3yMuICGQttAufbm5d28TOhnSqAqil79M6KfgHfIIaI+zznQrR1VOFUDhGLpyiWI6HqBGW0YhbAzhRWld1F0vLbADHtjToKAgQQhcbZlOaQx55w5cyEwY3J+nV
gxS+B81rKabhsCX7CAiTu8MILRgDr21ZaxgWF/jdr9NMFyL6QwTaVKe578/OsGnt0tAbfJJUmmOePESvToAgEW/FEu3Ouzz1g2J3tYVV6jyH/2fMee2zEQot7BWgAQoE5LSDBrgZ6X+blbnzh4zL8Op6///QXdIYjvy4NnQIJj3AaAJ7k1tOh0uapf3BJwp9M0IHaHwb5x3+8M86XJ1BA+WET2b0VhuVQMFWswYAvn1fKviNOPy6+pNTGvMTkNa3mP3RqsFGmDxD7HGEPZBPRgkXPfNeFwOIjg6eUMgofDAYreaOeSummpKhiaaOGXR84ppRim/HKn0+l4PB6PRyJgTjKJIAtHQURU9ikADAhAQgxgd/ino4kQMERsiE6cgZoKbwDFfLeaCB3/Jjeo6NOTkCf1bzIHLAoSAKgjpUhGgMypa6BtAwEzp66JV/vrw+7q+PLw5cuXJJAy/9svv1GADx8+tG0zjkMM7Zs317HdEVF/OkPm3a49DTXk68/r0X1/IUECBCQEBBYRBhHg+TrrFkELCgigHUMgwDj2bQdv729+/PEHTGdJ5+PjUxD+eDz9/vsXEbi97W5urx8eHsZxFMk5j13XNU2jkQhiDF3XEuHdbRdj3O/3RJDSoAk2RVgkh4BXVyGE8OHD+/v727aNOaf3798Ow/Dp06fj8fl46p+fn4/HFz2tUJ0QprWg8XhRfVB1P6cGGmjGxH1/arbTk9Ac7aiM3vDs9zLDfHWeC46bVYuxYu+vF8+K/fNvksSajNf7oy/kLMP2t+J4WE7xmJmg5lQEi1NOJaQ5zdKFnmtUlIx2DBtYwpXvvf31ghm42fHKISyuaFbXoxZ4q2CTcvHs+0uF/815f50MENFcRqtxIWpI178Gk6LOk0HFPGE56dXWv3533lku01WFgRmMcrq07siaNROaPckuHCA4V0QoUqhRiNmfBGZrFiKS2OHdFAYHEAVgVkKk3LlSjdMsgVCopxLRpIjak54aog3SMO6XMS5lUHP1qSiPiMxrhWUht6AJs66YnYSni9p6uDVr2F4O8MCgM+uppZe9xSxkAADkpmnUEmhjsX7Xw4RyMGCzUjpFdC6F/iKpWX5gqTB7DcQ68spPNRZTvagUdCc0NoM6xXpcoQqhHXehy+uwxpUnNf91TfeeACquBABqY1SozGJGJdaopzFTYisCUAC84Ohp0rDnTWcVhDW9Lb/aLKjurS2ooduvw8pUxSu3dWYexzHCrHCCC38qy7M9fYtL7myDYfKxLsUNzUiu9tH3bVZMDcDytl3cF9cPq/arLnwdRBWSRCtZnQAgiCQIRTmkopuIu7kK7gRhc7I2R7oG5i8VXDJ3XhqsjH4u0aHROdb8DavKBrRvzQZVWeahKC0sr0lCnilpjb8oD//lUuGngsezCz+69exUlDljb+kUtAagQmyFlurh1DchlewvLDSvI5k6ijG+ffs2jSeURIQ5S4wRgYgIBDmLMFAIau8mQmpi3w/agAioT3oA4IaHY6/84eHh4ddff31+Ht+82d3d3QFKznpGBoikN9vDdK1RRNPtIjYSCJhABFkkSxbAgCiRcNc1z89HchaM+cPWevdPXiEejzr/1iZiNwuL6EFQqUMASUQwQObx+fnp0ITu9na/37958y4L/PLr7+cxxaYNIfz000//l//rf4GcT+cXFO7HIQuODJ/wKWfOeQBYuMhWY/8mbN9T0BkovslMkERjDollqBcBkBippbzbxRiAgIi65ga6phnS0/H4+zjCfr9/++ZdymPf9yEgAHRd07R6kgvqTgIAhI0d/KveodyxacLNzZUek4aAT08PLy9PepHh5eXlzy+fnp6e7u+v3ry5R8S+73e7nea4F4Gu6wijXnb9+Pj09PSUkkiTmCNiDAH1ZBKw3s0NP54h234HF6jLvwgX1imsGeOFFjyDhRXfeKW1V+i/LJpVuUxXXt4Ap19FClX9igdqsauGl07i1v16Ut9ktrASYGALn5dm51JBp216GawsjW+8e6lfD7P/+/0ASyn2hEVgGSmjqr/5sNodYEXhvouKfipq9LtbVfN1wp5ZDSxasLH49o0GsATgMHetCQ/FB9VDeInJIyIJMOK0Ctx4c851MEZr1Etm/qdqQ/Ko2RyzhVexLvRvE2uXpInRyLy9QTlfQXDBA4q0rdK5OTF6CIHQLn1NpnvtvQhe5DIrFDDIpHYpui4AdF3HMt8eCSUXubZvLRuDMHOtv6YFMBkAdeYso4MwjuPZdFobMqwUQmtcx2tTbrPuXSlM8wQAFDAlUC8PYDGvmcXJJlo3ITcuFe4RAPUEAVEQoURnUGxHHak/khcR83UGd2og7vQClqzTrwQPUuIMy4W3psO1dQWdB0tltasIlTMQUaBAqBJN4AwiHEKMoYHJZ6bYYCExD0YDRvlU8hD6qdGfqmRTZrmtnKihGCJgxZ6YmWRSI3PO5SYwiwgKxLCQj62g20r9uvPM1k9EtYhm2HA+XKheWXdanheeCIvdzzGjufJaK/YTWrFX449rUCtOsq7wSvH1ZcnxwWHy9U4rqCrI3ecNzxzrApebsSokhqV5vayST16C0He9yajXL17CcFW/Qtpmj75atcw9Eiow0B1v2a++pv9QGqzlznJuIkSEArTM9sHT1XdUPwwiOh6PTQRhHsex6/Zv375NY35+Pj49PSjTDs109yHG2O53mm08l+MaUj2PKITw9evXl5eXz58/D8P49u3+559/vn9zp0FibGh6lR0RmyYCgBAxEmIQEZQsktOYdClRIAQmhH3XEC7waRi4GPpgORffXCO4FShu/e7i1/UTAURo2ih5HM4vX75+zueX/vn5en8gofv7+8+fP39+eAnxfLi5vrm7jTGOwm/evEGB89BnQQhNhrYf5I/PfyZ3TLOGoaIu+OsHIv4gz4R+KlcMNlFUDi5mLqfXpds2InA/nHDsG6QAtO/2bTs2TQRIu93h+vqaiAS465qUUgio/iMAU8Qjgcxp8nBThg/OY/n6+lrVPDUJqhcSy5hSapr44cMP7969OxwO5vijV104Qxr7lE5PT09PT0+fnvPx9EwEQjTmzMJIsWmachd0Y7zf87XandeVPS2tafiVrj0TWPONqn1f85vtb5acaxf69Sg8O9o8mAap7yJe2i6tLO0H2wrJK8ivtgNY8UyvMMDCYPhd+F8PvILhUrmEf1nqVK/XX1PC9HfrpGCzl4p4qk6rndfDxrJh8/Bge5lEpBwSLXtfg2d9lQCaC59PKqY8D94aIZURYr24EDE7i/EM86RfsJfQNCNTtFtMWptcFjUDkReXIAGc9O827w2Z24Dw2wwulZlKRGBhUSPGMkaT+VWLqY7MWPLL++m0rsso5q+AJKx54Vqfaq+8G2x2VY63F9fUllLS0ztcuo2ZC+tsnCXSaK4eXTApjawU4fFDLjgKLEPSV5NtFOBfZ+fBCACS2QKEmnXOzE26qZhWzCVHSKXDW6fmsOocYhdKlwFmIJm91DAgxUveo2KT7telquOnRn+yy5OezNaMzOZLeLaq+xfDVnqJ6l1Y8QV0J
3ZaIeVkc2R2S7vFaiSh+FFxE9xpCyhKl8dICk4F0nqA+C12CW7dWSOv119353ErIrq/mIRqBwxBkMsQSEAd8HBlkbMGL1HCpXn8JuV8Z7Hhe1ZGS3+ninF9J5ATqpcEaZ+Nfqqf/ER/zxhfmeh1Cx5v1Ur8Zkf+FaOi9ZNNivomdb3SF2zJJVWbNlg9j9eFNPHkoGZ/YBYGUQOU7oBd27TtTqQjor7vv355+PTpz5eXF9KMqTHoJWdVCDVNXGyaQI1mmE/CzNyfx99+/0XT1v/004cPHz5cXR3SOIaAIoRYoulwBFRDELAAMwuhYAaErCy1xN1BwpGTSG6aILIhgCKi4Py8msE1B/snkP9KEWFAEtEAM8r69GoDZYYQsI1hHPs//vjjqdlFpLgPNzc3z+f+3GcNoxIC/vD+PWdou7gPlBio6d6+xcPHT+d//wN32/1W9OyGuU26l0ja3+q3mq8ohACaTZUAbLwCCOdzut43u13bNZFQMDOXsKJt2w5DOh6Pp1N/e3tNIYooS+Gc59lkSTlnMfN1ANQUQ8gCmSWHEPZt13ZRD3abpmnblmVUkUM1mX44icjhcHh+ftntdjG0T+eXT5/+eHp8FsEY4939bWKR4zBAgMQiU5zqv8o9K3zadKzvv20i/ztZWTUvsiVhb+7Rfnde7w6Xe7+oHplTj4kxU2HBkl588QIvjmn+6aV3CfKKnVpRV6aKQ77S7+uMdPP5XxrF5jRZuYSWTWHGf16P7hWBwe9Efoey3bYSP2xuAeCvXq1Yb0NrgMFtZMzZdCtYCvabqGMXZ0SBV6nSkFCJLh4/aPKMuuCWmlGjQyMAwCLMpnFGrxDC0u2wApRK/JLMySMCF0ozwNIZTERyqo0AMB12kp6kqnHdeNV8tw3rIhvF+4JPIwIACmZ2m9UAQxyRxNhSSYqg5Xw+A87ep9mlUliTwgKCMmotdofQamo8FAgb6xadQmVGJwVANTEPvH44nU7VcGR5FARFo1OlbhgGKu6L6CxpBp7X+e1do12ab6PO4yV3WdYsYDM5rmjdhllZxjxNB5fByfr14EnRqUzdshXCpawx7HAy11RKLlHgkh97LnkjaOnUB+5sz8Njv3rLreFZROx2ByyNq2oGtDuN5mpiD2lqGYmIADVcYTVAWxe4EhDZuVLDFjOtUGQj9UO2diqeMHNAKcEJFQadSkC7gmA6ocACXcY0KmK+NH3fHELVwqXiV42HQS4IJX6Nb3a9Cap1ACsd2H8FO8ACgNWmIq5pdL++PsA1PB5vuOrlUqmANLDXM1UxomppV92tx14hxP9qeIBZV5eqHSl5I0QENNEEYs45xunMiFOGwn/6vn9zdxjHUYny8fHx08fPz08vAHRzczMlaMUpNxQz933/L//yL9fX1ze3t2oqTCmdhn4Yhpevz6djv9/v37179/bt/dXVFYAwp7Ztcb4WjRhAY0FmyJklAzKWlDKIJBJCiER6PV1YAHnXtZHqBTINdqmKG27xwpRtcgz9WvlTwOXVND1HqCJD6XQMw9gS3N3d/fz3v8GYTw8vkgEYP378Lca2acIw5pzz77///uXLl+PLy9319c3NzW63Y5H+2J9O5zwmFgirlbimWE8e37nefan45OstyCRR6P6LCKpPgQi0TYhNAGQiJAgYhASur27vbvtxHE+n/ng83t3dBIrD2MdIRbUAooAkOU8B0gEAEZizuoGIcAjUNFOK4KbpDod9iabGiFLiqMswDCISYysi+/3+fBoeHv78/Onr8Xhu293bN+/v7u5435379PXplESIIlKUKeTma8hZf60ozSrQ0tEJVpT5nRO0JtR1X+v2cSkQV8C/3k55PHvK2IYOAG3beoaPJoNxFhFC5OUyYWaN8WVNvdrvIu/iKyiyn/xdzaoLj4EFPK4F666yvRt78fBUKN2E6tL+4r9uEtJ6yqqWEefI6jbF07sifGGuL0291TQJtvrVf7Cuv3OLx0IGm1Q6b9+OGGQlN4KbBQ9JRX6wjDPiD2KsGhHhHOndrc1VF/ZuNEsULBfAAikAhjtLEF8peOtRVRMAq7JmEGVVT5jyUM1w46I4xQ/KZioCUzTOCsXgSCrn0b5iMQo1TRtKyoHpRJnI/HylqFgpJWUQ66FlF/+zdJSZF/OaXX48KAj0RkUA6Loul4R+wYVRrXBiR1brRJYGhr8iaGm11icNhbAW8frIBSzVKNhcnHX1FVNdgstv7kG1KbZwT7YUrSa7+2OwXDO0HMvmwvCj9o2zc0f0q2sNm8cql/wlXPx7/TDNxIPOK1XKcX41cFhGa/RYxaJMek49pb1mtrBXuj9xiVAqIqr8TomwZOan67W9ufSYt1c3Xtp9l3Nk0YzszMi3I+5U1OuEMrW/gX/DfPWhmp1L7OX/J8W3aXrvekf8JivzdS6NEZc4r8ivgkeKqlNRbwXGvApm/bEuFTxr8L6zeNjWBABbU1lhzL8lG2LlYsZfwbatd2YW8d3NiwLdK1q4RF3jNIWzyjm/PJ/+fTwP/RNKUvPg6eXcNt3d3Zu3b9/rCVEuLOV4PD6+PJ9PA8hxTGymgyGnnPMuNB8+fLi+Oez3+xgp58lMREQhNDaVRCQ5jOOYo2ZjggwoFPRQKwT1EsWUBgoIQLsmXl8f9nt6OF8UcNfosvW7SWbVZPm1aZR5aWH6ongWEZyz88GY5HCAm5ur+/v7kKVvD5KBR757f2CG29P5dE4vff/16+PxePzXf/3XL/t913XX19ehbRPTacwieHvYvSy7XW9wK8D+GmH7PcWGfNk8CCJZs1WhYEErBsCwg65rAHgcR2BpMIQQ2tDuEtzf36c8PD09ffnypW3bu7ub6UQeUel2HEeY7spAbGZz5fQkRkTU6Ny8dHDNOd/cduM4hoAhRJGMGETw6fGpbbuPHz/9+usf4wBv397/7cef37x5t9vtH9IJgMcxMzFQhCIhiDvQXw15Ichu0ox9WCuEFbY3G7/UKWwxFg9MxUsr+QGdFO5huEjMIgDiG7Fiwvd634SyBDzRTM8vo6Iq6zqX9kqPBD92OyuHy6v49X79u/6o95X9Gpbc+HV2sZ7Bar7gshzip3tBda/zpcuKWUUbm51uDtB3aqP45hRX43JbNnpU4OrkHZbz6NeXGY1MoTCowELRgGF4uqkjImpyE00iMOWXmbqIqkh40RaWiew8lxQRhNkgA3MHfCnytR/kkuDCmrMrYqZIuO6ejJQIH4goUKsNHuwJ78JhznO1gMSWTc5ZZPYwhBLWTzd4u63XNjuWZII7lvuHzDwHGVtyEHQSf4kxg13XeTQWagh2VcAISyG0jH+mzumsm8OnllxSNeidPU8QU5uy0EBMX1KF1hqR2Xd5wRRCCBYxVQOUWSRrP17PtdmisLpiNdfBMzwON7ltNSLfabUH+I3cPpdxbdsJObPHjKbWEBF1pYayGivPZENptUysccc+5nihhlKDh0tKRgUyhJBKrFcDSUT0xpGRHxGJIDMLS/BXZAvSKq6HSxEcVuxmzQ39r346rPG8Cn8PRSeBOeBN3ZRn3yjAIBRm12Jr0DtR
[base64 PNG payload elided: rendered output of Image.open('./mhp_extension/demo/demo.jpg')]\n",
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from PIL import Image\n",
+    "Image.open('./mhp_extension/demo/demo.jpg')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABLAAAAOECAMAAACGszjIAAADAFBMVEUAAACAAAAAgACAgAAAAICAAIAAgICAgIBAAADAAABAgADAgABAAIDAAIBAgIDAgIAAQACAQAAAwACAwAAAQICAQIAAwICAwIBAQADAQABAwADAwABAQIDAQIBAwIDAwIAAAECAAEAAgECAgEAAAMCAAMAAgMCAgMBAAEDAAEBAgEDAgEBAAMDAAMBAgMDAgMAAQECAQEAAwECAwEAAQMCAQMAAwMCAwMBAQEDAQEBAwEDAwEBAQMDAQMBAwMDAwMAgAACgAAAggACggAAgAICgAIAggICggIBgAADgAABggADggABgAIDgAIBggIDggIAgQACgQAAgwACgwAAgQICgQIAgwICgwIBgQADgQABgwADgwABgQIDgQIBgwIDgwIAgAECgAEAggECggEAgAMCgAMAggMCggMBgAEDgAEBggEDggEBgAMDgAMBggMDggMAgQECgQEAgwECgwEAgQMCgQMAgwMCgwMBgQEDgQEBgwEDgwEBgQMDgQMBgwMDgwMAAIACAIAAAoACAoAAAIICAIIAAoICAoIBAIADAIABAoADAoABAIIDAIIBAoIDAoIAAYACAYAAA4ACA4AAAYICAYIAA4ICA4IBAYADAYABA4ADA4ABAYIDAYIBA4IDA4IAAIECAIEAAoECAoEAAIMCAIMAAoMCAoMBAIEDAIEBAoEDAoEBAIMDAIMBAoMDAoMAAYECAYEAA4ECA4EAAYMCAYMAA4MCA4MBAYEDAYEBA4EDA4EBAYMDAYMBA4MDA4MAgIACgIAAgoACgoAAgIICgIIAgoICgoIBgIADgIABgoADgoABgIIDgIIBgoIDgoIAgYACgYAAg4ACg4AAgYICgYIAg4ICg4IBgYADgYABg4ADg4ABgYIDgYIBg4IDg4IAgIECgIEAgoECgoEAgIMCgIMAgoMCgoMBgIEDgIEBgoEDgoEBgIMDgIMBgoMDgoMAgYECgYEAg4ECg4EAgYMCgYMAg4MCg4MBgYEDgYEBg4EDg4EBgYMDgYMBg4MDg4MCa7rFGAAA5WElEQVR4nO3d2ZbkSI5lUYvy/P9v7jZTG3SgkjJc4ELIsx+r1YUkSJwVVR2Z/vEBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADf/98N9IwCw5//ec98aANzZqRXpAlBIa61oFgCzvlzRLAA2A7kiWgAchnNFswCkmqsVyQKQRpArkgUghahXFAtANFmuKBaAYMpeUSwAgbS5olgA4sh7RbFS/Pf/ue8ByBbQK4IV6L933DcGJIjoFcWK8LZUVAuXEdMriqXWVCuihZOL6hXFkurJFcnCacX1imLp9OaKZOGkCFZ9I7kiWTijyF5RLInRXJGsi/r3w30jAWJ7RbDmzeTqgsn63wP33aT7t8F9T0rBvaJYs2ZzdaFk/W+b+7YSbdXqZM0iWKUpcnWFZL1p1aWi9b5WJ0pWeK8o1gxVr05erKNcXSFaB7k6S7IIVmG6XJ07WW29OnO0Gmp1jmQl9IpiDdLm6rzJ6sjVGZvV2qpzFItglaXv1TmL1durMzWrL1YnSFZKryjWiIhenTFZI706SbNGcrV4sQhWVUG9Ol2xRnu1frIGc7V0sZJ6RbC6hfXqZMma6NXiyRrv1cLFygoWxeoU2asTFWsuV0sna6ZXyyYrrVcEq0tsrs5TrPlerZqsyVytWqy8YFGsHuHBOkexJL1aMlnzvVqzWASrpPhenSJZql4tWCxFsBYsVmKvKFa7lF6tXyxdr5YrlqRXCxaLYJWUFKzFi6Xs1WLFEvVqvWSlBotiNcrq1eLF0gZrpWLperVYsXJ7RbDa5PVq6WKJe7VQsZS9WqtYBKuizGCtWyx5r5YplrZXSxUrOVgUq0Vqr5YtVkCvrhqshYpFsOpJ7tWixYro1SLFkvdqnWJl94pgNUgP1pLFum6wAnq1TLEIVj35vVqxWDG9WqJYIcFapFjpwaJYhxzBWq5YUb1aoFgxvVqkWASrHEuvlivWdYMV1as1ikWwqjH1arFgxfWqerHierVEsQhWMa5eLVasyGDVLlZksBYoFsGqxderpYoV2qvSxQrtFcEiWJ2cwVqoWMHBqlus2F7VL1Z+rwjWHmuvCBbBcj/fEUOwKNZ73l6tU6zoXpUtVnSvyheLYJVCsNoQLIJFsPzcvVqlWPG9Khqs+F5VLxbBqsSdK4JVvFgZwapdLIJViLtWn9wzaHLVYKX0imARrEbuWH1xD6FBRq8uHKzSxSJYdbhTdeOeQoOrBiupVwSLYDVxp+qbewzHCBbBIlh+7lJ9c4/hWEqw6hUrq1eli0WwynCH6pd7EEdyekWwSiJYZbg79cs9iCMEi2ARLDt3pv64J3GEYF24WASrCnem7rhHceCiwUrsFcGiV0fckbrnnsW+pF4RrJIIVhHuSN1zz2IfwbpysQzBolhb3JG6557FPoJFsCiWm7tRD9zD2EewCBbBcnM36pF7GnuyelWuWATrkyVYFOuFO1GP3NPYc9VgpfaKYBGsXe5CPXGPYw/BIlgEy81dqCfucewhWASLYLm5C/XEPY49BItgESw3d6GeuMexh2ARLHrl5i7UE/c49hCsSweLf3O0BHehnrjHsYdgESyC5eYu1BP3OPYQLIJFsNzchXriHscegkWwCJabu1BP3OPYkdcrglUSvarAXagn7nHsIVjXDpahWO4nLshdqCfucezJC9aVi+V+2B0EqwB3oZ64x7GHYBEsguXmLtQT9zj2ECyCRa/c3IV64h7HHoJ18WClF8v9vBW5C/XEPY49BItgESw3d6GeuMexh2BdvFfZwXI/bknuQj1yT2PXVXtFsH4QLD93oh65p7GLYBEsguXmTtQj9zR2XTZY/M3P3wiWnztRj9zT2EWwrh6s5GK5n7Ykd6IeuIexj2ARLHpl547UPfcs9hGsq/eKYBXgjtQ99yz2XbZXicVyP+gBelWAu1J33KM4QLCuHqzMYrkftSx3pf64J3HkusFKK5b7OY8QLD93pv64J3GEYBEsemXnztQf9ySOXDhYScVyP+UhguXnztQf9ySOXD
lYOcVyP+QxemXnztQv9yAOZQXrssVyP2IDemXn7tQv9yCOXTpYCcVyP2EDemXn7tQv9yCOXTtY8cVyP2ADcuXnDtUP9xyOXTxY0cVyP14TcmXnDtU39xgaXD1YwcVyP10TemXnLtU39xgaXD5YscVyP1wTeuXnTtUX9xBaXL5XscVyP1sbgmXnbtUX9xBaEKzIYrmfrBG9snO36pN7Bk0IFsEiWH7uWH1yz6AJwSJYBKsAd60W6RXB+nT1YEUWy/1oq3DnapFeEaxPV+8VwSqAXjWhV5+uHqzAYrmfbBn0qg3B+ogKlvupehAsO3rVhGB9unywPoKa5X6ohdCrJgTrE736iCmW+5lWQq9a0KtPBOsLwXIiWC0I1hd69YVeORGsBgTrC8H6QrCcCFYLevWFXn0hWE4EqwHB+kKvvtArJ3rVgF59oVc3BMuJYDUgWF8I1hd6ZUWwjtGrL/TqhmA5EawGBOsLwbqhV04EqwG5+kSwbgiWE8FqQK++EKwbguVEsBrQqy/06oZgGRGsFvTqE8H6RrB8CFYLgvWFYN0QLCOC1YJefaJX3yiWEcFqQa8+EaxvBMuIYLUgWB8E65ckWBRrDMFqQa8+EawbTbAo1iCC1YBefRCsHwTLil41IFif6NUnUa8o1iiC1YBefRCsG4LlRrCOEaxPBOtD+V/Z4H6SVRGsBvTqg2B98N+JVQHBakGwPuaL5b7/ecpgUaxBBKsBvfogWPxFFCUQrBb0iv+dUNwrgjWGXrUgWB+X/0csdbAo1hCC1YReXT1Y8l4RrCEEqwnBuvr/TqgPFsUaQa/a0CvBP2It3KyAXlGsEQSrEcG68l+qGtIrijWAYDUiWPK/t36dbgX1imD1I1gtpmt1gmBF1Oq3WbW7FdUritWPXrW4fLDianVXrariekWxuhGsBopeLRys+FrVblZksEhWJ4LVQBKsVZOVlauyzYrtFcXqQ68aiIK1YrFSc1WyWdG9IlldCNYxVa/WK1Z+rsolK6FXNKsDvTp21WB5clUrWUm9olmtCNYhXa+WKpYvV4WSldgrmtWEXh26YrC8tapSrORc0awG9OqIsleLFMsdqxv3FCy9IloH6NURbbDqF8vdqT/WMdhqRbN20asD4l4VL5a7UU9sczDn6sb29JXRqwNXCpa7Txssc3CX6o7l+SsjWPvkvapbLHebtuXPwd2oF/kjKIxe7QroVc1gubu0I3UO7ji9kzqEyujVnoheVSxWYn1G/kwWd5WOpA2irtBeLR+smF6VC5a8SnvxGfxj8dw1apUwisII1o6gXhUrljZIx+UZ/oOR3BXqFDuMygjWe2G9KtQsaYxauzP1hwO48zMkaBbVEax3QnNVpFnCEPVEZ/oAJXd4ZohHsQKC9UZ8r9zJ0vRnqDeSQwTcwREQTWIZBGtTSq6cyRKUZ6Y1soMmuFOjMv0tLIVevUqrla1Yil7MdUZ30hB3ZMQkH8UaCNaT1FpZkqVoxXRmlGf1cuclhO77qI1g3cuvVXqyNKWYj4z0sA7usMSRfiZlEaw/plxlNkuTiSnaW+l7fHdTYqk/lpII1jdnrbKSpWnEJPHddDy+Oyjx9J9MPQTrk7tW3wKfcPQ/EyP3d0fi8w64Y5Ij5NsphV59lOnVp5gHFMVBQn1TTQNwhyRNzPdj91cTglUpV9+0zycKg4r8xo4n4K5IJu23U8RdTq4eLHeb3lE9nyYJklN+DtPeW0Ow3A3Jpfpy6njoyaWD5a7SvvnnExZBdNS/h77oT9zgDki6+e+mkOegXLlX7iAdm3o8UQ3+qf+ZKOAW98bgzofB1GdTyWZRLhosd4tajT6fqgWPPdCellEsdzwsRj+aUt4W5YLBcleoy8DzqTrwkgPxcfHBcqfDZOCbKWanKBcLlrs/A3ofUZWB1xyIj9Pd6ZtJuMPh0vvFVLNflAsFy52eQV3PKIvARg3Ex0UXy90Nn/7VqOO4KFcJlrs7E9ofUpaArRiIjwsOlrsaRmMLUkFLUGKCVaxY7uTManxMWQE2YyA+Tnm/r6NwR8NpfE+sWnty+l65ayPR8qCy/d9ugfg46Q0/j8LdDK/JhbFoL8q5e+UOjczxo+rWf7sE6vOkt/x4rrsYZtNbk60vKcHBcmbMXRmpg2fVLf+bDsgPDAuWOxh2it3J05uU2GC9/X8I5w6M3t7T6nb/TQUighVULHcv7CT7k6Q/KQHF2j07Zw7uuIR4/7i6zd9ugOoScbf9d6g7F36qJYo3kJSeP9V/eMvlI7jTEuXN4woXf6MAwmsE3vfPme5aVKBcpUBjTXEEKzhZ7qwE2nxe5d4/77/2KoE3/nOmOxYVKJcpynhU5MVqPDpsFO6oRNp6XuXav+y/+jJxd3470t2KGuQ7pTYVFXWx2k+OGIU7KcFeH1i69U/rH3CZuFv/OtKdihoiFktoMipjJ7Sd3Xkf89xBCff8wNqlf9z+kOuE3fvnie5SFCFfK6XpqAwf0nJ2/61MceckweMDa3f+cftjLhR38/Tql3SppBRRmTjn8OyhmxnmjkmK+wcWr/z98j8KOVp46O1kdyfK0K2Ulqgqk2ftHT16N0PcKclx98Dqjb/b/UcxRytP/f/clShEtFBawqwIzntz8sT9dHOXJMvfE4s3/s7TaGPOlp5Kr+4J1klM2xXNkRsHT91QH3dH8vw8sXbhHzyONuhs7bHuRpQyu0xi8q7ojp36d7ymhuLOSJ7vB9bu+6PH0QadLT3VnYhapjZJTdOVzTrIz1XcVBt3RTLdnli678/uRxt1tvRQdyKKGV8kNUVTdtqgPVd2W4fcDUn19cTSdX9xP9uos6WHugtRzOAaqc3n5DgNymOV97XLnZBkX48s3fcXf7MNO1p5pjsQ5YxskdpsShrDoDxWe2dvuQNiodz3DT+zXeNkdx/K6d4hucmOdHRBeKr61t5wt8NCue8bfma7xsnuPpTTuUFqcw3pzoLs1IB72+BOh4dy3zd8zzbuZOXR7jwU1LNAalP9GKuC6NC5Y1qr5U6HiXDht9xmG3Yw/4AVq7syMhNL31IF+VVl9/7R+M9Z7nC4KDd+w224YQcTrFgjqVGYWvnjJgRcWHr3DRNyd8NGufEbbtMNO5hgBRvKzbTpjR8PguTIyBv85u6GjXLjN9ymG3YwwQo21JtJ0+s+FYTRE7V/jdjRjNzZ8FFu/IbbeKPO5V8bjTZcnWHzyz7Zg8ETxX/v4cGU3Nnwka78q9t4o87lH7DCDYdnkGDXJ3sweKD6L2rdH5M7Gz7Kld9wG+8S57rTUNNMfPopNn06B2MHyv+i1r1bdFfDSLrzr27zXeJcdxpqmslPN8Wez+dg7DyClUO6869u8w06V3ukOw1FzSWoi2LNFTkYOU78LzYc3KK7Gk7arX92m2/Qudoj3WUoaq5BPSRbrujB/IHBN+huhpV26599TzjmXOmJ7jBUNd2hVpIl1wRh+rzgG3Q3w0q69S++JxxzrvREdxjKmk9RE82Oa4Iwf17sDbqbYSXd+hffEw45V3uiuwtlSXJ0RLPgoh6UCdabO3Qnw0u79s9+ZhxxrvZEd
xfK0hRpn2i/NT0Yvp+AB9q8O3cyvLRr/+xnxhHHao90d6EuTZP2qNZb04PxG9I/0ebduZNhpt37Jz8zjjhWeqK7CoWpsvSear01PRi/o4An2ro5dzHMpHv/4mfIAadKT3RXoTBVlt6SbbcoCMP3FPFMr7fmDoabdO9f/Ew54FTpie4qFKYrk7ANgUEYv6uIh3q9M3cw7KSL/+xnygGnKg90R6E0XZtUZRBS3lbIU73cmbsXdsrFf/Ez5YBDlSe6m1CaLk6qMigJ7yvkqV5uzN0LO+Xiv/iZcsChyhPdTShNFydNF8R0dxbzWM/35e6FnXLxX/xMOeBQ4YHuJNQmzJOgCnK6Wwt6rqf7cvfCT7j5L36mrD9TeaI7CcUpCzUbhQCymwt6sKfbcufCT7n6L37GLD9SeCDB2qdM1FwSomhuL+rJHm7KXYsClKv/4mfO8hN1B9KrA+JMBa31FMnthT3Z/U25a1GAcPVf/cxZfqLuQIJ1QB6qkK2eo7jBsEe7vyd3LQoQrv6G7zmrzxMGy92D8uSliljqWYJbjHu2u5PdtahAt/sb1H35UB9IsA7pY6Xf6VmCm4x7uL+D3a0oQbf7W8R9+XlzsgPdNVhAQK7KBWs7WaMHhN2buxUlyHZ/E8Fa3iV69d9msgb/eNituVtRgmz3N2n78vtJqA4kWA2u0avZYoU+Hr36I9v9bbWD5W7BEi4crI47jX0+evVLtftvhARLdR7BanGRXk0WK/wB3aEoQ7b9mwjW+i7Sq6l/uSH+Cd2dKEO2/ZsI1vquEqyZf8QiWGlk27+tcrDcJVjEVXo1UyyClUa1/W8oCyPuFcFqdJVeTRSLYKWRrf82grW+y/RqvFgEK41s/bfpgyU67B/BanWhYI3+21gEK41u/zcVDpa7A8u4UK9G/xGLYKWR7f82ZWQIlseFejVaLIKVRrb/2+TB0hz1yd2BdVw9WMc3TrDy6AqwhWCdwIV69e6/5b3nT4XclzsTdegKsOVDWCyCZXKlYI0Vi2Dl0RVgy89LlJ2lOOjGnYF1XCpYQ/91fgQrj64AW37fouqo+XO+uSuwkEv1aqhYCQ/q7kQZsgJsUgdr/pgf7gqs5FrBGvg/vBOsPLoEbCJYJ3CtXg0Ui2Dl0SVgE8E6gasFq7tYBCuPLgGbfl+j5iDFLd24I7CSq/Wqu1gEK48uAZsI1hlcrVdTxYq5IXcnytAlYJMqWKLs/XE3YCnXC1bnf0gn4VndoahCl4BtBOsELtirvn+7IeFp3aGoQpeAbUWD5U7AWq4YrK5iZTytuxRFyBLwhiZY6l4RrD4X7FVXsQhWGl0D3iBYJ3DJYHUUi2Cl0TXgDUVrPhSHPHAXYDGX7NV/Hf+fhRnP605FEboIbCNYJ3DVYDUXi2Cl0UXgjfnY/H4KsnsiWJ0u2qvmYhGsNLoIvEGwTuCywWosVsoDu1NRgy4C7xCs9V22V43FIlhpdBF4RxYs2R0RrF4XDlZ3scJuxJ2KGoQVeGO2Nr9fgu6WCFanC/eqrVgEK4uwAm8QrBO4crBaikWwsggr8AbBOoEr96qlWAQri7AC74iCpbshetXt2sE6LhbByiLMwDuaYOnuh2D1u3avjotFsLIoO/AGwVofwdp/vpSHdreiBGUH3iBYJ3DtXh0VK+ep3a0oQdmBNyTB0t3OP4I14OrB2i8WwcojLcGmyeIQrAqu3qvdYiU9tjsVNUhLsIlgnQDBel+stMd2t6IEaQk2KYIlvJ1/BGvE5Xv1rliJz+1uRQnaFGwpFyz37i+JYDUGi/+dMJY0BZsI1hkQLP8/YrlbUYI0BZs+poo1mbst7t1fEr36r7FYgdd3x6ICaQo2EaxTIFgEqwJpCjZVC5Z78xdFsP5rK1bg5d2xqECZgjemikWwiiBYnwiWmzIFb8wHS3o77s1fFMG6sT65uxYFSFuw7edNjv9h5d24F39ZBOuGYHkpY7BtJlgTqXvDvffLIljfCJaVMgbbCNYpEKwfBMtJGYNtv69y+M8Kb8a99usiWD8IlpMwBm8QrFMgWL8IlpEwBm9MBGu8dG+4t35hBOuP7cndtShAF4O3CNYpEKw/BMtGF4O3hoM1Xro33Du/NIJ1j2CZyGLw3u+7HP2Dsjtx7/zSCNYDguUhi8GOwfAMh+4d98qvjWA9IlgWqhjsIlgnQLCeOJ7cnQs/VQx2zQVLdhvulV8bwXpGsAxkNdhFsNZHsF4QrHyyGuwqESz3xi+OYL3Kf3J3L+xUNdg3Up7fT0B1E+6NXxzBekWw0qlqcIBgLY9gbch/cHcw3FQ1ODCQHnWvCNYcgrXB8ODuYpjJcrCvQLDcC786grWFYCVT5eDAeLBkt+Be+NURrE35D+5OhpesB/sI1vII1iaClUvWgwP99SFYtRCsbfkP7m6GlawHB0aDpbsD98KvjmC9QbAy6YKwj2CtjmC9k/7g7mg46YKw76O7P+JeEaxZBOsNgpVIWIRd3cFS/wMWwZpFsN4gWImERdj180I7/4DwDtz7vjyC9U76g7urYSQswi6CtTyC9Q7ByiMswq7fN9r1B5R34N735RGstwhWGmUS9vQGi3/AKodgvUWw0giTsItgLY9gvUewsgiTsKszWH11a+Fe9/URrPeyH9ydDR9hEnYRrPURrPcIVhJhEvZ1Fauvbk3c234CBOs9gpVEmIR9PcHq/MexJu5tPwGCtSP3yd3Z8BEmYR/BWh7B2kGwcgiTsK8jQh8dv23m3vYzIFjvEawUwiIc6KgQwSqKYL2X++jubtgIi3BgJFjK67uX/QwI1nsEK4UyCfvuvubmXyqv7172MyBY7yU/uzscLsok7CNY6yNYO3If3h0OF2US9j18z8//gzc/VF7fvexnQLB2EKwMyiTsOv7Gt34pvAH3sp8CwXqPYGUQFmFfy1f++jvhDbh3/RQI1nu5T+8Oh4uwCPuaP3WCVRjBeo9gZRAWYddYr/hLc4ohWO/lPr47HC66IOwjWKdAsN5Lfnx3OUx0Qdg12iuCVQvBeo9gZdAFYRfBOgeC9V7y87vLYaILwq7hYOmK5d51r48P0TEE6x2ClUHWg30Ey4xghUsegLscJrIe7PMHS7Kvy/ogWOEIVgZVDw4QLC9VsJqLFb6v9WQPwJ0OD1UPDhAsL4IVj2BlUPXgAMGy+iBY8QhWBlUPDhAsK4KVgGBlUPXgAMGy+pAVi2C9lT4AdzssVD04QLCsCFYGgpVA1YN9470iWAIfBCsDwYqnysEBgmUlDFZrsRL2tRyCFU+VgwMEy4pgpUgfgLseBqocHCBYVgQrBcGKp8rBAYJlRbBSEKx4qhwcIFhWBCvFwQACxuLORz5VDg4QLCuClWJvADGTcecjnyoHBwiW04cyWI3FUq7lMt4NIG407n6kU+XgAMFyIlhJticQORt3P9KpcrBvolcEax7BSrI1geDZuAOSTZWDfQTL6meU2tMI1ovHCaQMxx2QbKoc7Bsq1TfVPWi2dUk/o9Setk+4k8to/qaF13QHJJsqB/u6K3VHdQ+abV3Szyi1p6Xt5Crav2nlVd0FSabKwb7eSN1T
... [remainder of base64-encoded PNG output omitted: instance human mask preview] ...",
+       "text/plain": [
+        ""
+       ]
+      },
+      "execution_count": 3,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "Image.open('./mhp_extension/demo/demo_instance_human_mask.png')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "image/png": "... [base64-encoded PNG output omitted: global human parsing preview] ...",
+       "text/plain": [
+        ""
+       ]
+      },
+      "execution_count": 4,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "Image.open('./mhp_extension/demo/demo_global_human_parsing.png')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "image/png": "... [base64-encoded PNG output omitted: multiple human parsing preview] ...",
+       "text/plain": [
+        ""
+       ]
+      },
+      "execution_count": 5,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "Image.open('./mhp_extension/demo/demo_multiple_human_parsing.png')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    
"pygments_lexer": "ipython3", + "version": "3.7.6" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "metadata": { + "collapsed": false + }, + "source": [ + "## COCO style annotation transfer" + ] + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/preprocess/mhp_extension/README.md b/preprocess/mhp_extension/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7c771109c1a943b0610978b7c01b024eabf9e08a --- /dev/null +++ b/preprocess/mhp_extension/README.md @@ -0,0 +1,38 @@ +# Self Correction for Human Parsing + +We propose a simple yet effective multiple human parsing framework by extending our self-correction network. + +Here we show an example usage jupyter notebook in [demo.ipynb](./demo.ipynb). + +## Requirements + +Please see [INSTALL.md](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md) for further requirements. + +## Citation + +Please cite our work if you find this repo useful in your research. + +```latex +@article{li2019self, + title={Self-Correction for Human Parsing}, + author={Li, Peike and Xu, Yunqiu and Wei, Yunchao and Yang, Yi}, + journal={arXiv preprint arXiv:1910.09777}, + year={2019} +} +``` + +## Visualization + +* Source Image. +![demo](./demo/demo.jpg) +* Instance Human Mask. +![demo-lip](./demo/demo_instance_human_mask.png) +* Global Human Parsing Result. +![demo-lip](./demo/demo_global_human_parsing.png) +* Multiple Human Parsing Result. +![demo-lip](./demo/demo_multiple_human_parsing.png) + +## Related + +Our implementation is based on the [Detectron2](https://github.com/facebookresearch/detectron2). + diff --git a/preprocess/mhp_extension/coco_style_annotation_creator/human_to_coco.py b/preprocess/mhp_extension/coco_style_annotation_creator/human_to_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..8eccb3a8f63e9b76eade5b2036526d91b8483dc2 --- /dev/null +++ b/preprocess/mhp_extension/coco_style_annotation_creator/human_to_coco.py @@ -0,0 +1,166 @@ +import argparse +import datetime +import json +import os +from PIL import Image +import numpy as np + +import pycococreatortools + + +def get_arguments(): + parser = argparse.ArgumentParser(description="transform mask annotation to coco annotation") + parser.add_argument("--dataset", type=str, default='CIHP', help="name of dataset (CIHP, MHPv2 or VIP)") + parser.add_argument("--json_save_dir", type=str, default='../data/msrcnn_finetune_annotations', + help="path to save coco-style annotation json file") + parser.add_argument("--use_val", type=bool, default=False, + help="use train+val set for finetuning or not") + parser.add_argument("--train_img_dir", type=str, default='../data/instance-level_human_parsing/Training/Images', + help="train image path") + parser.add_argument("--train_anno_dir", type=str, + default='../data/instance-level_human_parsing/Training/Human_ids', + help="train human mask path") + parser.add_argument("--val_img_dir", type=str, default='../data/instance-level_human_parsing/Validation/Images', + help="val image path") + parser.add_argument("--val_anno_dir", type=str, + default='../data/instance-level_human_parsing/Validation/Human_ids', + help="val human mask path") + return parser.parse_args() + + +def main(args): + INFO = { + "description": args.split_name + " Dataset", + "url": "", + "version": "", + "year": 2019, + "contributor": "xyq", + "date_created": datetime.datetime.utcnow().isoformat(' ') + } + + LICENSES = [ + { + "id": 1, + "name": "", + "url": "" + } + ] + + CATEGORIES = [ + { + 'id': 1, 
+ 'name': 'person', + 'supercategory': 'person', + }, + ] + + coco_output = { + "info": INFO, + "licenses": LICENSES, + "categories": CATEGORIES, + "images": [], + "annotations": [] + } + + image_id = 1 + segmentation_id = 1 + + for image_name in os.listdir(args.train_img_dir): + image = Image.open(os.path.join(args.train_img_dir, image_name)) + image_info = pycococreatortools.create_image_info( + image_id, image_name, image.size + ) + coco_output["images"].append(image_info) + + human_mask_name = os.path.splitext(image_name)[0] + '.png' + human_mask = np.asarray(Image.open(os.path.join(args.train_anno_dir, human_mask_name))) + human_gt_labels = np.unique(human_mask) + + for i in range(1, len(human_gt_labels)): + category_info = {'id': 1, 'is_crowd': 0} + binary_mask = np.uint8(human_mask == i) + annotation_info = pycococreatortools.create_annotation_info( + segmentation_id, image_id, category_info, binary_mask, + image.size, tolerance=10 + ) + if annotation_info is not None: + coco_output["annotations"].append(annotation_info) + + segmentation_id += 1 + image_id += 1 + + if not os.path.exists(args.json_save_dir): + os.makedirs(args.json_save_dir) + if not args.use_val: + with open('{}/{}_train.json'.format(args.json_save_dir, args.split_name), 'w') as output_json_file: + json.dump(coco_output, output_json_file) + else: + for image_name in os.listdir(args.val_img_dir): + image = Image.open(os.path.join(args.val_img_dir, image_name)) + image_info = pycococreatortools.create_image_info( + image_id, image_name, image.size + ) + coco_output["images"].append(image_info) + + human_mask_name = os.path.splitext(image_name)[0] + '.png' + human_mask = np.asarray(Image.open(os.path.join(args.val_anno_dir, human_mask_name))) + human_gt_labels = np.unique(human_mask) + + for i in range(1, len(human_gt_labels)): + category_info = {'id': 1, 'is_crowd': 0} + binary_mask = np.uint8(human_mask == i) + annotation_info = pycococreatortools.create_annotation_info( + segmentation_id, image_id, category_info, binary_mask, + image.size, tolerance=10 + ) + if annotation_info is not None: + coco_output["annotations"].append(annotation_info) + + segmentation_id += 1 + image_id += 1 + + with open('{}/{}_trainval.json'.format(args.json_save_dir, args.split_name), 'w') as output_json_file: + json.dump(coco_output, output_json_file) + + coco_output_val = { + "info": INFO, + "licenses": LICENSES, + "categories": CATEGORIES, + "images": [], + "annotations": [] + } + + image_id_val = 1 + segmentation_id_val = 1 + + for image_name in os.listdir(args.val_img_dir): + image = Image.open(os.path.join(args.val_img_dir, image_name)) + image_info = pycococreatortools.create_image_info( + image_id_val, image_name, image.size + ) + coco_output_val["images"].append(image_info) + + human_mask_name = os.path.splitext(image_name)[0] + '.png' + human_mask = np.asarray(Image.open(os.path.join(args.val_anno_dir, human_mask_name))) + human_gt_labels = np.unique(human_mask) + + for i in range(1, len(human_gt_labels)): + category_info = {'id': 1, 'is_crowd': 0} + binary_mask = np.uint8(human_mask == i) + annotation_info = pycococreatortools.create_annotation_info( + segmentation_id_val, image_id_val, category_info, binary_mask, + image.size, tolerance=10 + ) + if annotation_info is not None: + coco_output_val["annotations"].append(annotation_info) + + segmentation_id_val += 1 + image_id_val += 1 + + with open('{}/{}_val.json'.format(args.json_save_dir, args.split_name), 'w') as output_json_file_val: + json.dump(coco_output_val, 
output_json_file_val) + + +if __name__ == "__main__": + args = get_arguments() + main(args) diff --git a/preprocess/mhp_extension/coco_style_annotation_creator/pycococreatortools.py b/preprocess/mhp_extension/coco_style_annotation_creator/pycococreatortools.py new file mode 100644 index 0000000000000000000000000000000000000000..3f3d8332ceda5fa4409095a0ec56d181ea162273 --- /dev/null +++ b/preprocess/mhp_extension/coco_style_annotation_creator/pycococreatortools.py @@ -0,0 +1,114 @@ +import re +import datetime +import numpy as np +from itertools import groupby +from skimage import measure +from PIL import Image +from pycocotools import mask + +convert = lambda text: int(text) if text.isdigit() else text.lower() +natrual_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] + + +def resize_binary_mask(array, new_size): + image = Image.fromarray(array.astype(np.uint8) * 255) + image = image.resize(new_size) + return np.asarray(image).astype(np.bool_) + + +def close_contour(contour): + if not np.array_equal(contour[0], contour[-1]): + contour = np.vstack((contour, contour[0])) + return contour + + +def binary_mask_to_rle(binary_mask): + rle = {'counts': [], 'size': list(binary_mask.shape)} + counts = rle.get('counts') + for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order='F'))): + if i == 0 and value == 1: + counts.append(0) + counts.append(len(list(elements))) + + return rle + + +def binary_mask_to_polygon(binary_mask, tolerance=0): + """Converts a binary mask to COCO polygon representation + Args: + binary_mask: a 2D binary numpy array where '1's represent the object + tolerance: Maximum distance from original points of polygon to approximated + polygonal chain. If tolerance is 0, the original coordinate array is returned. 
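+        Example (illustrative only; the exact vertices depend on skimage's contour tracing):
+            >>> m = np.zeros((4, 4), dtype=np.uint8)
+            >>> m[1:3, 1:3] = 1
+            >>> polys = binary_mask_to_polygon(m, tolerance=0)
+            >>> # polys is a list of flat [x0, y0, x1, y1, ...] rings in COCO order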
+ """ + polygons = [] + # pad mask to close contours of shapes which start and end at an edge + padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0) + contours = measure.find_contours(padded_binary_mask, 0.5) + contours = np.subtract(contours, 1) + for contour in contours: + contour = close_contour(contour) + contour = measure.approximate_polygon(contour, tolerance) + if len(contour) < 3: + continue + contour = np.flip(contour, axis=1) + segmentation = contour.ravel().tolist() + # after padding and subtracting 1 we may get -0.5 points in our segmentation + segmentation = [0 if i < 0 else i for i in segmentation] + polygons.append(segmentation) + + return polygons + + +def create_image_info(image_id, file_name, image_size, + date_captured=datetime.datetime.utcnow().isoformat(' '), + license_id=1, coco_url="", flickr_url=""): + image_info = { + "id": image_id, + "file_name": file_name, + "width": image_size[0], + "height": image_size[1], + "date_captured": date_captured, + "license": license_id, + "coco_url": coco_url, + "flickr_url": flickr_url + } + + return image_info + + +def create_annotation_info(annotation_id, image_id, category_info, binary_mask, + image_size=None, tolerance=2, bounding_box=None): + if image_size is not None: + binary_mask = resize_binary_mask(binary_mask, image_size) + + binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8))) + + area = mask.area(binary_mask_encoded) + if area < 1: + return None + + if bounding_box is None: + bounding_box = mask.toBbox(binary_mask_encoded) + + if category_info["is_crowd"]: + is_crowd = 1 + segmentation = binary_mask_to_rle(binary_mask) + else: + is_crowd = 0 + segmentation = binary_mask_to_polygon(binary_mask, tolerance) + if not segmentation: + return None + + annotation_info = { + "id": annotation_id, + "image_id": image_id, + "category_id": category_info["id"], + "iscrowd": is_crowd, + "area": area.tolist(), + "bbox": bounding_box.tolist(), + "segmentation": segmentation, + "width": binary_mask.shape[1], + "height": binary_mask.shape[0], + } + + return annotation_info diff --git a/preprocess/mhp_extension/coco_style_annotation_creator/test_human2coco_format.py b/preprocess/mhp_extension/coco_style_annotation_creator/test_human2coco_format.py new file mode 100644 index 0000000000000000000000000000000000000000..17339187305a97fa7ab198cf1d8127a76ebdf854 --- /dev/null +++ b/preprocess/mhp_extension/coco_style_annotation_creator/test_human2coco_format.py @@ -0,0 +1,74 @@ +import argparse +import datetime +import json +import os +from PIL import Image + +import pycococreatortools + + +def get_arguments(): + parser = argparse.ArgumentParser(description="transform mask annotation to coco annotation") + parser.add_argument("--dataset", type=str, default='CIHP', help="name of dataset (CIHP, MHPv2 or VIP)") + parser.add_argument("--json_save_dir", type=str, default='../data/CIHP/annotations', + help="path to save coco-style annotation json file") + parser.add_argument("--test_img_dir", type=str, default='../data/CIHP/Testing/Images', + help="test image path") + return parser.parse_args() + +args = get_arguments() + +INFO = { + "description": args.dataset + "Dataset", + "url": "", + "version": "", + "year": 2020, + "contributor": "yunqiuxu", + "date_created": datetime.datetime.utcnow().isoformat(' ') +} + +LICENSES = [ + { + "id": 1, + "name": "", + "url": "" + } +] + +CATEGORIES = [ + { + 'id': 1, + 'name': 'person', + 'supercategory': 'person', + }, +] + + +def main(args): + 
+    coco_output = {
+        "info": INFO,
+        "licenses": LICENSES,
+        "categories": CATEGORIES,
+        "images": [],
+        "annotations": []
+    }
+
+    image_id = 1
+
+    for image_name in os.listdir(args.test_img_dir):
+        image = Image.open(os.path.join(args.test_img_dir, image_name))
+        image_info = pycococreatortools.create_image_info(
+            image_id, image_name, image.size
+        )
+        coco_output["images"].append(image_info)
+        image_id += 1
+
+    if not os.path.exists(os.path.join(args.json_save_dir)):
+        os.mkdir(os.path.join(args.json_save_dir))
+
+    with open('{}/{}.json'.format(args.json_save_dir, args.dataset), 'w') as output_json_file:
+        json.dump(coco_output, output_json_file)
+
+
+if __name__ == "__main__":
+    main(args)
diff --git a/preprocess/mhp_extension/demo.ipynb b/preprocess/mhp_extension/demo.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..9ceaa358b93868b3c6a842776551578688646c53
--- /dev/null
+++ b/preprocess/mhp_extension/demo.ipynb
@@ -0,0 +1,306 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "source": [
+    "### STEP1: Generate COCO Style Annotation\n",
+    "\n",
+    "Here we show a basic usage example using the DemoDataset in `data/DemoDataset/`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python ./coco_style_annotation_creator/test_human2coco_format.py \\\n",
+    "--dataset 'Demo' \\\n",
+    "--json_save_dir './data/DemoDataset/msrcnn_finetune_annotations' \\\n",
+    "--test_img_dir './data/DemoDataset/global_pic'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### STEP2: Generate Instance Prediction\n",
+    "Here we provide a cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv model fine-tuned on the CIHP dataset for human instance masks. 
Download the pretrained weights into `pretrain_model/`.\n",
+    "\n",
+    "- [detectron2_maskrcnn_cihp_finetune.pth](https://drive.google.com/file/d/1T797HPC9V1mmw0cDoVOPSF1F_rrTcGPG/view?usp=sharing)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cd ./detectron2/tools/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python finetune_net.py \\\n",
+    "--num-gpus 1 \\\n",
+    "--config-file ../configs/Misc/demo.yaml \\\n",
+    "--eval-only MODEL.WEIGHTS ../../pretrain_model/detectron2_maskrcnn_cihp_finetune.pth TEST.AUG.ENABLED False DATALOADER.NUM_WORKERS 0"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Crop the original images using the predicted bounding boxes."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cd ../../"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python make_crop_and_mask_w_mask_nms.py \\\n",
+    "--img_dir './data/DemoDataset/global_pic' \\\n",
+    "--save_dir './data/DemoDataset' \\\n",
+    "--img_list './data/DemoDataset/annotations/Demo.json' \\\n",
+    "--det_res './data/DemoDataset/detectron2_prediction/inference/instances_predictions.pth'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### STEP3: Predict Local and Global Results\n",
+    "Download the pretrained weights into `pretrain_model/`.\n",
+    "\n",
+    "- [exp_schp_multi_cihp_global.pth](https://drive.google.com/file/d/1s30hj8zeYj0wuTA5Rek-one-v5uT7kX9/view?usp=sharing)\n",
+    "- [exp_schp_multi_cihp_local.pth](https://drive.google.com/file/d/1dwDrXHkhAe_nYtnSqi548zrjo5mlSPF0/view?usp=sharing)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/home/peike/Projects/Augmented-CE2P\n"
+     ]
+    }
+   ],
+   "source": [
+    "cd ../"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!export PYTHONPATH=./:$PYTHONPATH"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python mhp_extension/global_local_parsing/global_local_evaluate.py \\\n",
+    "--data-dir mhp_extension/data/DemoDataset \\\n",
+    "--split-name crop_pic \\\n",
+    "--model-restore mhp_extension/pretrain_model/exp_schp_multi_cihp_local.pth \\\n",
+    "--log-dir mhp_extension/data/DemoDataset \\\n",
+    "--save-results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python mhp_extension/global_local_parsing/global_local_evaluate.py \\\n",
+    "--data-dir mhp_extension/data/DemoDataset \\\n",
+    "--split-name global_pic \\\n",
+    "--model-restore mhp_extension/pretrain_model/exp_schp_multi_cihp_global.pth \\\n",
+    "--log-dir mhp_extension/data/DemoDataset \\\n",
+    "--save-results"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### STEP4: Fusion Prediction"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!python mhp_extension/logits_fusion.py \\\n",
+    "--test_json_path ./mhp_extension/data/DemoDataset/crop.json \\\n",
+    "--global_output_dir ./mhp_extension/data/DemoDataset/global_pic_parsing \\\n",
+    "--gt_output_dir ./mhp_extension/data/DemoDataset/crop_pic_parsing \\\n",
+    "--mask_output_dir 
./mhp_extension/data/DemoDataset/crop_mask \\\n", + "--save_dir ./mhp_extension/data/DemoDataset/mhp_fusion_parsing \\" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualization" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABLAAAAOECAIAAAA+D1+tAAEAAElEQVR4nLz9Wa8ty5EmiH1m5h6x1tr7TPdeMjklkzMrkznUkFXV1aVuNCAJ/VCtlkotAXpQQ79JT3rRiwABepMgaAAEVLcKrSrVlBOZTA7JJJmcyTvxnmHvtSLczUwP5u4Re59zmGRmqgOXh2vHihXh4YO5fWafmdF/9T/+UkpJRNyo1qrq7g7w8XhclsWsHo/H4/FI5LVWVb06zIk5pZRY3B1qxBARdzegWimmYJKU3H3VNcmRiIgEgBrMjJLknKuuIkhsIDVdal3VVnef6QGBmZmIAABwJ4MD0OpmxswpTcxsZsXUCwAQu4gQQVXdncinaSpF3Z2ZATIz4TzP82pUSgEsT8SEdb1VK0SYJAEACGByhjNAAKZ0rLXWWgFwIhExd4cCKKrm6oRoqkLdfaYPpcTurqWCbJJE8ForYCIEQK2omXld1/W8LDk9WNd1XVfVYmZqxd2JyI0ul/X29rwutVZTdTgREWFlBgBysNA0TXHbnLOWCvA0TTlPKaWcc07z8VhPp9PxeMiH+cGDB0+ePDqdDkJE5DnJ1dXVnKSsa10XM6vL6uUDVb1cVnfPeXZ3U8Tdnjx5cjqdVFWtALi9vX327BlxOhwOx8MEQEtV1SlxzjlLutzcmtXT8ZhSWtfVqjLzg+Mbjx49OV1fGwgk0+GUjgfidP34CUh4OuBwwvGEJICAAMi3//gPrZ4//7lP/x/+9/+768N0SFIv59Nhvty+WNeFWFmUsp9O89XDw0/flxfPn1qpxykfUk7E7nRZVabrc+Xp+o3/2f/qf5Pe+qhZuij94R995YPvfef2+duf/+yHb178+Ofv/ujPv/bNf/dvvvKDH7w3nx49fONNsL/zwdsAH+brj33kE1rpsj5lh7urqqoSUXTOupa6llJKrfVwOCzL8vDhQzP7/Efy5z/7aTN1LFrXnFNZ11JKTlNKkzutpYBsOuRpyoA9u72YGRHlnOC+rpdSSkxjkSzgsuq6Vnc6HA6n43VRqKqZARARInJ3M3PCNE3MDMDdL5dLrTXnbH42M1VNKU3TYV3XpZacc5Kprqup55znfDCzda2lFDBNKYuImdVaoRbH5XLJk6hqsUIEwIhoOsxrpcvlEvdPKQEmIimlaEk0xsyszWZ6lB+tepkPKWWcl1siknwsK2CzW075APfL5Qa05omJdV2Zkzx98Vzmw1LLv/2DP1zVHKTqH/voJxJPP/r+j2aeMuXr4xUR3dBNDNM8z0S0rouqsmBd15T4zTfffPDg6jvf+c7t7W3KknN+9gy1VhFxVzN79PjBo+vrtVxub18AOJ9vpjl/4XOfPxymn/70pymlIotWX5bCnK5OD8vq7777/rOnz4lkmg4f//jHnzx58rWvfe3q6upyuX3x4kUsVWYcD/M85w+/9cZnPvOpq9PxBz/43nI+u9bz+WyqRLSuq4iAsqqCqJg+e/b8spQvfuFLAH3wwbPlfHv74vnnPvvr6/rs9//Bl/7n//y/cF0mXERkzAQyj14nIg65LUJEgMVASJqYecwTAEYAwMzuLiI5Z3Vb15WIpmmakpgZuRORiIxhBXNMDHVzJmbmJMystQ20N3Een6VJZhJ3D4lHMc1QWvNg7goysFOTsQSAIEYMZyIGYBzNBQtATnCFGarDHdyeCCFI/DYWrxvBnSDMiZhBAjc4O6IHLBoKAJgBONyNWuuJDPFVf3egP6t90T/fv2z7Zjt8dyVFA2IgiCh+G9/GV7/qQfCXf0h37+ZwwPpf0Vrr/3n/MH7C418CkV9GgwEQtZa7W5yPvXIcmXN7aIgpd5i7u0iKP4lIiIkZ7u5utv916/9xh7iemUEUf1c9j8aMWW1W47Jxxt1DZKnlcdv9t4w23ONZ8W9dVtodoz/j87isNcBhVX13jBeJZ8Wvxn0AiCOUjVjCIdiZWVXvXRl3YyYiApGbtV2AGMwwix3KzHjXbwwe/SYiiBu6xx00RmT/4rpGv7mR17bRcHQXyK0SEVyfP3/+4sUzAAeexpu2nmQWkVrr6H/bPWWej6ra9CvmEClENN73Xle7a2yIOedQO0k457yua594aqFBMjPzel5DHoZAi1kXbYgeDo0xdigzI+SUEogMGnPCVNdapmkCbCh77q7uZjbN6c4kBEy1quacm1hjdrNaq8JFhGXu04wAMEv0fPQAEbUB7fKZuHUm+Vh6W7eE2AnRGgvFDyAiQ59dTCICYTciYWYGk7ubNilIXHeTsO8Czr3P9/OfsRN0+0lC6PvM3SkKgJEcbuZ9ujIRMdhgrxKM2AmiOwfdkZnbUX1DCuwANbHDFAKqS6Gx+sR2d7N+8zuHu1NfqqqFua1W06qqpsXdp2mKdyYSMMMQGmHIHyJiSomZiFzNrDLIoVBzN7i6KwCQSTrEE0NQMLNAzIxBbUTcvdQAX+4+J2VmxJ5lprWu62o1lkNGX1xMTQeo1ULQOblDzapZdVhdC5EzUWh05CAiIUqf+MTH29xy1FprCbHC67oSuSoBdrnclrrUWs2sXCZ3F6KUkhC7G8wAqPvhOB2Ox5QTJwFR1dXdYeoghQKsbm7ETOY1FjyDQNH/zk7uPk1zTJrd6iUmcncRjnVL1NTTopVNxvjFphCTstbqXXFvg+u6rmuF1FrdlZiFY02yiNRSiDgWdvRGjFNoQjlnZnay1io3jfGBNelIJC4KX9dLrUxEMCd2hdJuu2Vm4pSIiKdpmvI8rwsfj8fT6RCyaS0NDyyXknM+nU4AwZOqmsENNy/eBWKkVlW9XG7RpVut1aqKZBFhlgAqwpcQ/znnPKWcQ5DKG48fHo/HRw+uD1OeUn706NH11VFVr+ZDMlK7NcPxdGKWFy9evLi5ABeWiTjnSYizqqrBwUnE3deiqlrXYmZFaFqVmQmW8qSGy835cns2s5S5Xmhd1/nmmYPVwDlJOhjzzfkCTpSn+XA8Xl3P82xgM1urvvv2z+ZMtN4cp8wwUwMs9GYAxRyu5AZGgdV6JBIRJ6KitTqgqIo00zRNn/rUp4jZzmc+PRbgd373S//iG9985533L7dvkz59/GCC8+VyOZ1Oq7qZEYGJiEkSiRARl5tFEHKemHNMsFprzIuYM6WUdV1LKUR0Pi9L0SxgToo1lGY4uVOtQ4pCVZfFiHzIejND31CJaF2rWQGn2OdqtXVdCWfvijgA87ah
97sTUbp+055Wcu0O9eWfQxZM3c61dQlWJPMgV1CwGMLMzqWotymxrqFP4fD4fz8s8zyjmlI5n++lPPnjvvR984xt/+ufff+/ly9em3csu9Md/9AfzPP/y7idf/ep0PNqUXzFk4t3pVG6uJiC5sTo5sVN2UNQYrKbkBnNTBSdmuOuynHIWb3JTzk4wgzURajJAI2cqVg81M2JXLdYEnMNAajSEcDvCLNbjlhpFfXfZLn3b7JL2Ci7/+Iwyyrz57z2T66+EGzfb/HqePlzH+Bkshv6jt8SjPqvdjy6uT+Eb8LyG8uLbNR51cZP3QZG3pME+/Pox9/i69zHh29V9hmEX8zR+ZV1uvu0vPd3o12jbHqPNAz6MHH7Kbx8iw/HhI5a1h2vx8onfEjCky+83lwjrZURlN5EQc+7Y09EiaX02EMMjs9haeAMgIpFkKCABG5MkkeTE7LUU2nhv4+Au+39BxRqan2MFi8E0DvAgAzg4zOzWFdz2fWaAhV1Rmcmp1fLqIcGepRc0on4nUYgUzDZSgWJJ6Aa0MhqnIm4yflutE3TXOOFasxFAB2YrJhQe78D7qPPIiliDlmTd4BkuOfSJPE7PDkRtAyKPSGHXim+3xzIiUUJCRNbDg2FR97WdL70Y/f2OsdKecbhs/j1olwvwvWjhI4H6QbGmUEVeoeP6200PhNYu3LVlEDR83pRpHFGhIUbyMCzbXtJCzc3pwgNFgIgsdjznLgXUCJB9PEhk+1ljr1AXWhu+DmzcLuNX6M5ldw8/r+n9mhNbNMjdezJMlPTjH/10YIB5nsM7HuZRBFumaRKR6+vrcHgDPE1TzjkYhqezzzNyzldXz4M+UOtiXdLTrCZkZnEv7hEZM1MvXnPOQ4Be3UwNcDcwaJRsRutJRMynR8CazRRTwlv1sHhUAjTEZILBFZkmRmLBNtrYZ+301AhX3SxbBV6p1TdjX/UMMDj6gKWefmbaFCCoi5d4pzIG1BjWPxr6sqG2Il3OFH2j8ksabQgnjBVWG1c4QF2NZSLWs/GOB1oLwyillGTqILkhMe1Kp2ONjlESQHfYzdI1S9FdCCMZL46PqwzqbAAtVY1BFYgxXAZxnt0+qWopJXIR+zjRrsLVbbzOIRnvukGUDY2z3dLlEF/zEywAoVDnlG6TE+KfZoaCkHMBJPIjhDhl6YgopAXZyYU5SVYrHQ1uV6Mo9r3uO/25nGHkyjFsrGpRItK+Q9hWbAkAQTybGyMCOyBKYyrBRJiqxKyAu1dNZ1eYm7dgIpmX6mWptaDrWbn7mbrG6YsXhZlTSmmeoq9SStM0PXnyZJI0TVOWlDJPknLk+1L2kBSnuHFxM4eRpJxnNYQDKCWmsx0Pt65gPsBT4ibRREQp5bKczJyoEpFXF5EkkwGgc0hRJaY0ZXcv56XU8253fTicQnyzWPnggw/e+Xv/AOp5mrRYvtrDDeW8HA9Pnz6NUFoSivE2pt52qFysm/2f0vXQQ+ul1jpNOzOttRIpETmkrWm2spRL0fP53ArAdKrzmERj6Ri3EX+nlMxyrbodrmMYhMvMzIJHF+8FADwBCJXmMa2HARFQf3WXpORLE8hJjRS6sl/GlIkztwmS5N63zhTrQ0slIHL3amq1HA4Hnq/2V8+rvvnLn7z/7W9991vf+s4vfvHLn/z4L6+ubqbd1TRNDLu62v29v/dH/+Jf/Iuv/eEfFPrwL374433+/Wf7/8Evfv6Lw5vDyxe3y2n55MUn1dyVWOaUd5BU1RfTnNndc87kuVYNPFyX4lbdld0SsUPNLEjXnMgNjQEQ0889qCLn83lZzswUvksimvc7dPAMVUxrHGNEFTBiF78yXvhNxgY/H1zhcaS1EsyrldD/23rl3i9XoYzHrrsxB1fu/WdkP17Kt3QrRO4fdfGvsF14YCFv5a/agcNbSnSvNsZAv/db8/S1YOD4VdPSW3/V4dWDHLzPbPciqMNaGGbovUceCecPf/X4h1sEf3EiAj/y0G8LNj7+fuPzGN59ePRYlveIykZq2wAKgozz4GQCUeQ2pGrgZlotgEg4nrY2qNG2KD3a1buUJtBWzzHvhnmmcHQHWXtx7kSpj+3OtUMCNzn0Rgnl1STgFGY6Vi2VzYVGfZ2IRRIR+mm3HddlRZssifdVAut4i8P6YzqYJDD1ZqknNLXJbTzN+1vY0Brbq4lHJiMHwA0W9Jm7BgHGErbxdK+RQAbQaxJ2NDiEZB54tH5dL8lvVXt0RowP1/nbVZE3YsgA1tUvpgaP/45twsHekoww9Gki+xfdoms4sQcYOaoamHXY3bb0eNnYpI9RKI0zo1Q14yi4wpCun4JmYhJCeNSMsEEQF60FFca0HUTT9qTuQM9KjcL0x7MaQvjOl1qAYl21z7u+iJnvdrvQwbMF05x2u11P+kKeJCV+551nknie834/T9MU6ZY57fR8GsAygIG6m1lQE+PO1G1gFQBb28t6HuPgNK5ZEs0qKhF09ealY5Az5FwKQYjgTqoaCjHu0E0kqvVIX/5UW+7QaEQkviqIRpTG19iOd+xR+74V0hvxJlZk4hbYMjgCazmKNvyGz5JXnfNuptjAMOtLjtsIOEHxRldjdHuYmcHAhBC7bRBxvQGizs0YDzhWQu8WKHoIcbA4xlfDPO0/adA3rtLLLSTu9FQievpsF+NhpImarVnpvgHDUYXSPMQeLWpojjcVQzS2hYFmAUiaVAf1lBvF31kktbKW3gI73SFqBBYWh7lL8OtC8WWzlLhb8NUurAFiRw9a17JVlWwvi5m0VFcjb6oMw+xm5qjUsTHdwsdzHq8JnpgtLiKAuWWmPOeYB0QETJh3AbBVFWrxd10KgTmxdCldVzNQdXdKVo0X5dO53SNzlvTRL+9iIw4YsJ/n/X4/TdM7T2Zh5Jx3uyknIdqxVBY6Ho/npaq2UHNKaZqSqookM4c7MY3XahVlUfOqSuww82liJlZVTgaDoZpBJAuRMtz1ww8/2F1dnxd9fvOuE/2bf/tf/Ml/+7/j5kc9JxFSZyHkNN1cX91cQzilFByfka7sG5/CMCu3f8cU077Oes9ZpSYCTtQSQdsETCmllEOeN+LPEbsbM3RMhyFpO64YDqNa6zDzOoRcJ3sP33kQ0eMMSSYi6iLy66ykLtUw4uTWw4xj+nezY11vO6F0qMvQyBx2d0DNghftOedYrEop57JM05TzfHV182bhH/zwJ9//3g/+9E+/9b3vff+X7/8yBsD+inKa5jlf38xf+vIX/uSf/P1/9B9+nW5uMO1vX3xwJe/+4Zef1fN/UM7LixevsuQPf/nx3WF59fruzd35cFzeHI6vXt/eHc/p6ksff/xxEs95fvXyjdaym/dmlT2FO73hFTUldfctUGjRBgfCuZmaAwvktdZlOdW5dWwDhL2jRHoJb+Ai6nXRfoN47zdzqkvrbUWDWG0dPBawenD1i8qBGmeOkNpmT+HLbhlhwAvLuZ+w39hDmdY1w6edhMiC5OZRVm3FKg2xUPPKo2tvDnPtU9omKnuhHkHNBBzI+Vc0gP0CFT/skPu9Teu3D1/H4z/Znn
3cXQPqtP7sQXvUDr4PC22wGMxHkMtdRSSch0Q9Nht+T4363eF6ZRA3xaVaPSxph1vIxxFAUWFrNUAjoYKYaCwy5CCyHoQUoksvBgAwey8YeM+qcWrq0OirKzMTBKSbJfWiikbIfo76jArjIOU1Zl+L5zSThhBFHZrrWTg4QiEq02okbiaab6WPtowSwjjn+jLoYpKud/iQm7DBlmH4bzcOd2dp07bzSFuzNhJaeNARYRgCuLOwNyQFx5iM96BgJ0f8Jp1cfx3tUxHs1jWzRgg3O0aMkyFnhdV9FgnP3pAFkbkzvKXSdh4eu1uPFvfzr6+4nSqi6qugeVy47+MVVbpQL5hCjxTMGOokaG7Krd3i3vNkV++2XowrspW6sZkL6AObegwsBvbAI2kpFllyQbEEwsdDzGJmak0vZFnOp5PXWuti3TZi4uhHA9k8T+6626fn7zzb7/dEnlLa7aYZEJHdbjdNE7Aws6TM7IfDCYCI5JyTpBDbICKpjewRPvvuDr8/hTYP1qL95h7kTCJhEl+WCGgpXFt2W6w91g2+7n2M/9cphdFLAWaIws/j4ZNurAlqO0rHDCt3i4irDS7KCpyCcOhdYqhbbxSoiYjG+xsboZk1OsemzEM8tXWGZF8INhcbJlMrXp9UVxkY6sxGM+t8yAvNlbht7olMgdnGb+OAocgyfhLHD8OUiALDAwg0uB1wL168GBclIpERSGwKNKMORLSl9ChiXcU80AOhbhe7qZnRppAGjTqN3fq/7AEHNfxgq0z//QyHFn3tELMXm4o7XLnBVQsRDU2n8YzBHSemCEbF625Pyp4c4bXsj2vildzIjSACDS3h7iwsFAVpADUFKOf86nAAIKDMQpIA3s/u7kHxDeak5Z5fqlYBc1Unsf6Mbgt0msTRWI7MnNIpz3ci8lMvQpjmfLPfzXPOCUlYhL747tPj8UxEznI+l1qNGqV5qVW1Evw8ybzfPSGi8/kYqWvEXt3NQHB40WKUTpzEPZKujZlh1cyWZSFJpfrr2zc06Y9+9KPvf/c7//A//G/OPDNwOh6X2+OzZ08gwinFShLRfu90XACJeNn4aKkPA2/SnRXAqALfx78FKvYODMJBsCzLmJsxvFOaROSsVRsvUQIaqTZJvSg0EgitlBJOkKjYPkZaOCCIKOccKx5a7l8DqzkRtUhjcl/r0Y+VodkxRufzudb6ZJ7XLQGIRxhKTvGrWBBEhFlCFGp0jsK9T9VgmzghKCGq+vr163/zzR9961vf+v73f/DLX3y4LBVOzPndd979ype//OTp7vom5+wsWvTWcSIWfPLh85vruxcfLYfDs6c3PnumvN/v//APvm4KdTglZ3FKajCzf/ud2//3/+s/efHixW7Kd0Ln82I+aSmy3xM7ddFUMyNzJnIjcmo+/I35wkwpJRaYVagGj3RZTtRcYAZ3dOXnlvLxoD2gjH5Ke2g5/S3aUp9y6U9Fg2870nnTPRfHX1q3LTqH1US+jwy9e0UvYWEwtSTgaDeo2nBtJxgU0E7Xf8tNBwXuHlRb3ZrDBBy3+ldoDxHdZ2C8cTOf+snatphwNMPj4O/tJwww6QDkwkwMHhy6rgZ7aHKO6TD0DpgACSTUYiRERAnwVhiBsxDglFY+JOA+jKGxpdqGwDnuZLvhEhGEPcj/Pa4SJvA4EkDX9ycAhuaDi9ONwUYARs2GfqS7m1vHdt6TgdpPhhmDHtsMA33EZuONjOPHSLs0x+neaVuhjs06451EG7+18eybk2ADeJxAxt5VlpgZzi1iSdxE7BBk0e2LF4CI0oDam/9uD3sbrPqVs17/xtvnXGbX+9+QIFZ3yUblOD4L4AfA0BREt8EA9hb3JvRkQoAcniiti09Y5uTka6f1odWam2tEruIHgfHMtFbqbpgWlel1iZulGC4YCqm/RzYpb3ki6d7nY4wNy5+6ZKO7J5GZeVJtkpPMDE8EWc7FfbAEp5xnkWRG+10UUjd3IYebV9Vaz0R0PB5evdJXr94wo9QlpXR1tfPTgZmfPn263+9Vdb/fP33nuYhETYjdbnd9fT3tZunlCnKWWiuhcS+7NMiqmDKeqiGPoS1ormYjsSfnLJyJxKsZVSBKNqcOetFSTXql5tEp4VBfKwQUhBOd2Ck0iAnoNtYqRdNpDEULWEYwre1nkatmxtRWLurorlZ36JiQA+y5rzAhTNL+gqMiNiP0oJk36Cku12AVM7uTGXVQ3bKYOqJbY8db1FfKEjw09PMOEBWtxf16r9Ve+Ht0HRG1qKwZUwp+Wk+9u5/4NDrqfD7HP4UvFuWMxh8mRzxCoIVaq/ao8rCPscYb21Vi2KBTeaNtVEaZN6XkAGeWxsDUCAElESZiElVVujCMsCHbeGfVokl8DRxizkRJ0hA5BEwYS10GQCUiW+uSI4onMTmTC7XaVn3wG8Bq9Xw+A+Y+C/Lg2Ji1TCoi2u+vm95868nIAKzFWskNhjBQa9VSzbwU63YDOdiKn3UhouSFTFnwSnhKQqTCJkwff3yjdt7t5qurK2IXkaurq/1+v9sznFVtOZ8wyfUVE9HptAAcrucgCBubFluWhfyUbCKRCDaKZFN112majsfjzdPnt4dbqih49a/+1b968vydL/7xnxzP5+v9fr+f6/EuMZ4+fXp3d3e9m70TDYaipoiEN6jvzT5A3dbrEYAw+ip+GyWLI9waYyNCfAG63J25iSd7bZSKzXhrA6CUQkRBuY8pPJxzm7Z6iNp0wwoRxyoBAlF2t9xTBNGrpHjzOyBo2G+Ob9oBFJMaKXHUetmMk4GZyda96WIJYuZwXkhOZvbBBx9+97vf/bM/+7Nv/eCjH/3oL00hksxgte73+6989Q9urndmy93d3dWN3Mxpt8s8Oajg2bv+/i9uX7+5ud6J16rnL33h6fF4dz6+yfN0fXW9v5r310+vbp7w/hrz7r/13/tHb968+dNvfPPp02c3NzfH43mXdy9evDBzIbYukRXpssMrFAz81fxyPx+Oo4YHzKZpmuc5ekBEog4ZwCxCRgLxX8PQ+VWjS7/Rdj+nbsVIfPnh9qh7Ub5PQYN/lfYYwnnYVxehwtYIKxLpv2vw8eJ56a0m7MCNwyyLUGcT+bj/I/nVcftD0Z1HMR7ebjp/GiZ8i2PiN5LTRYB3A3HcwNpLAZbGOhB9SCzt4k5hqsLJY5dwbwGTXtQCoMhzABp3rk3JzlagTaBj/Bftt9s7bbkG1L+ynm3IvH1f9+1sIHzhIwHvUrozGH7u7pZS7osv0NbYuPntfbi3VJbWS3bBtQtRkYtoZO/n8Nh2y4yJaYRWW2+37r2HD3vPrx/S+Ml6T0Dj/gVN1CnqSRAwgo1Bao1j7tO221V+tQH124YD/1paLzG/Bfz3V6qGwSMQjcCEgTCZYDrigY1xSmZK3jLRiCOQbtLryFszvJkANw8hwbaOp9R8DxHRESFmaLhBrbs5AjSu5eWo+dq6/IRcxFcGZVpVTd3UeaPSr6pp8TPMJEkOqOAKoADISCkRNRur1ANjkiS7ZTHVWmsnvomTMO1PB2Y8B0s5Bw2bbaFypFJPO
+kdINAXKIiKwWUC1nzZoRMTIv2rZtW8mljAXIbrY39x/cA8Cby+vlatnGu4ioYEXEnTQJycgLOGaTy5gUrRQQFCKKHLwsKSJpHGulxR3eblfSYojIxCKCim45S4ymABA5BOIq0+SNRcxSMlU1SROrBoWT8sR0qHgxB2u+HeaKGaHPRwNDMHq23uJuS47iJkCoUKmY0+NTwYQJcZLaJ2BmUDOoKlLHzEzPFQGAEUaK85UYOLapBTREmp9WP4sBTRXausERMhw5QNZdDwEAiBig1E3oUBMG/93Eyax/YmYmAGiQDUjNj+EKCBUA6y5qgAhWADMiAqppBHOLj/pK3lqGCP6tIZBfpqEgomopTtE3MwACNw7APg1mAmDISISKIJLMLLqtCFZwI1AMzADNLGIkCmBgCmboLnmK4DZZOMlMm4GKo3Iw4yQJikhRMP8evWQnc9274cbL3cMwNE0TAg156FZdSklVienq6qptlm27uL7ZIWLXddeXV2d3VoE70yIqgbBpQhOCKpQqpARmUDz4UFEV5ECeCEREVZ+GZhYXVaOlmvXVL0hnX1rw1OUUxhxPhuPSnGpxmDh/yzZV1OeURN2MlUVkHMdIRhiIFDENw3B5ef14ff7SXZxdauYyuKpKyczMgdq2bdvIjGAKYhCmguHMqpnmGFYicVBjRFRTM2VGIjQQkWImqtLv9mm/v3/3DnFBb1xEQzTVkvJQ3H2CAhw0isSY7bh7EBWA3HUbfrtxYIES6OTKCDUgnVbNvKqeidKPSaTfGqH02xi/y0rj8/F8TObsztWcd+xaJXTSph5N8koU8tPBMxUTt2Ve1POmVI0EiZAIDKV4hzZ5oOwsD0TU367+dlsJZrrmw68M577Bo9VwAJPPsgCfj+cDfvc8CyPDArVD3mo93IsKc6XBWxc8PK0mbYaubz2981wGnCqBVWAJ1ESEpuPbI4FKsNKnT/BDUmZi2eBRm0n45A/+oImIZNUiJec85rEvJTmNDA0CkiqklErKqjpesMvDuARFjHG1XiwWi5df7mOMqrrbb8ZxbJqm6zpmlHFYr9dd1wUKi0XbNsuUx5ubG7OUztoYWkM0sCyacxIRUXl8cYNkDi8FEZCT0DD2IiKSAYAJQgjMARF3w2b+tN7pkQqPea9WEDEoAXVNiLEhP/HXq7spj8Mw7Ha7nPN6vWyaCEB9v0NEUXVGlZmYQSlChGJFjEFNtahqzjmlIUza6KFpTu6c5ZyWy+WXvvRbAPDxj39cRAI3pRSAst2mYRhEpGmaUopreKRUNpsNMVxdXZ2dnRFwk4qIjGMWEaa43+76fiilbHe9gCFit1ikPt2/e3+/3X3vd3/PT/35n/qNz36mabomtG98/c2T1aJtI4ClcTQz71MahgGBVEXVCFHEikHJVvKYynUuKsa52PUVvP7mozfferRanVyOX18ul0CY8paYkfo29otmsVycfvIHPvZn/8wPf/FLr/3zf/4rv/zLn7m6uQncdPGeRQEAVR3zkFLSYRhyyjmHEAjC/RdfGIYegMKCIKoaExITccBQYZUR0SQSYxaiZyIccuxHlVxSDeOqx3cbGxenNRcbpepoR0SSq4CHmpJBDBERwdSKeAEQEUtJisSETKzh4NkgImDoAoellBpKT1RK8iIelhnvuWbpzIRRqPLXhrXDEgC8D5Zdr99A3IlxEoPBSaLpSMHV80w4O3MykucmfLs4XtioaDj7N06HLCEhuWALVcfSw4ZizHY7ODVCI7SiBlaV92cFUSKcAa3r6CoaVr6fVXeN+kkdpQMlIDJEAJ4+FfnK8ttJSEaikBkQiUUDIs0mCqBqQDjx4xUNqhwlghEhCfQKSoCKxoCe3yKDcUzgOrGAAgYGYmpmgYup1T3Sqj8DIkgWIUU2VRD1fJJyiCVPm6Z5WnreW8wwGIgKiIqIISJToMA2yaKIGAAUKwB6cnLiBphXN5cppfV63XQx53HYj9y0KaVSBkYGopzHk/VSSm7bOO5Hy0llZBDJQx4zd9HUKYfYEAO5sTIUK55LB0Rg8n5sm0SlTRHAVKUmBwiZj8WclKBiwmOW7zFmMDMzPUZJfg946oavh5KaU3Mr8resWlQzAu73+34YdjsNIWBw8R7xrIhINjPVAtAiTsLZfo+r5VAGKmCAJGpFTJgjETEHRGZlAKzHXrBAoJpz6ZFksYwgXPox5X0XGEDUkmriYCGC43Z0Bhx66fNIssUEwI003vlUf/e6iw9v4wz222C/Wn+b4d87YsL5z9/ht0eU1Ipa4eiYr+Ooylcfr84Wz8LU5+P5+BbGnAw6fqRmasCn2a2+hqmH+biIDXA0D2U+a1ANGV36C9y0iRSMUCch5aem7u2q4LNo0OZqYX3C7VfAmnl8vhyej29i/C4xodWT+LAGpuUwLZi5pxUAiWq5z63pn+mBQjioZKMLj00DRGXSeCMinOjVdmTAVl8E8RCITus6xLYxk2jRaxgEWkpSEXKzYz/hVHPOpRRTLOPSe2/UqsiemQyj/cZnvxybYGZ9v3MwUENYta7rvORycrK6d+8eMfS7fUrjyckYGwaAto0xRlRGDKHhzX7Ybjeu6t40TRuaYUwnqzullJSGnPMo0md16u2d9STegOraJBTICMml9xxYBJqz1Jt9r6rEYXVyCgBt28YYcs4Cc1+NIGLJkEcRG9brpYxjP45YRfBzKUWLrFYrItrudghw9+7dxXp9eXX55de+eu+FB6uTs5vN1VjKOA6IViSZWdOEpmlSSma4Xq+NrB9HIEqpXF9vRCRQrNr0FCPzmMVEGUlyEREKLJQsl/3Npmni11/72kc//JFf/7XP/NZv/tYPfP8nmOJ2s08pMLrUAY6aVDXnAkCBOSwbAlWzPOZSjBBTnw2DKKxO7p3dgeXyTsoAu/zCSVmdAID7UCQiPFkvxqyAsYl9c4YPfuTj3/e97//Cj/2x3/iNL/zmF7/42c/dMFGMMbQR2nURyZqdjamGHMOf+bM/8SOf+tT5+eWu37/11luph81mc3FxcXl53ve9mbr6Yts0quqojAHdg0RETk9WdjRKKSmlPKambeeqIIhhLeKpmhIRgjrXhN2ZSAAAGVwEzQIGcq1pQF40OPXj1aqgq14bzZHWLHDCzMNYgNDUCS6IUyFR9RZRe/557rea2N5VB1VVgRBlLjNOMM+yHTc3ar2wtm3rCkeGqb8OEczycaff9CtvcZzRjMywjaDxDztvTVbtHc2gMgd1+giI6HAREQEYSlFVRKaAUzvckcedmgFQLNU8AxTUpqY0cqkaADMgQEVUI1NSLfVz2XSd87433VK3ClEEwElF02V2BB2TKxFRcPkZEbBjixRTNAMzAlRvRzkSpUMRJ0FN5VypSI8wKLqsDvv9F+kRxcxMBBRQrQrJFEPEQBRbZwSgm/SkNDRNYOZ9vyM2g7JYLs7feLRerwBgEDGzrmsBIKW06lqk+N5XX9pvb4b9frlcDftRUu77UdlEJBetZl1gxmSKrOIHiMs56CSUZ4emtZlTJKgs4BjsILELBl7UhaOv+/jUmc6wo66bo9qg9y
8ggKqKaL8feUHAc8FZcxYR6/uxXaaWOyIqRQAEwEyRicwspYF7Cy10uQFtMTBoTc2wc9QEDBCQTA/XeXSgIhFwMIOc8l41L5Yx0iJHUhsNokFOuc9lABRmIobYMDNXNIzGxO6/dNTX4SxQfSfgZBMdTe0ZRHcUkOK04o80Bo7+/PgPbwvXeecJwKH96ilMCMdXVcs1IEcVj3etNB6F488iT3hHpug3M57H09/e+MNz357W5/Szw4yOvNdAEdjX4xEa9CcjsPFxXQJv51w8oUk+F612qqubVOvUMAUAVdgJj6R6qnkgHMnGwFPT2Ikq028JnEE6dVKB0btxev+Ajz888+ePzvgWMaHHFc8WtAlMDx5kMB9nAG6xAr4b+w+GM1/Z461b2jM0Y8UKcKYg0qptnNcY59P8NqVrEsmb9SMAIDhNzkzJkAMqMscFNyopowkBMzMBxM4rMSHQPdWDqYCZOD482W+JSERSHmqtwN2rhURkyKOkMpTd5aYvpYzjYGYhPjZR1dJ1XdM0rnT/3ve9uFwuVVVkH262Z2dnL7+8bpouGcauO1vfBVRHBU7+WZzg7GJPjIY4puLdOCFQC22POko2G7SIiAx7LqW0bbtarUIIABmAiOKiWyNizrmU7DcrJ5GS+/GJf1Jv/ar5MOJUJKWdF2T3fX///v3Pfe7z5+cXH/nw95yfX4ZA280+5YGoAslSSs6Sc559yfb7PREVke1mXzU5mbuuQ8bFYvH40ZOby6uu65w7wUhaBFRSyl2MTx4+Wq66Ljaf/bVf/56PfrRtYxpzyXmxWKzXJzFGJxB3HeactQiiRWYV6FogCoj8xltvx6YRxfX6/oNXXrh77+UQ10PKRFbyEEJghqHfIdPJcr19+KjrlrkfpcD69N4HHpy8cu/7f/ST3/3kycUv/PO3vvSlL33uC59/fP6IQmy6tgmY1LiJCsjNou/7v/Tf/x8AEIgC0faJKxhttzdXFxdP3n7rjddf//r5xeOby6txHLfb7bbfl6yllP2+z/t9w0Gqmg0Scxvisu0EbBxHO2J1ikjKuZTCTUSvGwGamWVx8/W2bUzUNT+JKHIzLc9Dj2J10gMQkTY2MFNDodbWmVk0z+vnqTD61qI/0hSpLNMpU18XM9W4zw/PygAwY6ZJMNSJNOYwV4bRpx9Nb3uYjZXgWt+uitmg/zlOAS44y9LtG5mYQiBAqf7y6LuFu6oBeN7Kz+4ZmIEiqgoIIAYR8Rvibw1qUl0rELGiLCM084YrA5rwsLNDqTLsVXPVvwExEDW3nSJn+hkAVCF/TwARQKx0iWoboMpMBIQR0ACqdcFcoSXopu/X67l1c2QKYCDZb2GDCKZUxBALUZUtce69z7LFovNbQdwCQNvGUlIk9hKlg0C3sgQAQBnTUL9YgLN7Z+v18lOf+lTf95/79c998YtfamJ7slwNQ1qtVqtFm1P/0ot3v/sjH76+Ot/eXLWxSSlZsZxlL+M4jsOQxnGUourtCAbWTKJBIArCNteOok/qAsTsVBACI1W9fZgdSqZzWsPLd/PJMU1sf5c62aweH2pW9YrU0Mx2u37Vrduma0NkzjL2/Zh2u/6ksTSW2CgzQ8k2+ZoggxQpBYpgKeTaPH7IuWF9CAEgABQDZgYpBtXrVWe9MSJPCQAilJJT3msZAQqxpLFfKBgU1aJaQAVAUSXGGNztSMS5w0gmmiN6zkYrzEJHTepyU0dR5rPlu6fU3ud/2jNPm4qEx7YE9R4f//mUT6m/k+MY1xu55tc8KuHeLkv+rmVOn4/n4zCeOeNwakadjgxnlxjOvHQiMHL1JyKS4kqG03E5bTdco2KqKY1qE++Fepo6s3Telw5XcvAGPAqsbxkGwvETJmRIFRlWLZv5t8+1l56P33lM2jC/41Sp3Tpz+7WZp7bf6Tngs3pOw07ZyXlz92QKGSFomWhoLgUwFw+JPJ/t9C63oxcRED1mOcExJgQX85dZZw4Rg6BHalis6Cgu1kFobdti7fbFMomtE1kpIxG50ULRDAhx2SzC8v7L9xxulpLxWHBSOJfR+X6gNgz7/X6f8ughmgucFtA07MdxFJFHFw+Xy6U/eRzHxWLx4MEDjw8WbbdcLruu8/ArxjaEcLJaGkYKSkRNiIg2DIPkLWODFEU59TnnpKpEgEiLbiX7fcqq215NENFfkxnNbBiGUpKr+eWsiLZPGwBwzl7kSl1gpNaw73MkXq1W/TC89daj17725oOXXn35lVf3/Xjn7qmMCdClqxgIUnYNdB7TKNebUsqu31Pgvh/NbsysCRGnKlDTNN2ibWJYLpfT101JCmGIHLbXV6vFogn8gz/w/T//8z/3m7/5mx/96IeJyKE+ESGwlCJizLRou03alFIKa8OhbReh6czgve/7oAEX4Fdu8oOXXt0Pad+nEEI/5H2fSikGcnFx0XXdyy+/vL26HsMwpJxGWa2uFt2qaZYxNB2Uv/BnP/GnPvWxN9/+oc99/jc/82u//tXXX08Z18uTbJnCQk3/5S//yn/6d/6Tv/63/wMQgLZdv9iuEe6PLwIaRAYoAAYl2bDfbDbn55dPHl9cXl9dX988enJxfX29eXJxc3Pz+PHjq6urYRjMjJmZsOs6TwT4bI4xcnDhjVyKiEgkIqY5AU9aI1ed5BkBQIsUAjNzorC7RjiucmQIR8sUAUy1a5fzEjqEzuZ+ub54qwWuqqopgFaRYU+8zMV9qLV7nYsOCGoWqjoouC2hkcPRoH5Z1T3gUOexSQt08ul2C+8ZNcIxegRQwvrxzUyxugPXDY4Q4aAN65+iGv5hZQWrWkZFEQTGSc3Cc7iTM2Hw/AkA1LqnkQEwBwWeSlIIHv0bKhVANRBUcZsTA1Cr2nKE7jCpVPGtmnagEzVWTUBMzG1NmAMFDohiJppFRUwDLdQUnc5QvxU11UBVNAUBCaoKgqpyLIgCgCpqZsTRmxTJsqRsZsG/TUlWRtQcmYgMSRDUxFSLmKrq2enKv5oQ48nJyfvf//6f+qmf+sQnPvH3/+u//w//4c9+9Suvbbd7AGKkNIxNRDC5e+dE0qbsKbK2XYgUEXHAklLp+37ox1TEnLSBmIqoaimapYjkmca8HbIHOuTEK1c6QyWiqkSGOve5wVSQwqlN3gM8hHqP6xwDmMMmOTpaAECBGNQM27ZtukUICDCaci6SRhYjJso5l6IUiQiKGkN1SVEtAAc1Wu/sBDBVneuBZh4XwiTUBNORZi641/e967n4gjYTNAFQM0WUJvBi0XaLhpkBdfbeUDXRzJpNIyiAFm9LnLXCzcQQDcgdhfWWuF9NtdgtKHjUlwvmijJQ75sCwhFTFN9BKvDWI89U8CZjADA6VAWPcWaFr1bf7oA8D8PJpRPIf5ci4bsPnCfIrQefjz9k49uqLDm1ksCZaofT56hYXTOY5sya6jh/VP5Q9d55Qw8F5z+sPzpvBsGqirKoVFmmqTV3Ou9qM+J0YdOb3IKCRwmUKspyQI8VGdaK5YGGPS+Db/3+PB//Zo3fLn3wDnaFR+DwHX9VOwlVD9oTnpT3gNFLhFNnBwDMi
hKmM7vLNKuIiJFB5YpOcWT9wyOy6JRb8Tc5AqKI4c23n7gcHxExIDFEDsy07YUAiCjUEgoAAAMjjGSE6no4SkRISExXu6uaQjZDNDcfBYT16SolyjkBADMvz7qX+EVEFMmIOI79OI6IWHIahsFzz/v93kXqeRhSP3zj4eP9vm+aWtJxBlsIoW3bEMLJovVKRdM0y0XbdR2A5pwfvHBvsYAYdRzHvu8RzOuQpSQDjjEq4jBkEdmPxey66zpAdTODxWLBzAXU1DgsVFUNcsFcVIuYWSCi3QgAoEmMU0oXT54Mo3zkox9cLtdEgamNoTSxK5JEcqCmlGQKTddNsJ+IAgBhQMXKK5o82Qoz5nHo+36xWNDEP2KExaJbrE6JXri5udlsrn/oB37gta985Zd+8Rc++pEPSUkKttvtSlYASMn13+HunfX19bV7nZlZ03Rtt1TB9cndz33h80+uNo+fXL711ls3+3/62tfeiN3i05+8e3Z2llICk/3eJJdyj09WL0gpFLuWDI1vLrYA2zsnZ5bzqG+Ukh/cg5d+4vs+9aPf/drr3/iNz3/pq69/4/H5zbbfjYLt8u7f/a/+7l/4cz/96vf8MdgOZdGZAREyErisnwqo4OrkdLk+fenlDwIBh5rME9HtdrvdPpnG+fn5+fn5drd79OjRfr+/ubnZbrfDMKSUxpJF5KTpttstmjUhNhz85IgxppwRsRa3zZgJ1JRR1RAJmFQVAb0lzXvMplDV5SvAVx8vGiRwCdDjrA4eLTycSKQ2ZVIAwPBA2ENEE3EZEKjRO5IZkkvUkJqaAlWtHVRAYp52IoDJ3wARVUWlviwRuvajmRGB1jZlmzEhAMSIZkYlN5PB43xJ/hynCTqBHQEQXeYHiLxfDQFUwBVnzBSLFZpQLjGYNoQ0GcrN+WMkDIQe6fuL1EJMQPD42aY/8SoNIpkWQzQ0qrkwQ7IA5LEDIymqCSACGTl9iRCNkAxMA5gzF4eKgxA50ExIRRBCg6pwLghsZkxIQfxGlayIGIJGDogIWkwyB+66CKCLBmHJbVwtYkDEmjBquhgjN5GQKXDTLtq2VYSm6X70R3/0bH3Sxubf+Xf/XVX4ez/z9//Vv3pt2cVxHC/PH79w/46WfLLotoEClogUQ4isCKwmHCFibCNKMTMkYkPOOZthkYl9oFU9bDvspgmIiAxmaioAxPV8maMlg4mP6enJirPmjKY6qp/+KS7qcHyaIbJZAUBvqDTDYUh53AQqJfWBCYy4aXLOKaWuXSCTlhExmKlB9b2Y14vVGkPNWap684SjKZ+lTBQQIlE9CBHBBEoxMGcNM6IBAodJtBiRAwZiAAAjwpDHwcw4+P7jTRqeHvLsgHd0KAFbpcSpJ2fhGVg4He+3Km8K7ttJeICLz2JCOMDC42pGfYSfKTMeMNtRQKHPtEXd+tX0xR1TjHR2Of7DSZR7Pn7/xxF4c9kVnOe2wVFYjGRoaISTn+3hEPQY8fA6ZggEBjpZUNjUve7Az9NYx0eq/9fgVlJmyi3ehoKHPIsnU56CgjDVCecn4a3nPx/Px+9yvBPww1ooqEaaNrk+TKmN42Tc3FVrnqCf2GNmZsQMJmY0WeMCARqpZDFzWVBX8rO6gqbQDgA8HK3voTX7SUcq7qoaPv+FL7mtlif4CdAV29sYQwhd1y3aLkZ20BUMOYjkJCLM3LSBAmYo45jUNAYOgQlRRJK4poONl4+diYeIhmZAfUo5j0QUYwTGZtE2IQAsT02YuW0Xl+cXpZT7918Qkc1mw8xpLOM4WrHZD1rESimi+uRyb1NrGZjMgjcNh3v379y5cwdU+74XrW2Np+vGeW6V6XpycnZ2ysxj2ntRqGlDbJbMbJBKKdw0VW7ESzqkBEgh5DEtl8vdZnN1vW9CvLzaL5Z3Tk5f2PepbRdjEuImhACF1ThE5tCklGKzaNplCCHnsVssRY0CK4galrE4HG2bZhzHfhx2u03XNQ7tVFVM1+t1vx9ijAZ65/Rks73+2Mc/8uT8G1/84hdeffVVZDKzK7kynaT8RJByLrmJkRC3+2FMRRQAQ39x/jN//+/9xue+vDpdP7nY8sXla69/fbvp33P20x//+F3E1guV+/3u0cMNIqZhLKUwYNd1ViAQg3JJCcNVzsN+GKWAGN+7E/7kp77vx370hze9/NaX3/jaG4+vr/tHFxd/53/1H//H/8v/dfPKe4yAEFz6ou7qxNAQ5AzMgHFaUQhggsinJ6cn69OXHnwIEebMour1+ZNhGG5ubi4uLh49evT222+//fbb19fXv/Vbv0lE++22QWaiPCYAQIPlYhFixMoKLl74B1Eto09OD0NdCzMYalFiZF8wiAYO0cBEyQzUvPY2Z1lmQacZehmZqg7+EQ08vepPMFdMRQTvoCciIrcaJ69XgCAgGBqimiGQFUC3vnZ0o57ygcCtqk5yiAhGAqYqjDwV/urhXVU6JTsuHabKjC9tXxTs+5ALcSIiYCQ4NJ4xEoAZIjBRnWBqhcBLYYRARM1c80EwVA8PgICpxrPsFUK/YIKJxAlCEGFS9ldBVE8GgwKoFjQwohhN0RAhMBih+38woaqiimadVVtdhKTsL6waN2CgiIhKJuTSTZUE7oDe/9lEIAIwEjZv3Wxjg4ivvnw3l9ENchDx3r07pRQOdGfZmJkhMXPTLhaLVdMtQmi2u34sstvu+zG98p73/tt/+d/5+m/91pc+/8Xv+shHf+1Xf/21176+3+6GYehi08Zmt705XQVGQ1C0AkZkkNNgAplqSRi1gKioqcvrEiNCE1iBzeJM43x0MRIRcQAgVS2sUiyYzR6icwxnZmh+fBwjk8pvtuq07p2ZR2HfNI7rdar6+Mk5kwYSK2NgTWMiot1uCLweU0pFOyDnfakVMG8aUrUiUiolXO3YeBoRCUmNAYWoamcT+RyDeoBZWYRTwmwqoKgCIgaiIMqe/ck5J3FX+lK0a2NKycxCiMDsW3rNg6gC+P9wuht6FCEqAJCRIkzthVQZ3zMre7obAIQVuXno/CwmPLY9PA4FnqoQ6q0vxdfmrF2FczHw+L/PMlqPL+wdY157Hgo/H7/twGfJxnakMur6Xnh0QHskCgai094+9Xcgsk66iTa5H6lU209EBApUyZy+/yOi9xf4e/kfTlgRqPoN2vR8PC4YwmQ6j0eUUfCjqf7wPDXyfHznB067sU3lQbq91VciJ5jMLbVTBmRSfccpTMIqIa6qFBgUULX20xgAGIKJFS/pmaEWKSJ+iqrIjApngmhN0zDM8g01llMN+1FhHBETT/Rux5fepEjTMeyDEU/vtXPvkCsouN+01+u8eYmIQnBHAGyb4NcAYKpUsAAAYFRzmYSgKmMuzGyG+2EYh7JcrZk55VyKxqYLxG3DJyfgi5mInFZXP55hKaWUVLFiTikl18UZinzj0RMDcQScc7ZSculdn8NlLU9OTu7cudN1Xc6j3zVXpmmapm3bGCMHuHv37mKxQsQ2RCJCNWI4PQmI2Dbr3W7X77fnF5sHDx7EZrUd8m5MZrP4nvjdSHlYrZYPn9zcu3fH
iHaDFmBqFt1ynXMe930aRyuy3W6bEAMzEV1eX1Hgpmn2+71qaZrm+voytivOnFJarrrr6+s7d05DCN949PaHPvLBx48fmxlRiDESxyGNRHR9cxVjrMJ9QByazW6biiyWpy+956XLzfbJ+VUpsFiF7/rAB2PX3n/w6n7UtouQNWczbC8udgbing1KtN+lUgoTmeFyuZRg7aLhGMYsOStCiM0yNidnfXn5hbv5h+NXv/b21994+/L66r/4T//OX/+P/hfG9xEx5dw0wavkHBhEoWmnheT/RfMeoSJTahJAve0HAODspQdnAC8d+grMI9M3Xvvq/+R//Lff+noKCmfL9V/+7/2l7X73xttvXd1cX++2qWROqd/tI3Ek3u/3S+4cDs2VdF80TYzemMqAgJhLJoAQggUWqWaLfpjlnC0LRTosFO/WUwGFhmfr7YmlHQIz92lkM1UtKk57ZWZkxsJGTGguQJpKcVZpKmW1WKSU2thoLsioRZh5GMZqzF25ry53EwI3ZiaaVZ36Xe+TltnHbDZYA5ibkudYfGpQbIJD3ACutsKBDBBxzCOjjeMYG46xBdCmiczcYHz55ZdLKY/PnwwyglpkTqlkSe1i2UAYx9FjCa8cllz1hEHUQEXETNqmA1AxAZOAgSctVgBommRmqGaWCBFbl/zR/X7PxE72TiUDgMWAFLp2Q0Sr1UpVVDMFRsTlsvPPy01s28Y3rhhj27ar9k7f93fu3Dk9vZNSOjk5OTs72+/3aLDf70Vz0zQi+ebmJg2ZGUC3oCgqWSGnLvXbMetmu7/e7BHio8cXZ/fuS4Kf/bs/8/Abjz772c+u75/93M/93NtvPzw7uYfIu91uuWhFNI8pxhAImSCQjsO2izE0IYuKiqoSWGREAhWTnMSmAtFEdvUSmU/AGJiIpRhARjTCoNPwfMTc+80hHDFVwDGbgbVt64m2Ump7LaKJ5NmlAqtAGSNgKWqG/X407S33XUeMBBZ2+zQMBTECoGe7QggmCmjjWH0sc84lB0JEZMgFmEvRYUgBcxODmRUpMmZTEFEVGMc9cmdmMUYk1q0NVsZRVdG9QyhYLq6zghzb09Vp4KaJC8aoUpM1fd+DSO1i4qCW/RwGAFAFdOVatwV05W8vXxsDSNWHKlPBwc/kSqklnATinHfqUM08bBVEgENyl+AW+RO8JOl/gohghKgwv3vtET2UNGql/Wmy6DEmrJTgubBoIIS+I9X3xW8uIK6EwOfQ8d+4Ub9xrbQOn3Se4awN1jUzigiEWswbFbyBxQMqYgYKfgzqzEkhAkTRRMjTDo+u9IbAagpgCASIdgRHsQaBM0alaR3VUrlzPeaewyr/Vn87LdMqwkRHk/+340s/H8/H7zgm5ReeNPx00nEgRFBQBHYFdDjCfjDVAO3A5/J2G1ERUKU5deF99SKHvkFTNyS0WYGPCJy2OUUFvjj95+PBzADqOixzYEBEQc3VaY7ODAOo/rp+qYqVBgOIeLnbeAlx6laq4HCxWMyhZIwxxujB2b2u8+c4boyRY4zMbKoF3QMAEAOAexBgkmy5REA0dhQ3aPEoJ4QYOZhSqix2osCKEpvYYDNBVlNVk+L6H1aklCyluFdEqZ11LCJ93+eclejiZieXN9fX1zCVd7yi6Z/LzE5OTrquC8RN0zAaIjYhrpfdycnJ9mYjkgkxCzXdycPHV0hpzqA7BgghhABmfHU9Ill+fBVCQLTNNoW4utnsVVWKiJiKgUgpykhZDAPf7La60aZpiOh6uxOR2CQvZ425U9X90N+5d/fzn//s/fv3H7z8kplJMWJWVXcB2e/3zAyGqWQzjKENbUcYh2H/kz/5Z//UT/yZy+ub//y/+L/euXP3r/x3/uJqdVL666qtwoIBNZeh3+/32yaENoYYo5RSxsTMsQ1Ai2KNghpB12FgzVlKHrRIE5oY4joEev/pRz/0ohjs+vG1f/H3vuvP/Q/3++1ytQAQQGAKw9h3bWdTYl4BjvORPFcFHa3NbXuVNVl1WWBip7zvIx/+m3/zb/6H/9P/2d3VSdd1f+Nv/I340kt2c3Wz237jyfmTy4vzy4uLJ+f9Znt9cfn44aNdv/dK43a7zY7kjUxxsViWWADAb77k7DIqPWSnwFkRRQ0hLJoW205KUVUpRQAIkMitNUih2OQJUDUhwYNfJLdPUY2mvk8gUYydkBRzn3ENCIUU0dq2BYCuab1OGIixwfkRAKiiLlbn8Gazm1co0dzhCEfG1rfoZCklOKr/wGRdWJIB1EwSQ5gQr4NbaNuulKysiHZycvKn/tSnX76//t7v/ePve9/7Hj169LM/+7P//F/8wuNH5/funVxd3Qz9wBya2IhIyUqITNx2LUDJORPRctEihpyzmbRtq1bMnK7pbgHIzP3+oW8mXYwheKHMEPFk1TFzjA3HEB3btW3TNJFfdkDon5eZRdV1jwEAGUIIiGhQfUfb8KKIvPzyyy+//B4tst/vt9ttTvvXvvLV/X4/jj0imuRxHAGsaZoQkpmlVMYxiVIW2Nz0V9e7zbZ/6aVXm/YkD/lXf+XX/tWXvnZ+fnl1dUNLfPz4PIRmHMehH9sYAWi/G++sO1ADVJFsREQG6K7z4v9z9R8VKEVSUeaIaOheCk6ZYCainEfPOBC5Nwc4HbmWm7H+DIiGqETeqm5Wc+qO0s1MtEx1wplE6pznuh6pasGbT+mSNRdjYIWgBmiaig193u72bduoQslKgYlInOFM5ml/VFABKd43SDCVo33LByCDggBZ8/z4JO1rABBChyAIgsAhNMwsJeec2WviWcYxj2MxY4QIFur1C6oqqYoJsjqanHo2TE0Bi7uMTIe3F64BjNBUEabOvXrfcOrKuG1dOHU61aLiVCeEQ8KYMRwKgAc6HFUtjarXf/ya895odovwdjyeLfo9pVZKv9Pzn4/nYx7zEYzz/z818byK4AbIU3veJG9G5O7b/sxaJwSeTL7ndiwScGFBp8zR9EZPtQXOfYO3ACEAgFUJbpyVSA0BK10cfM0+TTE9/ozPMeHz8R0ZBLcNbK0qLaB3roCjqmNaaT3Onp6BiDg1+XvbPE7+wuICLaA2ybkDmedUbgXJxGyT55aPuVYP03nqj9cKoSGbGRlNunzTmqdgZt4oaEZeZwMwyw6ZJtG56YMR1aZ/pxo6GimlvGUaQmjaGEKYHdUcEblMiz9/uVzGGM2Yw5hLbgRjjEYRCDhQE7vdbgcEyEwKpYiqMscQeLBtjR4ACCZ1e7VAjCAYsYkNVUnJMzPTEnEqnnq4k3Mex/HBK7Ubp/IzpWbQx90gMgAMasVE09iXUtoQSikvvnAvD+O9e3dO1uvtrr+8uvn662+eni3cntA/ZtN0bYsxAiKWUk5OVinpMCYienK+Gcey2Q5ERECAjWDRnEYpZBDaxTCWIKCq+z7FGCmQAlqu4DwVWa1WMfK/9SOfevT48Wtfe/297/sAEfX9KIBFS+AGUdOwLaJmqKpgZKgkwC3lnE8a3g+7u2eneey75oW7d07Oz8/P1o2qFElN06y6JTMPu9hsIISgJZc0imZuqWl
YKe/768cXvfOHl11HAMN+6PudqrpryOrkLNLYttItF/bk+vXXfmn7//vgH/+hH4LcFymhWw79GNo2g90ykL21uKaqNR6xTKt/xGHZOG1PVVPf/4W//Jf+7//l/+3zv/rroPZPfu7nfvIv/kW8c+fs3r2zD7z/46ZAlZYGYnazOb+6vL6+fvjw4cOHD6/OLy4uLp48eXJ9edX3/Xa73Ww2br+pABQCM6+aZs6piIjmklJKKa2WS5st6dXmCcaRnPtJkxG2h++B2A9EIQIF72UCMSKX6DZA9b+KYJ6j6PvdsluAFoQCZoFZVQNPXfnOOQckDMh0MoniKFb1lKmdA8Bb9uDW3VOdDCSgbitT6UMBAEsBI4Dk5m2EiGiMtlotRCSEjli7rvvkJz/5vR++u16v73/0/a++evorv/KPP/l9HwH4SLtcfv1rb3zpS186P39i1hFxbJgpmhW2XlVBEwChLQmJsQjKdpecst4tWmYm8nRSXDbvdY/TZbdomoZrucPW67V/KVXzMwYXq4xwAgBt26q6UQOmlChwzY0VG0qeVIKUiC7Pv7Hb7cwsxnYYhquLy2FIRHTv3p1A3LbtYtF2XbdYLBCBmYkGEUMaDTgnEysUYoxN20I/lpT3Rejttx6n1x/ud30IYXuxbds2hrbvh5zzom2JOARHp+S5KmWIkQhBtcwQndAUsO5RWcyQmUlNodIfgllgLimhmUzJLAMBQAJGJODalucIhwgioIBCZQc7vxidQaqqNSuA6nTaeXuvh82UwTFVQ8hZchZjIEAVM8CcZbsfNjd980InSmPOi9AQhWSJDslH0+rsWnIWjkFS7wrSORWwhGiiRaGYeV6/5hyx1pgFjJBYRVSBiAiDqwAzEVdJWTTFJrREwfvPD+BnQr+mCKCuWwvqhhqmoOb9i2Y4uy+CABCZ9xM6w/oIFtZKWi3QIdKBjltlP6c63kHB5QiMHV5qJvEqAJgd7Yq3Og+tUtFdj7GWB2/3UHlcjP61EuLkSFj3jAO8fI4Jn493GnNv3vTAkRI+TAeyIYgZg81cG/S0J5Mz36wyluup58R0MKh283XF1Pdy+bRn0OAxJpx53QcKKCLXol/lVLsKjkFd9XQ0w58JM56P5+M7MaYkOwHYlIaoLA/Pwx7CVsVJwtqeegVEIGY0AC0wdSWB5yXNwA2v9IAecUrBQCWjaT2kAIhIJ296m4rzTpCEmnk5yF4AQED0XCqYeqqGvHyBVAWBGZ1QVC96HPySnl5RNFVyvM6ZkqlazrqXgZmJRvSz6HBN6JxXAGBmZ5wCwJ17Dva46zoGVIWu61arFTNn5VYKonctYyAwA/GGNEKjWRdBDUyLfw0KAFwOeW4tlboAQN482XbWLcps1eCfxatwqmpGoNUuQkve7TYl5zaGy6tzJOZGQ9ulnHf74atf/erV1dU+74mIOTAzYUWGzHx6esrMd+7cWSxbd1e/2RUAfvPti7Zt14tl1zUxttyeMCARpHHcbG6WsW0WzfX15Vhyu+h2u8HJq00TkhCOxcbxwYMHL736/q/8qy+/+fbjl156CTmaQtssXUQH9KQi2yIppZxk1/ewH8/O7m63291me//F7uWX7p+sljLuNfW7/T6lpEWaJpTVatG2BtI0MTac+tTLkNOAiKnwdiellBIepHFkRjvhrolS1ARMdci7knviMpZ0dfN2jFER2q776md/+f4yvPI93xOQIA9d1/Ypx3apR3v08X5/IDtOBwjU4MVhk02VemAwZFJNWvL/6N//9//Dv/0f5Jz/wT/4Bz/5V/4K9CMEtMjQ+LevZAxIuF6/cOf0BYAPwyFND33f73Zvv/32frO9vr6+vr6+uLj4xltvP3r0aLvdvnH+KA/jMAwgEpsunAQ+kNAqRRuOMjLbcevrjZldn8mhCEHtrwAwImRyx0wqfZ4+uDIYOJmOrG2jFGobIIiRVEthAkKX43QWuGVVFSuaNSMzm7pvXV3UlUgg2az6DQJABYjT+kW85U8IAJPvn5du/FpAAU5Wa5HsJ2vOedztck7/9J/+04s34na7/emf/unlevWbn/vllNILLz1A1Xt3QtdKDMLYE8RusT45WapC6Z8sFosYFyGEZtE1TdN1zWq1Ck1smma9Xq/X69i2fvHMnPqrMLESQFFESsq+VHMWlw4KTYwxmlkxzduuFB1GGIYkxQzR8R5QzQRt9/vdftv3/Tj2IoJMOeftps8JYoSubV988cX3vfd9d+7cWbTtYrHggKqax2EcR6en5yTDMPZDVgU1BmhCy/dX7eXV9uHVk7bZbfohhBjbrmkaAQsYShYA6rolAuckbbtAZubQxA4I1YxDVFUzYW79OAAjQFMBEfUGTjM0y4CsAqYoxTKKece5iEJlgyCwgxMyBPDYS+ddTtWVHWYgoWgAKkTB0xI+J8gpYVhl5auTis8iAwNRNRFjQjMUAQQtgsNQ+n5UIBWTsTRNBHRVp9qM6nmTnGUcxzLmEsI4DDlLKZpzVgmAqpYNi2tiAdAsdeMnXykSET35qKqmBVRCCJIzAICa10ubpgFDr99TzWkwIPtP5v5OE9fg+GyGukz87JzyvIpkniuZdq2K0/Bo08LbmJBv1Qnr9vZUpcX7qGbC23EzIVQ2rz/lWDYGDEAmNUV4pvPwm4d53+rzn49/U8ah4RVhyuZ4594UziEREkABIlQwF7JyKW1VJ7QDMk7Ek1mNDQIBIHk9kJySwGBuFEqTT+BxtPksRMQp1UIIhIBHQjKAtcdkOt2+LbPN5+P5+KZHzeLhoc2ewCUhnqkcVsRYMeFRedB1F6dGXM+bggoAYBFQrWL1gBDUlECNCPxM8he4pT52NGYcJCI4rWQ7sisLWZKvE7Mqo01ESigyJafBACo9CAA4RoADrpvAJXqZyCOMop5jRcOg2IoYugcYTUI3Kuv12hCLp6IVcxIqAgDbb1y5KV/TNGYmxbqmWa1Wfd9XKb8YmTlg9M6f5syYOXKtyEVGRCSAid4ZCEwACGuHiFEtKaiqFB1l9I0pGs6AsDZDAhliE1syCFE5YGTK5T6qLLrG9Lu2242WJDmN4/ieV18updy5d3eUYrVlUUpxp3UQha++9lYIIX3payenK2aOkUWkbduf//lfXCwWZyenp6enJ6vFou3atg2MIrJcLmQvut2E0MUYhyxZY7RgagwxcJvU9tv9zfbri9Udbhdf/K0vt92q67q+H7vOdttxvV4juXGYiZhpENW+Tymlm5s9EDmz9yMf+nCMcXNzZVpkzCBiImOf8jhssAKSSBwCNU1T8rjf70Vy5SJKGfqEJqQmXQQVE0WEyCGNw+b6hgIVKVkFEVPOw/j2P/tHP/Pj4+7lT3wSsIGcYmgAptrUFDHRBOZv9w9M072GQV45nLtrwMyatpWUf+RTP/pDP/zDX/jMr3/mM5/5B/+P/+rf/qt/FcqITRBnXyEFZ3ZTACkAFZwhM4QAy9VisfzQiw/qyvQ3HYb9djsMw2bcXV1dPXr48PHjxy55+uTR45ubm2G3TykNw5DGsZSDRH5YRIcrqJ6nNwISkRijeDSsIk
WKFq/gtWEZEBVEzBAkECCiokreLztK43bRdl2Lo2YmCcQFnGpsxFBnP7AijeMIhpWgZnQwOp0yuPMVeoLWDmpv9Rf+/9m3IWQkCkghhEAR0Z48eRIC7UAXiyYGcvD2xS9+8b1nDzb73fn526vV+9/7nhcfPX589eRhCM3ZnXt//Hs+Fv9EgyHud2mxWN2/9wIznyzH5XJZTT4IiahddOvV6fV2Y1a1IwEg5zykpJoAu5Ql7dIwDOO+H4Yhj8XVp1JKWQoih6apgFBluKRxHEVht+tLKURh1w8icr3ZeqHMqFIeiAggXG2vTk8a5sXJ3e7ll166f++Fs7OzF+/fV1Ui3A15t7/Z7XZlHLwyDAyllHHMY04qAMBZTMT6YZ9TMYxZMYuMYmY2FCmYmE3VAocmdiq5pBRYEdgQY9sgsqICccl5whKI6ErERGSIagimoGJFCrNnD0kBsoivaDNTK2ZIBkSGbiAJ5hVAqOQvRQRiNJ3PCRcomxdZ3ScPUmWIJl6zwqMoEaaUhAKwAWXJgRAAc5Khz2lUCihQhpQjHwQhavnOUEQlSUqFm2LW+uFU3SlNzVwvt+Y1jtOZPkU5ICKqFi0ikg00xjDkvVrOeQRQT4KICJNHp1VQBxAZGIARJ+kXNK/HeSkdEV1fG9Ud0zz9hEigpmSszyiFWtXKx6nm5uaZeBun1YIIPD1uVRenj3hoc6psu0qGe6pjEN6JCwqHrxPnS4LbrhVPO48/H8/H7XFrUine+renGtnNZl09GFSfajc1mmQADn8GnoSyOQU51bcnFZkjniccVfVp+tcxGiREOionHrONChzFwe8+nvNFn4/v1KjAD2dW6KQ7isCITku5TVGenegB3NLCRFQziJKLBJjMyRcg760FXwYoWiRzneGVC+b5FQAopThl1P921gvw48D/eYzmQkoDESGymxcDIFlgpelJbhZsZeqjcPKVk1gmBQ1ExL7PUGU1ankTkQCoFJ2kchC9d1hERG7G3l8/ICEi6VygUAAKSJZRFUqRPg3bUcwMtALZWh/wkOIt17CpggqM4J2Ei8UiBm5DbJoQY2xCjDESg+BmrtrhVG4ygEW3QBFwIhmiVbNss9HtCgoWbCObiFq2QQJhCEEJdrsNE7z3A+9NaYwxCkZVleLMQHMhBDPb7XpEfPLkSdvGYRhSHsw4WEiZJed+d/Xk8QYRyb8808Vi8alP/cjNzfVvfvHzq/W66zrR/NJLLz253O73+7aLp6v1YtGplWG/e+mll1bru08ePd7uM4XF0Gcw3u/3uVhO/RxdIRNRCAwFLecMIKjwxtdef+HuvaaNu81NSZkxTwEZiuZi5vn1pNliBEDCpm3cDxrNbDfurPQl5+u06UOk6kNkFElVkwA3kUPHsRHV3XYvu0erszu/9PP/3x8s6dVP/ihADBQ2275dL49ZINNK0iOy1q0FZ9OvD2sKgAC3/f50sQKQv/bX/tp/9Jn/+Xvf//5f+IVf/PH/1p87fe8rhpABs5VIQQG0FDbApvbQOoeaZ9tAACjFigAAxgjLbrnolgD3IL/fG6uYwcByvry83F7ffOMb30jj6Kqnjx8+evjw4fn5+Xa7TfvdOI7DMFgRn1BOS3a5QJ/DjgzdNoNrEF5UiqKCmVhRk5/8yT/znlde+vVf/Uwaeslpe2XjsAeQk8VpKSUlV24qgIwkBBRIDTkSI5CCOcvATBGbGQrOYjNmpjQnbQ3wSEQOFe0QiJuih9Jdu1gsO8m9Sz52i/b09BS07Da79736an+ze/0rX/sT3/PH2+9vHz8+x8AAhBSb2KWkV9cbIu7aZSlKcqmj7fu+HwdVHXLKOSOFpmuHYejHnMUbgNOQk4hoab2yl/qUUpJcsfd+33sl3BCYoiebReS0eyHnbIr7oTfDtumGXFRhuXqhfvCqmwWKICLrO9YtlwBEHIzXu0QXX3v0xS+9QURgWkpJQ59z9lYxMxlT5WOLqqqpairq5bvFcr1er4m4USlZSykp5yJpuWxiaNx3HgGYAxEm0VIUORihqGWRMadALEVUIIuqq6ooiKEZgqG4Pwc4PQRUVEUdCXug7/JGAIgzmXn6uiv5BICYar1YfbcnRCMNpRSYBGAnSoWn8yoEcpDqt44Qi4IUs4joFGVEUxxz3u+HzWaLYUkRcs5zqzAYuPMQK5FSKToOGbkqpsKB+oITGeAoMeRTEQAAmKlpmkC9gQAUtSKSOXKMjGhFkpmUklSLgTZNtFyLq15/MDBwMvG8tdQjD73ZA8kDSvAbOSWsnhpPIbFZh8aeqgc+o/sy9xw+S92c/egrEn7qLZ9SeZmAX2XDP0scfT6ej29rHCa2vstUQlezQAIjT6nj1LsOiAhMPIleHL9sZX3PDhPeJ+WlhNk94oAGdeoqvP041bK6L1BggxkZgn1TUBCeo8Hn4zs17MCrOkA+m6QQAQBgVgvziftOlFHAUopKQTVA7yny5yIQgtHckQuTuKbbksEkHgMALrNYiswrwE9zb2Uiqpa2/vxDAUO1OI27NgWrmyvMFXZfhf4HamZqNzOgrGvexbsJAUUNnB6AEx8pLAPcqiVCUHInP4CpL0y8kiZmVjIwsxDl5B/VzKxPY9d14CKKOSMWmz75enkfyWjwW1EQ1CuEZAMHjByYMUxu3USUcBNj7LrOGZU4qb+sViucJEYDN44YJ2mQgDKZyDFatpTSoDkG2m6uS0nNqhMp+34LPWBcz3wGJI7cONhen9wzs3v3H7Rt0/f9MO6vrq5Wq9UHP/C9Hu+WcSg5j+OQ+iGXEQCGUYdRL673b37jXFWR8cnlDgBubm5iwzHyslsQQRPo/OLm+upyux1e//rbJydbMztZiYiEkE2A2pBV9vu9qnZdR0SqoAqr1YoIrq+uwGwcekRctt1mt5nvA1EgorZljKEUkyJugR1jFxsupfR9v2zLogklW0lZdUBjUc1ZIHNsOpAIpTNqNUVDprC4v1QCAbRf+ic//30JPvypnxh3u5P1WZlipUPAZQpmc/76mX0d4enkNgJit1jkkiPg93/605/+9Kc//Sd//GPf+8d+9dd+7UdfuIPLTsB09p8mZKQ+pxACIUFgBgYABVNvUkWiNk6VZZOSSylx0aiaqhBYYMY23nv5wb2XHrzvQ9+FSFV3O5dhv99sNsMwvPmVr2y32/Pz88sn5xfn548ePXr88NH19TUillLcA6OouCy3sIIVEVGzbBndzF5zzvmv/3t/9cf/5I/+X/7z/+y1r345Dft+t7+5vkr9sO1LzjIMQz+kcRzHMQ0plaLEEcFMpBgispPSEUnk0FVsR+3GkVnA5t3BXTUAnH3jjhqoRqCaVBGg73tiDASllNOT1Z07Z6enpx/6rve/cEc/9P6PLdvlo7e/8eTyEpG32+2YMiIPqex3abPtxzGrQk5lGBIMvar2fZ+kBI5FZUjFzAwhZckqqlC0NvkholpT+zOlql9GikQ0DAjWGLVEZCEgEyIy0W4cRIQoFEBiprYNFMwwqYMfMjRmDk1jZCTSLJallL4fVYf9+Jg5jOMoKdcJCYBoZCCaaxNyW
PliB7d5UFeEJkOQIQn2MUZu2tBiKaVst6vuhIjBUIlMlJk5EGMpJacsCoAUVIuIJCkhBBHvSAUppmrFC1yK5u4mRkCoCiK1C9RMVD35BsRgSqWIapGDVwEYVE8gmGhaqup1QrzVVFATQ+7yZ0c9hBNhrK47Ncg558zWBST0IE9Eht5SCrtdv1i1baCcRQ0YNYuQQXEV3xCQLafS96OAmYHbirJlcOIMqTuhzL27VhUMi5nb05uZogoHJPI7YOSi95o4oIGKZJHMwWk8hFhzlmBghiIS5q2kmj+ZNzBNn3cur9kBs6FWIwqAd7B5eAdNTp08Bp+q7D31555CNseEtY5t04l8e0wkIZyKfhMUPKr/w/PxfPwuxjuRz+gZEOVaF5UdeiRcwUDoAfLtPwfwkkWVrTpO9zCgP/2oNlirhe84KrN0MpY47uOF385D/Pl4Pn7PBvocttoubtWj6CAZetj/0Y2XZyt5qIDtKIXiRBEzQ1BQlVIAjLG+OE6aoiYqotVt0AycGjq9yOEAre9U/ScOj5gFYiT2oggCkPcfwoQmEb2nBMA7CVFz8cpeDUdmy7sYYy4Vv806/qUUE559CKGWCTkwixYi4hBijP553G47UutIMqUkJc+Z6ZwzIBBWq24zIwYz2++kXowHR8iBiIh3wy4S56BMBJBpKlJKU6losw6qiEip1+xyON524oCwRVwul+5ufHKyWi0aMDHIXYxmcnl5fnZ2sl6vchlVixsAeAZaa9AlKmIGzJxSWa/XImoGTezSWNYrXnQnTZRFp4zGzIwgIkXSfr+/2Q6rk7Mf+9SfenJxXkpCxJvdVlWXJ7ZYLPrt9vL6ZnN91XXNV7/6VWZum/jZz3/BRKsLCNJisTg7XS/XKzO7ublBxPv375+dnTkhtpSiRU5OTpbL5bjfp5RyzqerO9X5MAOyYUBUBKGATYwRAFIeRUQzEsQ2YGh3IQSTMO4h5wKKacRaQ86mRVehMW0uLociRjG8dBofX15AOxRc/Oz/+x/80E5++M//dL/PzTIeIk1zNOhL4XiRHf1sBgA6c6nrc51CFqxPiPi3/tbf+j/9H/6PV9vdz/2Lf/axT/6JZbwf4tITMgWEVCEwxTh7HMmctGFynQ+r2A+RiJuGm0ZQ2U3ZwYrXa1xkJTCogQogQuDu7LQ7OwWz933wg1UYChBEZLu9PL+4ublx18S33377zTfffPjw4fnlhZMet5uLoqggEdiQgAFVRfM//Ed//8UXzn7xl37ha699+Wy5vn/vTtcQY2yWa0Rkiv6d5qIp5TFLzrLvx5ubzc3NdrsfkrdUGYZq71EjRQJwjWQiUjtqUzbH1x6YGhISkPvj+ffQNA0AENEwZJdCefyNhx/7yIc215f/r//nz5RSmra9urrabDYUw8XFlSgghCGXNKoZFMGSlYhiYiLKUqQYBjazIoaBiygyMUcgEoulFAEjIqNJtACAiAkIkAFCCF2NQuoToOIaHgJzCFERiEJsgxKqQogtTlzZECg0ERFF8s1+V0pBBUTeDUXLSERtXLhwC5owB3AOBCsTZWmL1kRJ5QYjAGFseMzFcIygXdchERgYGXPYbLYIfHJyN7ahlKyazAqRb+NERGqogKZu90iq4OFXFbecQjHCgISG5PYQ/nVIKaJARCE4fq7+N0TBjrDBfACY4pxTRPN+Osa6w7MZ+D7ve7Kqeqe3F0XNqkclgKmULFFV4SCeZCK5qKaUVJU5FhnVVFEsSySuj4cADF71FRAzGMcx5xyxOCJjApxsQt1+loBLKUYyKTRJLqNHoszojvclDUgsIu5QLyL+slSHAUVAQuD5MISpVX4+lWsJ4tYGZDbVS58ZT9EvdfIb9PriO2t7Hr3UrdesFZUDrtNK6EU7ep134IhOV6rfdLvUu7/I8/F8/E7Dt2JHgwCGR8PMQAVhypPWP7iF0HyPrsEheKsyAbDMzroAcEi7+LhNIq3/wNu/ndua/FffJCx8vhaej+/k8CMYAI4PbpwksieyND7bvs4hEJoVQQNHRioKAKRaci6lAJgRc0Ai8p55Py+cpwRwOE+OGDcH7Jdzds01f84BECotSxEAYCZiQgbnTIIxT20kdhRhufi+xwrFU/mIiNj3MvuwmUEp/ofRRhM0RaEp/Zytxi6gRu5378rvZqq6WrH/tmsWvFijN4ccAhGrKWJPEqvaCeectXY6oUhJkkFNWAYAEEBF4qqoQQYoHQCAAhT/YhgxAIAUQ0FIBdE/IKlqKaUJRAgNE0GBkpYNr7sGQe+enj148CDp6u0n5WbMzaLtVu/JfV4phtAGRnJHbySM7f+fvT+P1iXL7gKx3977RMT3fXd6L/NlVmZlSaUSIAkJoQEECA0tBAipQWDT0G5sEDQ9iG4Qg5fbba/lpnvRbbFML2SW7abbggXNYLAXlhCT3FoFViNAlEAlqVSaqlRVqpKqcn7DvfcbIuKcvbf/2Cfii3vfy5LADK3KPCvXy3vjxhdfxIkz7N/ev/3bKbWHPvNZ6gmH3e583aIcvOztQNd0p1kRpXIovasn6YDkhbqT0zbpauXOdke6dvXc7iBPyQn7g7BNx6GMJdfIqto4jqZ6eflw3TW5lPsvPWiappRLQ7ter9frjtjNykdfvNycrTebzWE8rNdrcydJzz777NPPPOMt7fsDbUegEVmJEADK1gAbsr7f52GUxOtuVUnNqv2wb7bd+fm5Wh5GAXuzTrr2q/uvARj6h6WUk3y12WyGYej3BxG5z/dGHXE4NO3J6Zh/5N3/r9Xulc//jb8FW6A7gQKp8dRk4hFg8HoiXFXDczHVcCSKxKwCgQSsWjxRm+Tpz/2F8tTZ//m/+7+cbM6+7S/+ld//n/xvbLf3xNx2SmJJDtD1ZMPd3F6mXYFpCUsN4Nm7CQjLvH04npyMY5IRmhwgAsvJ3Xtvu3vP+TMBOMPchmF3vX/48OHl5eVht//oiy9+4sWfuXzw8B/+/f/xox/+0N3Nae57Wun3fPs/+N6/9Y/IPOfxNd9+rLkfM6JJY9N0Xbtq2zaKZ56fdOm0PT3fZBuKPa1u6nQYxkcPry8fXRPJ9fX19fVuHMdxyOOowm23WsGtpUaBIas7te2KQCXbmGzyDZVjBMktszq4ZEizfng1XF9nAr7t2/7eO5+5+7bnnyMSLvz+H3+x7/PrD1/ruo6lIWKibiYNxtWGNK2WTV28lLS4cdRFNI+aE3XiozBkWmTUSmbmltqmAXWkNhJR29a6gnUZlrsAsjklY+JCjAQrhTiIExEJ51zGnLOZ+Vig2diZGFRUcnYdMphhEjHJEEkmLWZlGKeMb9R4G9ydshM4OeugbCguTZcabp66uDPubb1OQY4deCBkwdjwKJ4b37EdUhkBykNOqdtuSytt32czTynBfBgOTlifnUTxiWJQtdFM2hUzjyUPmojInDxb1kxElKRtUs6ZCGly23tV4PQYw2Y2RoF4UyclEk7ipnOpFHcqMIWQTq53IhK4m7uSq7lBXEkLCbddHgctulmtX77aNxcXJ8O4wmocx/UKfb8TpjEbJ3Z2L7uNn7g7
shn00J8ctg2hTS0P+VCUG+5Iu5y1z6WoEqukRtTMxMHUjKMO61POOUMBJbLkWsyoZJg3o1J7cq/HvvenPvjTh/VT5TN/2XPjeOmuxJlsgGG12SDnWts6EgDJGAaC27GOqNPCYxKx1loncF48CoN92tsXIDDsZZoKOM1xDDRY++PGrodkUsQGKfg7i2XKFifXROuwKabcRQDsVHyxXi3O5/nOgSgoVw8SyP0JqxhN4Zpb4UZ5cvjorfap03wasUQ8jwyqPB5xFdOQN3bhBLTchOgAe5RTi3i9GlGaCZw06zMBMS18viq4Sp6FfDQ44n7zFDMkAgFCkMlfM+VVLwKPcT1MRNNju1kwYxq98zk/79FgfuzILYfWrSZTSblb5/rNrrDj+fFX2ERy4HrZUrvTCcQKjpz19p/nIX4eNJqzmm4eNA4OCAExsB0Q0OwQdDd3crKQQYRyimJHBAWByKDFzBIxEZMYcjEtUA0xFxgnaYWSFY3TvWjO2jUr1eyqQu4S3DIFAEk60Rt9olXWaORU4Sml5Ef1+4kRN5lZlVHmRpN+I5kdN8IFGaC2GS7Ov84H559poV4Yrbq0QSISKjJxchQspkWFw6Cqxu1F+l/TprZtI+rI0s4U2MolcxXi+LWUMo7jmIfg5pF5t5JbkdP6LuUoXhdfDUBVx9G6RixJQ9aQ9EMeh0MIsxrxo8ur693BKBnxenNCJEylFW6bppEkIk3TpaaT1K1PztpV1zbipWxJuYxNtzYWMAGsTk4wd3WLpGw1NE035GtHZmn2+30/NJIS6UBEzGl90q1qPjfFexn7/vLRHS+5aSS9613sttvtkNZmZlZyjryr4eHDh6++/vpuf80iWQvA65OTtl2pm5nRoE2TTk9Pz09Ou1XDzG2Stm0vzk7Gse+aNjonGI8lm5tebXdRO069GGG1attmvT/sxtEOh6EUmJKZmXPDzeXVHiBOQoaU2n6f3//+91/uxy//rf82xhFNOw59BeRFUzr6whlsONo7eNLqBsDhSRIJUBRqv/+b/sAP/dAPXz54+G3f9m2/8ld/6S//ii8f+n3TJHXLjlbaxQd/lmybf948gxvbDwGR9qDDKJTAzF13tlqdPf1UqOL8ioagBtf3v/frf/f/6n/ZDwe4CuPR1SWRx5BW1NnhhHXrzLuIEDIoNdw0kjo48uZsfX5+2q1XqelWq5N3vfPp9As7Kz6OY9+Pu8P+0cPL+/cfXl3ucy5OUoq5e9u2RBE2ueHYrctZNYy9aRphIatJWGUin770+utDxGo47Ye+uEuTiFOpoiawagrUSw956TarolPFjWfW+sx9dwPAIl5r5pRYl1Q1iiiG6EopZV7HRGS9bpmZiUXE1Yo7k3Rd06UmvqgUMytjyRUQuruzqzs5iTSSADOCWWkSM3MiBliLenFTPi50DjeLVVwS5VwAV3hWZC2dtpvNJnXtZsMyjsMwFFMzJVhiappGcw6tr5yzWaEUrArfHfZhIfV9n9UBuFHf90Rs5kQSpQTnroswfqzSMfJsWvfnn+cOn7gVIKI0BYQdIBJnArE7BQPFQr5dwM5WuSkTgZZJWOImx3F0kyTVUZlzPuzH/a6MA0pmLTwcTK1pusa8wBnOWmgcMPTett6x7HfXfd/nPKhlmIHi7TGzx3ZwJLzMvID4Ntj8OO6eUmrb1LbtIetut3v48OH9Bw8Oe+yutzE2VDUNA0RAHGSaaYcnP9qfyOF5je5alOE2t+hhm6Ad5p0O1d0boZOZtCY315B6nIJK9C+F2PlJLnq81bj/uGH4G3/kreDJW23RgjlS65hFzH0SrHLPC6IajhT0mcA8L063KM1HROfAY6mzPyfYdss6vf3XT/WU2id2DT3hnJ/FnCHYEhO+UY/7W7JUNxtNoPiN2qzJNGUg+AQIQb5gjJQCt9CSISIRielWq0Qxi8OsmMHNiCgscAqhJijm5EBQEEhxE5SJiE0Bw7AK4t8plw8IK78afBAjY6oTMsJlvsghmQOGPl10nvPLL3Z3mYHWYlweFwiQuwcgvPXx+NIAhACGYaBJUUZSxYpElEst/h5HmLlppG2b2MijODXoJO48ETtycKgCIkakUaduiiNxEICqStcV8LDbE3yVCGVk0/Ozk83ZnVF5N5Rdr9lLVt+NlqR176V6BoyNjABPymyO9Xq96tp1J+tG1snvnq37q6HdP5COpHGlkZM0iRIlct9fbVM6IfaiJTXrrMacmqZT7gBYMP3MdMxhnnVdZyUTy7bfbnh17949Yd6cnRdKUw1JqwGX0NzzYmb7oR/6XEwPhyHnLCKX14/6IW93+1fwqrsD3gillJ66c5Fz7rrmzvn5ZrPquo6I8jg0adM0DSfabDbFfL/fjdnbNomsuy4RdW3qpOnKMIxDr8Wu+4dts4IMxIfUrAv4+pVXXnnw6P52/1t+x+/CqmtlHYlBq9Rs+2G1ajA7vSffy7LdWqoIYm5CXFy92Ori7n/wH/wH/6dv/uOrk82f+TN/5rN+8eecP3vv0eWjizt3BBhL36TVG03dpQfoX0TW+Y07lXYFAO5Qm/KSBEzIBwAQOTtpzXPTkCkNw5C6NmtRAO2KCKpQIiLalUzk7CVcPkQu7JLs5LQbct5ut8VUi5NI165Tak/OLlarTdd1MKy67uLigtAcDn3OqjqyIEkCeBhyySOI3Gq5TmYiSRS6RxxLgbgVd1eDukPNXR/0213fp5Tg3Ofs7tS0Ba6KOAsBVyY7PuMoezWDlijEPh/0iR7s7kIz/yFmbS1k5+7ELiLxcywdKaW+J2bumial5Gpmtmq7zWZT3IppgEBVHUuuCsO0EmaH5lLYWJCI3VQbWTExgdzcipbiZomQmBGZmeEKdwREY1cNqZpSNOshaylua1ufd+dV96XkXNSKR1EhcoR/DoCqpiRqoV9qMdfGoahp03Tm1I+ju2Z1kJkha2ESAAGGjzBvsRTPa/v0Nmuej6o+2ThTc6Koi+tu7hZ1HVULFnAiHH1EaJo2pQS4qk5vF0MeDzvfbctwIM0N+UbzCGKhtfrIkBqP8+QW/4mZEpTJyU0tu0VGgBmEPFyb4cubOGaL1Ij5Ecys6JgSBc2h7/fb7bbv+3Hk/X4fu1L4VmIXCU6Ox0YGFhInCseHpHTsk0BON3Ipj0ygaSu3MGTdnXxO2oziH/MSYKjuLTpeuv5swFy0MH72SXXjtqLp3GaaxJy4eMSoN5axI91hudvCZ5Wu2zHAm82mO3kLGb7Zm4fqWF1qhKgANKdCVXrz5NkPTbvpo9XHV3+h40EAwXO2o993dgveHnLLHN05XPMmR4N4Q+voCfbLY31xu4eDOPB4HGwODy6PEBhuoKNr6/E00zdHi+SLmA03Dj7eZsrJspfrhuEGU/JadH7yM8YZBBCYYVEuOKJBA2CT1igMVWzGnGb65LxRElHTNLFx6lSPPT447Xb1+9it7lPMDOYItc32BAJZ3tx945uObtTJhpsB4RKYzucE7YqIQjNhNlNCsn+GmmYWvRlVKAI9jllnXNt2myU6BZASN5LiVoNWF8JxKaVWUvieiKRr03p
VjSejGpkMKyFuIPp0NDQsPcRLn5qU3Tk1Z3fvUXfy4MGDB5f7vqiklTMdRncfRYjcUd8TATAv5gLhYTdie1glCMqm5ZOTdz56ePXRj3w8rUga58albbr2pOW1uGw6ud62n/mutw9jee3+q04rJuoHhpTZnmNmabuoxppzZunWZ+3l9dXV9W6z2SdCSmngkhIzi1kpWoioWW/aNmUtzLwpZSyGqk6Ltm39062U0vf94XAYh0POmciF+cHV5XZ75VNOTmIBLOfMtO66ruvai7t3QLbd71frtuu6UK8RkdN144CWllgkNd427WZTStkdBs5DWq1FxJheeenF//5b//Rv+W2/4+4736XDkNYnDL5YdfA8LVXqxMuKzsu2NKycADcRoU76q6uv/A1f893f/d3vfe97f/Inf/Kv//W//ru/8d/brLuoDNOkG4vgG9Gf/v9b1xgLruntOCQRkqAm0Baok+7RtiD7h//gu9frpowjMW+adR5VzQo5pQbExq5wkWYYlSeXHzkzWYJboaLUOJnS2JftdlsK2rZtm9Vrrz48PT1drVbZTIubhRxILkUZxsxguBmzpcREpJZAAIyYuWZkCOCq2YjUndzZPEhxRCiQPGrjPI5927a55JTanDM34jYrMzGIzUznbvHFqicsfmNJwRRnATAH9ol8dk4tGREzSoyfh0HdvUtN0zQhXjV0Y6Q618p1AS7NEDl4FpclcpCzUEcgLSMRa7FIu41FqXrHScMqKmxO5GpKBJCZSZOEGwiXQlnV90PJxmsmZhFZJeGM0Uso/7Dh6upq1QAAc4JzLnl+imNXMLEzM4/FzNTcVT0XFYkinDX3rC4R05I4r72xIi0B4TiOcw/XWeAOKJF4RQs+iVkrEM9NS+6Ge2ROpgnRuynMAVViz87b6+Gw15Kla8+07N3USifUCkkSTg2v193JybpruyRMfkiMVkQS0WAOcivqZFNxT4VHlJAlASAnnzIItEq/uqqO49A0HGZrDI/ICp93ouiBOJ8W1INqWlrNFYyqJ5i7ceGbmAfn0vSRSFhyt6Ope7RS7ejRUgbNdjOOyI0mYRidylpwaOrUdOmfrS2uc6vdWOvmuCXRTb2P5fr6pCzEaXi8BQvfvM3da7WIKAkzsUAhPNmFx5AgEdFRVv82uSw8WVEBOw4SyVRLsNKnb9QVRJwfeblEkyp5jS8s2lyMavnrp3yTf2Zr5WeZxUvL6la77aBadDDfrrj3ZmnRB/NYv9VpjoAz7hR4G6jD2skJNYJEiH3eLRyQcDP32LrYGBoCTk5EidjIwt4A4GRTisIswrG4t5shtxs3NtkYyQyxIZpTpOtg4f5UIT5WJrsRc5y/gBfEshkQ4oYT6DhSaHHO44bI41u13wxrzl8R7n8ATdNEWG++EzMrXrQKN5KqlijGTdSwhJbM0o+uqF86RR0rwywRufs4lGa9atuW3LuEfFgz2cn50wrqMzythY2kIZKi5gaNUKo6EwlFYUZk93W3zlq0aDFrUJiTpXS5223HkQzcqEshiEjf0Do5l+Gwasszzzz18NHVP3nve09OnnFscpbzZ9fMnFJNGOuatm1Tw6JWGNS1qbi46X4YhHgjaTQ1IyGoeS4wFHFNplEFUQ1ZNUnbrarWSBFr29Xq9PzupK9ApgBYsL267vt9KWXMQwRbxnG8vL93yKNtf7V/Nes4DEPTtQACEMKcObWpIZKmac43Z+cXJ31mIioF3JAVTZQTUsmH3YP+L/+FP/fr/s3f9It/+ZeiFJIW44gEcHgMmaon6kYOzc1mBgiSuxILTFcX58jlP/x93/j7/6P/+Kl7d//yX/oLv+QLf8kXfOEXEuj68tHFxcXtCyyv9Un+9s/QHndtgoAA58xRSJcQsQwAhWEZkv7p979n7HfuNAyDqhl4zEVB7A6iQlBnrTkSTD5R+cmMXEjv3rn3zL2Lk9P15YP7H/3oTz/a7wxoujZs7XHMu91uHAtBjDCOJY8ltZ0k0lLUkRppWnIDGVmtnOKT8FMEnMmc3EhdpQ4VOEhFAKSmyTmLpDEXJlIgpeSqYSmE4ClCKpPScnYb1VoHy1XFqYI9oqPbbfI9kx2xXJ79SnFcVdu2KaVkUypkRcdx1FJmAImJnjQviJr7GWoe1yijPJRAzhZqWCJEpqpjqOqhWtVOoQjtxdyLcXJmlrZhdwBZy9XVVdO265OTpklt2woZu3Rc2HC13Q1iYzGhyn5PKYnU4qjOBOVhGCykV8BEwgRnkuqYr4/ME7Nx6TWPLk3EfJMHvLDbpp+r468Zi+Uc4BmzyNkqtajyNuYTd5dcVUuTOHHjrk6OqjZGWfN2u72+3u12h1V34ZZK0b2NIsJcrJWUUtu2q7YRIbWx5IOVDCqJqO0SoWnatSP1o4q4iKB4Tcid7j+gmy2aTzHA+bWKkIgU06ZprKrsMKdk7kVLy4kWSQ3TTuTunsdxuWFF7zFQ7BjBJiKZcF+UiXefS1AphclLNmUkV6dQxDKPSgPVTRx/Dd+8ho5pXUPIHc4VkVYf2WQ0O09fOI3qN2J4HgU56KjgvIgW3hgcbwgO3Z0eq8H4VvuUbzFACOBaXTPGzDQliU2NOc1bWyC9Sa5saRbO43Ai0E11Jux4EHCu0+dG6PqTDGzM2bCLmfWmgIJTW5otP5e5+XOdv1Mn3qpDWb9SHuNvvZk9RgSbQtg2VSHC/GqMwB6BPxOQw6YpEb1X42TkHsWs4MYONXV3hoTcP02V1c1MrdCCrVnvgQQCtprpMwfMYkMspUTpuMivmT+VjqZY4C43WkigxoXIjhYbFsjNF6FG3ESD8w/MfGub4TC8lpFKJq5SVTVRJIimYXDEvQStdPqi43f1fR+e/jQJls4G0OwsD+gY9tAc3owTslVxmlpwItXU55lAa7IehgxT0pLZxXGyXktqh2EApYs7TxfHmNWJG1CTurFk06yqriVisgVujsvtrpSSEhcq3DGlpE7X+0M2ds1AsVyMirgzPLmcrpqhFGrau/fu7Q8wHAA67O1+fzn3duXPihB51wjMW2EyO1l3ChKhdHk9rkREmkYijtekNQAjAyWkzkyzuVGUYFTLhTyJWvQ5VeFTdvcE4W5zGgE9M3dloJTywgsppXTo+1JKn/u+7wHEv+7e9+PQ592uHw6jqnbdg4uLMyIyLynxyckmNQwiaZthLM9/2jub9elf/LPf+htefPmrftP/DOMgJIDGPIq0UHemuTThcfotRlfYVSwAhpy7bmWlPP8Zn/EH/uA3/bE/9l989Vd/9ft+4Ae+6Iu+UMfhzsVFfzis1k0d1YuL/MumOjgQVDSHm5urudfgRgvDOGLNT13cKaWcnp42TbPb7foxEztHYQwzq7lH7kxk5sQGpLoIu7u+9tp9t3ynPx3H0nXri1Np29VmdXp/e1+zulPfD+qWpPpW8lgSEzEXjFB3CtWo4t5ZaK1GDzERVWICUUA1LlZ7jdSHgGQGczYnByuxSyoGJzgfOfazb2heTGzK+J/XkOotmvxE7i6TymiV07jpdVouedMihojYuINEmqZlkBvmxS0Wb5ukPL
uWiQuIiGGmw1BUvQxjRRpQYpYEoqhkoMVZREBMxEiStBIfmrZ1d3WoOvF0F+pjyeYOZrNOhFNqN93qpMVJi7ZZkQ8AF1NyLcWapmsaLqV4IDCzYciqyimphyBUC4JJfJGO4xiRutqfi3TuSihgmdHyciVUOLw61JjIolpmzuNY3EkkdU1bOR1GofU5lDxtP8bh9ax5aMQEosj8I/cyjoddf73bXd+52EQl93Ecx3EELDU05laaMbUnyUk1x2bAIBY0ypxS0zZqtHLREstyIDEG2KwiI4uM0umJwEROpZRRi5lNBVgToDLloLpWF2qgaCICSIg8qvdiKghvYewi6qvW+TvvaHLb1oxqKHM1eZ9Go0iKay0F4AjsRz97KCmyLzAhyCZMOLegntpj0ovxGQcBTgvA9rMn+Ny0FW75lecHUSwx5NwJbypj+83d5lF/q6BKXUuPOebVrvPZpe/OkyRNADxMm2wE+qZLs4EIPB1kIvFbzosJSS51/BcKH3wL/r3J0OBjUX167E83++ONWFFv1CKuxeA5AFjFUm776t+kjY5zxOZhTTUpCMHjAszmUB4UtSpucDMt9gy3HD5O2FyRkw0K5ti/uEq9WKS/SS0fBfIbHoFSSqTEYcJTMzgkrt7zJYxMC1fNxIqZC2IsFEvnsg0lvo8QM30JAmu0L5aD6QvaSU4Qi1AhLeQNeFEVLeJO85m+yNbYbDbT6mK+GHabTTen/E2ET2VQyOJP5zuCFQmKal3V3ZuaNXdxA33fiwiIPApR6CTF03C5HrxkLaMwnW/WctJk1Rdffvn6arc+PTOnfT+ktnOj1GpxsIOcOLAOUQt2ghHnPJysur6/Ou3kZNMQEWDUdiB2kbCzzRtGy0jXh30r+rGfefEz3vUCGFdXWxHqe1hTWb5ExOpESkRCfm0mjDL2K5F+3BzGgZn311dl3YUxJA3PFTWY+e7du107RPeuwGoMcFqtk9d6IWMe3TUlEmKHec6llEaSOOWZVWukg3bcSLc5uWjPqGowjONYSjHD0GdXFU5l1CDljuNoZewP1/0w5JwdJecBsKZbvf7aK5uzp87On/pLf/b//sEf+dHf+Q3/7ub5F2aDDKYgIbKwwp60kNn8v9gupEmH3XZ9ssnb3Vf/+l/7Az/w/Z/92Z/93Nuf+0v//V/4hn/3G3wcVs0xQYiOU/bY3oh99XNvn9xJVqk1aXFi6bHeoB8+4xd8Zinl1VdeSdICsJoepjkXNRiTOZkZfAxNEQYbcWIODW9ybC+vh/0BAIzbZqPFHz26dkJxDUBEzl4tabRdAGPruo5WBCCrEvk4MipcNWfiqN5HVCzyoDhsAXcHsRFbYWbkUdVpyOokIXKV3auWHLuqCo4BjWg6mRlHPHPzSPzAdMOhhSlUOMcJby0dMSBporVPXMcFbYEJsSYIi4i0JSyY4AyOg5dSVCt0DBeJo7jDvDgs8cmUWhZQZFopUwN3jhXNKXowvldVw42VknQtr9IKJE2XZMpXU9Vwu2RTN4azwswmE5yZIMwo6mamjlKKRbGF2RSLLMqbIPmWsyyWTXcK7jAtmP8OjGMholXbglNgyXAMT+UfFGrmFtHUxBwBTzAjimcQGwrUmcdseRi3l7tHT4+ncBN2LWXMezOlrIaU2jG1w5oaFmvlbhOodUrAM7NSCJMrRFUlNURRVIPmkGCtShGiaCmNOmrdUWMLmFgnRSMJ3cxElVNquHZdww2YI+CorhQ56SI3xuR0wQqPJ3P1qAIae+1UHCIcw8fBVoU3ZsmfSbl0sZwFGyJOrmtEsLFv6yVGFcQp5xDsrgS2yprnuvEfLx0/aM0YJH6DooW36Bc3lkAPsujPtaDFW+1Tpy2L+y1b3W0IMHM3kcnBOmE8hy9cuEzVUcHHeEhNlI1KqFKxnxPAHsHIRbTpiP2cPfx2kxv0MTT45sQnN+fmcvrSjWPRZlz3RCPnSfNcJ3/ujRSYSVqm9jm9WXsfjseymuwNfoiV3929FnquBmilgxLIffKbOwzOTpj2O3eH1rQXLOwoc+LpHtw9m5pFTkcoxjkIImyqNAGxGRDS7E4OfDQlPiw3sOkJqDLDsVQnm8Qblkfmm4sjeQHAbnjxpwAjTTwcMzNCKCgcszumgvIBJ25FADBRoSKgN/l6vZE0o0RmpoklReZJUlhaWhS51jCk4B2Rw44YWkQA8nF0MiGWJjVMq1XbtDL2g6sNwyDSqJOWwpyGPNoeLq2QC4GnOw+xEAvpi1XKw2hNI9KxYNW17ebEUIxHJ1WHG5O1xA2ZrVZ47cHDz/m8z7r71FMvvfgwsTQNPxq1dtpkolCEkskvzk4MRUmyU1Z0Iodsl/1lBE6Hkvu+J6KTk5OTzdmHP/oSM7cprVarNjU5Zwadnp4yMBdjbNu264hhqrrerIwpE7sjq7h7aMnQBs485qxZVfVw2LVtC0mpbUo/9nkPo9PN6nTTnhkx8ziOSaho7zo2ydTGcdiZqbqNQ766fui5NOvTb/ur/4/3/dP3/v4/8E2f+2v+jUmiz6vWAhmc3yibRg3CULM+l/WqTaenOo7NycaGwzf9oT/4R//of/bw4cNHjx599uf8ol/1lV+JUpZJBrfm9b/UNqkUVgvM4dnU3VfUHR7d/8Hvf+/2+vDcc29/9ODhMAzjMHQpNU3joCGPQ1YH3Gm0IpwJYE6tcJuapmka4cR+6HdGogRTV2PA4WIu5pFGTGbuFAIdZGbNqss5N2hOL043m42hZvM+vD+OJY/jOBa4WjHzYgAiZSxgUMhoOIOEk7dEZKbupOoCUldVFyKQCwhKILNaS5iMYRSJKDRPRgA5ZyymOU1BDPiM5W4wEcZxnBNr54kvIkW9kUREXlRVHa6q5GiaZlbIiC+Nmqj78qqqkjlzYpDCKaFNNAtfKbJPSwRJCJ6EHyrGTTWy3b0izKZyYIPTx2q1bIwWKWIqTCZI5ZCvLrdnG9Fiqt50IipuFOuewkuxok5ESQROq9V6GEvVx9KQdW26FmN/lBH3CS1jsVrObrU5oGao2YDH3gZEqG1Xq9WGmUu2w6Hfj4dSCk9ggAVdxUxM5Ov1KqXkrqUUCBu55mJeNm2vzv142G6vtv2uYWobMbO260xHc3NXtWxUUtN03WrYkjqZ1eKdTBI231B0LJpzUXUWEAlxIsrutZRi/BCPwCJt2xUbRRomV7VSSnhSVbWRlFIyM1PlrhMCipWilkQmCErkxGBmzSXcEBRBz4m6YiGqUYOMC/fELI3o5DSPVQC20J4LbhUdeVUVFvIMAqdfMakU8GMUTQLZwuqySbXUnfhJoug3SKQT+J/4pT5vuI4lHK3f6LPt5xF7fAsTvpna42gwnEFgIrBb5Mm7qaZmhaUFGGoU9bMBIYSmwN0c95tESiIhkCvqq2mEfJwmccXKL32TjMB/rpzAT2rM2Bv8vOzQGeY9lvFSgqwVVKwZ2QjgYLr5iTfJG3pii7WVfN4bbBrmEWKtQ55c4W7uHOFBc8BgDhaQC7NbEEGcAXW3kqHGHkySOstSSlrKjLmMEKLfZJjlSeedKOyi/X6PaZ4uX
clpDiBGIyJADLTcUyZwqWZOkABL8wfnc2bGzjIJMJsys/gT9Ej5JrmUblJJw5fv7hHT40l0tGma0An3Wk37SEkNWJjzsB/2IrJarVJKpZSg2JqZlgx2p4p53H0YhsoXbZsolBzHIy+xbVstA9xCWKJp09n66TalBw8eHA77rk05D5DETHk4ANwkyabmBlDASwPUSd3i/i8fjony7nL3zPnqfL0+7LaHfqXIzpkbcGo4tV5kUDvbnOyHRwoqaqenp6tND+P1ejO2Pjv4VZVAkIYJiXD/4fXJqhvUr15/+MxTd9v1pt2cjg8fbfeHzcmJqkDWxPLgst8PcHeBAANjH0ENYSZ6pDryVByyaZquTVHYLaUUeTgi0jWyWq2aYuG6CMJVgosIt92AOOT7nIsDoMOYi6ORhoHiboYkjTTMKA3zZt0QgVyL4blnWY0d8syde8z489/6p7+2333Jl/zK82efRdZxPDSrjpoOgKuRPLZYuccxEU7URgCdUwKMm9Sl9Ht/7+/93/+n/7thGP6b/+v/7Zf+ks/fnJ7qOErbjsPg7t165YCaCj+ZZ/VGXtLpT8vl4MZpqnCHyDETItCgmgpX/NM2rbl95Ec/8K3f+q2vvfryr/s1X/325z/t+vK65G2XmuGwP1+vh2Hww+F01eWcD+P49Nmp2/D88y88/7a3r7o1kVguDx48eO21VzZt6rpuKPlwOJye3X1w/7LbnDElplrzU1WzFjgnEW7b04vztumkSUTUtq20Tcl2OBzM+jna5rMbN0VRFo+5bhZLDpdcC/IRUaIUpB/mJrFMFe0DeEy9EDV5qvNlOmbmBBJeLgvz+60v/egPq8S/pmsDcbmH4gA5oGYiqZgyIrbIWoqItF0bRVCbrg0qwTCOQbksTkRNX4Z1J+ZusFXbXV9fd00rIiRsZqrV1iciLaNbjQpVZ7mbgbpVF5O0FGc+prq5jixNIjaCu49Zscs+QKzPWdv2BGAmGYci3KhZzhnCzEmEHK5qFvniOataP+aYfaYoNs4rJKpMVBV5DgFhALqQLIvX2nVdyHTZpMsaJIK2bYkTgHEsfd/v97txzO6eWESa1DbhMwIw1/9omu5w2JnBCVlLYuqaNadM5MOow2ivvPz603cusFkBKUnKObO03aqR1BBS22zWq9V+dSrSdSuT1DmDkK4u96lZCbdaRpJmTpwoeUipJfBmcypc3LFarcDpcBi6RnIpkchaShFpiEikiQBF3/de1NW47VAUkmQOA5o7gZkNrqrFNHnQRtymkhMzcp7Q55zv5Ih1iQjMYJrKtYWu+AS5AdByebF5rXAKmVNheBSrWni+Jvb0AqfFeBO4Q2pmChng5KiZyTOvs37jFE48XjZCi74MFdDkooqbIbIpLOPzc88ItirTvNmIeW+yVkfy8SVTuGl8HmHCQmmeClQzco8Jq6jiagw4Qar37FizgAAnkpCQiQmGqQjh4iIAuLqsQHMpwqUmzc32qQ9JFhVKCVg4neYTvJLRK78kSAtPsmPsCGOqoUJwrwBm2jJcgXl9SfNe/i/bgf4/wfa4nrOjqqBPQcJlSNAc7l4AkzpvnCL2VimMGhE7qHlRShwBN1erYcGipRTkitxsktsMtEKAaiTPM4CIHIYr02xZzKIAmJQ+pwpOk/vG3ZNNcXau9zQjtOMrdq+5D0QUF1mWtsDkbpwh5dE/NIG0CC3yIsRXIeV0kfmGgn5GE3/JJkG/uFrNSKSjrPwMwGcpBaLWJWIlTVwzfL0AlJhTN6PhGf6BaUahUaXRakalrxIhSLQsZycnbZO0jCUPTGQEhhOcmAASIhIacwEZiGlikk0onOJ5jQpZplIIzjBVVwSvTJGN2ckaMX409NB+uzu4+/pkY2aumnWwlBCuNaaEascIo5RCTWssDnPmQW006k5Om92hH4oZgRtXK+qqKau4eiGKKE1k57E74OZKZEBx72Nk01HUNE6mCB42rRBRYjJ40zSbzWa1WknDARoZMgxDKcpMQi5ESgyWbtW4q1t2cidxmJq6lS6lRCiEJMLSSlolaUXS3/rr3/HBn/iJr/o3vvoXf+EXtCcnIPIx7/vDanMqMa5igTMjGIWKYkxXwsS1Cu4hIclnf/4v/a2/7d/6S3/hL77yyivf/M3f/F/9if9aqN09ujy5cwHgwev3z+9cNCnpAvgdd7Pjbz/7MnFsBgBpcpIOQ3b3rmuYqJRiVkLgvmvb3W7397/nu//sf/Otb3/bc9/0B//IF/6KX/Haq69+z9//+3dON6U/rJvEOl5surNVGobBOH362+7de/aZe/dWb3vb83fu3B12+aWXXnnpwWvXl6+P/W61Xuehv3Pn4rDvt9tts+rW67U563jABPhZmqZpUtuIyJBL13WpbQBW98N+uLq6ury+GnrKUZ0FrpFrR0IEWdTtnJ9VgUgGnnjjRKEvQl7L1nEQR2me1MuP33Iw3VgZOGLiwaO48ZEnv4VFXLFpmhQIXw1tG7T709PTeYVZromSTs0sCTdprZZR+ia1d84vDocDgpfr7sbFHIATGoJBg89pzBJ4jKWWRqzL6fHGvLIqjoxZU2TPRKbqqm4GVZ1SEBySqn/N2byYuRUvbn2/U0MxBxgMitQbkJcja4MXGQIR4WS/0ckArq6uIiHh9Gxzfn6eUtrv97vdzt1V9+NQ+jHnnLV4hFFXbUdEQbwNAm0UHx7HQZcpfEZO7E7jmJgx5k61K6XdHpC1CLC9vjw5WUvC9tXt9V5IOmnKdne4fIBhcPWmqBgoyappWZr1OJo7A+wGVXcjr05GuEdBCgk1i8Bytby1EcCRjKRwM8i8kFFdOAAt6qlpwjYlQJiEONIDdZzKrvgcIDsSYQBAdRnKZqqAdTapQATimZbpwKQvR6i2FwE0M4Jj5arrRTV++bgdz3wkn/8EQqku43ndm3Fa5dRZ/TWuU2VLH48EYmErhmWoQJDabXEbs1vrjaRr3mqf4m3pGXU6Ovf9jbHBpIS0jPVh5lMA5FaHvQNMsggPYgELKxOVbiTHvjUIKzJ5QudPFskSo8yq2nHKovucAKZaAS9wNk2134iAMsAd1CIREwtu5Dff/No3I0qcmx/jafFelrFBEIwI7gZyEpDGtuAw86KmmZVcs+ZC5pgCduTxLsIu0qXdggm4UeTvoDqgAZ0ZocuIHS3EpZcGyjGHyghk7O7hTCQSh4bWd33A2PAWIjGzx3SycniZwzOfP3+fC/Pkoj56Wxd5L7fMuxlM0eSCxySqE1kcAHTKrD9q67mFazQqkpVSKu6ZtEl5YnkBSCmFz6Ti6Um8lJjD9d4iE0kuQ9s2F+enTSNXV7u+72d+Uu2KqNFkYAGDElOEgM2cFSA4EQsxGxsJKAk1QqHXEq6FUoq5EiDEAGnJCbbdboeSz87OzExd4RY5UUbAFBplgxm5maTGiKDmJP1QDuN49+LOnTva96MbNavOocWsaRNJKqYSdQqiaApFGTWX1IWHbmKVAQDUicoUsI0oRA5dTPNKzU2Jq0Q7wMxd06qqQDar9enp2WZ10rZtK6lrWrVMZkm862iVEgdt
r125GqubeVEtOvRUiGjv/N5/+v0/9iM/+qW/+su/7jd9/d1nn9U8npydW8mgBGA49CKSWgE4j2Nqp7qCRyONQCijJZHt5aPf+Xt/7/bq+q/+1b/63X/vf/yT/8c//of/8P/25Pyif3S1unP+1NNP90MvKbnf4KQuXZRvtMa9kY/c3MP6dwMzuq6BA+5D37ddSpFxQfSTH/zgd3zHd/zQD/3QN3zD7/mar/mazdmp7ba//bf/9oevvPK3v+PbuxPedK2XvEqp69brZ595+wvPPffcc0S0OfXrq91HP/ATL7306v2Hl1FPEpa79uS5t7/w0suvgrTvh9XmfBgOY9ZEJYxpZjStdF3XrlYi0q7cQOM4bvf7vu+HXPb7/X6/Z6wDDcbcrtlPfANXzPaxoKpDxVGQmYNBHnxwIrhxDTLEf9PxBYQL4+KGk4grtiEi6A3K6HKtmNeo+V+aGKERrEvE6/VamEspq9UqwkellLlCq7sPByEWHW2bh8S+Xp0kke12S8HlgJEncmN34iTS5OERSWoSR9hHA42YBsoSkTmiVQEns4eysTERIXKWzU2gbsViGpKbRczmuFQSkbGhqGoxH8YS9jgJcxQ7cHZ3kaNmDIjmlD8zW61Wq6ZV1cPhEDHDtm3Pzs66ruu6zqGHw6Hv+3D2XV5ellLyqGYm0nRtI9KISNM0UTnR1bLZXPkQqBwQuBOJsLOQA5o3INvt6PUHQ9Hm+jozeRTm2W6tbWi1TgT5qQ+99jM/c3+zWY357OHVAHApWQ2rVcpKyXzImtXd2EBsFCRligcsDlYtHvqEpj5mJcaUS8/MEsGHgKw0OxGnrcvVIRMBimhSSCAwTfU+jxsWR+0JXvg9J9oLEWEW5fbQFo+xaJhcU5gCbQ5QlXQNRxvrRAEKDDmjNyxWnqUo04QtpzxAoluTwtyJEm6IgNiU3UNTtcMZ4z1u2sWgFaMyfeoty/utBlQD1/1GrImO2rrHVuHcNLZuhfviWtVdMikYhivkFllUcOOT83WOuUW3tW7eBM0fs0cet0/mLhHCVLzRPKS5YATioxyru3uVjZmcVIBHcAumMEO+hDG4BZwaIRxFqw3gyhp9U7dlkHaqNGiE8N3WBEtygKpWd/UVusE9ivDBonRaJWBUKFh3hmX+/7HRwgzDApoF4zJaWAJhDtHNzWJGXgngmXpa94dFMkkUvDhGkGsI8sj8pCmUR4vaVsskxfkW669y9FIvz+HJZRvGI91IfaG5mOGc+eK12NTI0sZ1FhWWjRw5Z0yYWNKUYV+0WOWnjePo7qlt2PnWI9hEtUKNsCvDV21zerohopwHdxcRYnKiKsvKHFm2zIj6WlPQMbrAmIUITAyvQVumxAC5NUmUQW6jVUYeEaemEyrX+93V1VW7XmXLsMRkpVjIegT0V682ioiIUFFzcyY5jPnyendydvHUnbuXDx8d+lGsif5MXUMkqu5goxplc3ISVDWFWnOOiW3i+CFKBRAAKgDMyUqlBNf3WwoRBQOQHCKDFwXoerd/+OiamYUSEYXpyfCuSSdd6lppEgu8axMzp9Q2TZOamp1ERCftBsCYy3f9D9/5vve97zf+5t/ypV/+5aXvpUnjYSdN161WAPLYM3PTto9Nz7o8pVUHt9M7dzDm3/cH/9Arr7z617/t29///vd/+7d922//Xb9rdXa+v7zanJ2uuhUAV0W64Tw7GmX/jFuOyOStJ2gpCM+Fo+samIH56sGDd7/73X/zb/7NO3fu/Jd/9L9417velfsBY8+b9fPv+sz//E/+1zoc/p9/5S+ePPe2zekmloOLO2fveMc7JKVPfOITh5+5//LLr7700iv9Ibdt2zRdSuwqv/JLftmXfvmX/en/9r89O+nGcRz7rSExSfbsTsw55xzTuJiBObXdMAyPrraX11fDMLiRwc0JpE7GRJRIwJFDzMxRlW7yQqEaylTVCOusn8ksUxE5IlKaOHIRG6G6nbs75hg/0VyYPqD50SKfjeZP2ubzS6mFIlKSbtWtulaIzZJqUc2lZDOrNBry4gbtUKxNq3VDKdm6kyZ5OluHS0qLZ8XoKauZERyrtq0dAlIDqSmKGaUUiiQ1OzoK88XbNyc1Q6h6EgCO2i5WKbjJyEzdag8IADeK+vXhhnL3tm3doZGoBlLHlDt+JDj4ZCox82q1UtXr6+sIXXZdd3Z2ttlsOLXDMOx2u2E8BE9eNVITlZmbVtrmpGm6SvQAe9ECM1ctFvBm1oWOl1jUGwuwywDUO4Ze78owbvu+rDqBFtOSmHLO5Pn84uziztq9pAZ37pw/2l4/eNBTEqKixVarDBZCb8R5LO7ElEK4ggxCCRyuVjI4QUQaIlEYFWeeD4pwYk7MEl6AuvvE6CUQUR5HhNAFE5iCL2GEBAn+MU0lHOt6OAW7Z09lLKXm/oQlwmcyzOJAVXwxgNwZ0JgD7pGTawjrynmy3gioiRyEmGgzkxMAk8OjhhuiMGgct8XX8kKOf45Axg3Ndp1Pxrd75D64AjLlDU7pQm820/vN2t4ozuPutNBSetKAWCSsLn/1BSMNYETRnHlGhNUnRzTovGCfhhukyplMd0L1sm++NlcTnX997BR3QOCBPAjOIMCYJgYBIufcI62FqviZgQlmMEXJMHPNwULsaIQLmjVSg5SZSBfE0beaB8ir3XEMyrq7I3RcnMKREsW8rNxw+MU6zgwQOzv7xCEsUIv0uUhECaQ3f6/Otbs0Ch1RMLm8FABLUIbHYm/zz+5+jBB61L0AkvMsCkVTwsDCUR1mXzgnfT5CNG2lqCd47Ryb76BegWvi2fwVsZVSBZyP19Nwn+pK2VRieMpo8hkK4whlAdxAocfuUMvjIQTics5OiOy+kPUDUEoJCc3jDTTm7iTcrVepbYZhKOacBCRCbOHWAjuLu8OplJFZwERwBM4UEq8FWx1q7oyorkNDVgYIAfHSVDKQiIgJwtLv8/37rz/11FMiNOYMFmKQE1PNYpLJEe3u6happW2TSh6ut/tHl9ef/sLbTjer/X7f77dKbM7SkHp2kHvxAgMJRXwVIDKrol5REcFq6Tkv5pPNFzZuBefEKXxzAJyCqxajjVwY5mp8sGKqMYh0u41BL/BEaIQTU2JuhCP9qV11Xdc13bppGk6yPi2paYdh6Jpuv9//5b/8l//hP/rer/na3/BLfukXdusVJTbL7h5x3TyOT8KEAHA4HAjwouuTtQ/jf/7H/pir3blz587Z+d/4K3/1a77uazdP3fWSqeWrq6uz83OHT/7wgPRPnv9v1BZgyPI4unvbdQEPvBQwg3h/ef2JT3ziO77jOz78oQ/9zn/nd/z6r/1apITSt6dr3V1LOUAYRA8e3T8/P9+cnrzw/PPDMNy/f/9qN/zoB3/y6urq0aOrYbzMWc2k3axExIkabrqGH11f7vfXn/7OFwz00osvv/jy6+5S1LK2RCQibde1bce1xHm5fv1RcRvGkkc1D2lkV9QQhhEEcDKNYqkq06QLcoLLnGOAeTLWCa7VVq7dEsVW527i2VBgIvU5zh8UR6+JWPCJps6T+sUTYeH
sY5r/GkSAzWZz9+7d1WpVhrHve1cbxzEUWZwQpd7jmitp+sN1I826ZS2H/eV10/hm07qWnIsqJepS06kkLTCCpKRuIVtiIJAEB6Goz9e0oj4FmsAhNO0eCyOERECWdVANfNVATREhL2+krSX9vJL2KTUNm6Q2q6FYVYydOmMWv2FmmSR2iKhGBUFnZ2d37tyJ1MG+7w/Xu2EY9vv9mPuJ5UHMfHFxnlLTdZ1w5FRbKUU1j/0Y4yHyaedqPVZ1npFzzlHLxB3AmKlrmlzyMB4cZd2J28hwmIsQOYb7jy6vr5tWzMaf+tiLnu7udr1I4+4556bLIuIerrpUQlOZiEhio02pFRkJKZyKwimlVh0578kSnCECJJIkkkQaCqRsHpQaCiJqKLtSRdex/gerCklq50Y9iWOkbVawOQ7I0DOlhTYSUKOCt2w1A2r2YPX7VpBGruGZm0J2HI7kiRGq81Xi4HyxqFvPR0ZpVW5cJAfylKYSLFOuM3A6+fbdYd5Yj/Sc5VR7ywh8k7RbmHAOE88HozLdG+2QdkSGxx8WJ1e2KeAecanIJHwCGrxBDKmlKWK1tydBoTdHe6zMRm1T3M+oep0MboSgshtNxQIAhTvMoAVmsAJTADb2bgotbmZWoKaWzawRdmmECX4Ciovr5GnC8cvfrG0KCU58k9oMCM1cYwRhJHKw9UjcNIeaW4E5mQPxq5mZqfqEANOEgybyXnhT6vyLyCGRzWRgByb9ydghGUROMLeqmT1dJHawdHOhF8Ccq8MmtpAw7udUmGWkkhYBwPmvuGmu+eLXhdgM5g/OHw/Q2aVmvuz0LT7/OrETqwmSc75FOQMw8doWwUma0It5SokZRGgaUbcIIgKuOmW/eJXpi8+GMF3XdW27GrNeXe8O/WiGYoUkUlYIREKMkAKARKDRw+DgJIATwyHEsOwQEmraDTipOkyp1o4CMyJflwBVbRMz4+rq6oUXXjg/P78/bkEGI2IDgZyFyIK/B1FVU6gbgxxkhKHoo+vtO/TuxcXp1e56tz+okxIZeVFrms5MLYpiMhFklvhftIaklkJWVZpweCWGkIFMKdX8TM1mBrXK8mMmcyGGxDUTJxaRzCYgOIpaMcs5xCldBGaj6sHdwcSJmqYRkbvn5+M4rtab9ebUCG23vrq6eu8P/sAXfNEv+7Kv+PJf9at+1cnFuamGWda0bVX8o2kyTCSW9Xo99P3q9BSOkq05WX/TN/2hP//n//zf+Vt/+/79+88///yv+DVfZbkw07pbxRb4JHbLz96OTtMJFzVtA6AMvZm13YpSQtEffu/3/42//h2u9mt/7a//j7/x95089TTcsTu4HKhZy0kH+Gsf+sn3fO8/7of92Z2ze8+9rVmtXr+6fLC9frDf9j897vv+7OwsFxYIEqu5DirwzXq1Ts0P/8j7X3z5E1/+5b/69PT0h4VzGQk8jiXjjIhYJKUE5pxzf+j7IT+8uozCEMEwMLCDmdijrgTVytr1uUhBaXpQEwpCXOzNiqBETk7f6IwQg5mXBZsSsZJNdrYCC+K3qnr1LdyYy4nS9PPjFsiNFSla27axSqjqMAyH7e5wOAhxAEWvcpFRRB1ubmXXcCYbt5e7Z585+cVf9Lmnp2k4XL780otXV3m7dcJBGpg1PbkW9Dn7HIdicYKpZdWga7oTfFLJipi75WOROq8GNzNX+Fyh6dzPIEjUTYyHExEjj/wZ96mugjumivM+AcJ4qMC9sUien59fnJ5tNhsi2u/3l5eX2+12LDZnAZhZ0zR37945Pz9vmiaQgI7a9/14GKMbp3caoGKxzk+LeSll6IkFyuKuQxaQjLl36+nQ96OuO3n67h2YXT26DOGuflSQMcOsHKyZa8+qahr6lFI47LpuTURRtGneHSy8BEamkVVPwYi2uq7JrFnPlJK0Fa3FKhHdp+ZOKSVGzYwEQZmo5qtb3b4qcguCfXVYMFe7S6f9imVySFU/Uo1DEhHgRostD+QIwaEZaBpADHPcxIQAwWxmkx7He1CFIqjC0746fS+C/aoxrgJzeoWFTDeuf7RdHI4IimIuBj4hgmOQcEYEb1Yz/E3faErGuEGQ+1k/5kvn6o0NloL3HsmB1YNIi2or9eumT0/fXo2WfwFP9PO03RKVIWSaYiKTsmUVtySyust6qdE/IliGO1RRRhSFF5jCjdVMs+VRLbuaao7VsieWbiOphRegAMJIdgMBvnnRIB6bAovorVffIE3JnqYwBzPcoaaqrtnVYM4OuNJMHzX3SDaZGhZ4J1JFaKpkNEOw+FmEA/74xBXCRJwMShEmmz/MiSScahGY+reK2mbKFupmM337TS6oL1RDl3BzSlt0mjIiMVkwAIKDMl9h2YM2yYfSMVSoYc/NlNT5X2YuU4nG+d5iX5xR4mQ2BTGaQjOGiDgJeU0UZObqWZejZzfuLWcrRU/PVqv1us/jo+vtfuiJpRQjIxI2NbCJIsSAnAAxWATR3N2YhJiFhIjMFZJSQ023aprOIW1qwGbI5hAQmGMJJOemYW346uqqaeX84vTRw4MbEqOar0TmRO5gJnL2cJA3lQzAAtNhGB4+ePXizlN3zjc5DxESDSgJWGSAEzuxRCTX1YbxRi4ogKhfOXdjvDBmBsicSkEIybAwUxMRQnc1VWd386JmRVU9EYvI2DVhwpBTQjICO3LxhhpzMoiREgDnXJzc+o9/QkT68ZWmay/uPNUXHdXuPvX0h3/q27/zO7/zi37ZF/+23/bbvuwrv2K1Xo/DAKCNPMbg1jG5I5K7Dn2/7lagajEDePqFT/vGb/yP/g//yX/6gQ984I//8T/+R/L4lb/ha8DQw0H7vl11T5jWP5sJVOf6wtORh0PTNJRS6ho4Dtfb9/yj733P9/7jD3zgg7/5N339V375l9977gWkhGGACNZrErXtw9319v3ve9/3/ePv/b73vKcMY2qbV19/7WOHw6uvP9juDyxSHMRMWTXHtZum69bCQjA97Pb7u0/feeWVl56+d+edn/bpH/iJH910bIY7ZxfbcqZV/bL0fb/d73e7Qz9kBampmZtT1JhhZhJmUpumvNZoR0TttXKkAQTPAUxEuqCMOtW9eg6FeM0MiXFFAGguqjNN/1sIcP4TTRb2dNywWBZvTdv5jUibHKZWrq+vmFnHbGbMiDiYuxfTWnQQADz5PrVIqQyDvvPT7vy2//lXf/Znf1oZdx/5qQ995MMf++AHPvLa/e0wUN/zdp+HXjs6i6Q+JxhYHYMV09I0baxdcW9h7JiZmjKllJKDYyKKkDAYoaIUPheuycxMc+r4tKDVR9RipZRpnZfoTKaU0sJ35k5E6/U6pXR2dkZEXvT+/ftXV1dRgSZkt8ZxJKLT09Onnnrq/Py8aRKAvu/HcRwPQ9/3OdeKhfNHzCzK/AXxNXoy7pkig9FNSV3NkPvca+lTo5KIaHzu+bf/ql/5JV/xFV/x3X/3733wgx+yotvtdr/fmpe+L0WZSNQs2GElu5uVoilRk0JC7bibBP40BROIeK4/UaE4wJxQ3CZ6zvypOkimgc3EMo9MdxAJSJ
LMH1gyVipovBmIrltd8GXg7ka2dE9MYriL8Tz9QHU+VczvRCEWesSEvjS6J/AWWJCcJ21SqgDP3bBYvSs11Ccfl01oEAvMiZtmnFEVF1hiwrfaW622iFzjZlg5pBDosTMBLEdXSPDNR+oscHbyo1e6OnHmzYJmt46jTMFtxnF446YV+aZoS2MjfiUieJlyiQ1QmLoVMoUX8wJTJkeoC8KIiMsBalAtZTRV10Ku5NYIWxm1lDIOrjlWdHfvTVZu7fqMUOCGx/Rn3+TtlkLXDUhIxHNHORC1lYCodWdlLKW4WswsuNEUelteYckUnVyi5jW4Xs/BIuIHHE33MOpif5xPmM2taIknP+gc/HkSIJxOnwJ9893E5W4diZSSOAI7sk2WTxLK/vPdz5GBUIKZbYt4JABmFnrocT9zTy3RyzF6uUCn8/cycyMpsmQAcJXOV56FZKa8tflqZgZhOHeb9erkdLy6yjmreWJ2cmKCs4VSqBd3KqbumpmFEQoG7hBphJFaIpB56M8xc3Kwqq67rlDJUKpa4CIuQgKPPD6/3l6WUlJKqpkgbRsRVC6x9Vv1B7EATiICMjdnZjC74fXXX3vqqbvrdZcSu7NDPch/phaBaRDg6ho1tEXaJdp395DxIGI1RGIqJtYuAEctFKbqxRVQd4WVlBLMJWLLQg2jlbZpmh6jFYVRIiZOibiomqlnJ2Zw4qojWHRUM72Au3vTJne/uroyFgW9/PLLq/UJgO/93u9973vf+zmf97lf93Vf91Vf9VXPv/3tKHkxO0M/VolkvVrncRRmSSmdtLY7cNOc3b37p/7Un/qWb/mWv/2d3/kn/sSfeO3B/X/r3/7tq9VKfcqSXSLAT7rlPBYYrP+2bYpPPnz1tXe/+93/8Hu+Z706+WVf9MW/7xu/8e6zz4EZzhhHgCAJRfcPXnrf+973gR/78Xe/+92mJSW5f//qer97+eVXzdF2q3azHrOyCIhee3i52ZyuIMJdooZSalpmF9P06OEjM3t4/8FXfsWXPX3v7v3791/8mU8kgvJ6HMvhcNgd9n0/HIY85FGLU2ocJE1qKCnIzNSNzMDViRMxPTNzMJkxh379FJx3hC0LoMowTv0W8HJClZgLNsRfGWzh47q5pUf9nHmaH6Nek8zVrfOXi9ryr8MwCFGwnYmobVszc7Ugdrq75cncJwKw6ujQb4m8YeyuX3v55Y983uc++9lf/Dmf92Vf8LEffN8//EfnH/mpF6+3No489lTM72/b6+vrB5ePtru9m4ukzaZdEYZhPD7jRIhwVScncUmJOJEzuwmTCDfisb4xM9AIF7CScD+q+zFNxmvJGdMqsOm1b+oyXmduTNImpZOTk4B5n/jEJ/b7/f56G8nVIlJK2W63nNqLi4unnnrq/OI0pZRz3m63EVcc9of9/hBlG9q2TU1UmLyRAiBJ2rZt2/ZwOPBUtYIdampmVtSaomPv1J9tVmfnbdusPu8LPufrf8vX/aIv/ELX8dPf+UKT2qhh8errr/3AD/zAD/3ILnYBACamlhGUfK6dYzbH0yJexyKUpJsKZjRBlZ3HDDBn4QNgM/OFD4KIWAQkXvRIoWHiRCxMInCnyoat+myxtNZNaAKKzCxEzqSzJNxi3L5x2dQnt5rXvSDlRTL7zRFf43UxOOL8CWOae3iMaSpTwVN0hW/GHucF7pZgzI1f6abp/1Z7q91qCpe6LPyzfbDGIY7ZsMepcnPIHbVJp0onb+oWGQS3NkF3T+yTMaJeStHR8uiWx+EALY4sTBzlh6FC3JSDa9Qwy14BoRHsULJ5sZzdjEynjGmMI1KzMi9SfVyKyt5/UwcGH2+3eqQWwpoH9USS1KwVsKlaqTpyzJzHkczhbqpe1K3yFo9RGQ+DQiurFz4bObOLdorfVEA4wzSr5RUmmYxJ+QVAagWqpZQSLkfmkA0I1qV7SLxE9b+Ap1MEgAPx1tt0EWmaxI5SCtR5orRGxoxMnFOajEWHBliYBIs9OikREwfCNDPjyUmZmsa05JIDbbIIAQ2LQjFJ2LPMhrgxc3CcUkpNMzFjCaBkaqqZkUVEEjHIijayIkolm7tLw6pDLgMzNcNBFE+fnCSzR/cfDP3InLr1CcbiDncXJwHYrC5p7Uo1Q2sxtKwFAIsMw4GJUPJKfLi8Pm+bc+G14VUdVVWNIC0nYcApK7JRPpgobUZtHt4f757eQf7o256lV8fTGADuSsTEi1AJ2STAAUgKIY/tbn/oh5P1aaJHh/0+pZVlYWItnoRZ2N3doqglMYtTFD13CrZTpWVQlUx1ZyEjFC/kRMygay/szIkoRbFHU3cnLRxYsL5d73U4lL6rIq4R5xw0xjZ7qaYWWTWAqeWWE+9KP2faeFZmD8tvHPYl913X5ZL/yd//++97z3v+zud//hd/8Rd/7Vf/us/6rM9Kdy54zGgSRvVSaL0CvG3Cta8gHdtiyE3TNHdPv+k//V+v7m7+7Lf+mXd/199+1zue++Iv/VIRwViQkpVcCKldFRhR6nVcoY2tKCypmsQTOagO1ZGIeCaJiex39PGPf/yfvOc9P/D9/7RbNb/ha77uy371r7rztqcxjtBraIEphCDSv/iJD3zgA//f7/rOn/rIx15//fX1ep3a7vLRdQd0d+5oPw65jMWsWONU+kxEF0373KrpuubkpOsi9CroR73cXrlp265eevX1Z557/uT8rFk367ur169ee9CPOWut061eTJ1IVhLxXnfPPoXinQxeyhQxBpkbzdAsmGlV3NgccHMgu/ikB1B7KWBdk5KW6s2iKcbi7v3kQpqY7qFaW6Jw/MxUR5VMNDKLLLJwfUXZtwrdfab3YYqJOMlKx37d+XMX7UVDNmx9HKXxUqxpV5JWpaT9fj8MmYhE6NV1lw2SZNPg8pq+6//z/T/6gx/7BZ/xwnNve6aU8bVXt9sH+10/OBOl1CV5YbVpnn+2H59+8ZX7L792/2o/jGrmYu5qcCSiKrIqYGmaYb87OTk5Pzk1K1rG1PCqSSz26OHl5bB/pnl24M6ZHzy82mxWAqETGYdhHEciT5K8VdeSbYwegSFqGrCJO5kZjDTber25c+fOarUCsL3efeLjL11fX6fUEqVi2vc9M5+fXzx7dvbU0+exH4zjeP/+/evr65wzMw/DsFqtmlV3vr4DoO97Z82lqDZ1ExEGVYGryXmHouYQY7IIWTVuDKTUpM3JyVkj+eJU2rQ/3VwDH2b58fPzT6zXJ8+97QWCfc5nv2N//WPXhwNJo9rsd+PQF9XVYV/2u7xetRMmdLPi7gRWVWr5UPIm8fawLz5m26ln8g3bBiPl0ZQulcrJuRbL/ZCG7KCmTeiogxIH1OIq/xUMEQ99l2EAjJoWIfsTa5AdKwrGcEWkWFDoghLGUWJ/nSjTIgKIa4kVo47WG6gRUfeeOLJ7HG5GU9Jp9f2qu8PduAEQiQkxySK24tA5kMhOk5Qq0RRXiYTDSB1yYqEmuKMLuY4ZL4bEbeBMCW0br3NKqxnhc5lQX9D5jleb8fBSWQGAL4TN32r/02/z+5rpyFz5h24o5oWqC
Y75py59FrhHjlTr2BTagrpTR+vj3n9tdbM4XPGfzHgyhatv5qYCy3KsQb8q7tI+Fmh5uiuik5Nr4iMWcSZhGmZrbKKSl6eujutVbqDSE3EAfyziUTiyc23PASY0Lmed5NczRrLaXM87yf711e7Oc5TYnf/NiTz3zm0w/v3X/69MPvfe97T5+9AMxc4/R2Xm3ebXEBMM9zrXVZbtEbzJZSiJJ3RDEnYTZ3L0UPu9xCqFG2rAq4kYXmi6R7zHeT6qp3q78ayoPMrFbrWjm5UykKRB9TF5Gc5tjHOWegjshuo1pl7s/n7GhuYQWo627qgNFGoeTNXQwPkAhEbopelYCOKbCRxkeLwPVun1Ej3XEEG7oXZgbZSADy4ECK1O6Ge6AJmx7AHtbJWKjjX/QmE+7ux2N8bWBHm2Zc13HF1ma3k9Ezc5yhfV8tqgr7hXwYhU61/yrIRDtZLlrNKMO15w87ZWhHkEa7sOh73goOLU4anxJFdeh4f6SYWoMKAnX4KAN6njMM+9WIxg8jF/oPy4h9ZVn9ljwIJ3hwP4x6HcI2dklns43zJCE658f2C7+Vju1uiq3UwdBRehRAWQKSpEvs7uf5kd0+Mzw3J7GCSjABE80smuq6GIxdhUn1Busz4B0kRboHQFBFyrx7YGVle4rdG2leFrsuei20gp05gQ+gPbFQMk9Hk2u5vYfdvUdP+OX1y+NS1/WY58u8n2ErwGBxFzJvVDEEmWd4NVgERnXUlkcbGWYkQRb4KVYbflMIgh86YPQHno42rZ4BOAzmDiUyRwUpeXFU+GJeYAVkaARGClWwE5Tdo4ecmzpgbqGJeqBa/cTscNdCwHlkFtvyDaJSStXW5o2Zt20UWlCrJ+vMzOFm2jgZeoLBNix3d/J43R09MZK0SXAvpQybhMbFmgreqICNDzlcyvEmgNGlmrbdMKgTLaSUmCm4AvJ0ClR3QwRbbRFk9s2dYEIwH6YoPgnrRJijrCwsBiOCSBSrFMCDrWRE8ZkZws7kJOqUUs7zfnbKeY4UfEppR2nOU7Q1FyJ1T2aJ6+2tuKmqiWSS7M00FR5G+WY6mFPL2LEwaqNipyRCupYgiKOUiTw6heUs0bUZQOKgJ7HgzchZag2zyTr9nbtblGsywBwZxbb4ap2JxIkF4kBy5wxJYDKjBeIFuF5ulrormp+9+O4z3Kuro9yQrV6PL5+9d/PsfZTbqtA5WU1eKhtcjarBySUZgawCQcUeWcm2ymsNqFWCwyxStWKNSUg0aa3V1dxtmqbGKqdWDCKVmadp+vznP49pRl0gOyEOxNSZN4iuFBwgFGACmLNBLi8vP/vZz37wznfNKbF40hpdT6KWUlREanAej4KZPpV6MkTC0miXMlUmSsGOKNDagF8iwsoc3BhAYlZSop59JI/FjLBsiHut6amQz9WsKm3K6YkIHca03XXUbUjZeHehCRnk1PraE21DrifzrklelsHa2G9d4R6WvXUS5ODnpaABJGMnwARczcAeze6SkNSkXtx94sTMEa7IJAYtywrzeZ6HQIn8UgymdyOoqGViSjl5byfgcEKFGSEq87yiFkzCAuHkvprC3BnkAcenaGAgYEtdrVYd87MVc1vTf3vEd/h1imjL1DKEWOKtNOxWOIF75ieiCxsdc2LP2j7o4ZUN0RkDtnrW69zMCGdfO42ZWUQULX9qMKoAGh1LS7XlDEQiic0seFq1c8BsFwmllmxxQlQ+wh0ODpoYLe7srg0JbRVIzBChnNNuN+33M2CllHmas0S6XXa73b2L+fHjR0+ePLl/eVjXlZnRmq+0/dfroq3f1mm25zmX0hRKC8Op5hSmo8VowVRNl2W5d7EjySKiZqE+QkGFtzas/AgpdD/HtmFRtKCJuEcYrgk3d69VmXZV1/D6UkoABRQiZeHeajX8Ma9VVTlJgwlsFg/R2WIYS46o9zUNg8A9eIDwiqEQZ9ANcphoY1t7s0Viygj9ceH0BYCEuDFxE2xdY43G5WJxI0o8iADXXr8b8qFUHffS85snflp359YBou8vPfXqbLyi3FhwALieNRxz91PHFPeoNQR64Ks1/jGP0E7zB1spCsOcA5bW/ECCG3zTH8I7m2hMOvpHipYVDNujuRYdshhBodYCwE/Zqq370dxCAIO54aMEzn/Gj9OkvAbqv3UFz9zCOLx1lRwuIjYVhiOF+1usnjOsDe/sStFFrHnFzSFMEa6g6WHaP6jXl8bJWMBmttCaKAtE0kzVk2l0YnOoVX8mSpQXpMfgHVCoXJtZSivyFVJyXBNpYhYYvIIJNIEPJC5T8ewMA3bYCUniY1G/VUqSJkwHOypByNiRqocDWFhXSQIzN0fsPiJiFkjVoOCrUTeM1k1BiQRNpW6DHf9IffshcNDEiMMK3JwKuzqK28rsAvXGVhU9cSq5+bq08KK5u5K7usJ8ztmtkVy7G/e+yiOEdu6DnZzD7llRc5qE+NTP3IffpUEI0N+3fsTsuZ++6YMRtIumcdc9yAsM7ds+bbQv2GCsmLkLXRoac2uTjFNt93VwecvmAyYCcyuG6bdu7uem7RmrgcNJhISFFZECcHdAMcxjNE/S0YBVOYtZOGPoMCFj5pwn6Z15zUxrQEgi6MuBjQQovJeUEqxqASubmasSgdyIKMtJ0DNxQ8EQezBHOJxan7dtloCIhDO8MiMGWYrWyqqamISlcgVkSjkmQRr53oj+6tCmQpGjaiCocDC4h8ioZydWmzPnCKFT9BbP6mr5QDnLUW8OkHzYP716/+rFwslvnr189uHLF8+vtfK7b397qQJdP/b48tnVtcAImqBMxAEThNOUW1aELFgwAso8zzNTIpTSwEVEABM5uOVhzLTU1VtIRkSIWtgDkXjhRCSPHz+KhyUtQvY60UBt0QKto5GDZJre+sxnfvZnf/ZLf/8Xnn14xQIGsZCZgb1lB9AdH2ICqZmqRSFhAgThu0bivtmsZtW91TVR9Fon4qg57LSigcqLmpxRjBcBjlgnwqfc8pn1b5YE4tFkJDD3DFhAr4UpmBKjG/PYxm0A/fVog3naipteB8QNF5pylw5Va63cEuwW/CIMhB+S82RazbrLjHBxLFLWiTgJZRZPILirieSU0rIsZsYpodTb29vjcQU3twAsxaFqszfSS6jVWuuqKfE0ZSIvIWHh5nA3psZJ6W5whYPNzYnNq9Va1IglpQCPORPIGeK9SmrMBp/THY8tudUErtbzzCfp5pFifcWiGwa9bShqRER6zvAUq/POL9ma0La8LnUPH+7U6pyDm5UBUGjS86dJwJ2VA7TybmZey2oWPRA3EKwAo254mz28f3d06k7aHCwgyhuhDbPab5kN0TUH7RIOmE/RoZAlsUwpTykXXoutKXDw5EQ0Z9ntdvM8p8QAiF21uqvgFJiMKB71cFifIvOOeaZNswcWMAdNWWNtcSezRlTr7tWsaIVzMc8JRLSuaymFIOHp9Tk4bRnuCfy2QswDa98m3BlObhBhaHj94o5SyrIs6yrNiyMfOzpekERlGm1nNeZz++edT8efWhuxjUz5pID74knCW2
bU7QrFcJaApr2ZBbBSiKx92t2/EadH9yrbQt1sBwphhGav+eZo1zR39xMPrTtiG8PdfZLc20C1S5M5JITHKf1IqcvYVm8T4z9NCLXVNzrlcN+PAzUtFMRUcHi4B1GebN1mCMSF9ZmijcvBARLzFt/buCK98CnwitS8Zn71ay08hPFcTnP4fY/fenmtH9ZBG59wrLpXDjv3+mKGt35+E4m/jqn+J+7YZpKb4hA4AW4jUxoStyBP2NH0gOZ7PO1pShAC2XJbd16QBYnSnKxMXqiWAjetN64memS+xu5QzdZlvXn57psff4x8Bffb5SXN0xRZHFvdnWwG9kQukjwdCAfk+3DVcnO7OtJuf/GQp0uYMO8h3J6GVDcyNYOKWex8hHGYU6ZMIrKiUVp5YItqdBEHBbe6NJmEYBD8IWYKX61E/Yhn0a5YCQo28gIoaIUVRgmdTWywCnMEblBVbA3RD7TWWQ6nCKN2ScveoxV2inadX/oUKBw6uZsr2EJ2wot2d2Y2w4CKuLf/GgHpxtnT3slwkAvapp/wcBq3HmnI8/HR5tK4s8mGfbW1nYYJZGZp5OLCGwRO7bnRqBdAPVYa+qDfuVNvih3fFOZW+eMFirBra+/uLYmq1m4YDQyMjemIdijsxm4aQV/FsizrWplZVXOeSqmllFrMDFaMMs0pRXSWyFiougEkvUlAkCUJSFsWh9zczHojDZi1APWYmqaNGscmT5JUSlRLMZO4MFHKfHNzk4hdhJlST5YCspTVu7vPSQAER86YqG3pibvnfE8YGUxaxVTYCAYHkagWFTNJt6XU5Wqiev9wIJmeX9/crKXW9MGz93b7B2987K0XN7eLWp6niPEwyJmC4rP2dvQAGo0KKIaXMwNA0VLUzdiJSQwtxBvVqNabnFxdXSVmqBNRzpNILqVcX1//2je/8eM/fSEphatbbWVOkVYaWUEAjXK264q6llQr5+nJo8f7/Xx9JVap+kjyqLtUD3RxNMNM1czUaq1gOm04Gr2wTjuhmacu3lLZJJJ7xi/2egvYM5CFEgPSqrBUS6QNmBqGWURpAxmNejCKZLGRmLcWFA1wrCfJYg5z6s0A1awXizaZMpbcidOIeLBWWufxDw8wsXSTzNwdncFjSvlYVdcVIjkxg9zI3UQa2fHQvswxqBhvglDKk1SvuK1FcbPs9zxnqZyKk5tP5gQi5VLL8bZULUQJgDD7YJzSLlxIyNwqz5zibybwlLjyQqWqmyqInAgGhZs3AzqwhUM2vfb1drpe0Qsn6bZ1G8aLEUjzAXlwsLRyu2FDj0eACFOFdditanS/nYiCTyiwcYwTU2iXimfNeLrYVOsFAAHcJ2uBKh4JXhHvRCwYcQTmnKR5710mExHLlrw0ZF3ziKTD75kZ3OT2CXBxjiokIquaL/aHw26/30cbw5zlsJsuLw83N4coMGBO6hR8m1Urn/oTcsQpvCeuW5l0IvNKoHme3VlcmITIDV7UDEkku7fGhqUUJLbgOtsY28zs3uqcx2ibntqUkroTx+bjHMKVWg/S+AKFuWMGVS+lHo/H0EctdEMt/rLxBM9UaBcpwElBtOGNJ4VNwAhqJNKgnh38cwKCb1ZmvPRQ0gPnSQ16amakStY73fdbJmbQmZ8XJ6yl8IZwaCBIt+On7lFHGNPvhr0BAFMfQwR8hi/aw9ptZZqHkhyqjSiq0tWiDHBc97RKmcjtxNRlANjFqGmHnjUKK20she6wAWgNpOLN5oJ6V1j9a6UnnTpGlCi66wKMNl1nma/t4/Zzo/91x392HUK0JUR3cKA91NuYVc7hoL3JCUINobedtN6J6B/h6P8hHDYiGb1RS5A+qWOiGfky7e/L7pIyIO5QXWxBzX7kPKVpcs7FjnWBWGE34MpwY7fPfb2oSGq4vn6xHN+f8yOYuQnpQ09qyAwGzY5LMhActMLVoOw76HEpGXLY57y/eIS099Uo70EMBjsyFxOBTvAEY7C5CzhJnqZpImQoT5ws58gOgRrvYDSnjhsnesV1e11M9jc1oz/4GMB1aj0GK7yCKmwlFPdC1eEKqwgpV9xUTTUlAhBdl7kRqrm717VwAyEE0TehkWii3+/23vyOUB3yX7V4RwO5o7OM+jzPqlorPOJ6RCxwdrUFm5IWdI1DRAPUsBH2HpX/m+xi+8nwJ7cGj49k42Z6Q6R3RGuL044BpHE6b15sW9Yi4mjlkoRWUDH8y+1l3B2u61pySoMKggFmyVm8knmNZHq/0XYhESISVWWhibOZxJMDkFnYUdzNbF1Xd1/XktLClFS1LIu7CxEzT1Oi0sjWQmtFCpBchWCNsqLnap0D8tToJQJQQhz8B2G0RVMtd6ca8NfGMOuu7o0/xt3300xRZsYjZk3dOAuobXMI46N5nmFtZsKsbDOZnYmZPBGxUSJQlLsq3ZZltz8QzR++uH582D96cO94vLa0q7JXkWPBUZdHj9548slPf/CVr2F3YZKXqsXVIYYo2+hdiTmSGzIOhL/mZFZV3Q3EYE7VIuVFIgOzSLlHB6IZ2jQh55mZmdLTp08BgGGmkCQi1lRAr4tzePcGh92R80RugF9dXREw53y8sdYSMDoWkxGRs1Mr0mnpberMe9v900kIThvDNXrR5LGww1+LPAY35Omp/I/NN5T0il77N1b4yLAnFuvGdxjccaSeM0H3weIFd4uzBMHFub3Zb60NOxG7k7rVombVN3u7xsCsMVg2kF8vNVaN1oWpZazCB0Dnm2pVUu7ua/HiCginjJTTjqdVy/X1zaqWFNVX50rJCaszqovZ8bYc18JEO84GNm/2bTxfRaPJajPZ8rDcjpxEpLpd3xyNIonsRnAjp7uSpE/dmfD1j0Bznb25wfGO70d8k3LjZY5fEDsLWCIr02yUli/eoJzvDAAb2Of2KkS0NR+H39IG1X22sXFUNSRVe+4i0peujaQoIZDGAjGz+bA3gq9hhrO7q1WzXkUAHV6NsDAT91DlGKF3GPDAKYxiwpRSQIVrrWZVZJ8l1Vpfvnz5sY99bJ5nj840YJineQenzHlzj6GYrHZmFDNzKPM0ivTM2IxI2PlU3EjCJIHQF85OLMYkmQGe5xlACOroDmgGEfEtacrm0bgRpUArhGYHc0qJavFAvkR6KokkmWJJDhnYJX1IyGHCni023mQIx6X9TqkGM3X1FhYGrKWF+05okSA/yz0SQCIjQniiq6FIArNHTKLbBk5wrbEUaIOZZ4AQVXOxIlsG0UEmMm0XczwAABoVln27jcMG/WwkD8PtMo9qD/QdcrIEepHqdpacjIlxQoduprSRNgwbyNhhMckIhHNskF5I2oGgvcNYP+V5gmrryLkbWue01skTRGaVSJrbgmCfx0k/bvf796/7e11M6s4Z/v//uOtEg09NCJs37qeyw60s3YJIfysdGw/k1cEP64ZOfxAUSfJFOtyfLy6RbhWLkzF2Xr2wipQ0XZLkXLMxQ2+BW/ajWYUms6PTIdF0ee9we3wG/u58uLw4/IjaYnbNNIMPxFGtNMOZrKBOSuIrSdpPe/A0iQhNF/CdUyUDOKD4g
eUDJMMFNZGReI3msyYZYBghWgAQ4AoX16JWWqr/fKmfqCF+qPN85+isFK9+usLUXR2FvUS7ebcVpoiG4daiaaE/hLXJfCN1I/MOE3Xf3ot5BPZHBg89vLvVBZt3bETwY1jbiOH2xTjD5h2gB+/QTQszGzbkeH8olDGknjw4dSkbJDfYRDCHeD/N5wgQd1MhjLq0sWwEqLThVQfOTgFEiP8sHG5m7DxCtifKnc52MM1cK4goCCrDgSaiWleRmajxUIeftq6r95aAKSUw5nkO9h0iTjK1e2Zel7ocy2625fYFgKgBdCZ1U3PFaNNBrlbNnRKTmJtwHgFyQdDJMHMzDVTVpXG4hdY3bR1Xa63SaiPMq15cXJipN2MojANntFi9xyypBd66BQzQYr3bFVbpqMQJzILEJmTcOxqmNFXlF9d18nxM+eU165Lf93JL+cb0FqTTheeLwvONkfJOSVZfV0wAFw8uNk5YghR5PMrmMhkBnXk/OEXACIfslOggASWRIBVclsU0UhxrzmtkPj98/hTCDrtdjvvDwSHOPabb8vHmBAMU5kB1yQ5iICVYee977yzLMrLkY/kKi4iLiDVWW/UokxOxHtLYrvVm6hGzCDOrnnIgA4085hxEAq5UeylQ6zU/GsjQJtdvZmMjuHutq2qJNJG1CHi0O5OOsDjJrLFpB4gOesoWtEXYiW2gVtE6XqgG92lzX4cfEvOSc07csgG3t4uuYd/3vgU9I7RtVNayi263pfpScs5pFqvulNK8x3GtbsdithQzq9GinVjV6Fhvb1erdnGYZZpD5zGRWmT8NK7CJACbGZkbwb2W6K0tnNM0CZtC3YqaxueJCMLcxfO5lHxF6J/mc3w6Xrg7/OxXQ4aOf0duLfMpixIX7cGc3h7Gz6SnexvfSAwyceR+W6FSj8zFIxaRwJ9EK3lvUTY9ieD+KLnDK+JorF2dkic6HKMDg8e9D6cufmVeqbchiU0aXFJtPjeu7CAf2+oDEZmmyayu61pKEqFpmhx6c3OlWuIuzCz8mZSSG1JufTVVi9mZh5xzTrmx1/AgwuGInjszc0o5D6K2NoAMcslWHUDdMMq20b76OKyVg7f43SmASiMxxdH6iLnhThH4/8zE8zyPKQXgaIQ0eZ42C22T4LrrwrUXXTKAiDgSd7ESwo4w8yAv7093jH+7Pk+LuhFwnERHtLNEeKotTk8wo+6JMVrfCN9EfEGtzenwUe11lgEiNbmZ2PaCqGWwx0f9TrkrMj+fB7jfmRbq2hOkzHwq0vOTQ0fdX2tjg4b37u5E1lN4UV7RH0IHxgKg0VgVcp5CPH0p4FI+fEIEw3P4hIGEkvBPXnkiv338xg7+qNQQGXwoO7xCJPNbj1dmsyC7Nd9iFgYyuHSULAAoIDxjf29/uCfZlFZnmm1fUcLCgzt4oinv9lINpCtRERzNiWEEcUoPLh+sVkArKAEPhBL7CyKGHwCBC+gASBY32rvvZbpBMqacMAMAMihznqwucIGyR1Q0gjUEQILrjbVZ79EyrosMQwfaqKpqVfhWTP0j2zNbV/DMxPLirg4lrw4lUvfCbmDALAogw/4VFpBAjwC8U7+xN0KHxBkwihBuL9rfCvk7BsZ2DNYpi4koT9JQdXQqsWfmjVyVRigDAK35t28I1UN/qeogsGA++wI2Js1QZJE2w4aThplF2EzPB99eDMd16IX4N+Fc4d0xtZmZeukVbQzTrQ3n7gTknKlFgExEUk/qnGzZAVJq6Kl2b3GfgRxTVbfRIoyZJKXpcGBmYUpBcbncLFdXV1d0FTbQxU6CwQxEZrZqNStk7mpCZISqphWU2mQNl5UAeHB7uDA4yB262iMiEcosNxoGllp1JxPJ7l6rirCZaa1huItQlhTIwCDwbDqm9wde19XVovqLiKTPj0PD22VIAqXIYxMR0b3Lhy+vnl2X41uPH7/73vXbL995/OjRO2RXN/ritjrtTPjp1RHPrpTnSgZPlaBUQeIeJbJMejNWLfWSDDNjSrXWUqpqBN+DRD7quCDkHB3H+0K6urpy95ymnDNAy7I8+/DFu/t3f+lLv/wH/tAfPjy4L1qrGxEv68pTIurYkBY9azsgU8/ErCvK8b333iuloOF4/bQBmBvkWHsSvLlPcNOtxUmboxGCgmyTQ3d3s9ILZe9GmMyC8WFTHdH3JDNzi8e0VcHMpSzhCQNBJtTSLymF23DXG/QNNDEEzPZN5hYuVVUrcdqTUTt2MvedMsxoYo7ltN6u7p6nBjRVM4KHV0zdF2Vm62RZx7XWWndOEwuKSk4GXs2ZZVWztTjgYJdk4FoNpS5LIUJKU04zkRpchKRLt9i8xCKlUQsKUNy0qLpREpacU3rw4MFay+26LGsp0WZu47NtDeXx71bm3pnVH3gMG2VZlvE0t3a8jHYX574ldfzGdmBjWW5WVEPKjTOPqwQadl3XdV3NLOccdLXMnHNetY5rmRmsq+H+5ngRvp/1oy/XBj3trKSeUpqm6XA4HA6HaZpePn2mqg0Qwj4YVmNsUTk26maZ+Xg8znPe7/a73S6k/eFwePLkCXU/c5qmnOdowaqdMy2GFBKYmc3s5cuX09w7wahGgE9VOZM1kCenNKXJwBSuZkA3u8ek5mtRDxApIRpAUetou0m6hq6+8xTGHBKiJJ6IXCgPKTGevncY9thNofmHwgbC/xo1b3cXIfUoTx+L+yB3if9aGBHo57FNI+OxvNv5a6Xw95iC73TEIgdrQ1uN7oi6337zbK4dtMkdTKtRNwmED0mADTjrnVAxnyZ27IsxY+P24yPvVazSCWbiX7azBOO4R+uB3YDmdh8WRBsfYGgI4IQCHR8G3ehdEq/Y2KOJxV2nQluPO0fwh0d1U3DHt8mHw2gQCNFpx/06ZctvH5vj11vxtT1+i071dv/GO5u7sPaV7R8AwEhz2s2gJCAkJpuz00pHU4S5hTRhntJydGdTZaxMbCjuxW2Fpf1hlmkPY2gGzVHtDBN4BhGwA2cQM2XIBLwHFKA2vDQSkMDEk8PUKYESrLRWyebkVquVomTeDJvYEV2jQQRyilS22yX+4SUF707z6999RQ6P/UvkpNbK5anjKRovS+vBQ4PohZjcQsoF+4s0PjDAzK10y/mkcIfA32rn4bx1DdZI/lh4aO2oehuT1lUuDZprNP/orLVy2Gyq2hmsm/U4ftUt0zN+uyGx4wgEGRHXWsaYx7H9iXf4W7yTXBctljOlTKsWSWqEdS0OZ+aCmkk4ZWVZFUQ5s5pZFklpAgzmQikCw9HvdZomLctajo6cp8NalhDi4Vj1khkb80tgN4vm70nyst6aRZMrWtdVi0U74XU9BkAu7/KBDqsv0zSlvVj1eZpznhSu1TN876Qgf/7itjyvWkQyiauT2qpWrVhglRAZHiIjKm4JPiV58vDy6tlTUNX1OE/58v4lET9+8vji4mLVmnP+3rvv/NRP/VTKeZ7nMPj2u11wflweLq6vry8uLogobA7qJWdRBXQ8HuMnwXAAIKW0Hq/XdY2PilpfN7neyG1x9gfruv/qt0rO837/8GrJ
9z7++Prlt26ME+er4wvONx+8+109LrKs87zfE90UFQGEb9cyT/NCB1WFoJTCIvN+b2YkUlXTvCu6lPU2Z0kpr+u6lDKDp5QUsOqSMzGbskvK6YDo4FcrETnqUuyd9+ruG29+72tf+dGf+ZmdFlwdcbjMUOgtOIJcyYy0GHM203kSsQVaQACvv/orv/DO03dulptSV2cGsoDh6sbkwu5EmXzJOQdmzMzEiA0lkFRwEGpZi5kTSLDoMiGBOeWw3Q1MktOyLK6RNGYicrO6KtQnyrUaol+1OyuJIzxhYU7EQC0RYklBRaLVPM87p7WaMjGJNE7ZWoygMG31e6ReDUlMqupaqodkIXKCE2mDVg0+BsVEQUPbpI6GWYUkBBErFUBka0VYVUtZzexodb+bKKVqtapqWdgty84cMC/kuVd4WXWALy9kXf3m+PLq9kXKOc2zuad5coKWouvKOXFmdz0eb5hZEk8Xs8CRBEw5zQQj1eUYqX4noqJ1mqYkFX5cMOec57RnrcfjcS16vLni22OepzTNu/2B0krHsmprEhMVoXLedfBk4PaDejSqeo3txpy8RlYt11oH54SE0+eeJOVJDnJRVq21wlwoemsH+yibuZoGZuQk2SVmDhTURBTZBYJDzQgskuEOFmJO05xzy/2qVg2QOZ8omkMeQjg8kGJWe9VlYkG1UircRSjnPEWv12pEbKVaqUmEWNUWFstZluV4XI7hqiU3SZxFpsz3L9KTxxcPHjyY5/yLH757u14fDgemXKySZK0V7AY9rrfmVVimRDYnK8f9RMYurLuJEilrmQiieshpIpApw9UKIfNuUvHqfploXVeGy5yISBIBXst6uNiVUqB2mHcTCVWbUmJiK5x5nqY0J6vLWmA+T0sppVYSLl6d+Hi8kTQT88zsWmE6zZOZVrWUploqjDIxgZWFnE/euDPLwuT76R7r3uo8yc7qsdSyHI2Z11sXu4TW9XYlGCc1W4FKpKqL6pLSJMKSJgeC/9o6A0rLQOYZrZa+a+LehImIglRZ3VUrNDwoFREIj7gDwjgJNIJ6+1WguNVIJnd388Z/HDzEBKYIyHW1XQqROwH1lC4O6L9H6zc6dcQiYicOmCm5CRBeXHM2zRXGHI1moKYiIinBvWjlVlzQWmPFvpOUwdTS5aY0MKnukoPEKoobqUlXVRFxtVpPVotZMbjwnoJY6ywKg978iEZXegKYyKCvOA9nOT2CATwSCNLSjqczNsiqM8BwMWKCjEADm3b/NhxLbt49JEjDxhUjLud+Chycm6cnSXX+ScVrjo9wh5ytM/z9Oo/faIptdBB/dQR3TuUOkFMbv5w+96jZV1CBLcDSJiZaElLtoEoCoGSIsvaWlo6irPG83HALAo2OIO1FnMHlDn77N3PHv7HDTs/r7EIEJxp4UQrz2N0zoqRSAAEyGc+EmYpiVaDSBPsR2t1j+47rV6x8L+G5A2ICSr4o9CUy41Ar3Zbi9XiBchDNSXOiA8vuRvZUd+AHkirye8B9x6X6ZAhw7hSLldmYdoQd7F2AgD2cgRV0A3oOqgCBb6keqZaOcFkh4qkk92SExaTOylSUSeajm5EVCxlBqD5VXj3V5CCDWVakhEIDfh1dtoJOvFWHejqbz+2ERqaV/O5+ueP2jZphougsr4BRS8kq4IYXBCDBXcncVeFGRstxFYBHbacrEcGB2kuyHXBt+7YtxijsJ+5lZQCKlsgYRq+BMTDZ9E8mIubcpJwGcSyYJYk4N0sg2uSOwGXQtqmq8IyGcmKm5N4yuJlzVDe22B8ogXLKNcRvyMyRDSTa7aY+kqRupZS6FhHhXkuPJppkdMKoura5ZXdUEFKmE6NDex4h80fjQWlAOAYZWipz5NnCIWx2XctIBNxOgsu7lELnTbTGDGIj00eUHcA0TVEcFaKBA8UnEp6V96yoiMzzLCI6GjS3cGwQ2DUIDaN1ZyZnNGLzHtO1Sq0JpExMOTG5mvvnPve5H/3MJz/zyU98/tOffPz48cc/+cWb4+0bb7xx+elP+bNn/9e/9Jd+9md/9tP/zO9vEQhmmOOdd+COt96K8DCYoYpawYyUYIZ1RUqoFbVCtf0HgLlcPY2g+LIsy7KsxyVoD25ubm5ubq6urspxCQ/zxYsXz58///q733nrE59cv/6NZVkPl5drqR88/d6nPvWp3aM3vvnNb5aijx8/JpK1lt2Br6+vPdPhcJim9Pz5czefJC11ubp6sdvtmPNuSrBZq+m6sNNhN9miUW4XmLfME3UcEffSERGR1Gym73zjG3/9r/21Nx4/Wtf17//if/K7f8/vefSpT8GqHauToLDMu3ni65dXu8MFO9aXz6d7F3D/4Fvf/qVf/AfvvPtdHU20zwtTR6ohHqX2QlARsWgMCgzcU6t16cc4yUj4oMVaWl6OextM01OYH6Mkl1Z2TnYCVCca0MK2dCOsQn1IISCYOXy8sdQjXzTY9hzk3rpI44y0jTf8ewSATh5RG+qABbrbneYTFLAIPQXSzKyn9t0iDaVKRPP+IJINuF3WUkoxI2Y7J0cZLUmZeZr2DBI4QKWoLsdaFq/VVBmecw4+XjBFNgl+6o4qIlSt1lK1zG7JPM8Tg+Z5njGvWkspS1mH3TlCX35esnV6mpuJ9U24Dtb4RwTtCRNRypxzFmJMLdcxUnnUEBenfg/o+RCRHjzrYMV4JhHf2S6t+De803gzBCYJAwi8aJtP7U+/S/hxRZGodO4kzG3+T7DPkZaMhHm/lonINOXdlC8OuzcePfz4W28+ePBAhIi+Kr0klSyseY6QyjbWGF1oVStSw9ASgZO4+7IsL1++fPz48ZgTU3XWlOd5zlZux6bgjnFV1WVZQmJYI4vOwgwgp+y2bh+iu7Mg5V7I55wSiNk8XC0dc6uqRGrm0cwwHvmIGoxzmpnqqbFvC9TKaUUR3V1LAAI96O4Am6lscKThmLTbjNhDSuP8IzYxRMFYjUTUIJDYLM5mWxMznwCWHdB7Z7X3z2BmoFOvESLyFjE+CbQuOogIJJKYvdHTcfDiA4jvY+BC3d2doiB+k3flDnk4yZ/Nar9zxCKO9dw56GkzfiRm7QW0tAE7DFD9mN7tOe9srniJV+RA8N6NTKMHhcfpETSHKj534AyaSMbBSutoT7/1fmFvycZIVHJPRMZZNbCp7sOqfSVfcXrnN5v78n+4rs72+HUNMTjJAJx899Mv3c1Q3SrIg6OcEDEH802VIPvJBd2c2TZNIGkzHN64Yf+40KQ/8LqvfmHrzTajZPg/Eb4DZ+I90YxFiAqROimsmu9Yd6A5ZXYlowquDGXOBAUp00IAYYZfg6aWTcEsMlvjAl7dV8dqXt1rshtYUV2MKkslMscKLyzmuropmVlVrxr1NbQL7LnBHFbbLmJPJNbZ9mAGrUWrWmPoQIfO0kdl9E6b7rXzGUHWjxIwp6MjwxFiyVsA6HSV09LpjSLCjss5SwwjWEWGODqdupUOAhg225Dm6BJpO0TaGGx2InU7g0hsddNG6Z8yittvekf6jO+Mqwx7o8nPTfeLrd4BENC54RsCUDt
hjkDDoOJtkF1EAGlt9BqIAxh9CM+eQeiMzWi6ihVmLmcq8DSFKSWLZiYd1GREBqQ+1lcU52vmy8zCAYgriwRlYtNe7j7P8zzPamWtOdzu3OBPBCIwyNncvNERuLs3/KCQcNaUDrLbWjNZ0pxyEnr86OHVhx9+/OOf+Jf/5J/4Z3/un7731pu42IMZ2KPW2BI+HT7+6c9fV//q3/r/3N7e3tzcrOv64sWLr375K8fj8c0331zX9erqijaMrCklM1vX9d69e3FrAfHa7XbTNBHR48eX8bX9NB8Oh4vHD57sdiJyOByYGCLIGfOMnMOx/F/9G//jv/23//bv/2d//k//qT/zv/sLf+HHfuzH3nrzR7759a//3b/7dz/12R+9urp69uzZ++9/sKxFRJ5fX99/ePnBBx+Eoz7Ps9ZVGI8fPbi6ulmXW7cg/wsmWBBRLbXqmnMmkXmeLi/vk+D6+jrSHaptmcaSBcDL9Ttvf/P/8O/82ymlb377W7/6K7/0X/iX/sVPf/4LfNjDGUTwUq9uv/7lr37xi1+c792fLi9Qytd++Rf/n3/jr/8n/+DvvXj6gat6PUVfiDbtizf7yjbA7rGExk+YyOhsjZkZsYdgDnuOO4Y5bNbu3JIIpcRErWXZyTQho56uG8Jiu9s9GuJJc8naPjnxmLu7R+M+J6iquRvBhwZ0D58wpGq/r9cgatw9z5P0iEytJVLNvDm8exFGDqeinuDd6AH7SXCklA6HgxPfHo8KEEkSjsSmuxOo2qk2uqwKMidOqkdVK6utC6xeXhwYHkhvA3W6HF7XNaYiTTmltCPRo1cttVZ1VNOUkqRJRCZhZjacsJGnCrpXxMUdC7IZ5X7XbCUKdlwnIiEewGwAbiNINmT9mdzrAq9fuOM3eNNlCEDYhf4Kpn84CeMFtZ7pFZtegsSJiAaSUxIxNiVt7QGezjAc/pwzgHjuRJ5Tnud5N0/zPB8Oh3v37h0OO5jtdrtSSkqJwLCGwFQ9FRIE75lZcDghNf6rhtDmTnN6czyOskN3JzXy1hPFe3gCwjlnapw0NuiUc87zNMXt5yy1MrEKQzhgGSYj7mvO5MxMnGL9bpCNfd8N58RZzwGE3AGZtTaHcKitaIQi/XbGqeKpEo2VRvEkg/eqD+AE827JQGbe1GFuH/F2k/aXfOd999HBz+2cAGnEAtri7GxjzIyNH0sUCTPf/njMRDQMBBBIh66cw4baGmwbydm+75G7Oa0Qi+Ti+d7wLbbzNP8AdEOus7UcRgvW8UTasGI4m4fbzrZxNsap3J3k3KHw4asPMXvmlp+CPj3NaIC7NrRplH4bO1uvYOy1PQ4jGilBIumGL8PZT4nK15hMZwP8fuwjP/j4CPP6h1as9VHn2Zjthrv+LftYEL55ds0mNm/Wf8RBGDBvfzZVR+zdQ2j+9sYnpLvXb6iMf4LqCxnotLevnT82sPRh91+0YkICQ2bgAnpBuMTRCGBeKh3NmVQJE2EPvkhMlqrTFbzAV6IEUuGVeCV3eIUX0C3hAMqOiQCHMdSpuC+uC2BmT2uttS7ELjBmcxQzpaoEIwc5t2IFc6mWdt4KPb2qmxMCdclJwNx5AV1HALpHstCnw4FXpMNrgMR8/vK1u8P9NRPc8oEBIO+8g4ABuok2RL2AjQZep+ieNe8u9K+ob9/fXNqHjdflUgsin4mpbbLhdQUFfiahmzob4XVshGSEZYeiGddtwnujy7Yj3JoKY0hbdbP1VN1dbUzRyZlEx4C08sVWSm0epDLbuw1JOmyRzCfocFOEwgNfO74e4sLdVy211lpX8lbH/+ocxZ/D+Bsz3v41UkRriqb9VLW6R4ZQEhH7uq6hpyODNHQA+jozqzWqRNglZeYkuQ3m0PsvJZYslFLKIknEa3n8+PGP/ugXv/DjP6HI3/nat59+8N7Tp0/ff377zjvvfO+dd66vrzmnd999l5Pc3NyIyLqu+/1+nucXz55fXl4Wmdz93r374wGHMWdmqPXWjBW6rre3zyN7UGtd15UxovWKjfq8vb65vLx88OBBXOJwODx48OBwODx7uXztG9+dDw+/9/6HX/jJn/kDf+APPvqJn/jpr3zF8/wv/9k/i8v7+s47L1++fPvttz/88MP333//ww++96u/+qvvvvvuy5fP12W5urq6vn652+0udvvb20VVU5qmKXueIkv55MmTYHlxs2VZcr69uLi4vLw0C8Kh3PLDVdVd3Zd3vvfVL/3Sxz/+pojk3fz+d7/1b/4bf+7nf/7nf+wnfirlqdb63XfeffrBs6987dd++qd/+tGjR48ePfjmr339y7/yy+++812r68Vuvrleb8vxbEfFEkySPK1LYyEaTdu2ZsfYsdQj6GYKeKC/QNawnmDbGC4UJH5KCIM4CLXcRYKBr8UPWtkPmbnDTEjIxM8DQhuPsT8+Dnar5r00g49QaxWzZsC8LmWxdYC9o0mJKMzReZ69aqwT7XkYEXGzwCiGy6zq4Di5G7WESfD6xl5TQ+MOIQbxUouDAxzFbRcrdzwKeTBxgdhKdTWDVmbKknf7fWRHa13NrIY5T/FONfgEn3eHnHkODL+7utZF13WVvOY0B3xzv9+XUtZ1jV0w9vI2XbyVNtxZIidJ2q00DjYnB3GTcYEDCO/Xeus5Zia00q9+RALqTOwCIMhICcfbOItBGE4C+qxNiLu7Wvj/8dWRWTpPYjdAYHhZAISYaWNNnisD6g5hzrnWSuQpy5ynKOFrsjpn7hgNia5YZpEc3tYijogDJ2HjaZqEfDSDJfZ5nvcXFwFib0JMBPFErO6TIHXCz+YonUR6SilKqcdzFCE4CXEiZY70jZtVVwtd4cQwjmie4qTwMDKuQOhmjNhQp/z1eK4jW9WfoFkjtcYr/bJH5KLxi8Tm9JYBdnezU1LL3athq8upOzYnx2NT70pEZqdHtn18Yz1Hyz+8osjjLu8YRNvZaHkaM9w1RBRgPS6DCBTtrhhE1mtWe5KwfSFR4zci8mEzcCMzcHZCJw/oF1H0qD716kQAVhduMu8k05iZUtrG4MfuaAlLb0O8c/e0cUTb9xsh0wBS2oY9ptWfxLepuTHW/ZA++aD2/sknrGQcMSKnBd4wihGIMTDABHFygCnaknlj07HNcqIzHwB3vJofwFb66hHNG/z1kNGPzqX8Rqv4XvWz7BzL5+O0tE0K+ukbIHSGXiUozKLPNNACDdGSPpwFdrZROLppLNnPOxzsJr3Oh0q/qXv8T3N8lB9692m2Fq9tvHL6FqE7hEYI5PYE2oMuYAfQLYhIXGhdTWFMqMkn5AMxp2wmrvpStYoUmiRzAVVEJ3pfgJfuO4eocZM9pAQFFqC6Va3X7pWows2NHAYyJgRfBoOZE0tSpaqm5r4WGJkrwcFuAgI7mZNVsxopQTgRGQFMEn1KXzMfr07XKTP/Wqzv9km/zhxC17PNCSQvZhVxv2Sh3kg8AovmHnnOEMemVUBEjkCPBKTR3HW0HGvmRBvK69jOX39jPR4aAmqb7sLGH/POujK8QdpEuqnLcNUTJP6kX4BIwIz3fYC20imAeH
c+N87eeMfqGe5sO04ADU7ZI9EA0tBkw95KhspcaxWQtb4yDnEhYiAK+dS9miXG+G3o2ZOxzkTCYBoO+lb5cccaASd61viale38nmgAOge6HY/H6+vrUisx5t1UlwLAeq9kd4BMGlUm5pTTNHMKtF0GIGBiF+IslDhKUhRmV8tNfvjk3fff/9//u//eN7/17dvb26Xqsiz3Hj6Z5/nJxz72qS/+5OM3nhDRj3zqk2+88cbFxUVK6f79+/M8P336dE7545/7HJihFQPPQ528OxCkKQHwE/eglVK8lsDWrut6c3NzfX19c30dFYY3NzcBE33+/Pm7z1987TvvLsvy/N3v7C8fvfvBiz/3P/2fv/XWW/+Pv/H/+uQnP/nw4cNvfetbf+Uv/cfzPD9+/Pizn/3sZ7/4Ez/98CHu3cNyhbKi1uv3333//feffvDeN77xjWfPnn39V7/2ne985/1337u9vb1dF2ZOnBPLs2dPAczzPE3ZCKUuy8q73S5QwZGVXbVoR+3uJk6+fvvXvvrGm2/Wp1USPXr8+O/8zb/+D/7u31HVeX/x3e9+N+f5+vr2xbvfISKeppvr63VdpsTJ/eZ4sxxvzAxqblarqUcHBYlFAlqJIYnZqXfl8iCia9Pc/UcYuYFPxihvdo5tNkxf8OzmlcDmWvVkNwMAas5T+KBhgLaFyg41tLYcp9bMfXedGOQH+Kpt+O6fqHfi2SRbE3YMj1oaJ+iOSKK7IDfEeSSI3JvVab0a2JpDCIWTeTHdpcTkINnKArgFkA9mruEwotZazFmEwSTs3poPxTFNE8zhGhFhIp7yNOcU9+1mVd3MkxGlxGk65Pn29nZZllorlxIeS0pp1RqiQFWraVmVc0op5XkKhzwcQtswZAyhMUQTM5ORoDX5aCnf1mX0JIu4cTSeJH6cNhyVWq3WE81GXw+jL71tvcEhxOkuyC3sTiMCN7Rtc8DihGYqIkRSq6saB/mwu3MD2SGyXYEgRYTgaWShGQQyEJE51KKsH0BKyV0TBaMpTVl2OU1zPhwObzx+vNvtIiWYUuI0MTODXfxEobE5mLlLKQqPpZSllBJ4m622i3kUiHaDus12l+2xyDMPSHMDVLMDXiVYu4SYXBhkXkuJzeVqSq4WQYnWf8Xda63uvbjUTNWA1gdvPLjmv5G5jYQfjfGYu6vcSRu6ay2q1dyaH4ieqnUzIgk7ZjtX1F199HSxdUT0yBNuFejIGw+V33+7UfA8dK5AjeBKZ/qRiMyq96fT/n0FGeEbmWZmLf9yArs66JRxDXnisBCMxNywbQQWD/ID2pgsLQ/gfua2Oe44QSODDXc4gkvG3Uk1HEK9I+VEiTrFaT+lN5fbyS0cUcCpdYW56xJQy9w6cMrPj6ml4P/tDqEjnLpgvN76hBaOkGMlkj6TDMT32MEAN2Bcp8NxAD6NYZzCPA0kaQOEfLKXWyDzjjXcUhD4IRy/OYfQ7vwdb24QeqfPHQ2r1+MVBhjBib3F3mARpzidr8HHmAhbmC45n/mE/er0Gr/6zjv/KH3Cs+O1DiL53W/5eNPbr8LWjqgsfILtqu8SXSFIryAMU1fz1WxhO4APMoto0oWKPgcvc/JxCrjBVuDKMQFMbYJjd1ZCdVtdteoaKsmhZuretB4xVN3MWRiSRcS0aKlLKQkkxGB44oheG1xESlWNujCWYL0ywjRNkAzlcCVewQ3gNAVt9l7vYP+gpd9Crr1cMGpZldjcK8Hdq0NBTqZh/8DV3eFK5gTXtTg5M8O0Efv7SVIQ0SlFGeuWeRA4N93XQhvYvonNn9v3xzuqp6QfunfjPaF1ig73HN3o2LQ9s3cxfrqojYDjaZCnIOMryl16p2h0XTOsU984hH1Lx7SIu55YRodK40YVOLQogrFr3AZ6wb25JwoPuM1yyplF1Dgs4HVdp5TvnB/dUP6otUA9DxD3YMYCLMsSGEsimufZO+y1hbFb621B+LSJUuJ5mhyY56mZuEMSGsUEmyurq6kCc0rf/PrX3nnnvSdvvfuxNz/+e/7Z3/+FL/74pz/3+U988pNvvPFGurgAAe4wBVGppSlCZhAfHjwxM8gMMyQZt9FfAJSQdy1Gm3fkDiI2y7NRaxnPkE1oMCY7Sg1zPp2q1r/2f/4//vk//+drrX/8z/xXHzx4oKovX758+vTpkx/51N/4W3/77bfftla0mn7kR35ERD735hv37l185jOfeeNjjx8+/thnP/+F3/3z/3ncv4QDH3548/77b7/99ld+5atf//rXv/3tb3/rW996+uLF8Xi8urqSsjx48ICZb26ub26up2nK82RuilZGGMDXzOu3v/mNT37yk2Rlubl68uTJsw/eZeZnH7wL58sH92+eP93tdjPnev1cVW8dKSWBH18uN9cvy3p0d3ZvJpwGPV1r9uCdjr8toWHxO6JSS0Daen7QuPEtCsuCHLat/sGC3dahmSUjqHkzjZ16xz6r6tlbdAqGXpdlFi9OAX4z4wpL2FSqODyaKxO5grhjHrSbTDKM1zuLPyzO6I4aqbDoVR81Wt4cm1ZSVbTu8h5AtVjFgUEldbAk9LwQBayurBWgxL540boUVXMnkJO0UtvADYQFKaPaNgQAwynJxDzN02E/mxbUiDCRMztzOAjUqQurOtcansx+n9erl0StG426BWUIyPRoASVIKQ2f0DdlxkMgtB1Xm/dLVM3AoGY3W8OPSrSlJButGofNOs4jiYYoG+/H/GvFCB+M59udCmzl2Fh1vnFchwQjR5DELMuyruvIImrHCUfcL7Gco/MsWgkSeyRoAHNXcU7ElEiEoBHyaFG2nPOUMjNyzhcX+yQSfccTo4q4ExTe9wifkF9qZuRIrsw5pyTSXSwzM9vv9znfEJGreVXOU855ZiZbGoUpgYjmlKtbSmm9Pbr3MbvHGiZmhkmixCCvAk8pCcNqTUIpEvJOGiTDkDEAVQUYp84cSCk5tYWKjaJ18+3DGmsmNnHLdwkkxSaIti4EcH/R9F1Kso0djOcldCrv3Gr64WiNd06PsI/w/LW5s5/HOBr8yZvnsbGuohSQBhHoaTts/UactEzOI+EJ7WlS8rOMa7sooRfYBfsrUSzJzS1sTZ+g0ugPImJIJ8pynrZxt5B4AMEaO7cPoe0bsBNtwyt3HN0YFVqkz89ieXF/8Xxt3NMwL6mTdrXawdiwzQeuAZxv3NknaGjwo0TRICHKTpwbqUxj8gn3rycT4kbPsI4DA9ClfS8I7LO6tXYGZG475cNaeH1q4jXvtoH9gEzNK+c5jYTPzmybQQDjmY5azVPDhQbhIzdHhVWCuyAay7Zwg7Oiw3RJQMYeXZqsM/oA0PMLfn+I6D8en/DVMZ2Cyj/gqDy6MjqABJ/cLyDfBANIhANTJTBQDC/ZMuQe0j3wnJSrouAFYZlsDw7fT4EVDqLF3FNA99suMFiFrt7ayUoEGoM1gCkRTxwBXGW4gluYOLjlIUzEbNAUW5LJWSZJ5GIRG+57n0m4maPucZFXfbs+pLv587tzePbOSd0PJIWCgFhjbkQOcoISDGQEJbgHK4xraJzwB
s/AoJt0FEefhp4nGFV5bjZqm0P5bR2xASPFRsoBiIKpH3jwpmOE9W7A2KTBhiGxvYr3thBncjXsjZ7PGB+1qPbGQSXhYbpsNNSJ2hoIqIgRpVap7hEjk4SAlsV/cA/OPWuRb+YWORdmEZ5S8g3TdyTF2yA2RpWDw4Osprm3OrwTTx2jR7+f/gTaSmVm69AaZlrXMs+zqppp1Iccj0fAmNx7f3B3A5iJkTCnXHIBOCVpapQcEWK3GjqDg9rbDebHsn7xi1/8Y3/qz/y+f+bnP/87fhZpp6vJxGB40eV4TCnJlCAJhFawCixLmSaW/SwOAKVa9OnqN9WXO5Oqeyv66gqMhYKTCTDDWQMohwi7MOXcnvoaDCLyL/zhf/Hf+/f/w6WWP/pf/1dDZ2JdIYJaj8fbWuv1i+dvv/12pBa/853vfOWrX3n//ffXdYWbCN27uHj06MFnPv3JT3/yU4fD4cd+9Atf/P3/uS/+gX8Bzri6ev9Xf/Wrv/bVb37zm3/1r/6VX/iFX7i6utrv98tyi6YZzU5NQVBN11oI6+OHD589/WBKIvD3vvfdlHna7Wop8zzfvnzGVo9XL9zp8vISwPFmlUREZLUw+TRNuq5La6ISxoOM1azdqjiV3mIsibaxBbwlNgjeS+4lgg7qpkOQfN4xyAKQwIkyhC1bv6iZK+zE8AvArKryQMShW11t4zt806feNy0NsQE6BiANhG0LLL9jEm28lOZymLXiMSahU/iZmSlJJ3YyYWZOrX0iwd1YvZgy0rjQenvtTmstRZ2EJWURgSP61YSgRIMCcq315rgKuWwubGa1ViExNgdBOCSN1nq7LguJGkACt2JKqtzrZmutUXEkvRmgu9eyuGtwM0ZmMlKg/an5Zko4tMKQ466I/iIneO0G5RR9zxiehZHEzFyD1QeJJRj+ecPpN+TYyAhtfQxuyGQg6Ob6weSR+PJuqhI7EVgwzSnn7NCqq7sCZB65LmGHRSacOaXUM41wZxFJLC7OzHB0V9aZwaDM4qwslKVlnFp+dV2vrl4EzU9wizFzFimqrlZrxc7H4u/zN0ztqJqT4ZnHCu/U1WRmoibM0zR5aZ0Pm30gAkX4aVEUEUpokvbbLDzNIkRaFmHs57zbkaSmnLj1qTOygM9ulVZTSM2Lif3gZ3m5pvPOlUiMXIsGkxPxaRfXWiIVvN1344rbM49dz962sLlBLZy31KzeDTARgDsREvEgEAYaXUE0mTercI/+iEQIVcubiglyCtzgcJZGN8IWTATgwxnrmjQ+SgkWrZNOtVluxiJnUfwBq1APmHcgHdwdXgFOgwd8GykGRRB+TNeYwGSk5zhnADBnokH40OaWTsOnbjmgDzX+OE1Fl9KN1PTVuNlpVlpEaPMcTrZUl8Z9JM3FbRQy7kotzajUiuVifAY4Bx08bYrBgI2nFwuJ+psjr2XUvJf2w4847vhCw/K784uTF/3KGX5jrmA/3dZG3+Rd475OlZm++U5bEi1ZFSk+LWrFa4FrOO0dSRiYh4a+dQ5tx2BjjCZ31n3CNgu0uVBDnr7qNLRZfa1b+P2dyd/kcXbS3lnm1SMsSPJGcBurXH1hBkGoYWITeE/pAnIDTfAZfiFQoxW0Gp6ttWQn4rdAD9O0zzRVvF3pZa4JwsTcYhAEhC9kZl4j6AyONC2JsZGSW1gYEnzFALlaJRgYZmaoiykDSJNomrIkcnaLBCIzhKJpexSKE6Houq6mCBrMuOtY+5utaZsJ+8Gu+9jj58n/FsmJ9wNcBxhDEf95dRRyA9ytOrSrAY8KyViK7J4lgQym5DA0MBQzw3vgY5Mos0Eq04ZB1IuBqp8Iq7Z7cCujxr8AosZinHmoWt0Qyw05OWw86pHQrV83RoItKmdYmB9RxBjm4nBEB1vB9ju0cdaaa9SPZLS9mTb6cTNhoYdSDIaG5CygCBpExmU7R8F/aK7jVrezjJPVTvM8W3fH7URbClPbMLqecrL7/Z6Znz9/dnt7W0pZ6+KuOef9vMs5pyRmI5YfLDJNN1Nfu9EBPIm4m5uSG6DELiAw3nvv6R//E3/6T/5r/xo8ozgyVqM9Q9cqU5onAaDmzFSrMfOqdcpJUgpSrfA585y2kcAoSg9KMxLqohRxKgDCrc0SM2RKp/1k8GhJZR28l4QhzPje+x8+vz5+4hOf8ONC+/3y8sV8/z4ASN7tdwAuHzx46wtfBNBAqktZnj374IMPvvnNr7//3jvPnn7wla985W/+v//e+jf+1tXV1b3DxWc/85kf//Ef/92/83e9+eabL5f6c3/sj/7cixcPHj4E0wcffFDralajcik8AbXqRuqGWlQ135vSNKU5f/jiuVl944031vUYTQsBPHv27P79+ynxshR3vbm5yXm+unoZDnbOib3RNpRSEckdFhFxsGtV1RxZWApjHUEhySJAkwrDAQhtYWbMgRNrOJ/YU3YqqLVNT0JXtZRSuJGZpTDHnYoIC0F5rMawd3eHmSlInzRQbae9PazWFltt9U0SRPIAgPaaiBwK3aBMR7uJZuDGtjez6Ck/3lE0FspA6DGzBRkGotSFyMjBqm5q7OxGnJhETMTVGGbkiYXIlBhArbWayjQHxM/MmFg4kOFwqDeuVpjZUkpZj8ut7PezqxEH1bgYcTEtRX1u0OJVa8O4Oplhv9/f3t6W4xE4RbAY5MwRNgurfSsNXyuIw+3rnxkhcc9/cYc0uZm71hbhyx1u4KpFFRFs6othWLdNIk85j3jeycAdwhBg6ikGIqbRXKi3n9+oisQyylOtU4qpU1sD5hBPQimxQGLawRYUR+4U1XOJRTo+tp2ZnME5C5MnwpRTzuLu63G5YfGqVmrlGnQvjcozWhhpL+jq9JUAIhtfGobTREhySlkiPpVSSmlyg5lBjcybQ0Hubq5eK6vqui7uZlqrFjcV4WnKKSW4T1M+zBOcjsrMvJvyfpfmlOuyqiptNBzYA3HXHU7S1n+Uos8KOv/tFiozkD9Dm7TlF/dILUEXYW4zI2SCwBnObpFROlkGQ2ENVeghCjbUaGM5jeVxtj5FuCNmfbQbFYEIN6bfUJ8RmR3hXQcaAXFUXgQK1925FfR1V9m7nUFA71ka14Vr79i60fdhDEXKjTvHDJG5Wm2hvSgDidOKtBBk+CjDHffoo9024OnG3Ru5CBEFY22bECIyd/MTiHT4tADQ7C3bGipD3/eQXfiuZyZPQ2/axk0OobEJwLUJHSlEJqh3SGeLFgEdeG99UIJW9BW/bJCqYeCEDDc/NgeJqLnezgC34kxnIhnYwY/glbF+uTvvjIm/8+XzTN4P82hxjdMAvOdPhmCkkTkM+VoRfB66wotbha9OIEqwiLsrevNliuSyd0nrzGSEqJrofmYvqD0NiQzO5LYxg7cDfq2zYT9cn/D1MNFNyeDmUA7IGYW9G18tWq+lVcRmQEAM2jFfoi5AQtnBDkQm9FLp2rEu643ZtOMnmA7IlzOxKwzPFjsyWIjYQOJoYRYrpTgU5ilYEpzY2aCJq7u7OYE4jHAIjMyMnYnZzKq6K1JKnOecZhYhQ1G4kbs4
GEzlWNxJ4qZrDdqCJiiaHGjTEZHQV+bFzlf4dhrbF7ooovNnOnx+BwxU2d1RyKujmlVuMKugsGhQcw75BCOAw5pKBGOwu2q/FDwgfu3wBnw6xXwc7gNE0xbfBg0x/gWQe6pmfNqsOG4af2O9oOuWIOZsGgQttr4OG2MrPAfHCvUQW/OVungf4wm11Oj9Il23GZWdQCJ3w3YdBcbtHAYzb+2DgdaBYbAXoidAosU8ecsMVK0iUurCzPOcrbODAnBXIk8pOWjkkUbLpiC7iy52qno8HsekeA+UqurhcGjrwiwMbuYIrVcz3e/3RHR19YKUaq2jLXIzKKPg0sDw6GK/LtVqcSbmZFqJKCVYoturtVGfs80pH4/HP/rH/ks/9VM/8e1f+sVP/czvOS43u8Nu2lNx5CmNKJYIOZASGzBxAgJ8DQD0quwC6DyktP3CAIptAWNjW7RKVxptlU4C7+rmSJIfPHpC+wuYU57ArOvKKcHJociZtCXHUJVlnt/4+CfeePMTP/nTEfbFcVluXn7329/5ta999Zd/8Ze+8Y1v/Ud/7a//u/+nv5hSundx+Sf/xL/08z//81/4yR9/9LGPvby+Xp7fvvnxt7773e/cv38/eBfCA4lAaq316bNbdVxeHp48eSPntKy3nPJaCzmeP38O4ObmiJ7um+f9zc21aRVhIkCtnsgJ2WI3Eq3rSpwyk03TqjWzpJQUZ0SCI5oymlKEHRmBBtUIKbGTUtffjT9ZVQQpTb1Jd8uKNOr84xIAehEppbihFSJLS5iEoRyhl6rVkOZ5TkmSZOqZ8xEQUtWgZWJGEmYgko8gSimx8LrWRozkYY313hUbW3P4Sz56XcSF4OSW4LXW6kagakbuiaS6OZ16ZzNzALSnaaJkzFwNa1V1AnGGL4UBqgFmkNZ74ISdcxB7UIu4u2lZ1Gpd53lmZyJatVAVETkuhbJN01Tdyo2aGXFS1aWWAx2meWaR29tbNZ2mOWRC4wcOadPzwDTIeAKJEAiFgLITqyNCTkSUBo+oOwgikpOY1VIiA3YKuVln/e2L5zTJ8c0Barvjl6L7eDGksfamabq9vZ0mJnKz0FUurWGm5iy1ru5a1+Vw2KWUnj9/rmpTymZVWHb7SUSmlB8/ePj4ycMPP/zw7bff3u12MDsej9M0uVrRtWq5vHfx8uXLcOeYQI5WEp041BID05RyzpENK6Uw8ypStIrkUC3Rlcjda61Eh2maSimqZV3Xy4uDCEXrTiLKOVXV+5f33Z2JSikXh8uc9+Qw8ymltiomNrN1XW9vb90sp3Q8Hl0NANTiXCnn3DpxTGTzPOfLy8t59lKKiFxeHl68uCoKAKUsnHcgyjl3MhsxN1VdlxptJ3KaAwUaAwi1NU3Terw184E6XtfV++MLXXN9/XJdPaU0zzNHto7MvJpXJiZKzKy1bFlVh1s1aFdHJi2OESc6qV4zd8/dNbWNJ7kuS6yrVnjsrUKPRdZlSSlxSo3e3WwgCtCd0qYMzrHFLRnurgYzm0TgTjDm5lv3vb8Zp3GTg0ycUiZya/1viEDCSKkeG0swUUcYbxrWoydvT24VTklOqHk0VmUa4KsRRtnm+tuWD5qHsBPklKd1d3jrshXBo608jBMTegl3eCw93xhtBZkICCLREdmJRE2QTlCvHUAkLg3oXpwC0R1Eoj0QqMcpWnLTw7RyjDQWA8wk7sEmVjt3Ept7wPN869We5+I2UxrjG1C0nkOLb2696e3xEc0qxhWJzn81PHJ/zfc3+ToOg7uZHkTkZO7sCi+gxbSsyw35Ksl9VU0qkuAO5pYF5pE/Uri5gUhAEvNNRAQDyFC7zdtCEA44FM6AwkF0luKmj3Sefzg+4UedImIpIxSFZli7oDrUQbXqLu3qgjwBtrDfqlpiA8+wfagQ54w06bXVo2YRTtn9ytU4rTknocXKFest5kdIn9il3bF8kKa3jczIIc5iUVNOXthdizvgxG5sBldxZ8YUsQtmZmppebgnMrBorbe3ixumaQL7ul45ZxYhSTwJceOgYYA5vXz2MsuEUkE8pXk316cffvhgEqiCCHJas6+btNe8tzV13VtZRHcqEeM0FAAgIzeDAmqu3vodViEHKUzB1hImASx1DbqSVmDgSiymxWo1s+YEmhWzKfUquZPD5nCISK3V1cL28yga2gCFNi4fcyff6uJohB2ZNx0ghr6Ik3Si/nZ4y4QV3hxoqQUMSw/BS3e6xCmXOMYzFmSYKEXrsizc2yhsBklDGtRaRcJSKlFfI0Lu3tzZsCG8e5NNn43Cmy6NhNkhcFfQEOWhOLpQC3r11iFNRNhpWJbonHvD5xyzhm7ZB9Oge1jAJ+6+4JXZ7Xb7/TzP+eZ4S+TzPMPO4H9mCGcXpomJdhmAWzyGqmY3RUVESFP4x1WVAbKLi4s/9Ef+yN/+//79y0cfe/jmp9zgAEvEETbxkI84PvqT39jxEaEpDEqB4/H47NmzT33m08vV9Xz/YuKDG2SePHQvEsxdArURZ2Q4XAWmJAwC9oc5yed++tHnfvZ3/cH/ouG4vHz+/Mtf/ur//a/+x3/zb/7N//W/9Rf+h//6n/unf+/vW4/rzbJQnp5fvfz4xz9+e3sbq01IzBD1xkS0Luvzl9fq/taPfPJHv/j5eZ5ub2/feee719fXx+Ox86naWpfjcbktrUisGxyNHdGIlmWxCk8aaLcAfJOtJy+r4/QsqFkACfbMYBZpsQNWK51yBRSujJupt7aUMDQCirZj++akwT3BHYJOYBaK0AsRpc4nyY2SVKLduW+4Rtyd3azXqrV3OvDa3c0UwtxzR5bMXaJ73jbv4edYshhAwAe5QaObfWlmkTBs2XqHkpNHr20ia40cyZPXyozLw0XOk8Jvj+vtUqq5qzGzelS3EaG1QOxWmJtXs2iRxMzMLiCzoiE0TnRYRMwobhIFvZQGBSsRLcuSciainDOZRjrN/SPjiCPSNMTf9h0iitYF0WUH7iwtkDak5JR4N02J4e5GwbnaSJNDzjh6wgGQ/oyW2qGz/ZubhGFkn3i0KkmJ13VFr/xsj5vYO/9tw15KmuY5MA6laFT95ZynJJeXl2987PGTJ0+Y+enTp0QeRVDmldj3eTf0h1odk0nky+0xXe6ps5jMeXr85OG9i8vDbr/f73POeZqoEFiKrmY2zzN15phaK4V9whz2djhL8zxzL4F28t1uN8+zllpLubh4MOW5VnVpa5LOabu3Jnt4klFmPKesZV3qNUMf3nv4sY+9Ucp1KSXIb9ZSqrkqipOojjzP9pyx6r2RNvtwV4YinKYpJY1z5pxzzseyjC0ZXWdS8pQ4YLZD+44Ba+9J045edN72eB/M1qwfEc9x43TuSY69PAbcLodTv0QQsQBk8AoNtE8E9Vvaf5wB3YIn3jQzJII7uxHxaLQFAGSESCzfcQi8J7LawAOWEPYCzFgtsTRYKTdTcozhdJ6zl4yeAzzdsgEiAf8ebzY0mLcAsW3d3SbMzwic0GyjKCnsVX+BLA6IBZxCelCjzOmXieEZjbPBe4N4JoCcjBq5jndvi1ryvC/
hXsfvxGhWJwMQ8k1B4FgzAeih3m47MvkKQj1f0nGxDWlqfxrD/TutsY5iaImULcUAn//2I4+zh9/O/3o4H9OrZyIiUixEFLqFieAKqiAzXVwX9xq+iJtAACek0iC8kWVsCfFYJ0Tw2FxwgKLsvqIlb6Xlb51gEiwt0f7xo+/vh5wY/KjjPI09WvhEsKMYVJBymgCkDKC637pdU1SFcAFlgJwyMIMfyeSwmclBtTkThUTIbSn2POG+2AXkEvYmlUufXhAZkYMrYI5V4S1v28oexCEBjDR39pnUzKozIyUIwRxmoKCycRFxdkmchKujqBQHsRE1/joYoRq5NTC1MWrVtay3a13VrMIqkn2/x/ID5tAAdG9w5Du8f6lVqIIcruQFpOTqXhjqZFAFGbQDQGK/ROosZKB5z2iTE5Gbx8LqdINNnfdaPmaW8/o66/LQz1k94/3IaWGzubZ/3oGGxhciSottJUJ3fyIRONyuIRKHwTMcP/fxsl2UOiXp+PI4yZjrUhf4ME66TYeWgWROkemMMi6R3OLl4S9uBxr/5jCCe/ZZRMCna0ewjImE2KJReIe7WG+SJoN5lltjqHAJRuZwTHd8v5QCjPDomV+eUpqmRJTd3dDyxeYayI3WzS26hKH5nFNKKU1mtizLspgCrpUTiImF1IrWwjIT0V/5K//RJz79mf/yn/qvfOlXv/p73vx4WXw6zOYbxEpHvPj3FUI/LPn0uhQ8ADGzUsrl5eV8OAAoWiUndwxQYojtwRoe3qQ5gpunJcTnfYRTQMnE7r31id/7I5/6vT/3z7399tvvfedLX/3yV/7iX/y/fOXLX7rczYd5urm+/eDZh2FQ5pwhbGaiyVrj4926rm9/752X11df/+Y39vtZEt/c3Oz3+6DTMLNatHTIQRJGT7iPqWVmU1RTrp5z4xftzx0tnhSqhCgy7S2mIJFaczU1rW5KNCNq+SDMbMFCsdlJDoVH1FzNagRfbIPJJCJ2MnXIyekiIiJhCMhiKfYUllKrMmq+jbvH7xrDOgwwiR5i3tx2GeIAVBvZxMk2wia/MY7xJyLWzsRJOElVNUQIhKJ4JcShmbkZeUQ2MM5vx5u4tNa1rstqbt4Yy8jMmdxBICbvXH9xOiUiEsqJZJoSMfaIp5ASB0NhzLBWV3FmJmFScvcwiUopIJqmaZ5nqiXwAt9n/fvmQA+YtWmx4d6DiOABFSaYV0SjJRv+Pc7C8+aukStgHh140B5uOBylbmXOCGONDEe8MzorqFVvWOcAnwSqx80qkYtQFItm4SmJVV7XNcs0zzmxTNP04N69R48ePXpw/+bqJblpbZ6JlUgdC3cr82QiM6RB4kGIbsIKIEAcHs2miBKziXDKZrZsGt+jxwmHugoTZ57n3X4K+RArXF2HIhDiJOLqMnj/AaFWGRtSvXksHdXJzJNEd0FquduUmE80Zu5uXgEhYWn95c4Ss6f9yHxSOv3foYCnJEQW0kZEUgovLpCbtStID8UUqzkc71AWvT1k94StzUDjznH3c2qW7W5C12sxmPF6q8jHn1GZNmyF+Df1DNKINBOEwFAbpdHjotgmdrpPGA8T1hVg+9QQPl3P/LQ5bz3WAesU7Wwj9eHuKebBHbWHYt3cfeJGt0Xnvh+LRF4nLuHU32cmBztGIC/eFwmB4kxNVLVWFnSCKXZ+qJMOtGA73JDldmF4N0y7tcPackQAF3ttGhGiu1CAbyHbmyJHOIHm2osGNZyxXhYeUovaJAeI0UPuRqiOgArvDTA3zttmARu1Rg1jJkd3kKl/XR1DxG2fLW+zix8Vix6rZvM92l5xfNRRjtQu2kPJffxHN+bheEf/AxQhE3M3Bak35qEw/kp7RkHKLdxsIknNdCIjixRxARFa5jZqiQXOTgxyptRQvh5Q1XGbdn7Xd0rXfvj+4XleK+IHtgEDr1rXnC5tsPKQu95avSKqAIHvgWa0OmIBnoAKUjUssKXWI5yJ55Tm6qr6XLET2wEH+MNMjy19AzCGAaujhPNsMJK224iYCDaCnpVV3Uw8NqBGkgZEEU5te9iJnMFOsNmdzElYeJ5BGa6wClp1rZ4SqqLYerMeb27D1YRFG4yPmjDuyF7Ga1bnFhGKvpJH8EvBFS0DX0AGRHrQyKuTkdfW1AvKgAentyM4NBvXaOxUL3BnkG/klYgMw2DA/tuwThX1vQOTnaT0VoL5JqGF81jw+MJdMXUemtnqizGGceahNYaKOVMBXSHShoBte8Vx/v6d0xkGlXokLCIon9IUri7YmTmdJuJch0WBqbvD3Jqwi148LMGtiSB5aFjbHkGn5pR15cHEw/Lb3ga6UeUbn5jOD2wsobDLg68vUpxqhdi1GCVzMuHc58LcufjKLDmlec7uTjAhdnchF5F1OQpcK6pZBlhEq/5bf+Hf/uyP/cSP/fTPfvXLv/zFH/8ZAGVZ59xMcOo4mVeP17/76z3uirBXzzZkNMz3c/5Df/gPpsQQgqm7MrfCRe8/d3TiESIDiMDpHI9vvqzLNE3E4P0OgK8OSZ/47Gc/8aMf/52/7+f++L/yX/vLf/Hf/x/89/973/nut3/mJ37iww/eV1XACCSeAYRfRER5vnjw8KGqSiIHXl5fpZRKXZeyltbHwczMQXmep3nW21tuvFWRZrQoywqz4xQHdYW3kDHQou/sCN8ubF8iEj9tpDtRmdDWkQYBTKuPWOOZoVl1UPoyC7O0ndVlp3ujI47QknBTobQtFzwVycf5PZBaZI1xEd1b4FYQadv7GqGg4fbgzI3B9k0zG4DJCKD4SSEj8oROvBQlNUBDqsTTD25s02LwUlazyoFrJ/a1WCdCHBcCAHaKhnNCDGNO85x3OTFzWdfjsoiIu8VoRKRoZZVE0e2cV62kZ7iDyOEM8/ej1ModATpeC8iapDK0nRn2ZNilreaH2O+cLTK9zA2YFP0J0cWOB9zNqCdd78ji1omBOioDMNWiWkQaF8uIghERemcC74B86zWE5Cc/UxLNc56SmNnxeGzJKDdiFmH2gFYSQBJlqwQiD4TfbjfHaQN3WnV98eLF8XijqsGHFMHgnFrfwg639kA+m9cYj7LWKuZVRjd2bvnt6/VmXdd4XroWpTSnibhFZ+I7E1BSCW/wpISiJrzUmiohMbNwTgLnhoiWHF0oBWEaJ4IJCWnZ9ktsHAd+fmxDAwBMTQnsJzrv+GjEcZq8c6hqKYsqmxdH7TxqvUWenpjf4gyq3kdCtoG0jJ376oolopFsHDuozQkG9wrGVcg1rORhgjfFx84R5eSNKdBfoSv5k9XfGar4TqETkZ2HXQgAtEtWR/Mc4bJJHd2xbHrEfYyE+gtzJz37aDCCeM+gNkAqb6xD6t2Yepp0nL/peuo9hYisn2crD89GQqc58ejqMhThZrbDnuj5wzDrOW6/3c/wspzZDcCGXsEw7JxNYQeARpVJBqTWgMEZaDgTgjBtaOVbRDMMCcVw0ro7DfRykdPzCk8MGwVzbjB8BPH/9t7Pvr5hGW22WrtQMIp5WNltbESMAmJ4ARgGwOAFpmwFrm6VGFAjlwYgtj
Xmizm1FCCJD0oWSWgBivHf2q4GBmVQ4vZojAAPB7IHxXByaLfPWNHafvzwj77UG2wkZo9b8wy4e9VlPa67y0szI4cZhCuze701LGaJ6QbpPkgcbEioD9WvwS8dt/AjqIhPjB3plFyNFscz1YPUe+BLzgQ8cqhiATKjtBBK6wzF5CYhltxJlcl1aRQEZl7UOAirKWB6MLeAUpFwgLGT3yNyJ5AIaELYEaRwWm9LzoybFWutx0WPZeKEliG3zXy/Nld4gkhs3rS+y5zaGXpZR8d6esvkG6E6DChBZsusDS3s1hYFglY9AY4QU6EFzM17bRGMtpwDgNXa9FTXzrXWojaAYDRQSAARVW22lp/jM4e34r2H3FYvjApA9EuPfunDGwoFUWvZfvO08LprSt11jEtsz/ya5Rpf7o4fM4PE3SMF0lz0lrGP6EAoU4sEZsqcal0DPyqJSrUwFEDW4ZccQsOIoBLdydkhRIk5EZcg4VG3qilvqFStK9weMhyD9o2HPWyIYYQJTsWUI2Ac/y+lEclIknme1djdy6qnrzt1tlTrCZwOMBPBBBHZ5+yuCcbktfISRY+Eh/fu3676r/+5/9H/5n/77+xn+frXfuVzP/GTsziQTyJsUwhhm1V/N0r5wz5o0+j8U5/++J/+k390rQYY3KZdrnV1SUx3Hcs2zvPuudY6VtG835mZmQoLAJNGZFWujvni4ubp0z/yJ/7UT/7kT/43/+y/8q23v/Oxx4/W5RadiNKd1I3ZmfKLl88ieXiZDxcXF8yssKsrB5CmyR1rLbUYEe33+8Ph8haRKM/xgFqbAXA8pbEYRgLZN0W0ARxFZDAoNbT3Bmw53MJ+s+i0Cy2VQWTghuKz6FpewSxC4rSxCAP06S1a0cjrmaMBr5lpS3zE6o0plZC87ZGdQtemVlQ51nzKcCMzS9R8j9gI2zwDXhENW2NXVR025MJJToVlYNTI+glwDzy1iMDTNE0C9wR3XdZlWZZqzikHtirnhBCFMKKoM27cPMKUWYRgZtVXJKIpD0c8/B7KHq6Rr66q0ttIkvUUXG+8SUQkA+xqr0as40UsDw0Sjs54DMAFVLsX7eTUaTndAWThnIUomVcigrlwOwE21KYtvk+8sTTg5uaW8i7W5FbEh/NjLVHs4au0mXewv85a7WcYqihOlbOIEJMz0y5PsXdqrWtZWE5El9M05Zwjrx6ryF0pwnDMxE7h9rlP0yQidS3repymyywpS+u30e1wWK15nkVo0IcMv6tWC/RmrXXU6IpIdJIc9+g9eCmn3uUtTrGlvcksay+cC0Tx5cXBrEbn2sgTVm2PvppWVSNSNMRTbw8Zblhzos0sKgheFW5EZG6lFMmty8hxaQW36BjyfiAARLWuta6qxb30yDS7w7QQTZsNSFGvyN1uhTVQYnMfBsoIGC6Tu9dSRqAzJkpEkE51E0ytOotArqHyu9Uykt5m5pEboV7r2gdmBcAWQEjNDghUWFssbTd12XjSyxRejkeseUiVcNfce+QTLemPYQMRg3r3KWosNylYY/qz2D6ju/shGEciIcsnO2mMijqjOnqCcbzupzhhlzbvN9rAccJWN9VuE0EZRH2bE7ilFby7ZFFw6Ce/tLPRgGOsLQ8ZWn5riN3xDCu17lzB8RiPrHbO14i5ayD3thPjPdoe80+nRu04d3KGxXF+Xfp+9Pdj7janKptI29k3CerwIZDDcGTW5vRar331Citai9aVtMS4lYTd4O5pMLpVqMNTo5yPqm13EPnohElkdIx7IWQKn717y+M2w/c6m/vtqNvkvL704D/NQS3UEoc51N0Y4Z9yOCRXL1+uS31w+SRxsop2B0JWF/Mj9GqmI7hCMpAcqn7PGaAbcjVfWEwoE82oTCAhVRy1fggcZNqBHzseO4rh6L4YLQwhTIIjYzW6ASrIQM5sImTm5qfSZS1VVQUimVUdIOaUJXGilDNSAlG2A5g3ybyIrWR4KUstyri6XZdlvT7qsqb9nsjBtAEqWHhjhHw+c3ilwhNn3mCnEo3uoN76B0JRgYZpAyqgDmUECsabT2jRbaJZPEAkBqNSzdyVAfOgRmchciB0marW0mv2usdlHR2KczkZVQ+qm6DhBjcUwNGtI9fkTy9naDKtH8OaPbmmPWqMc7EWl44neDIO/eSLDnN3XBG9TUWcijdwFbSQ0+n7IRV6UDXeb7rM9JXG9OiSPeechhnNELQGl64BNWnPmT2YtftC6F3CtzpsHFvbaJCboxt/1i1E7tH9O7ZUi6AJmNmgWjSQrpu5NrOmhnOWWqPvRxUQVMmcGZndnSzLlLO7hpGkhqvbm3sPHr73wYf/yz//v/hv/3f+ux88f/HO17/y1o9+3izHAh+RuG08ZAjpj0p0/CaOIeR8ZJMJ0VyJgb/6l//y3/t7f++P/Yk/jnK8XZeJ9ilFhvAuXmKEIvuODQypMjfVFzT35kbEkuGGWpEO90A4PHn89O1vf+Znfua/8a/+t/7N/9n/5MX11S6dQZvcgy3T7l9eGPzq6qqUZS3L4XDI8xyMLMx81kdL2JkkT+4hUwRhZjRhQePfeO7krSkHbdtI9OUq3BYPe6PC64F5jcYV7u6u8PBM0t2+MZ1ocTiB5BGQl0ZM3KRC5DG4BfKJ2D1AcaO3W8wJzEd37L5uqRlYaKJQRNwCvntixxk3BaAlq+6sh82290aNRyQC5rrxfikCfY1xlBtKrlcLsYgICfimXKvq7XEtRUlSSgkkppYSGxzqTMLMNR6HWmuHGGAArdXqEU7waZqIIMIakwEwuXllnmNCZMqMJKYtx+InY19yYgEL/LX8ZAC6GbpJ8rRlMErpBnUYdYoRh40AABywyM6dspHU85/9nA2HNq41Lk3bdUinECD1wHB8M6Wk5VjKKqbaWqHGD43CKmRI4pSEhVKW2aeyNpEVbTzneZ6mKWUZ17VNPhlkI8E4lAp1TRakMkS0ruu6JgCH3S78urYg6YR7DA9zuGpVS19URuzsiCaQ7g6YQW9vaxQBBk3LfndvStlUqU+LmXGfSW8FfmwdeisgVS2lHG/XPImIaPUXL67efc9rvbo45MdP7jc92he5NSbhk2oM7RUu06uR0fhTOLutRDRNszQ6VRWR47HEPDQKNVhfP4v5/4+9f421bUvOw7CvqsaYc+29zzn31bdfpJpNUs2WFJK2FT9gRTJCOfoTgDKcwIItSAYMyPoTIBEg+EcQCDFkREGAOBFgw4aNBHAoy4EiiYJIyaJlkU1b0IuSLBsURUoi+0Wxu2/f9zln77XWHKOq8qPGGHOsdc5tkpH0J/Lk5em9155rzvGsUY+vvtqCUaa7PELsdLNkaoW/DIM27VYfu3iMw6iaMz4P7SNy4/uq25/TvQxBjuROTdDVWjtvyq5hgCQiME0D7qPR4MpQUM+1aIal827eEY3IW5hGwuFh8o6ZNLNGP0W9vOcY8wDej0rxQ5WgfRPGD212ImzRb5ujjvHowHrgSrz3lwZ1ECIBAiAI6PKMUCWS3U5otopDOpNqU22vUKUWhi/BrcUXw8RVdOWJWiUtIiKGI
dIxImB7vRbaTLYRc++Jns137O5Mg8QCRAIfBdzb1NEIdQKAkxfsAoq7aRREO3wBWI0vXFINvfSa9Xf3ct2LOJsafY4T9YBYa0bpbySA4BVa4dX07LWQGYm4kjCgBqcWMm7OfAQ1j0edDxNiN0I/+qPcozdq1siAxxp97KLBW2I7DRDO7Ir/x3vt1iBZ06QsIkf7Fr5/er9tGz7ZYrkNpF03coNursVlI1MIw81JDBmeHSkELbmZV9EEElBKSA5Ue1B/e03E6QH4NkeFHoET4UT8QHRkLMCJW3y1RLyO2YRQV0tMYGIjAqsqS99QwsEfzhyYMQERbAUzwN5MLwUYkrygbpuaHJ89Pz2ctvvj+XRa8wGwFiHct8OLEcIXTMFpUEPfi4Nm8O5zNyqiimoczeTqUKLqpuZRTsNa7nqXchy5k2ZWm+swtK7gQQofTmCXVNVVJcUMNQoMEUopC2joh6HBUai3Hakxhxk+6pqOABr/+hRR2HXF6SvBrjde4cPJeGUlds/+Vhsk23tA4irwOJ4cTwuOgB6BHbiQ/dyJhyzLEt9Kw5c5uEYBuBqlNGrTc6fUH4NiMQHSEtyJkGXA95wcgSkdR+Po2NDCo15WXxl7hMTKfsoyEzMjGXxYji2sdC7nbdtqrYf19kpR4B2Ya2TEDpZQps1MI82RW3qPcBKRhQhvvvYpdSPJX/jCj3/2s5/5Hf/G7/z5X/iF199/kl79zMTB3Giy5yOG/uFE1Eu/+1FLzwnvv/v2l778C9/5nd+BRW74oKZAKtsxLzcvxdATKnpGZZi10YPT+f6w3kQsqJVeYCTGtpkYw+rrn/q2h3e/+bM/9/fWm7tEBt1I9sxzdw14oVpZluXR3a2I5CUROcOCxI8gYY/lBSSZmVWL1kb/yBnM7CANO59TiwwAAIQQ7HfBCuhQkLMgoFdmRqDBCtiLYWoU0xNBStnda63eS73Fho8NCkAbpq0CKXCh41Se9i0nSioX8DOfyEIADC+OmaXdKA0ttj0yPFXqVE3J9wU/NrP72Dgtn3CfcfcZX2qNEoNSSu7e8KKNUAHeMwYD9mqgVgq1KduOoBhkYbk5gFJe03JQ99O5nLaeA8ZgoewsBBfWoOd3SQwXMVit9Xg0AMuyhMFQXAMRETOiqs602s5B3weEos2KuvIqrRTEyxf7sMHGF8eAoFFCdHnW41fkZtQMpzi5ffKf9VFqlRiwO/Z0SraGXlYKumoMsMtuhIWv/cAww+Q0GVw74flq0llEeTc7I3DFQuu6Qs1KjbA3kVfdwoQQIu8k1GMEVHW5vTmsa4Bviej29vb20BhousvGWfaFGkmb0ewGAwFEJDMOeQnDsotN1FoT5xioRC1xgJlFmKjvI/fhi2mD2fGe8+p9/vz+0eObdBAlKqUcj0f3Td0dY/4IDQquQCN5a+alXzzq6t++GYU4pdSMqzE1c5jXprJGQOid5q4OHQJz9EJVY3IILAKCzothPozHcTa+ThTMBS1heE/L70w27upTPJlojwm3T7xFG7nH/MncOb7CBG+llhkUYMexSq1xWV3sovEJTSGiFuWzAOWNEGJcQ6gCmIOTVkZuWN92sysZPYYzLMBp+4xBc3fuC1k6eDFM3SYVQa0mfXfQEBIRgTwCSlPPRlbbxRWblOBonJ9GRCMhE8BkfFnUx2vj0cMajRe16f4AAN3LY1xGQvqf+6D2ARnddWpFwMP8rdS01JEMeREKI2qQzh6I7VQ6zXjuy/UjmEVfGId9sPbPcX7xXh4cBC123DQddxA2hC3qcAVp1AGvXtUtUrbBRIGvdXOn2lITJESNg8mg7kRi7gmAsxGRurCzUSGSCB3CFb45FL60mJVHeRYgTv7mH7ho/Gwe/6O8LqxBdBNEAQLSCHAdlnU7R8YytTilEUpl8lYdQY0aVCk73CXBE3RxX5izpE3UgApagAxwInaY0zOHAc8cnyXLhBvGxjgybkifgQSc4KO0Smm+bKaUw3J3AJKJo3yUQlWDeSkxQRLArcYKpfiCIZQHYyOonx6O5+N2WOSsOB1PWqudVUs1M7Ry8D4pvy8dwMtfLxJfzQJs7M0axOCV6dagQ5tbN/ywqk7Wso4nBxOaiz1kkTO6Z8rc3SgIUUOcmrs0wjzvSVgjjMaXmXhDCexx8t1lPG6b/52kYPtw5Izg0gJCF/tDJZgHZTRgeMT664aE3s1OTEMxSDeAYIBozYgSi/3rITfaEd+Vz3CV5hiyNBQm7aw7RGSOUop7U3SEuEEuzA/LUruG10X2iGaa+WTm0ijKgHmw4ucRFRy6WjuQthaKHSYgERG41m3MSosziKTEdGl2SgK8GY0h2ppak9Ks53mrCWaqumTOS373/ffWdb25ffTxj3/8T/yJP/H666//wL/8W7/61S9+x6ufQZx0aKeCdSfgWPYj3IB/OLPw6usOilgadtQnfeUrX/oNn/+e21dfLU+f5cd3Iou5rstqzRrcbcKpgaO4ihEaZf9hPfS9Nyj+YeZ5SQScjxCzH/mRH/nLf+2vEtG2bYvsweixMSzmT4pqTYnXvCyHLCJ8GvxILYsGRKWUbduEuLq5e1Lqru2+MSIM0umLYroH6YLvFLgaCjeGtTBhkCI+MHbgUApFRCS8/zsYKbaTT2l+behIhgbZ9PbpYuYFvE315Wb1aH7OuF9SUncquxKZkwSzztAa3QPjPyW29RhCe6Bco6y3bZPBit7TUbxbrXbpjonFH+z8xMmdXJKT1PO5E1kW68iEYalCS5OMIilnY7hWM3v2wYePHz/mnACwUESfhNjNi6lUDBvDIqXdLPobnKVtWESKVnz05ZNBOI8G92oT409Es87b2h+uLzMbXJFBG7uuq/a6ICP41hZMFF5vi72VYA1Po3mvPx3mnxkDgflss9+U/raeh1xy94i/oZuOY61u5WxmiTjiaVl4XTMRRUptznlZFiearUpVhXmQggrIXR89evSJT3zi7mY9Ho8RdVyXJaVEvVKCqt7f35/PZ5/K6EGYHCmlZVlyDuT4bhA+fuXJ+bx5Cy2uYU8+fnxXTWUioR1WXQxy3DbbhACfTwVe1pVfe+21T3/6tW378O7uLrZl1HCvpubhOvbZIGyEINNKGJNlk90uLMxQ1W3bBslbLIDwU8wAp34EDKJOawpUVHAwCkIa4SRRF/aywN2llCDMB3U4FLQ5vOY1DPMhY4e0aY1PaXi758emZYHt0OTh0PUW66ZYb00xirN4rnE/goGuDUrq8X/76UxE3gKwsXtIiHzikOdeQHWscG+RrmtjL37CdFvbCBNe6Oor7h7H1Xz6AyCm+R7tVUZkNgV6DAAvXPNI7h5qctN5HnsfyQaGotnZCBLHZoGPBw7mafmIA75LgBgdbkwXbrQHKsNUbnUudvDtLrUMwKUThGLFdnOzn+9TumNnT32hPdixFRenEenLboeqXeRdWztH4McwHsiZDKZGZuxWytlKdYAc7OzO7GxqzgH2C+wBOTk5O9xJ3IxRQRJKKjPMW4zIXRliUPcIRW8LZ2vt4H23Tu37xxok3Cuez9VB9uHZ3/7mm58w
fRsQGu4DZhAl4krkUfCg+2cBTsxAcl6ED+IFFEXXBSaoDKxEtLAW3oifAvdBcCsEpoUoIWCVWkAOO0M3eAULvDY0cqowbZlCUdzMEMXuiCgYryEJHJRThLAEmpFnDoiq1PrwcDydTreUxKhuJVg93V2tuCqlMAI6vPwF8PH11Y3qi0Fu+ILrdP2LbzWvQhNo9OKzG7ys968jIGwrLcBwIZ9paKQNL6dqZmQ+G1TohpaZDW/x7LvHJXZpagBdVcqlzkzRRSj1JsWq3gOGszcTRLvAb0pm05fCkJmveEJ83hpPu9VaygaAKXUZv4vibvq5u8XXa62JeDUz8+SWajlridwthoJERBIzSq3HWhdf7pakKJyxeFItoCQi27YtWFJigL1Wt5qQa61qRolNvEU+mIiEOTFDIFbATO6ABlImTm7URZXcJZGQlQJUd91OJ2YwLZk5peyeiKjBgn2DJ7TIkBASDGamJ2dP6ZDdcTqdqpuTlVq26jlLVMQ5pIXFRZxQD8nX5F/78t//ns/9ukeJ/sR//ke+69Of+s7v/O7/7m/+xf/pP/vPg9lqTSkbqFpNkmIbdJmkEy5l9iBOwv16i7Q/SfjM6PoP3L26qupqKWeYvf/OO1/6+lvf933fBxEjBmc4mGTbal6WKxHZPZAN8so9Whi2oZUz5zWWxXnTtCQnbEq3dLLzcV2XL/70T//wH/uhlevh8frhO8+IJXR9lpzWQy3VwMvh5pZvI4BWaj0V5rwWg9EtkYYGzMxWtdYt53xzc1PrBgWBEvGaE7GXYrUq3CRBVU/nuiyL0Y2DIIuY11rNjZIwscGVmPMNL5liP1dlIiKBqFuRrETi7gIiYSMjr34+Wi26FRhxEmJhUErp5ubm4eHkpBocGxaCPtDbyb0lU+lWRAjqKafm9XPzAjM4sYE20yWJOlU3hSsc7uJEVDktxTvHMRzmTL6kZOeifHZVcVNXrRXh5oEzs1btUSZx9IBJCBpnOFsEezybb+28IYczeHHzzez+eL7JArAIJQF0y1ytlMyyCpN5sUpqJFzPJ9w/T6YrZ8rJXLdzqaoIGmIXUxX4IvL47iYLay26nW21++Nzf/Dl9nB7dytUj+fz4Ua4WFIztdNmacnpkKyilJLX1cIPzLnW+vC8LBnLsiw0dP3hPBMiWigVKxmSsxARg8UoMTuvhkiT5IZEEGIjBjXcIoQJaiH96eF4JKJlbVjKYKqupkw00JXurTiHak1pic9DyLKEOQqHMzW2w3XNwbHkrhGqRDBZNcpKC4W41no4HNa8lHIWwprT8XhcBEuiTI66WXlwvfNyKpvc3uR14SVJAqnikA9Vajm3/Fty11Ie3z26rwVJJJGp5pwWSTc3qzo+fH7Mhxteb29vD7XWdXUmMq0pLwv48XpjTjhrNibOC2VhPpWjulfg+FBE8uPHBzfetrIsN8wp5/zw/AGoxKVs5fYR396Qbk8zi1DiZXGClUosmRMZiNiZ8+GgzkapUIRhSevTUuywPtayvf7665/4xMefP5NFDoAkXtnTqVYFmRulnLMkWgWbIKmSa4txiUhKGXB2y2AhSWAHEeg5aOVUfFOthJrEmbyUM2hB5g+PR2Q5lRML1+29xzdZQHauGZKIM4twVtV0OJSiplbNiQN4XTZzZqba3Q0Dtwm4e8uG7efr8EoEE+ClFWTugzib3Y05iUiEjcxDqW4qCBFxz/Izq+FZ4NA/4CSp1sCOkjus1qGLyOEQZQy9YTub8Qs+BNtSnAJN7WIyVeI4wrhspW415yxLHlUxosct/L57sttRHvkGVqt3YDx1YzVqAml4oEHu6Ghq5iQoBiIwRw8D2h4UZRAGEdS01nD4SkpIZu6AThZdr+XYjjseqhcRjLaYKiByJNpNMpn0+xHpTpwjKRxhxLQvNjO8DRi1/rYTtUVKIxBh7fHx7BZejP8R3lmfO5rUK2afwpyW2JbQBaKs39OM2+vPAdv9v3P3qIF5WtDFx78uzdTxthzh7hS98eaxaP9FdhgZrI4gDNyhBnOULRnIwfAsgG+1nsixeIZIlO2zczUzJTDzcnOAGkzBipQAghuKwbJ7EPNUFoBJ/Awozk+ZEHgVUAZWR6JGlke9RkVCUNy5gBjDEm39Nof7uci6AKyAwbdSmXllcdMW24RFeA/Nzgt9qU9oT00sWkGZJBORDaAxE9GjT37bG7EMZalqtdRjxRvvHSk27w3fPRJZqaSw2BRW17K9ofbEzFTLtp2rbsuyRJ6zeZxKh9gyd09KzpkJtYCIU3oCfuT6KlFFPiI9tfJhrR8Knxybo5B5Wgj1BKugDGQ6p6S31et6INwCeYOf4Kv7HeEA/wb8DeJfs2Ctx7esftnKL+rzD5bjw6u25ecnSibk90ZHuz3QJ26VabtHekbpIHZXLZE8cnLAFC4tqSKS8zei6m08Yw/A3cwN1CP2ZIBry89UkLFGQKhVmnF34gXkEAcMWsxqLE1yA8F8i9WPVs1kcOT6ENcUkfZwnVvfCkSxRcPFQw6CmZla40U2NzeXlPZN11is3N3RM5C6nA/XlhGyqQEcvshaa6lnMzscDmGw5JyIGkI15yQ9hBbKDPppcljWsdPDWRzroUQNBTd3EDGxJwpntaJnLIYzKFyrZgHjUUQaESQqlCVZ0LEqqlbKMYa9VRke+CUNFNw4OEDMjVgvpdRKGw232VTCHt3l3+WktX97YtXw6HPwAF1GVFSLe3J31QpAS2USr1pKYaJlWeJ0jOph7lS1htZVigFGU8NiOoNFULoLuw97uJoaShZAsCkwcy367Nmzb//2b39+//TmcPf06dM/9If+0B/4A3/g+77/X/i5n/nb3/Vrv1vyGnORWeKs2cWvs0Fb/djLYNIl2PrqT9/q2ra6LAkRQWIpp1O09jf/5t/8sY99DMD6+DGA7Xhcbm+WZXnZK1760hZIDGuwlsKyLks6Fzy7f3jt1Vs48Xoozz78j/7j/zBW8PF4XNc1WDqa5z7lhdjB67reLRLqkQMOlFIMXmuNMyh8Ht4JFUSEcuYWYUCtNXL5Rs4hEYm08s3eWJiwOkXSAAEAAElEQVRqH4S20mI1XcX0gFb3UhI3X47tHL6A55zN4EYQ7vX2dud0b1JjUnF36QWRW5eHcyWUAfRvBRVNvFoomC3a3nHMeAAM2dH9OsPHc3VPR5f5EGc0YSbnOwEIWtU7au6z8JIhrwvDtehpOz+cOFnNBEqJu3MrDEqt1Wr4FP1wt9wc7jatH3z49Hze1jU/Oqwk63Y8le2k5axF1nRYloycSjmzQKEkTKYEpMZl3JKw+uw0kpIuKyh+dYOZlVJ8whSMH66Rby+7fPgnh9J2qfMNDX4AKeecOusv4il728xoaIGtPeietjKEWNtLzCml43Ye6W2xAkO83N/fx5MjNjgcgaODwU06vnvV8nFFhGTS0ds152APl2ScQDG/AaOMqGytVdLSZLzuPFtEBJSxGseAjPgqOuykvcLcid3dMEWk+3qOQQ5xamZ571qrCIWO+Jjje7Gn+pbYASDeYHXXobPmPiBEEgjl7jUmGzPezk6
zlJKqrut6eig5r+4qnLqvdzhfR5QMIhFsFyIy91qrzFS00yqNgb2au/H5uG1sc+4IIu/5ltSpRMaqGKcqdT1g9L0Fymzv467/xw/WSnLt7Qk/hdUxdBQqE1oim7dkaSxLbt9SDS/t2MFjR42I0+TUd2KUyShtN3T/5mT2dBSbGkbLiai5hBGtGVzNjReAGcwwYyIbj+pvcXfvLiR0dS1e5t4SI/1qQKZVtI/5RPqnqugByfkeImqGn7taWy1j6wEg6sU8LhZqBWB6Mf7jGjM+G6lENGKRbdT7BrjowxjW8WM87PKLF9no+wLzMZjcVWcA0N1oDAuwrVVqhlX/UzcU43hVCzcE9WOxlsKqHPmBoRMSQRhVDR7VlzClCiOg0a5ggTmgYAIYjBbCIu6ko0MaxDLm4elAExQvjhBxXuA4nu5JEqe05uyOUuq6pJlyJ/rvL9XU5tm5/Hce9LGohTMSXn/9dXcnkpzzkpfpMa8yaU6avNWh0YOqlrxIT1avatUHl55wSjH8pmqm7nou9bR98BR+r/rM6WFZdDmwUDJDzq+CBPkMv0c1qLoTpZSRQIAbtPZ0Lgcq6AnqAUggcRct5ifzs9ViWqEMqCm5VYWC/DocTS+P03/UdRGCo1YL5uLDbsXZ2BoeyR9WidoWivNIiJjJYGOdB8jbAXaQcEsHj7lxhju583XUpBmQO9kwtaKmQwebJXl4JGgnG0c43tw7kqvrM2rFnLQlMcmVpjcerrVB9GdtcLaqaErlQJTvGu2MGnvu3YsXFkgDgjToZSeYv5qJccr0AyWG1lv8kUGJxVNSVXI30xZUhQIpymxx9wXGV8a5KyI5S+8n0Av7hIrASeZ3C0gB9z1RYch31eLuRDynYKoqiaTE2xYz7kNL8A4iIvKBM3Ezavxyu7o/ZnckSMx6DHf+wEB/vf/++/rY33zzzW984xt/+A//4d/z7/wffu33fP7Dd99585OfLqcTp8yJztt5XdYZecJIsw9vusZMfDTY+mXfCWvwfD6LSJJERGldz+fzD/7gD6aUAuMrrRret9Kex27tKAfuUHeYm6SFiB3Igtef3NaKZBsS//Af/+M/89N/+3aRu8ON6dmYI5fDzIgT58UMDqzrQTjU+sjBbzZ/zvl8LkN3GbaPaguFhWoSiXzoOVrtlCUeKV7uHvplEqEeIgvit1DQYwrRtlBosbvG0NckM0nR6u4Og5NN65N2PR5l2/OvqFUqGxtmKB99FwlEJNJbWaTHmnyc9NaP9usE0Vh4PiEEpktAmYXYoRYMvxg8lu4CCrjYbAmFmGSwETeODHT6U/hW67lUkLIwtzJohVkkJwD1rLVu5HqzHu7W9eb2kM7nB+YCLMw3WZbDevR6VMAqamXTtCQhJM45i7qda4kgG5MLRVVDJoqBtpbySLJn8UnLizN3r7XLU78cuuvNMmucl39wIo4cGI/EgDa9bdcPLAQQjn7my6zCWUE0s/CsD42wK3x01Ybxc4Qy4sP+cBeRWutgEB1FaWlgLOUiDVUnrdRbvKW91C4rkbTBUUSFT5I0/KCjncM+DIQJj/RrkYGoHAs7HCDoNmocXbGj4ytjiZoZebNS+HIgRhusJa3qmFAiSh0Cip7q0KeDxpiE58CBAeK1oTRMav38ojAI+zKw+R5mrmZFCxFt5yIiqiciKqXc8IW/CWSqHmrT8FYQxN2HZn61Sa8W6hiJ+FV8Xx7zQhopJbs7Jrb/5Ouh6RrfjdnnDkVWVZ4Q9Xurpty/3ei6FINzH6j7bpiZUgrTC5FyM83d3rWilAbwCTSqcTQrrNmBk8w3hJo1zRpG5JBGxMDnHdjISMeubHUjwYMndPQACmPwzt8wy4Z9p0zod5/tkOlO6qw22tmko+P7vF+4q3i0fTyEWoggbt6LJaKrOt2g6/9R9yxGV1qr4q/XebP9FfP0od/fUqquTpGXKAQ9OdA6KVeLtTV/Kdxri1O7D2sQgNUatl9kdzSrvg+IqsLM1TjKEjHF/hURdgaTtMxPqrVGpfAAWgAt/di3UNSJyUQyJQnIJUpxJpA4JWIHJW+g5gjodBw4E2w3Slp40KcU1rohLTd5QZKtKrE4jJkfHk6pLc5GajPvmhevsX12YXuJk3R4eDggQuzrsvbbQkMjh5tC/A1E8gcHtR6SO4KfVAA4A8kqogvMxTRCRywAQUiMlgwclmy4db0zHImOW3n+3rN3n334wcff+OQrr6xpPXpVr/fbpqQ5J2E5gA2moA1cnUFQAIY31W+y3wGZNNUz7Ky8eS1UlATkhOKqWsiQoMSGqB4Rgb6e6+kwBkDyUpvaLzdLW3RNk2+MJ9df8QbaBBnDSy0i3BLIwIjCStzNtubDGuDqmM0xQ7LPVQPI7Q64/Wzt+tusZ3RJZe5RQwzTQRZm2IX+kBIEu8ZLTb0MGX5JhTBUjin5Bf0UuPIgj5YMlXWwD0bWsWrpfsa9UJNqYY6K30PCxP+aRfFoDtdwD5HDktW9SOCSUtMmKlQ1sio666+rllpJKQ2FA/1QDwXIp0tVzat5Egc0WAhlGAwiEoKPe0LLeGDOGebEwWLP20bB+uDdqx0uWEXtCv3unofCzJgonPTezHoDIORGPFvqTTEbBAbqpZT7+/tPfOITTz98/vbbb7355ptf/epX/8//3r/3B/7d/+Pn/6l/+mf/+//h1/8zvxHE98/v7x49QcfGEBHFGNGVU+7q+lVD3s1tWRbqOaAp57/+137qJ//Cj/3e3/t7H7/ySrq7BSBJTPW8bYebu1/+ga0NHP9/3k6HdXHg+HBa11WEtFYI/Q9/6S/92T/zI2+88uT48OzmsGzHimUCHAMkgl6uyHstl2bkuUe47Hg8o2sz3nOoaq1kzsw553AieEtfVdpl+kUQJn7w4aGMqCPtuKxp7xmR19rqVTBAJMyeODPz8XxqQUWz3e89cY1i8k0MhWD8fKWY7npDVxu7ItvLmqvppb3Xn+PRBZGhOEJA2tWwWI1jQ8VXx7bqjbSJzbz5S/uvjbez1MqZQUSSIByAjOIelcJzpkOLM2zMWJbllVceiwhpFbdDZl85C8hKhnkWDpCkF1dGdSMnIDHc3GtR8hWLNfc2UacorAaGkTBL424N7xolFmvmvenubp/H6mrpXo1AGxnaPT42RVzj6wIKbuSQuRE6U62zxJ/17/HkWQ8evqcR+ZmVxab6jKy8qalhX0W42H0nxaJdK7VqVvqFfjy476rvTnczmRyqau7btrn7CDOa2bZtAOKNsdcy8zDzZlNq9LSUwrDR2kgEjWS8Wis1fhSSJC3uPUethckM3QDrAJWdXzv6kpjju9Eqi7CwtYOjbSJQSuIso+eNsQEeMBeFCxCkTaFjhhLQN+mF/Tnv4hAs0aomprAMMWJmcXqa2bZVAoeBLTx805P+NzZwg9k003qs3nYsTjWgRmPGQ65aaJ20YNzGnTrFR01LIuqCFD03fuwFn6TiaGE8vW+dzs4a/03xmX3QvIWAPPA12Lt5tR+vlhCFhJ0iTuMMjGGHd8BkDOmUdE1d/277JeIgjb66qT5mlpcO2Yrjhi
8b5lGFek8an9t55UnBy6/de3glDcaUjR/cnQVmYRbGK0KvYlNtoq9X2Y4b0lQgpFnAL1OZiRnd2YhpPbzYjH0thcokPM6mUf/Gx21TvLE9sT0niMKnsJpar/LbyufG5xwRUe8OEnM3g3lYaeh0gz0s4R0h5k7OQWfv7mF/OriHdwCQMDGbVnfA2VBhVSyTCIQhEus+4kLtBwzpOB16RAC57VGBFoEIpYwTTsfn52MFvfvee2lZnz17xpS+7ZOfMjSvLqfUCgG9JPttvGT/03zbsEGIokkU+VrTndyJClrDncPc63cROWf3FrAFwSn7frI/uIMpMUMkEUF8wbIABroFHgNnoBz03vRW66Pz8fXTKqvfu9VarW7wym58uMmuFaTE3FjnyAl+xpvAmvkJXKnCT5U3Sb5YPauRS6pgtUruQp6lOhlI4QVU4ZVoiXVGjZDiOmJIUVJovy4zBmPuED4dEFHoA2hk6d1M6UmkFsj5LodNkcY4hvRoPpXwyoTSyNRxorF69n+HWDAXEQN42i/NAyLkjrB2J1GgwhzbovWFDGYOZY6+pCFzmn0xlc+dL0ni7g4nJh58GX1x9yY6mKLWQJjEnRMEICM01YI5as8NdVGB9lHbzR6GX0jk6g6+pKdy9xSrkB2MTlNm7j32Ygj6RAaIaeIAcZ/QPWYTB+McJyGiKOhcazWvsDSWgqqKXJC8NQkrVAJGCEfOS5ZmSPTjGJNrGd3/145Sh1KJXB5iuFogG1pjulN2PlDHq6tu7k7UEJLbtr311luf+tSnvviNr/9f/k9/8Hf9rn/zt/7gv/LNr37p5u7J4fYR3ACCQb2FiKJCgjehdH196xDeS917aupqkjMc5XTKOYP4K1/5Skr5f/J930fr6qWomSwriG5ubj7quJsDlPM9CqT1cCo1C93eHrbT2TZdDocPvvyV/+I//89It9PxudVNyY8PD0+ePIop8OYiMHUzR6319iYqX5NF3S7VoOUbJ7Gq1o7LVdU15b6yJKXE0vgPZz1pyPSw/8daYmaDq1ZVTXltHnRceFO0k+XyRH1BLVjERKRO5hCh5CmlVLZgz3NmGWtpMB71V/tY2HKhKpmZQXWotswsQg3N0uxMc3d29qFwREIaR+5boEScg/mxSxC/gKgZIFexiME45zusyEeMmoFimp2cm9hmkiruzolYtdq2hd3u4HW9ubnhw+FgVa0W15oJypQImWD1tDDJYanbycysFhOWxLVWc99Ua62emCCNMa+TIKmTmzmBDcMMdgMHgrQHE7T6CB/NkasXNbOX7ZsLQ87dzWvjae1zNKg1vWeQi7iILClfEHMRBcGH2i6L2lz1G0bga3xxJuLCpVo/m2HzIrRawzJJ3CD/4VcaMR8HpEexMNZM/3r/oRUqHDG9bu3sQ4FLOTm2lU80ubVWkV1umxm8JeydTgGFFfToIuKgTZ0DCahuMK+1blqvXtr9R5p6vIhgDw8Px+Px/v54c5AZ8upxjjvFvmihg+5vqm5RuFDHgATGizyPECtGfL7NEREFUutwOLz39N3gFiLizj4a+SIRYQqGIEOcvMyxOC3sX7mAAs570GwvhjGuZcnzaI/TcF3XFx8yr/B9a3fD70WT0t1bhcmwG33iEZ+useABkDMR9mIMY5qmZ+qcDxlCNSoI7Eob2J1ZIEwTCc1u5vYOO0ANy0u9UgUgrUaMd8/I/roxApNFiv2zrtyjn15RFqwb7O1zUut2dWC05pjkWA/j10mKgnkHMM+b5aOET+CngSBGCdAvkSdXI2oxtkmOifNONjj/e3VxZ3T1Rs68FwhpVy/oOkUWHaEQdjm4S7M+XBeRGRAcE+l/EFe1NEIOvsGIgvheVwxRG969eWGIIvpWbZC9UCttMjmzjMBgOMdUKVxEyAlReLYtQgYkp9wIeAyOCgfcYAwYiN0QCX6u3hLH5TGiEJQzIhHRyWOrujuoZweQqZrZDaX33377WMrNk0evvfrqq6+9VoouywLrIZbA4bSc04+EcX1LW3GfRrTei3ll4h5AYyKOshShOSKIi/ZpBDGsP6rvTziQBISoKMPxNXfAQMKwqIrCxCvk5tU3XnvltXJ8P93cOfFTq4vQI+hz1a06VSWjSqKJjJjg7LaAF8XrnDJoQb0v9/d6fxIr5FSLm3EFk6Gqq6rAFgm6LIUrrII9XNmGSLV4qZ7r1BOs5tO2r9lG2NYWPCJ3EEShTpCzCLhVePbqDm/op1ZhKMq6tKBV53AnwAlwc/LuId8dVegb3MgpzCdCZFA4+3Dxt3t2eeLD5vEI90EHQSF6VV6HEhMFe7N7FJ223SC9HgHuQXXuh/4IkIyj3KcQxZ6SADUzOAJpDDQGz/6K7k90ClCDWyQsh4+Meu+sW9Pt6Ezh1nVooMNdzUxNI3Pa2JkdEbDOkpaUcxZmDqbKrvE4GlniztRM7FHAbF3XIXBBNlibRrd9onGjrnVBq5mBLJpXaw3DQFVBlYi0ZYFz5yGiyK0kIrgBO0gJMDTkf0CzcgxxKHOhH4QF/+TJK+fz+e233/7kJz/5xhtvvP322++9987rrzx+/v67f+yP/tDp4fn/8gf/1a9/85sMqrXePH5ViIJKmbps+SgN9ltQYn2UIScsZQreSl6Oz5599atf/eSnPkkp6XZ293S4BUEDjPERz6HLtwyzkAA3pJTI1K0uq8Dow298/f/9R/9fX/3SFx/f3j5oXZm11pzFqlpVbryaThSpcyCSPeDQqZA8nAgp+eQPjnpry7KQ+bZttW7MiH3OzGZ1pJPECnFXohRR4vC7t2AFhFsC6j6oNCUcEglNDmP3xtspIuFW0XrhwR3fZZao2DNWzmg/hs4DdecxmgyYVRh5B5z1z21Yov3rGvX92ofoDWhbPfrdzuFQvmkuSUe7+x+7ZIntA8Dh7LQXiQ7R54TNXGo9luqJmMQTKWirplW3h3Mo+mte13VVLSCknkINs8R8WJatGiVOiV0l6lOF2liqljhyiZmkmKu6Odm+0gPR79VgCBghoK0ch4iz89DDBiLf7EIaTAPos3Y4e1EE5C0coYGFJmqEeU3RIUoi6F9PSYQTCVPUYXN3Y2ImZvK2CodGOOZ0hgTPKt1whw3Bvc/yyzDqtXOCjY7nLMvSonPMexYfLmnN4pmRGh21WDHBTXPOt7e3zDvefhwwsS9Y8nwaRfTSzJhaDK0UPVc9wINsaavltoU3KfTVsOe7GHFTK6U4aBS1H0PEzKn7R4awZfKHh4dnz549PDwc1kdEJJKYpKM5zB21vyGGEsHgEeNWa/wab0ePHLq748L08m6JCTdmadWBKqQrDlQiIVJ3X9fVLZgAMZGxjUe243NIBnrBbTHM5nHNS3c41GbJQ5dRsjHXL/47YK7NUzAd4/PyoI84B8gcaCGK+X700slD7lGtGIAd2qkTzYxUyX3HxUToWxKa3hG6fNt41MmWJq0baAkjs1bUI2+8+3HIKcqf7urBhYFmZGEHqVtPxey5+mZVRHqQMwwQd/dAm13o4O3fxsQTGyY+N1Wklu2GcayHyco2WKPQkmgk0Pr7bTvzY
+7/J+hx8M58FggU8QAROe4FkZdbcPgjALEUAMN6gZlCgYsoMIXd/2X3t5qYtuRtP2feesrOGwgPK8/PMzB4uwwxnk7o78+R9aEnNvhMFYQ2ZGYY2cQalU16kC+9VbAnlDOL/qQfnpOrKWHapK2ecrvxvXvqek7pKr1meQXz+L2H1xlFslaTYz6a9jOb8tXqNzDAB3NVORLfhcy8F47NCyAE/d5ROCDX26zuUo/TUKkszMfSmaBFT1dsvGNcFHtsGvTAFwJzNFSY6Wsr0QAcSXJT+UTKIkwXfG5ukkveS2ubsnqEzz/bwKKMXnnGH7hWLxPJ6qB9F4e/vh/f09enZt25Z9PwqV23zCYlKxkrhFY4A1g2hZzxJgpmo1Tlab0SPJyW632zzsMPVwVAHxt3L3cdd5M7P7K2gBeTiTAm6p505u5OQR7mW4zRniPsz7ypl0d+4Wz6G/mmV/tvMVZ2GMuztTqpXmFMD3y5orSvmDX7ajqSsUSxEpYzUNNgu7ONXryTxCBJdR55kYqDPyCeNYiggzRTmi2jHnZIrGX6fssQLRLveBN2vosKcv9YcRkSbLx1qCY6cSBcriw1DftTpBr8eSmaNA1K9XLyYzq5+Ls5JoGBIRRGJmsmQirr66Bl+oetW0XhSsviY84EybuVQN9KpdvXigQ5YzXfkjgAZDM5DbRBIswv/UR9TdPeLorZxdH+7uZj7nFM5EVj/BJy6winH7cRxjyBr7QpL0kq1HxJR4+lIoo+suu7tbsr6onKqRGJejsAe5Gp9eZkwXzPASDGc+ixy+vP/o9nXf959//llVf/311+O5u9Ohc3/OH3748Xnot8cH82DZssBmIY/kVI62gYNJcnUKCYT3Mudunsg02+Px8DPYkK53ZuaR9BlwLBHK67WNze/1MbNp7Qirra9d9atmqVnskry9ma5VK8h45iCmyBj0FiVeuuXJZ6plgjv0Nt6o9PsejOlJwJGC3iNbqT0ydaMkWC1tbHc3dSMQCZXHE4A73t/frrr1iceD4vxXI1Ne3lj0fyJjR/1hHqhr8nbT4Qs/7yERuA3p5dBdfDdYNsI/ma99/wsfOL9IxIAthk6FcdjdO9UTZhRhQM30HA3u6k7uUXcRg+tXE3CmlNSp/84wlg/6r+1hiV+zsW3TYRcO1IFFp/XW+gNXkxKlcC1u1nbqC58iwJfM0jbSkubPRIzvq5+9QSeXi6h8GofF//sLAbqzrA/FQ7zCDleGHAdvXcM4PiyCUnRW3xl329jr81sqvUzG3cPVu1zLbp5KYLknTMHExQ97qeOvcXB8ccJu4+YVKoiTJdu4FoieUxhzIvw27sAAotEWgdicCAQnMiLi0BFhDj/SGxocmBiYBIIdDnMWN3aO9EvtKX86wrGAL0cy/aefrihU02N+7I+vd97HHTY/fv7D3//ut3/3+9/+4dD57euvf/H4htuG+w1we37QtnkqkOIWvFMB/0+/+9sxbvfb+/3243ZjJgFHMUdvEIjwT2ETqgNmJIF67cek6SrttYUIWP7zD/r/7eWfvBt1oFIkLZ8Tsij1cidQvQgXqLC4Fj5JVAZhmI5zzk4TPfUVMwYD5zlqQlLVcKbRwpCJ6O3tLY5eJOfb9NcePDijZVg0Q9SxRcmLlkqrhtlyxxT9dTPr8qnqDEzBjYZNH3KfcycSdkBtyGY6o83LJjIY85n9i479oWb3+91JWGRz2g9Vg4wbZICIZZtzfuyPQ93Bh/oPb+/j9g4WsI2bg33Xx/RjexOzQ50gwrKRG0fOCd/NpprxENnYaRoOEiNWj95CHKkKOGwe8yBFpgUTs89IThCo2mFmTkblZxrENxnQww1M2zbkOCzQksMwmHtidQwRuEdZ5ybj+fGY04RvH7fpLCw/gN6n2lSaCgxmupkebnOM8VDXTB5DpLMe82m6s4hPBTkzz0O/OcYYQ+7HsaumBdIGsBHGbUzfjyPsCgBwVTsOkptb5HCQWeyzgikSVCOOZ2aGNGMM4hOInhRE042chHiqbyOawpE7nnO/CYNI5RjE5lNJb2OL7CQRMXMzTFUi2u4jifi2qadNFWrecRxR1XWjjaDmNg9joe3+NsZg4huLHyaGQ+euKmzw5xBe0i+hao+nb9u4bbfjeIgPFtdBxzFVJwDVCWzTtGuYZBOwquqN36bp4/Fhpl++bGR0TDdn2cZ0G7cbBT6N+z4/Nt7etx/nsQuzEGAHk7wNtvuXr18P49vHJPcxnZzww/t97t8O3nbYjdRFd36aKJzmDnZhHvvO7+9yv78/Hg8WM9MNG8+dyebc1WXcNz3UCfftbioKutEgp2PqY+rufvPguewsLsNlmCoOPfRxv93uRuOwcR/OeLgfGU9vTSPV6HLFk4WrbFG8pKv4Fsbki50cdwZCNgAy0iyVibNCzXTaHFjZq/KU6L9iADDGcCefyrKZuRptcqMNUYMj24BrgpA4IvrQaSrhA2MBb3w8v02nL7cvbjvBmdxtAgCPgFPdtmGqksa8EdG+7zcZu+4f+4fINm5iZofu4RU7dIIH0zB3UwvMDBnVkTNYqoGIQtmPM0dlJ7PAGcxEuBOpm3n2DoYRppuAnOSxTwP7uM05bbf39/uv376pkT/sl+cfQ67oRBSE8Pb2dXd3ut1/oOo/PljmfgyWEG63twT5fDweEWMMmaTuM0ysmR5HZt5pv91ut9uNmd/f39ucq7xQQvZaCr0wnZRzzmN/EgVStBuR3MhMn/Opfgg2O3uKTmYeuYbR0C/9J+50H/c5p9BHaIBvg+d+CH2JRhuP568kEzJ3PIbf1NxZ+Pbu9PHYn0NuCEWTiYmiSZeQj7Hdt8Hk87kT+U0GJm1Et7GZzZ+f+1/6D/b2xY9tRhLBNoycxJ7+6+03bLcn5v14Mvx2HOpQEUrPjgwzVjV3I4IIsYDIdZ9cObFQo1B8nQmkdrg7gQTl9yHmcbPMDvUwrc3gicIqRFB11YPKJSyDbQFdC1uslWakU+mEZGMRLpN1mrrO8+Qu2d2ny0zzcKJansb9uqsvzc0HMShq/JJ9MAUekLlbdD5L/NcJIsrCjZFudXcHsXARVXQsDNtDs9VlMqlOOVa1RflO1YpPm62mLOv6RAU6M3W4NY0ZPYhobIIThfWsVwSRzbkahG08D7mtVUBg6JxqVX9b2l4yOmY7a3twGjbmY5yRQKBQXoiqJVoZk/EwRxU0+mpXkMMLeGztDwRAxkhI0vJ2sQgx2/MJZsDboAIRzG1OTg2UhEgVqupqCetOFIEQJzXOSpaYT4KuVXmkQOywG28GmJvQmKpMg8SZhUiCpDyaWxHdKMYJgxOdrrGU7hG1Cwqi0+qTpX8VR3AvirGnahvMZDp3v917O0RkhLhUMyjYGQwJmBAQCEJ+3ACEuxojkvrM6IBPJyYTMLFLObOI/ffhgHcaDgY2AwPbkZG7EealI60NAoTg/iT9VcYT+Ir5M/AB/Xj79st2HENuvssvP//62//48fvf27ddiXge7k+QbJg36M6HwYm+/QFhd6kCph8ff//3fw/+6ce/+Kv3dwc5jg/IG3iQMeROIO
SxClpSdQX9ZcYkQOphsQTI6ZmTuvq3JsYGyMQbNvavkK+g336zP4j81Y4vjJ83+SoP+1HesTNuG67XJ3Ttf+zFfzoIWUaSRKFW6CdMR9aLQbM7E1Hh1qZWTAim6gJS/cYgYgIs2pTFWVObwXgHMdgiWwDADK2GIpOYneLMO9Ogys3RY7o7UQBk2nJyM3rPZ6keGRkVVjYcQPbIReghDubsLx1ss5W31O7OCNxpE65mYTuqgm2yZ3iffdQXRSCY0aghUuosSpeZaERzwy4xRABgHkd2Myxs6xiimnmlx4R/+nnsZvPxeNzpLQQlM2/bFhmPbY+u3vQY+vP54WfJB81Z6UyVvhhpUCEJpFo2m5krhJ2ZB7HyUNtbqPhytYxs6RILN5Ylq+4Pc9/39/d3EZFB/ai4bd+fZiYyPLQ3p+g4174yzkJq6Y4anHGzdsupmfFSa9eD6fH04C0a3QJRgRaf78fRQ6oP2zPaKRYpdlYp4tH3zyrgXRczPZ9POA/J/F0zDgt2owSBj9R4zvvLrRj/142VzLNUd7ExUpcuiVILUiS0oEcyn6AytiK5N4AEECAQ/a1YpaAa8RFJgFQe4qBeIR5jRCVRehbH0Ojf4e44z1Lotdu2OaYeu5ne74NAx3y6u6k6AtLIVVXntMr98zK0CFKQVBkjFZEoJPDLFYFcrRW9oK5x1d70HF9YIRV2hSyQxJwIvaerGFfnU1HIGTRYecpFD1uKiPrO9bZ1JFjUqSbLU4ezLCSztKbMnRAJ1QsvizVx/344hZYxF8WeieUvdxIFmr1Vmv75Xa5YWFCgXQtpOgBFwliq1PoV3jdVX7U+qgCi0LSadnovZqpcy+AdHkyVmR+Pjw5khTIdGMu1ItbPWc87FsQOqiau64FqOnsbSySW2SsBNTyR005mnotQfep8iXyKyJzTAL52Y4OzEYygcCZSRJg4m/4KE7GQyZwTpsxsBCEmyk6Ptlwi0g0bzvHDI3bHS+60Q+ecmV7grm4+iWBc+HBTj6FWbs4QGkclUMgqhlQ13G7LGQl3Cs05nUIAcEa4KwUj8i9eCaN+bYnW24RF0rVEeDwefX7ju2HS3DfpwRT10gsNRGgubuukhpfjoAXS1hRu2YJ8A0DskbXdJNQsNwYf7ZaIMrV2GQyoVmRdATJ3cphRycHVAAv6QdU45aOCUZw22imSAFA2GQdVXAspTbQXGcWaorCz2Xuvv+sF5KyjeViYxivDqezWZpW9cfHFdg/lVlaqbfyVyyDsXKRc0mtYD0RYEA3gjuvI+wyug1uH6mYvESB3J3ceA8zkPuLmgu1pWYLlXPcGrPTf3LuW9/JeGU0nF/pf2ez6eW3sSUPxpwYORakWKT+X57xO+ZMygGjt1dtdy+gWAe0QYQjZHAvaqbuqWoSMs4ABXSyLbq8CqIM9An0UQ1YOCxMztR3nrC9VI3EiwyD4xPFII+vjV91V1aH27evjt7/949/97R/+9m//dh4f7jrk/vFtfvny5c/+7M++/PC+vb/jlx0//hmAP/77f//v/t2/JaK//uu//umnv3rotskNxBk6ZoEMyACkiCW22gASGtBZWfg+QA5MN3MEXvNqECrM4QwRYDAYB/wAdn38uj+/8m+YTFVnxJB4CIhh/mKv/dNag/+0F9FrzNTRdoEDhFMvZo0Ic9zmZz/VEqRMldgSIAv+qbQKfS5KRqOzri5i4kxHYsc2pMVJC0f/lKS5ShxakDuaL1HlcrfoiX91n2F3xkQaU8MsOqyd4HzxwNGpaCIyKJm7oarOPqmARGQLi2FmVZ9zSnikqvWAoMBLb4POi8OMFJFffvklShctnFI+Q4Ok2jNDpoyGhbltmy6Z8fk4TxykNi14SaJVVVQuxEIl4e7XxIOp1VE7WtDG0lOBrBBRhKHMTC0bGQOoFihAmoVhnNQzqwwjjYQalburu1R+FFgA0uhjaq6ZA+lP3yMkY+6qp0DSqfXqXIfiaXzKayby0zCgBBpI8RS+ScsSRDiQQl1NVYfLSuhUCs2J2BH2lWfNd4WP0s1Mp0LsfoUF0rNT5Tmw9ciVYn0mqBBRQPWsh6GPT6QqxWGjSmbQBMBA+kQRDoXItj+3+2QZ7mMMM98fGp0IED4VRjj5t02YObuxLcY88UpU5O5UmXjm6wE2XPqhXYSce+WjyshEWT0FXi8OZbHAKRrP1bCTZRARV9PoltmExQxaJO76ihe+tt584ZJ1LfrE+es6jPy6B4hcGmyUZ5OISEpmN3msW8xrH0KisWjAfSct2i0XIgKXehomh4jwiDzkfgVwnVG8vsd82qsE4ESK10IEWZbHUVF9LBZ17NRp9RXNxBP2/Xkuu7u7V1Qz5NDrHnnRaudYLkfAmw9LdVwYpeE01kukoCTlfErxbRiY6YbZbqk0J+xKFcxsRW8rMeR5XAoC+4gl9bub0Ur/IhKFo+tzzO1kwle6AmBLLqvBEnEYdhzHkIBpjdRcUz2YkS0SyznuMNWDKGon1qYOTGBVRXLslXMyldd1Je8QLg5f5+tlsL0Qas+3fQrr3mkDei8HAVeMASJxn0VsF4n83WO7fjh1Z2ZagmP9/H5jfYsAWov98i1cavTLFSf0kz60XJeqSOTsThrm0iXiLACoOqZTZHfywnqU3C1TRj+bEHRmda7zrVN+WeSXRaNz3wmfyA9tgfQbv8cSz/ubi9TP6xrKnx7G53khGBro5U/unlm7PQA/xfy6Zz2p6Hy1bFaSWb+sqctrR/K716FKqR39aTB6KqN6rRp/mWB8xVJ7vrz380FYJ2tmfDtVKQ40I3diYiandA17P5AIQ8iM3JCx9wEmIag7XEFwF0CJpJhvdW52gCWSDB0QDIcx4G4Moki5AoCJYwd2+BP6FfMJMTD5w/7wu58f346Pb/t/+rvff/318e3b/vu///j56+/n3H/+ef7bf/t327b9+Z//+Z//xU9/8Rd/8Zvf/OY49v/4H//j3/7t37693f7Vv/pXHx/0fH79F3/9rwYP2IDZ49svj+cfnrs+jukYYBljjNt2v9/v99vb2xuPgcBTZQbFvHgjbATzI1bEYu0Bw3S448cBG/Qr8AR9QJ/Ht9/b8W2jL+4d/gmYMX5Ji/5nYg3+KTYItADvWtBrfjhAJaNlRHdKhxqBnM/elBy+qgLojlSCOdW/+1Y/lb9+qS9Aymap9fX9L2yKl/DAC9PoG6jNlvIX89J2gpbmunJj1bm6++M5UUvVC4gSYWN9WZzh0P4PZir0l3XFqeq7wigKPex2u2VdRA2Ou+TRzvm8GAAhJZn5lDtEUdgSAifFLZPDbvdtmqsqOJNt1lL+Hmezs8yJIgsHea+/FnJ9rNcYA2RTE+a4KjtB0dIbNsZwC/FM7gElGn73UgRrUpwpOmoUHCR6Fo9Nt7Hpvu8BDHPGR2J3iLT8iPk53OGmk72dr2FeIsBjiARwgAGzipy3nlwbzIviMc2iipo9VecI8IqbKYxP5xznf0bu5IrINGXnQSPi8QCIhD30MREpXdPPjRhy05k1hJQJhJWscjWH4lKgUmjqEF5DB73X8bmYnXgkf
tp4lrHu6Kt2BGKYuxGNMYYUVTjMqrbN3VUdnwJEIqIGgCJkgVNxyenIYvcu5/90CzWpZ6jB1f0WAYd2owYqRx6Zqs1oWqVzXy554VFKapnkcOlTz8wv4GMAeNHsuaqW+1u29Dn175RPXK7rvC4f5p9qlwOPNjQOrSYykRZuBQT3wsrj7bzobf0uoQsK/PqnVAKWWs1YooSZWSpDynartK6rk9vdp1ty9JIkNV/e92fwnGY1zEyUgf9eQyvKbA7TE/SsTz6/7mamiJJUjwimaZO9XcGH2vJcFyF4dRTFxZP1+UBw8uYnZu7+9etXHtIOQpyGdyaZq6piBhXGIeKISl/W4TTC1z3yFHXhPr+0SjdTRzYXiiBVe5fWffdA7+imtewOYWL2PBvLawMJjCJNwyjfKwmDT9RtPyzMDS3LM2zZPqppvYdqysxYAqqAjY2Dgl4OhF8DU76wbpToCQ9FJ9e4+/1+X7e1bzueHy9E6I7ofLseeYDDJvQq6m7R8HoWrp90xLLpav23j4+UMX+BIlkev2p/ecBBcHRNqfvF29VemHXhsJjKcWr6dfLiB1n4zDq1c++40UQBogihuJ+RNyzlvsgTeqmBj7cInX0q6ZrssC7juZ5L/8bz4miHV0YIzgMDfzVQL9+7bqK726c2G+dolwXpQdqCUhvzPH++GlQ9VFo5Sbu/SyThanbqrK6/uCxIsDCgGuEtX+m/rq/OrW6rH7HepyHq18WPzDVf7MMIMGy+ehki2kNAJHkHHIu7a/XYja4WIMqOHyBzCyxegwlFSSOPOFwAiBSgynsiYBKY4WnAetFQIQQChm9/PI5vzLvbk/3JrNj9288fv/tPP//8+19//7tf/93/8h/nQUPuP//8/Ls/fp1z//nnpNL39/f39/tPP/10v9+n7r/97W9V9X/6n/7Ht7f588+/n3P+8vMO4OMxf/eHP/7x52+PXZ3YMUwB4VBOxhjbJvf7fYzx05/9i5/+/C//5m/+5i//8i/p7QvK5I44OMyE0jbOZcfvCDv2P4AVz19h+vUP/9HnV8I9nc15sAmdTZ5U8s/CGuyrmPl3LndHKUWWaHGR1GIMR5SsRJ3pJ3cMC7ePKU7rZ/2El0wN12rjdB4Hbo9z0fOI9FQB+ZJJt47Z1oSCZUjtMG3psI5hVZJTDg7B9eSWknyWxjRPNrPEnm6lJORHqBeqp67QGklH5FBHVIS2bbvdbjlEpshd90wqsRaNQFq0oVcFsl2p/kNV3ZX5BsCQnf2Y2RfCI1rrlS9aly95UKu8pMUwylcLQGY+HQY6vdphdwRMXft9hYdyjH+/bW9y2wDMafueqaqanDRLMz0bY5K7q9koFJXDtDImAJxJC7aiyhKDQUaATzNVR2YSS/K5edGe21nvfuqjnnU3qRAaXXQX/55wyj+YzTnfPKuVYK5QKZmRBBpNo6dGx20FyY2IyA2WvcdT9xeR49A8irVNIhLdIJrvu3sgdHIV/eeQAuDtSverliDCnHVQr0aCmU3ddU4WBJZ9NRUk6TrgilK6+3EcpLMBPDyaL+1RI1d6aL5XuoosU1XPhdWV1L0qXlbe0TuwIliuqYDNMtbp+Jl7XGocEDpuX+uBl/7Csr+lDvsnJe2a8Pk9a3Bd/F60Hu1nclpeeuoH372n12RdIgAM0s/K01LUf9E86IQJXSM2Vq1BgrnmJ04VCIrexuXBc1c4FC8IY/32jg1a4aGH1ut0kmUv1BriwAIognCm5DVsajRf8cqChlrbtPFF9bNBbe8REd1uN/qUGeHuNAKg5WLzE5G6UQmMXnMiAok7zMMghBgvE7fwx5GfO/WnyAMVwzSL7E1gIfLlu0yFKAt4FAt58UUBNaw0nIytxi+rVzWOoxMyjJs9h2jf96mHZ7Hc2a9yGSpY2nV4ji1eEZplDTUEn5vOF9Oi+I9Efm+7ouJPjXa7CqCwD4vdWZNQU35Tcq3YWRmyzDpSWppaTuBHfM+0aNrjxav7cmBfJpUG20Js5wjHmVBz/kC0OYGi9q+S8wJ5rx798qIQoNQvrX2lNqUAATvXJ9fG9DVUNr2URHZ+77IOF5Sd8A6s6+O1Hy8j7Lesb1znflmx84mGZQpAB+vIPZHvLw8HXjwN69Z85sNE1a+iBhO5A7wNjwNUhmjPl2s8RCf0dvPkz89/4fDfnWYMoz+kUvP6RYth+Z2p5bdAVI4/XLGRXqe8DCBY3HEczerPG8A+p3MgTECRy02UNZ9ORDTgagqDGojHBjLAAOFw4wbKKIWFwIADkyBAhM7V3QmS+txUn1NVx+Pnb19/UdsZ8+PbH4/5QVCoPR6PP/z9119/ef7x94/f/f23j28Kl9/97g/z5ma8H4kj8njuv/3d4/13e7R0Po5ju8n//h9+/rv/9G3fdxHx+TNA87Bvj+djn6Aht/uQ28dzLyYTyoZHq9v3H/7ir//6r//Nv/k3f/M3f/Pnf/7nP/3005//+Z+PH3/Exmm+egi55OlEP2P/OH7+7YZ5fP2FQX/87f+K44NsZ9OICSpoRr2iDPwzMwUJ5OHxBlAd6lNARDyw7kymFpGLT3LMVSMGSOaRd4h0g1GESQNodIzhmkZUEzpdsjkIn0gXXah/6nhC5F3vc07netvLwemJWF1efvD401o1E+JYdYbPdzFzfH1CjSevQeWyHWMcx3GYRr6cbCPDHeViCeUj+mbEz1aScvVleqXNh8BjzMik81JxlsVyAMRgiqpLUlWlaqRYDCxk9HEc6p5ZKDWeMYYVAPcL40g9m8+x5Woik0XNrHq7qbve73fVtQYjpmaRPaUOs/24HTS21vvdwEJGiXlIPEL0U5RQe6hYu6pGTRpXzYyRy5L4e1FZiPzauYGHBOKZVtOOXPMzzzlHGwRC5aoMMdd0Scws5/ofx3HbZIhUmgerHqrqX269p+vwTgGw0CUAt+hHGQHhzKtUVjr1zji3AJyZ55xjjOXcxrkSryZRXj4IIuLqMFZrZUXl2pldWKypXF5PmCYhHsyRB5cBQ79x5V7WPNjM2J14UBrPFlAT8dd4IzMHnn4tDmpU6ZIJwMxVCw90xq5i7YNNZM0I+nBe2Md5Z6V4LH8lcmagqi8/Q4uFhD7B7/rt/poZ34uG19Y3y/6+POTzDVeeZVaJwe4eHQgqh+IkCfPo0aKqZuxXgzDJvme+cMbPPBelSq769/qVjC0vKaOlyqanA4AnmH9MJBTfV+VMZFzVSOsDsm4ZkDnM651VfBRhwE4CTTaCLNtWd2fPelevzorkJ1H1Mnb+dm9i83dJAEjrm4kITFT0+bLUkbhlBjcCmXN6QI/jIBp9j2vr5d/R9uKAtHRZ/0REoEG6dC/UntFJD/0VERFiMFnWAHvB+uR3A9aBTNx9mrJzBEqP4/j69evzcQ8gX51TVVVZ1TXZ6cVvEie01sEIDPdIECEis2mmZiYup1do4UXrEVjPgunhzp69KzxKuoj8OLqfavTLCYuGbmO8HMlaTFxZ31n71xxjPQWr1ffytJV4mp4vpLvELU9zqk6QJRgsfdr5HAEoy7ZK088yNiq8iT778ctp
OBFRga6Ynui1Hj6zcsX3SHCywQsf6z/FuXvhY/Hr2LbVddg/uZ5vWdeTOEOO69ubfby8IFaJ/WIQIlc7clLcu3auD6wnvpcvFz7ld3grTssxOV8+RmaNEkWRTa+A409o7r2C7q0hLHO/8LTA03amM/RqqRusoocckU52iralPVLPIrhN72Wc8Bd+HtdR/dkabyb219B9Cy+Il6rqTlyygIREGMwB4JQCmZ2iT7krMCI9nKJLRQFoalYJRoTQOVKx3EBMIPiEA2o4Dns+53HMX37ndrzdhh7z1z/8/tdffzadHx8fv/7y8bvf//Lt6/GH33/9/e8+Ph5mRn/84/P9L39w9+NgM7vfb3PK4/FNdYqI+TTzY+L5/MO+78/nk5kx/3C/v2/3G5McavvxoT9/0yUdZpOs+LFBQvzVoMfHH373d//P2+39/f2v/uqv/m//5n/4l//yX/75X/x0G2O7iYywIdXn4e7yw6/z8e3442/dj29//GXb7t/++J82mmwT5eMO/gimpVvjP78rM9AJEGAibcXXK1MdKLuJUimo7eZw8tO1RuSHZtxrAUg2M1kdcHS6lSFCFqnFi3ZkoHH2hmkGzp484cVxuZ6FFxFjS+TQyxALVwJwsrJ2zvIiu+KT6PXQxwrL4SIKgLgLb/XwLYVlk7Nikqx0x9GKIwcoAJvN4zi2+y1XpARwrCPLaUwCp+W2DiXs2kCy4UrSC7+vuVI1OIogUiwHVb8p9wWfis/nxwJhESRpxFbNrXuiva0O3fKyZRKRiCTAHWeL52DjZoGwHenoBS/mICIRMiPtTNlw6IY4F+YT5TVYNmsU8p27DjOYwaLIirCERryETBBlAreEyKFL1U2LGWTtIDrhLtAAgHYel1ToZfG6o2sqZIGBjtweqlKWCvZGlvkRShvTjN70vbyU+ToJ/FCEexK6V7OhoOYcTPYb5NV4Lu+4hOaUgoeLMNirfKEKzEDOkRiZWVjsZBkB0LvItm2YYcraGMLZUXCCBxI1ZGxVo1Uc0gEwDWYLO391VxOMmaPc1GAxi14Kd+clzzPorc7Ci7+oNOzMVY5gCItIJ06TXxhEX0TUW7De0Pv4IoBfJPf5w5/wlF/43eUhFz2kY+KxEUFV/Xx3dzWIE10G0JTzWVdYP3kZc0+wP2+aCeQI8rPVGJVeYli5HwrvL7zFBqRq+jJTlCEEen1v/+vtlpJLz9m+mbwLU1vrpX5ysqzbrb9SrpAEfVl3ii7BJQfYPWHN0inqbpqqODMHloy7d5IqEUWeVQOYxzq1TgBkG/Sr++/VzkSekdnUDhSU0hI/yAZj+eoodwmxRMzdXJral5vMi0AQEc5aKDd3iGxDtnk8v3177PskEvdAyWKA3djBcCEM4Rv+xBWnTNMs8Wpe6ptwK82r29HMVmzeXopwJ1ubZLUHDfWDelyuudWZ5wsdujsV9FQ7yKRQ+xeBeFLL50O9zs6vjideIjNYDl0bZsGmr+vzHTcTiyCNEBCRE7x6mwCnmvUymha0oEzHemE77k5O3S/s83n3BVIvCCQZ6AladmEOFMrDeopLE8yHM1/+euWl/UHWmyAC2xffhwYC+tUhRQt/iHY++errkVm24Dsbh9xr96s/xj368Cbuji/vOutiXnh1GeS0nOI4mZ+3AK1iLi9dN5Q9M/JP8jt9pqcq+jojAEAG2P0kyPXOOonpmeWqIR8jMyx6ZXMkCWBB4BH4iYGvk45Cro0mGgQKqsoJBo5CEXdgTIQp6A4yeGR5MaZiOmxi6vF47o/Hvu+3/Tl1x5SPj6+//PHXr1+/fnx8/N1/+u3f//3vvn09fv22//Lz/vPPD/hGfOPt7bG7iDAwp/OAun88pzo/n7/e327v7+/7tJ9/98c59/v9vm38w9tvFL5/7PtzPvYjsL5Etvf3u7szbCcSoSGxgNjModOO/YPoD+6//dv//X/9t//zj1/e/8f/4V9/+fL202++fPnh7SYsg5jBzD/95eHzY//ljyA/fv2Z7l/mx9ebiBigNuecZmAiYRdS+D+7vhPfN/zOnNiXT9bMZ09INAM5RW2Eo7XS+K/Jr3pGXLzSWI5MHT1ZX03ly7PZHc3Xr5Rb5MpqsJwy6tS/umG9uQyZClos3uG4YSyO7PXz/noPJt410v41HT7yz8JEUNVp6mrE0Rcu5cd2v5GwulGpKe6ubuujzeasRvAcNoMm+On6bwkdYqFQGdzVTIh8DDGDs5tZ2IfbtlmZOvEiIYaMg47lvc06z/3gqtGMqow9Xa1pXMXn27apaisfvdwi4v4UEZIRWn6bjpHJsnI8Ol/qRA4QS2AJxHOs+FgRq7PDV4UyZwFXP60LjipKD8BLDle9L5WpzbJXfnpSAKG10SJyjVaBIsJMehxhyo7tvm0bcDagb04txFOPU94kZZmdGcQjA1aQKFlkjJZkLZwAiFA4HZnZNHYNCiWiyKOtrbyKq1rlHtt6qNxTLplFF4bTU7LoQGe+Ydzq03VX+UHu93d1+nj+6q7bducCyGVmq4pBDjxoPxsJLhvkIqJa9qcQICyY6qpqZAZyXPBFE6mMQ0RdfBaBJUfkeIFTS2P+lH/ndtupEvbSiQiZO59mdr/lhUh6m5r7vLx3ZVK9tlFCjLJGVv5FBPZAhUnTV9Jz78ycaLyUBSQ9gDZCTtqzky2uJ/rCka+qZP/pvBzr8LCc7nRmJwUTwhhcIwYVp3SCqdV25/rHZUtmWp84Wji1qkLXKVCYDMEMOg02zsig6hpUOKK4tgHsBONRkaX+a07/GikFMgqWnv5gC4iWNd5S5By/EvFZuuPuQBvVDKZImGn2QtckgsiviVyDYAiHTkY5Osolkjn56aQSAO2Wra3KzHNmFuaZls/pQHRmm4c7M91YaIybuexP1UmEQRhuBBf4CEgFIma6idzc1M9z1EEMIsI8ha7n2cx9ObM9V5pvWOl1F1Zeh0UY9bY2b8n12TNnmJ1ZTuKPBUQ5yLLtLZCOzoxCc1MFsPWWrYeoGxyfls+Lq4UIzkwZ35tzMlNqi5Subyw1YCtULwAepw1QbNY9atu6oL08YjEolAtyZTUSZaU1gR5e27ovusu62vHxyX3yNlzuP46m5CYzAEl+XNVo9TWrA3IykjgF2QOqYqp1f/pM0wOQWKkAzrqOGmV6eSuOCoAWsKheycu3lp/b+ZI0uSApEBGExXM6VHFILEk0KNwqEHk0/4kvOtaX9kh2m76oobnpRNrwDR57itrivO3FhsykBktW35zZyYIGKWPppd2N7ZxUvJQJzNbadpdVh/MzW0rUyljC/wQ3M+NcBHaKamNXNwjxkj8ZrJ4RfTxcyT1LQB04DMec+zGPw459fz73j8e+73/4w68///KH4zh+/fj197//nbr9/PPP//P/598CPJUez/nt2zxy/XTCP75+vd/v27apBey5HWps6oSPx3NGwoj72/sPxP67P/x+f2eiSKwlJ5lQM5u2q0+YO3Rjut/vfN/CZH48HnPO2x5QUvZkfn58+/pH+Q//27/7zY9f/sVf/sWf/dkPP/7w/uXL229++HK/3wfvYtO
+PiCkz92x6WMf9zdh1sPUDjPQDTwI7Ar9Z2gQAsDSRjIugjjm5/tKIUQ0c/R0EAfskFUg5Kz9jpKyEEnB5VdetF4lAgIShLDklcAxo13NgkXskel9HpxXJJR1zH0AW470bas12Ld5g2LYyfnjk8AROI4jXXh1xdezhE9A6uZmIqSq71/eQ597fjwsIcJ5zhmmRSMr5FYsQJq9joNYxnYfm+Noq1T1ELmJCGDbJiKbiHB0inYTkej9ENVcxzyEJJpoPZ/P+/3+fD7nVfbs+x4Pj0nOeXSXgtvtFk3k7/d752WJyO126+KN+FMkJ6hqZC1yZdBxZdLu++5q7+/v3W+dmVns66/fvnz5crtvke39drs/Hg83jx7KPKKB0gxqjVGp2iDyWqvQ6jr8ZQg5WuTCDGCfh9kJvifRQWFR4t1dzW63N/cZ3bNN3fxwJhZ2nRGcc5tmBJa4/9u3b7dNOCo6UvCnjAlfADNTmND7DkBEwmVuALlv9xtHA1ORt7e3Tcaccx4J/Y9UcVKbCVJT1W/fvplIlD+lIyKNq2rW5DwGlcqCMWTf9znn29ubu0d/7W3bnvtuZts2iGiqARgisWvHMaN7O2DHcZgxEdlh7+/vb29vRHwcx8e3p5ndbjeA9+cx9cCcYwwzRDLJ2ATAnAfLbds2QrVhnJP4du5XKQ2hrgVkyPP5DKN33z+Yec55u9/G2Bz6eDyYadtucMhge6RFrapEXqqbBsm9/fAlaB7szhwYwe28OHRGw4N2RKRnczHbzAwaNb3MSxVT86NX3Wj5t9nEGlVozcAaMmp5WohnGs4gGQPgOaewiMgxZxtILpyKOOCuASE8xphz2jx5egwjWvAFE9j3/TiOb/6t8TkosyBcmM3s/f2diG632x/+cMRCAdi2TY95+/JmH49t26BmHihhU52IBxM7k6qmBSIjuLS5Rwu1SLSemLT4m7Qy1fd9dz+1T+ZEqxtjYxaiqaqBSJbz4ktHgYYsyhDQAqqE6DEAokoUjOn0XuCqHK/rFv/asoNtvq6bC0DhakqW/q8UR2GZdG4YCREi2YQ8d/ZFIKkeYwxUnEGqpSGRMEGPY1d/t/A56r7v3759e7sz06gRkisOPVxlzsk8otgShZmx70e88TkPADIILOw2d7tv9+dj3t+GGY9xH+ONaDuOnWkbcocPs92N4WLKhBmEqqogOzk/WF3cPbBqAnwRkVp/25j5OI4WCkT0fD658IR6SUPuvOxF/KnRR1czTzXQmvPzXFIyVOP1IK14afCZNYOuH45qdxGCGEuTEr/SQKST9A3MLDSwUNS43fKtDW97xSSQUOa7n1ODmrCkhWNugUPRGlE8gABAqwYsDFNgccnH17mbdIWpkWSZUxAJE7XhkYqWo9fWqirljFgEIvp89hR6ACCCdXtAX88UyoyhRWfqP6HSwnNaRAiIQvdM9/A0uojpzCopbimgbh/STxZmGSPaq64voqsCum59zI63De42p1XrHZhr9WnkwjcKEL/someG04APgzCZWzOTYggSCtI6JDMT5vBDdtTaZtbJBMxVZOW07pffAkUavC5YR6caU0w1Zoli4gABAABJREFUWMf/l7o/65IlSdIDsU9EVM3cI+7NrQu9AEMSywF5SA7ngQ/kGx/Ifw/MDDjTQLOW7urOWjKzsnK5S0S4m6mKCB9EVU3dI4uzEOeg207VzQgPd3M1VdmXT4bN08nByYzTUStkbYIokwgkRmgSXB3ouGUtX92Kw00JIDJnZ85mbqZsDHSIexGCA1p2XZcTSFAqmFANwLvvv9teLqZ6fX5ZkjDz04f333/13e+++urHdz8syyKZPzw9fffD99diRHTda6nAuuRMpWLbrgqXRA51cEq8bRdVddfLpbg7C1BjpJ6/XD6EBX6tSkRqJcCumNkIXvVyveZFElOt9bJv5z2fTqeUpJowl2u0uEc5mmsUSLx/9+4PX339yaePX3z+KTM+//Ttv/yX/7Jey0koQx2uV3//8uQVlGnbNlJlZrUNQEqiWv/xzaUHOoz6EDNxqWmMESGKPnn1qCeZXC3vv4bYnSLfE5LYMEn8SLPEP0GfEZALsZNznigvqmk0BOBgW+IpA9Tn0QPwHu1FDwhSLzIaTGEdomKEw4YfuO97tK+PP4W+OOV1BDTvxFcDO7m1JVLwXkhw6nbkrrUZ69PANFWtrtxD18Majl6LkTGYOdzdWTIzM4lB0asrm5flu1ly95yJmYnVXGPIL6gNpIqOqZS4gad7S7Rw734MU6ljPFAYl5jCbFEyi3vp0348jINOGGOLmwMW2sibus1tTq4DSFmopwsA1Lq7q6OB2glx9MB1+6/2EUBBrUyIk54SX5MqutnDoLxmfESVUdji3uGSuc9DMyIyarqYG9pro/rYriQ0BglGgK0VlsZUD2GRZIaqxZ0oJSFmCvuLnQRmPabJInlZIqYgIpRTG8jhTik1i9y5FVvGMlhiKNbhS4Ttfr1e+5G1EM7giiBuZt62fVgGY2eaZRBA8O4poKhZ6jFnjlNiogZaw232JmLkcZCxMHsvzY1JFRSwar1/6TiUhuEhMe93NhUiZeruRK2w27xKQ5yH9bh4nGPOOeda6tHyqlpqrTzpvynKagGhdKykZ6uGcWD9pA+xFht+jFe6R56Yr3iGu89Sjxwf4uw2cj8+Pl7nOabrGG2r5BNcezMOG/LHECZNNpkbHSEx71myuILBfSp48F6gHjYEAOYD9CXEkjDnnNNySmpq2ErsVXsuB8YowlAATUQGnoq3mRbkc1l1mFU3dl7bhLs856v+xtA+5jrhY41dOmLzTR/cdv3N5/X6xZvXJ2+h7V3ffpp8+0Ze1M6rfdYoZVao1QPxKCiul3YzyFudtnmv0Wr4Im2WXatzJ29h0cP29SkKNm1Um3IOsxieYj+VEo9tJyNmdmLhhYjcVSteni/v333Usgmv2/ViVk4PlNKaRKxWkTXJWsoVQCSrA/mLKAEMJyY+ihQQBoTK5PI1aul0OCq6xz7fHfTdz94N6BtPoJfKj8gLgOGOzhcdGUufN4Q7OCpu00fjS+dbEfmolfCoIobO396Aa2LNcVIIp+k4iM43mCDdp5RcEJK0ZoqD671/vC8FkS3j1vbajvb2eaOYeuz/sd23x4FbXqOp+N7NqEVXj/7GtmD3PsbpEHFDqsS7ZgeJ6LbotD8WxVbech/cyUCSA1tsCDoA1Y/6ruNJ+4aP0zm+Ik58ejO6Ptq3LUwUHbWyROBmEd7LyQO/6f7+1H+4u1KSQ7Z3um3O8E/lMyMiT71kifs1vsjMoi+60UM+yoJmrlnXlW7pELe8NubetzMNdHd3EDk3gHwwiBPcW8MKkTNTG5fdJ4QR4SgfIldLcpLFYIZaoUAWmH/443cf372vpZTr9vH9h5zl7Zs3wvzh43652suLfXh+AtNle3m61l25mNXi1aIEjIpqgRoZmbtp/OhTJmovVzERG5QSWVbfiwGotcYw6pyziEDoei1eHSmZOcGuO6lfmJnlTEQiNloLAcCV3Zjgri/b9cf3H13L6bR888cf/8UXbz
95WP/s7frpw7LvVR1aiSQzCU34TDf/+ad0RcF/sAYTmTNL9yEbRwmotYwa0IO7R5R3Yph+mZlVFZEhZYLUiRmmmGi19W44BrC2u6ObN0zQHgS/s2fu7brJSJg5xfo13j84ZXzdsHJnJiIium1SiH9bhrCBn3KTsOFdjLCiHDM0LPawRXEi6z3Z7t2bOkRq4CyOlc3KzN3NKlEOmz6UpcPCVGoViIB7JE92DhByYgYJCI4IbPt0KtQ7LlqZDfu4czjQu7ZshogQJWa476oa3gszcwuDtLOXxCD3CJazJ5JaRXX3KXsrQrXWrRgACeDQyN83u9fdlYE6RyLd0DqC0nxOuG36d/cY5hauv0UBSLMhWhKApmioOxn1QRTkIbvDsQnt2Sgv0ZgG4aoxKsDUAfWcUs7UawJFRFLilFSVmNjZo0LYht3Apaj7HoYOYopD764REYBtckg6fVrV3RSmYXe3EilmVG3zP6h7HdynQcZo75HvDQcgpQygmAbRLrJGaUmywytum6kgZpGc89rH1gcyLTGzg60PZYvDtY642PmHmZO6R6XrJHGOy5tpchOJ6XhqTS0RETNJF9h3TDszC1qNk3oPZfX7E8hGdy5NvhP1nkmfyoRiOXNH0J+6/FbAAbjL2k0PjiFEbi2Gm9g2EQlDhKwa0CYZzZ8aIm9e8BHq7qtiEDnCJbszULRPELkTL/PGAkgpLctCanvpOY1ejelMI53R0GU6foMRXBWKlKMKi2iy7Vz59YbMphWNXMStJefugcTZsvevkDDGWcxgG/NNxhvmI0MkNqc3DDWgqnPl3qQejl8N4F6m6ybu7gS3mwP12/v3H1Q1SLGT61g/XLpH4VEfHj36Ik0cuUe5D7WZQ8bMg3OjdDGyYoOinGme+Kpq3Ox4NsPp9PDwF59//bsftm17eeZt2zjhctlenq+XyxaDFZiZhXq/ZfNXRYQpoRXa9cR4l5Odc5tMSCn51K1xrO32KA+GfeWboRnBNxWn7ayn8tT+8cbrr+/Mr5BUbkliSN/BIJMd0F2wkDZRLR+bHjeK+whIe/jkcBfbNxJu4+5jwW0ld4/c5Ri1sttWUDroFb0Xgnv/JKZ03AhD/9STApjHUdDwurV12cmxzG5IjRDAfBN3p5TCKDxKWCPsWFqmehw3op/wlsfb2gjeAAbbu/tWQ5bcCjr8ZmbgEC/3++bzZIgenmCWMOEmueQ99xuD0wYjtdIy8GTs3tDn68u7KYhOovyq0iS2jxs3DujvQwBG1nq+SR+9dTiir/d/3xuK5qHFmEHH3N1wBw8Pn0aUHNQDbe190TBNQMMxAHmPpDaNxMPuTxEeMkU1GENref743bff/+Grr58/Pmkp2+X67t07Zjw8PLjaV3/48PHj04eX67ZtVXXT/Vqj+gRqESRQrb6XvXV9axSLFfSok3sTJIC51yOoxE4ELdFp6QAsAFThgOecSUjd1eCqWr1UY+aUExHFXFYKY8CdYYmJBWSoV73sF6slvZRr/cO3v/3jF5+++Rc/e/vPf/YZsIHosm8Pn7yNesqZQuhPUcl/+as7eG1W1J3FYiMNGNhX4fxRG07fqkQBa0l+ayqscWu3LnhC+mBmkk5+3b4KigQZqOFf9w1j+DEsdIQXR9PuwUoi6JUjMbUBU6xqXPipgKPITd4FXTXM3MpzPYII6F6KIhxCZpaOoDggFnSYbr2ulZlD7pFw/K9NFesFjdOtjy8Y9aV+XDrvbFjG7k7kImRtJrKP0hc0P61F1mdxY2ZDoYaqi00cEFXjGpEqrQ5ouKB9c0UEOa9jWnGXqxZeGBB6s5oZpEWXVa/ePIqSczarpew551CmBIroATkc5mrubdJlmLbN67g9pEYZjWqPEuRWDWhOjpAxCM3aGxIO5SRMYxSeIrwAh7o1GzeUYGCvA4hJXej63NQDLYeYzd1dk5tMlcDqpqbetX0QSUg6ERHOwlnIwdQzlh5iOnKPcYJDVQyTNUIRg3y7EQB3Y87cC0hKKeu6jnBIcE4SjJ/HobsRQRITNafdzTQlzZQSZ+ZUSzVzIOZ3HxAj7t7jyE7cnMN2LmARuWzqXmqty5IHSzJzqLaUxwiyGs+i1uc4dYcnPEQ/xgnY4Hxmjsa52G4C+RiHOpG9u+G2kGkWFkFCdutf+W0P8etrmFzz3YjIcES+53/5VYS702Evvo/9vV0YejR6xCYw7/C0/sEXg7yHgTiKjcNpjDDEKCEbbyul1Fyh5vUYYulVSynX63UsBkEe8EjCkid3BPB0Vy8GtSZA5/oKcyOTaV5cJ4cbx4mI+FaTjqfuUpHcR5/ArdtAdrczd2Jt3qJxw/mv4+dq2lGW6OazByEF9jqku4gAWIhzIqKW/jaLnqsgW+5lCxPljIf03hUKgDsuT+uxVLeUUsiY/nZnhwHU581HUeJosI2vph5wdKYG6B+2VPKc87LEPK68pjd/9Vd/eb1u5wdKKRXdZg4ajx9BQ3eP2UZEHEHTeI+IEAtra4ob5uk47ilvPYdsj1LASaNz7+pCtH1FOmfQxkzn1JTusc5xf+pW73CTxoHKhPuFiYtFuNuaXUdA3SnnbAo/2kskbjia48e3ExGIhHpJHrqjGF6ZtKBnt6y6x+j+p2Eub+iWrNkYYwd6ajmejWa+jo8PYTU98iG7+o61Pw36nGPt3pVamgJPmDz8LHJ7/7ZuC4ARv9lqdzcGY0IZ7Q3Sw3cdEKPtpJgRExCmPTEzGe7xa3jqfn8focNWBcogIu0F9IOfR3/6VIIxQl2zxPAemBiBh8HR2svhZuE/nn3WKUQknHSAnE1u/hHubEkFoh4cvWOfcY2SUe5wMsNznpmlbbgZchInM4toa7MezAStNM4IxOQKEoc7tbniTYgg2rnNwQ6v9XJlF1j54bsff/vl7/7ul3/77ocft5eLqlrVy+US5s3Ly0uWL1R1r2UrxQlGrEbF4O7azf1SSnS+hOQC0JbUqgS7A+/m3tHp2LkbRehZmQhWqlUzOz+cuSEKuTvVHuipVYnI9Sgri539uF0WSSwAorffxay8v8omLxfT3dzptGBZpVjdql7LzlqqqreoJcPFIY6OHfyP7hpthN1l6mJhXARRbAAMzjDACOZuDHdXGoEKO5xwP/p7O8nF/SV1Kdf5YnQUd80wyQ2K3rGby8mtg1QFJ06GRDeebww//GkDIPrsOiEdDfDQI24YkqfpcWqFY7PEAJDULcaqHg1dudel3CYuQpZZLeMuY1lhnLXX1RzHsNSipRd/vrJFOlerFtVW9OJEJBj5gWHJtZWEDup9FJFeGyJj/Cn+ZeYYOxEWZLTjqypzJghB4HA7+jf66Cr3Ca9FtURVjLnWuhMyyEYg3MyAPNA+iNxU3QzkDWwexiBhjgxJIOspM3mk5ClLmINe3EeIC4A1VIbYBO01913Htn7/xgmNaJoVIRhzVAhjyAH1kzKDESgd2KQjQhYCWuG1eymqWqh5p20n46w5uqSVssADlIt7QaUA2MreiaS3FYESsUgrmnJ3bXhFQVqZ+oitrsUPUcjMPBXsJSIztGGrU0HXyBKYgRnCm
cG77/tWmXnJnhMTEYyirauHVRr2pLXe3G6KtJjQQavM7N4SvbHqBnBMpGqRZKDeUBd06OVgtvHvzNRDX3Z2wMT2Ns5oMmXU/chumR2KeTbQZyY/JM5Pqd5GQ9Ms9bvX56WOu8322WCWWLP3mBv16/YON/BfZD6sq2H5uXv4hPPhEt34wD61MoZDOGiAp2p7mTKKMjk2sVkKiuTVGAaR2mqlVca14SG9po6nGHnrFUzR3TFsdO91FvNjjq0LmF6A3NWONkhrmYRuQxARKEZpz0d/rxLGEU/7f3z14W0RCOO8uusyxTD76x6vs7dAHrxVxYeOcHc6SpdlCA2goWc1aunePhFZHcR5E2hgSsB+VGo1xueAOkeD29JazVyKaimFKU65PUV1Y8eaxaoBBjKQV90/Pj1/KM/n83o6ndazMcNqFaFlTcywTgBDtR07adY6uBANAsRM0JtizkF7tdbXI2FmypxPynuqJI57tAMxs7cBNjdsYmZEEt7pYOcgqnDwBi+Mj4zG+JneboImcYpHDOL4NBGj13TYcFFuCRgYPtetjWWT9SPNCUOz3alZ2sNLjJe6wTR2Dw61VmGEkVH3QFduYe8ICaOJ4jkK3G/SwjRju8bTDSlEdzs2bgVMh+fu7iMTeP/H2IW7GE/4TnDxm/cjmoqDJNBkWmyFd3NrruFs59XWfXN/6pbo2Lo4xaia4Z4rG+Zg6/JNTGEDDTAM6w6/H2FQdMPXJ6us75XPAQ7uJaMzU4wVMnOdSnAxOeR222Ig0TtFVOxo55n1RQANzPtvZuROXcOOXWv/diKkTkLebB3zhn/LBCOR1hhS1YgAZ2diB3EL++/g5FRxvV6//v03v/hPf/Pll7/5+vffkLZCswhPl1Iul8vlcokFBB4Vp0xJTFkrbRPoWilVd3VzEmdZImEAILp0AqWrlNKBnCjQ8p2jvrVzOjkFkkE4HhoVREgpe2ANOtzYYSDSw9FuG1h2q1QkERGZqQgxG67bY/4cmy1P29vn7fN8WpcTjBT+vF0X3zziZZSEEkPaqMYQ+/9I3cK4XmHMjGT+AJWBA25uBK9uiQnoodvpyYYj9xNPGxVl3SmP7U64wVag0QUzjYzHYZIdHOfTSGrpKOLeLZ9hYOCVAWADAfEW06F9ew+sja8eVuLIzRyyDeiTpsysVTBifKY/rFbTABz3HlUd7qZPoBphBsy4BWPTiEhYbALDGf5b57G97Tt7SjlqFM0sEZgzegoIPcwZQiol27Zt1gpx55xzvE7TDlJ3mmdRZWYESZJiVGC/iRCRSBbJwHM7fDNVFWkBHhGJPs6mV9hjIrHuewtuUSIy5rwI5Zy17tY7rIQoSl9nqzocI3O3vqvNiPJbMd061cmjdrSVEKCZUdMmxL+OPXaHW0HXzfFTKCd36ph+nFpklHNiB01BaHfXUBvcaLSabtsWtmOQ47ZtgfQjOR3cOZ21u5rVqPH3bvHYLYjieDH1/Ukp1bJHHXbOuaoyE8nRhsRTfVHfhKbp+aj8jK1mN1hVJ5Z2hxvb4k9dRAQPmIEGEjh/xKYwattJaQ/fWdoHQ6nRoHwzi1HaTh7lDO5Tr/NtFq5n1yeZYm2M260WbwHpmfLnn19fsyiZP3LnEP7k2+Ytsr68xm50tMbJ7TfPq+LuCLXFT5UYd/7tCBvjCGArurkcZeHUCZ2ZE7ciCDJ0oZGWZaEXoj46Lcb0NVkRcfe2nx7t4GbmNQqVhykW9Ud3vm7I2fs07MxxU8ySRyCp/7GHe4DwBsexzobRHdX95H7e6AwCpgxS+yAdFDI9BTXHoL8ygnCxdcICPVyR8S5M+I2gYwHuHoItYnPBsEH5M+cysZkNPDQGIca5TkM4MMKcnHtEKeKylcUcZS9FdQP0cnl+/8Mz2TnnTCKl7LVegUzkrTOwu4LU0zXMUdneCtI8yoeIHExHh/nN/psZTS7ucQoe6bIjBhe+pvcYB4J4HIyo7h6n3G5uHSYNzvEV43U+xMoRJblb2CCD+HXf9dhnPoSJmc324h0V4TYQEw5MkM3Qnu11myLfQb/ewKwjrNpmAPEE4/kqokF9+uJ0qxbx0v6GmWaIMMyPvuYbq2P+Ex2Fvodvfxzc7UcGSczPCIBGVnZsON34zH0nD28uVhtzAr2HkAZVUHfeZtHHwxkGZpCLvnVtWw45H72s8dfoiS0HlEN78KkJnCItOW7YzXq/CWHc9CPdyflZFLf3RwTBu9vVv9cPbKAjP0ndVGXmljJtIYbYsJEvb68c+0lExDS1QrRNGFZyKdZNI7K4i3W6GMHJgduUegCuBUpABNVSiu22nmS/lq9++9V/+G//+//x//3Xl5ctUcAlLDmvYTJWV86nh3y6fryq6l62Uk1xBbGZlXpIGJBZbWVVTGI9Kusek+1NtRE7RAhCElQmkZcT8lKr1ureqreSSErp5flFJC2SclqQUvFi1c1AKYYLsQOBaBITFkmSqpKLMDlgJMRkZi8KdZPny9uPWc70Rh4psYlbQBPCYzFMC7mQp6Gg/hGnCoEeQJlgGDjIopfUxQ8tWDWCoePD40fJuYksa8mi4IFwiDBOeYgLFyYzdNC1yGzDaiXgaD1oHOSuWmdvCyMbPEC/JkgtRIL9FmlphPKHQzhrTJk6xseoqvbtdIREB081EK3aq1i4eykc49Wm0RaR3aYpARJvjhyFqvZBfOJT6QWNmL3fuOwpJbUynl9EeluXDeNetaSUW9pLhLnpB9yKe3QLUif40yHlhwMdAH2CB3dHbxgbu6PViYjlxmNEzwB4dXclDj3k7p4XuV6NiCRRraWpcCBRMyWmFSIRhzxzdiHWSECbRxjLJ70CgKZsXtuQHk9qkeMWLLyR12Oy9p3stiP5YRTDVSeDfnpntPezuxetLV6bAlDHpbcUMnN1g9leS4QGLvslbhhgpJGJrbU+SEC2gOhm4jyiPpvi6TA/fvCATNBJ0m5iETIJPkkpYZrKFSQhaaDxSkpL54rgblmWkzc9Sr0kNhk89EFsVNyQBXZMh4stbQ71fWqrX+jaPToS3VWEe6O/9Xqam8+iu8fjDuOek3xxZmHmn+z9o6Nfl6hHQ/kwLsmiO+pWfd5R2nzp1Ks2L2kmqkMN37ocry4TpGik5duAAm5F782q9Bi2Nh7kJ9vYh0gZr4RfPUQB9bD0cA7Drwl8KevIruYULc7N9uqGKXUrMMIUjKDDZqZ4x0WIlYzCirFF4VPMyxuP7z3y0jbTG7juIKWp+fTGSB33H5t/dxA0+Vf38mQ60yZSCMNMnGVgFyyIGfX9DRwBi7bVOBzL+Ysittbltva7mRo4EnOtsQJd/Fo8IzMnSabWnY0e32EhiERrW+tOuElKEzFgWksShlutV5CdH1a3y49WvW5mdTklYs2LwInFU6KKtkgzo2MQIgHHTG0iImJ3U1Xu4SpmHs/VyG/qwRgLG/s8s7bf2sTUYWNrrZJy3//233GHcODuzn0o1jummF2dQQZER6lSP+FmKw/6HGQwHhOd9LvpdKgMdtg8Of1uGeFxoyu10GOKAXRMROp2t1extsTSmgm7x9jM
L7qBdemC+oC1w61cmm87ToGnjJZPe914bv618eErkTt6qifGdIw6WWLpYwytvdTWcaNKoG6tcySyP/G8NnUc+eSweb/V2N6JitrrSz6SfqoGhzXhcxhgY8Gvhh+GR0Y9LjzvHiaxMEuJedvZu0vXH3Am+1vSuNEj44BCcaNL9fEtw5Y7DqV/5E6ntNPv9k+tdcwOAhOz3LiqJOFatzLIEbwzs6iV3P3pw/d//3f/8Kuf/+Lnf/PzP377/cP6gCWVrZrS5WXfaimlbGUPgyQBtRZVLaXualrNQI4enm6Ykw5nAQlzNbH2gKWFvwkApbQGFhpRoFu5G5mDvUKraQGiygMilIgTSWtddTiYvBd5mRKByAjkLUXjYHKj7Vp0wbIsqs5u4MWc1IlAH7ftm++/r/50epDHNwslySuLihnICGC4AJmQpiP9R+UT3np0wI0unS4e3BCDTlpxBLvVztOIviwA7i5D370SL8d3zc041qGKIkYTOfkm8EdRYbsiST5uMoBnzCylNNrlBtfQzcS+9s6ZN2f1gRZgOgzC2SOz3tEzPwURpTC1460NeaWP/vTuqjFzlNeqauDtxk0DXxho9nri/kEMYaiDb7syOwS3mVXsqmt3gqtqG8EXn621RgDMzHLOqq59ONtQjcuyePeSo/8wnmVdV3d36CjGjdueTqcJRpkBjjbO2BHhlCQN1zncjyGnYn/ci7chdRWUqRuFJBzvD1dTQvVoK0dkZvcIXeHuETApjCF8+TZmwL0p3OkmPDD0XEy5uHvd3c3Vm509IDRlpqT+7a1qaN+31kfemg1eRSYctVanlrPNYOntW2Op3Gv2VLUBfnUbPacbdopwQMzzcHciySKSWo9sSunyssXuhUdaax3ttqkRugLgbtNwDs5cVIv1uk1JEitpe8tJREjHsx8KiZm0jkTlzVKHvuEpOz8+uCyy77Ubmq0mthTLrTC9sxy7iDjR8UqfZkZMIqJegSMxOzP/aL7tS+XhF/FNyWv7SIfImVzE/ymHcJY+6DYEJjV/3PwV+mVf600Q986Iv7u4A4hOdHgf6Bk/DCrCJIKZmQhJZCAJD2OoOT/BZKUWNSfetm2v2jxDsDuY+8zH4JcolWSmtl3Wp+cNYL2bPRxRt+mkGmuPTbgzho4fAOZjM4mmzvVbHh8i6I4ahw7wySd5fd0JYZ36k2eNcqNp+iBBAN6lMYBF1kP3tHK3w/e+pdjWzk1EpMqAKquq8eiUcABMMRC1rTSUEciIpa+q+UvWsawd2vNFJk4iAie14m7rmq2ukhjE+1au16KqTlZqLWWzVkI5DmgItyqSg+WJiEWIPAJDtwzoQ6HGtsyapRE83QD3U/dAYlqMT8DIQYR0I0mOQE/E14jvz3Qor5m55rPj2+Zb5jzej46I6dGjGw/Ltx4+JViAjgcQKzWjWVKUUgz7P3aF+Zb2ete0iOC2RLM9owN0VE6OnY3Oe/TE19jAmC2EXtrTKC2J9olTnfiPm/U9aQtt5ooIbksu25unSoqZTw8Lq+9el2g34ebjGu+cmvRAtO8FE9e7tyT0kNXu7vUINBwxsDtvEMCk7g/Un31v0oObR9p+7nEZ6UOkQyRmEkyHheHqH2RzlLqgS+CRj52pTobIn2IifOQr7oXVzEfoki1Lmnd4bOQY3xK/tliMmdNRGn3cNngWbmbmqtXRAotTvI/6I4Tk6aG4NmnQPApBddPffvmbf/fv/t2vfv7Ll6fLKZ1yzpfna85LuV6v1716VD/xVlSvO/armbXR5u4aWPRpLUXdq7mREwUCIgFGknPIMW+wSmEBtSqzOClVO7qWUimltIZtMlMrgLs/PDxYVXcyg6uagR3CiWgnanlm6W4wnMPu5d6LaECUzqZlZSHd8eOH92r02c8el/Nnkvl0OlHRUraqox+cGXJH9/+YfMK7675qFEMFtwXThCtzBEN9UqwArDuHNEg0ZqHHKKl424w3o2FwOKjBkLqZq8m6QotNUBfMLMSQBoJFvSRea51r7nyq0L6rSvOpA7DN+Zysi/Egs2oGELP0Sik+mUnjwdP18sHdhV3d1E2dDLV6lUWuupuQ5JWIrM+BOMFLKQwkgsAzOydh0iRu2ACOkiA3t7Ib6SILAHMFLEq7g71LKQE2sO+VSETWdeV931OtGUwGVqLKtrlF94syk3Bu+VPznSmlbC/X6u4BdgxhZ6IkKSG6eFXVCcv5RIzqdV3WfXsutaSUwOzukkEitRS14mB2aAdfERF2rKeslrWFVK2Ufd/3WjdmyVlq3a7bc8rJYWr7elrLjsS+73WvdVmSCawaczKre6nMUtVNGcJVsaTFtAk4Ya5mpewx4lzkXGs1tEElpaoZE695FMvBiT3G5jiTuwada1MkCEql4tQhAywQnwXErNAKzUtyF1d1cmPf7JpsNSdXlLJf95ecw1vbzYyJuvWW3B0kxKZilJBzCotKVVFJUjIzEFX1su9oGI8nkFhtoQQwHFZBxgnLguslyaJ1Ny3Z0ykJE+/Xi7Msy6otIqCn00lVyTkt2dVAklNyd91UVRPE1ZjImc1IRKKO18gDALWouxPYwA4YsXvM9alO5onEi1C1c8IVClMRB7naDlhaF7pIWh7ZTbBIBWpdqqLuWmtRA5EiVa8LJyISSg8LFSJNmmVJcFFkSysJWS2oLEVpVzZTcqdTfvi4PVtuWkLVRYSRS9lF1goiTioSsB7ivjo9El1TSDUnMhLRDizAEiUNBLi5iTcBsVfrvhW3wjYiCpT4JmLCuoroWcfROlT1gLI4StdGEtWdoJRlgbkkMa3lcnl8fERKum1MDfGIiFJaaq3Fnc4LGDtMmVQEVotWJ+RwqlVZaFnytgNF88K11jdv3qSUtu1aa80p0QFxDHe1sp/y8ryXh/Xk7m6oZHk91W2/7rWYV3MSprzA4G22W8rcZtUwLGBFQ2MwkVvx6oEAFlI14njCnthdybsD3zwWs25COTOJsMPVAsxWdmvpl5YcbK3FGLG8jnhAbu5wXhczKxo2DEs3a6KHl4g4jp6b41SVmZklDa3mALxZw84A3UyTIrMGLk1ERE16wJEYEiqLSJHaGBY4lIXRKhRuElMRnovCfmGJIaVy+kgwplPC6oVNhbOjXtyva8rkJ2G+bIXYKy4pu+O8rI/VnClft5Ly2R2RAgwlmlJKKaJXyJzAUhyLLInOD/kkhvry8jbjsj+rbivnN2c8X3ardFrfEpaqL2ovjp3II0JkZoARVe2Pw958NhGpRUmYp1o+c99qaUdM6P4Pc9RfMMDiZlXVyhGkMAenTIA2W9nNUR2JTfrgGXMXFhZmpuSRokTnl4aC5n7j3gf/xhincFP3fZ8zmTlnD0ryI1pHEHcnhvdqiIPHtVBKJOJlq1oIrYitTciNsGkSp9bVb7R6ZCE6cACPcnqCsCA8MdWIlDOvoaWO7CITiEEC85EePGBmSlm4Tc/zUtQNzFQj9gwnZhGH9+AjhYNhZu6HYdQDrzcFC118xVI8oLzbK+YsEm0i1msNmJiIXY8YSsS+m3fXa0FGZCwkZ1puHc7mVBNgNkY
8BpQRXKHc40itIdMPNDiA0NwnYEArEyRld68xdYkEhFo1pRwVK4mEUhIzjjhFvTYPP34NudOAwYFwLsw4KgVFdoe5b3vjPoDUwCK11otVGFqVfmIIUW49VKrKfvQtN/JTZ0nS51gYHEIdI5rQB1IC0coxMoet6yJ+zd3wBTB0k8NrLSOeIkSBYOfmVpyZkYSSwFFVkzAy61YE4lpiRkd9er5+fBLJ33/3/Nf/4b//u1/8vJT69tM3Br5q0YWc3Qi+Mleqtda9oKrXuldxTm4wuIEoiRGuWiwqjWw38pwCPtK2UphKkAGRSkJMCKuszHA1dl5SInKrGvjS+6bn09mZL5fLvheRDJFaSGvJpzXySNUrFjhot7K6SJ9undZk5vu+AQDJ+SGb2fX6EsGImH2yvDxR8r0qnT6pcv7Vr573p9NfvF1qyu62J/tQr198kT/s7z+nSuiQndPVSBj/xa4O0ThHedyho140whxoKVQ2d5Czk8bItYgMgGHk3tT0+CChqmp0wri3FpIoDI6AdHxbjP0UkEPNNfxkVfVaiVyIbb92XQNJLUBTVXtFPXVBYnBzQtFKTNz6Uo4wN/VuQ7wKMc9t5NzGnqsLdm3zAiiJqW61bLWMW5m1GKWZlVoTT9COhqMiZb4wtJqNeab3zjRRW/38CrqHOoJhcxwZPUEUf4oIjVD7iPQBg7gFDqEpPWJmARUTWSZM3Y+xZukYPkOBjhKv8WhxjfwD3V7bvpU9UogHxolIih9GMHiUFI6wJY9Y2quYYkiEgDdYemq4kfKrcrj5T2MT7l/8qexB24F0V2/WkKzODycRcSNzRQiIXUspbJpzlkSURFx6GUMKTx7WTipG0Koqp9hSpNTFOSXJtO+7SFpXSimpupnFpMHzuozzJWmEuO97jFstpdSyQVUSLUncnQVETgdMBdwJyUZGa96KeHCbpIP3lIi1DpykHnO0a0/SgKLgpyX7iZkTpZQSmzfHaKrPNDPvIy4OQdOCSebesk+t5dW0AoskZmZkr0VVRx/aQYR02FIDVvt1Qo87untvO+mhpiOm7F0638TmaYS1gMGk4/4zP2KSMgGqMlhvvHPm7vnfHjmOgs/OqjQxPN0zwh2tMocRIimliMxZh42JLzUzpiiFPzLnr+8zzt3MYo5cw3kzjJrzWA4RdUHfIvdyw1zxr4xY4yQ3GCAhVtSIEEag0Qge4/gkqjK8wULC3Z37KQAYMeD4zWzkndqxRpJ8N/NDC8uUyxxiNraOYs9NGR33JUQ6jEdtpN1u/1AJQ0zNl5nVUIEBcXwrmsahUO+Gcj+qEOOdIkJOblGDoehNv6qdtjsmLXDDsD5mPELdG44FupzvDiEJqO5GgLOztIqVMUwcgKru7aLr9bpt28ObPssrwmpNHZC1AQRTi6MhUODuqJcmH4OmQOy4ZnYYOsunaO4QXPxT4BzDA7nTUK+JfF7PULI2pdDlgMDF7SKta7pXCXxvff8Ac2rrVC3BLQCIONKE0QYZ5Y7+SjFxzojJVaPlgRkiXvZBnH2XhFigZS4WiKK/EEMD3ZSYo/kCIvDaqgtubYO79Ox8dkQ3smKcxdjn+XVMPDjv+Sz6xp1H0mw+qZEQxlTBG6LGveWCmiyaWK9bnLPLeqTX2p+8+1g9i0tTgQZ6CvFoSehIShhxqE4ErbVdp0pUgIjMnZvTK9SxA8bm2ISQ0Rt625lWO2pKebphNOyEaJqspmNC/eyr/+S/8UPtSfv5rxilzmj5EOsvEuchW9DAF5xUZT3Z9copgURfXi6Xy77Xpw8//vV/+vXvfvcVgD/7sz9jlh/fP5ddU1pK9AVqA5OoDV/PoqzbpE3OcRgZhSFNgKTERHBTAhETp17jAICcLCKJYXcYmZmp+yKJhIkE5hyBIEfjVSKYO6kp8V7mJFI8b0CP7loBWolihx0MVKID4GMgw1/qJZ8EraexAnYt5bJtzy8mVJTNnVJKeVla9eM/xtn0/8suop+uJj3kwPSrjLx9BFwjXNLFKXVuijsYgVNKzH7jubm7z5Ud5t1YVRtj53BrGkXl42A3vJYDN2H6G70w/uUOxolb42389e6riShF4sV6eVWtu+oye01mFViY2UNF2dHhjUnHvN7WULB21PEHz7e3hf8mIjnnGBSObtGG7S4dLzS2dbbyhziYz3h4iQB0BppvotleP3z8Ovur8/3j9XoNuEI07EN3TIYyMwun6G3TYvteewPgEY6Nruv5nsxhxx21rN7HCwyx2KqVZiJ+XSnRL3bYqEzDHK4Ztq8DrSklDiJKW/d9N6+JmKhVZLG5iCQsRF6tll23Qokx7Q8H9LyZNQwIcFSURXa97VtHGxPJRDbKkgNO1swkp9PplLNE9amImM2jLHsJaC8xJTo0sQNWlI9OM+boCTUnmSpnmqGM8UrgLo/7iAjQHUtvypiZpFUoVXOnDr/RL3OYeXU0Ku3WgBPFuIdjlrd1VcrMAeg+VO88lHxICmYOsOoRViAieRWTm49+Zr2fvI5Na4nom1nwg3de321EGcby7lZ7xzL+qttk/Onus5jcifFDe2f3yaOL9e5W7k5Mwc7MLFMQJ/46AEu9m+x5zaEFQy8SURJZAs9j6godcowoobtkAHXwd45So/6M0ycgbWbdAQRkRKQNSMICoqJBCrnNQ9emH4yZHa2CoklwCeNbAQeIHUQ6xr23dB2ZkwcaSvSMGMjdyMPhIUxlMH28+I0kOXjk1l3XKPIEyMEOoSaQh1Jxp2DYIdlmsTztJ5mZqxodBX53wQ7v9YfjV1U70ndODo26CVU1r+4ZAUzJ7s7WMdxyzsuyRMriiHI2bdLcxVovY8oucxKJIYdQVRAxCYFjpM/w2Gf6nBf5mr/GUXYpf3xwbMhQTDwFfQebzPw1PjLzUVT0zYEw69VEmK7xFarae8ToUHPHMtCE0/RQIYKYGQHfbyXIRNA+4K1EisIhmBd5rLOaR46oNyW6xkTyn/JpzcLXOTCVejAGpof8iw1o/hINryb+2PZcRyfkTfX7fHa4FXR3b5hZA4OkewDmUDrTTYa/19bmbRigTXX1xwJeOZ+jEREN3+h+nQf5tf/N/eyOicaO75qhcW/aetFWeBvIiCq12bON//Rk7414jzvzYXMdsBYdut+JekyRaNzHTMOIGtRo06yvcedDBvaPH9wxbc68t8zHKGwQwc3dWQScxqzwdk7mDiWJ/j1CKS9Pz4ssyvWbb7758svfvjxfc1oBen66PD09uRFYrtvWvNmqtVYtpZo6XJgAIycIQV0dZurEA/0VHhkpByCcyXWiLvHuoUU6h0EaU56D6AiEQ2AOYWtuHrh4A6OmfZcbk5nte4nEDjOregsaMDFxSg1uI267lSskIRmq08XF6+WlfHi6Pqx6PgGJ1BA2OphQy086hP8F04P//1xEc4H2oY594ouWp4+KKj8glKa73N4wfuBmMbNr2PqH90UHvgAR68Tg8UOrv+rwGYNPZwk2y/+Zke9enBl2rHDmmqFlhhRNYyi8qrru6Pgx7Qadve9ACzu0SX+XmaqSgCKwOvI5kB5596G2MT
mv0w27YzbFMbv8utmy+ZnNLOZvDP06Xh96d37/LIlmKdbYbAL5GZ9lTsyJ2agVa4UtS6VUswYNIpyYtHrZ933JZ7vNAY0jFDmGtA6kkBY56KzOE9S+dw156IM5Quk35y0g7Rt1BECYWl1ZS8gxc44hDvu+B40DMMECoOXKxUDV1B1qHqDIW5diABIFbGMjAKakTrXaQOwQSS2w6uxtKhE5mCQz+77VolVVpVrQn1lW8+CZtGRmpBRBR1NVeDVvTdkRogqdQ+RHLB/MTKJwP0Cux0ZZH649jqNzgplZGNoTF3mPXfQGd7NWH0VG5iLNAQCQGEhcKwmDzIWFiVNKeUkMTYnVRW5Nh8GKS15yVREqNSi2opPfZOIc0KxDTgGgjk/92sqJEoj4wyC+EHyN+aFoRkU3cQJH5BbUmEF6hJGaVR03a8UEtyZsXzlFDq41oLp5H4YeXDZatOOM1IytDTJtp2Pk7GYmnAYmzUz8OSciuCP6lQenj+D7bNa4u4i46UiiUjOUf8KLZuoRmpbzaJ8AImsStEGBTe3u5lBzl2Q4jC1vOCBkZgYCOxjM7NYN96OxYRhAsSERQAlsgKBhV3WKql+0HnifWBsIg6D1OsZIXefUE58t83AYUk1zWD9uIooZ6M0NaJ5zfIc1WHNqlh1mITk2c5xmr2k8+pNDiXg6tnpmQDMDbpQcjaiKO/lkzrphAj0KyzUcQodmPgdBem/PC5nQ/zUiSiktSwp02ecLStESrYXkIjFRlz15MR062D3oRMZgYnTZe7fm4SGMpxv66zWNTRTYLe87vrsNTY47j++Nxz/obeozn3Xr+BNRmw4yvnRemlkEEqfnGhGLVg0FQKLPhaKwEyDrvfAtYIERd2j3j+HMISU8/F54RBZShxvtdO+qMZuqPezsjzjUnXSE5M1iHLk70o1cJYr5URhp6mGpTdEHGo85n+PN49MBLjUocCgIAoINQTdpt/meTS12GyN6zmcS6o7lTzVYThbqcfoM6tIDMyrMrSHYjJn+c3Rq8DQPs9lIATLU7Z+mIvmmMbvdftolJrJbz5mIEshfRaiH4TvvBnVQuiZeqSMPE9GkEcaD3Dz+5Igen4o7939HHI2I2ISYTJ0o+gMr4GCAHN7qtRkJ2y4i2OvT+w+6a3rMzx9ffvvl73784T2zpIR3P3549/HJFJKX56cXVQ+hHu1OFvFu5oW5etA3jFxrVUNEmRt1gxhO3GrKklcABg1BgzaR0AHIaFZv7MXUSl8JsERMqSNBqBOLmdUJViB2yYTdvQQOAgoJA0iUCIToFqGUc3YmM6tuC0t1Q60GVEUGf7xs33//8ZPHz9c1E6rB4ckIjYGn65+oH4gWve0uU7wSJtFNCY/fGF1hyg5kcoO1JpnbmHWtTZ4wY4x/A4WfAgB0iG5imNb543HYM7/glpVqvYERHtxxV2Y4dNDkfB6qCh374CCb7i6loUolETgfKDLRy4jSFEjsktqYa9FvJ0A1Q61VKFHzSVpTDSaZMr4VAPdZLmgeFwd1iQh5JFvpRgpk9novj4wQ6iGiHQMGM75xWJnjdNsZULTaxjiaHgswGkZAjCgkkBuMwiLPEshdTmggvCBYHwGPJmxJ6KgjVTMrpsttaLnthnDoPYdWC1wpmjdKVQca22vbYsjxcfZN5TsG1mj7UvPwr8JOkv41bl6rMrciRNU9xikDbE7bvleVJDF4IwKE7t7S5WquYasauXv2GovR2rQjyIhTK5Ib2QBvZ1FLK6cMCBam1DQTnKK8bDwsE0lUK3WFza081cwCgZMm9RNVu0HoZiNT1/Cv40XV4k6gVohYyh4tKG1QjLkQRyqIiNhRA2wndKqg+RIcYyadyHsQNEpEmBIRUWahlESklm1NC4c75Ict6LhBLJ2zZNyqGr0/V5TLkqkS3CzHwwqxztp7JvIbc+dQyd5Tc9xLfdwPCIS5X/mnKyoGUd1SoE9XrB+AqxE164Z7+Xc8e6QyVEeIgeH3URszSymL8BzYHjwOdJDynuBAWJjmAXQwGw2qapG+rmoMczf3UuodV8X+cDfIep5/SPtRciwkUShhTjB4RHnNLeDLDNUdBKNmEHJP3XQn3wMv7sj7hgpQNSGP9Daj1aKTO5G27h5oMFHreSCOjn60zWmxb5FEh1EVfxpndFMq3wjMmvkx7hxXq8AHpaBZNIHsN8iNwDQ5ox/fXRUlCSeIMEZCLIbpjXKAFqQzb5mEUR/OzEKshLGN41jZod3hb5O8bnVngFKQtJH0QTy1VhEwCxEHsDrcrE2Rye6GY1LfgeUzjmk8OKYAyvhTfKpZbDNH3DYjzExkZtLdvOHxziq80cwU/VEtM/fFNQdAZ62hqjPoy3EuOOYGh7bq7iKPb4z1taeT5HvpBHUjvIIk281tSA5K6wojVVUvsasiiVICit+Ga83M3FqAcpSHjAkZNw0XBz+yZ0C5mXXBLMArweWzNsS97Hr9a4v9j+16VdDb3jAx1Xyg426DVuh2ZslYDHcwCb/NEKIesavj1Q4e77epMyKaq8CJxpzGjr7TX2kbyxyNEcedh3eXjq7j2csNQd01+2F1tB/M1Q9oaIrQ0fSw1mYl3DTj9CpXeO9191uTZjzgzAtEHUrn9RmbjwoyMxsxD1OlyD0nAeDDrGp0g+vzS91q4vzHP3z3H/+H//h3v/z773+4MDNItq1odZLkhutemNmVohKqVqWYUyrigNdq5uakWltLCNOalurmbgJKOeecU2Zm5ig6NVQjDYFMAMjdA+0DaGVEgXay5DwEswwjFg2LpE0mxOhGhSqLSOimojUg3kWkNAkjy9LaPkPmSMrqxeEMAom7PD/X3//hh7/42acPjznLYq6GpOrQguXVSKV/shf9VOFoo735FeZoqx2pWvL7cow/cR1okYdJRu5+2HsyCrynsFRIwG3bBr+M9QzexG2SkG4DWK+1ISa2mt+Grme7ecMpvDLzGn9I/SoWJm8DzwWDzM0MXY7BeWQqIrIqr1YJYET63QkoA4ARzQ6OAjACnJmFE/wAaaQes0ws7oUYY7QRhJmIOZlt1tHnADTPZ6poPY4//IQjBuxDI87IP/OGcnQ4hAiJgjvmJAtyRVfDZVcmAzjnta2ZCWQ4akeJIitoULfeNtNRv5abLoVY0l5bYyTdtmzdiU4GKbxH/0O1eNTPxzKI6JTXaLzhTnzxvTGilBBIGLp7DYJY06oa2EoSfZCRQFpSMmkgSW0boeZG5SDWIOlaTbW0kglKnSKty2gnMBhmdVe7ll22AF6vSYjd4MVhOct5ycuSnrYXgzGJU8dNNY3kKZi60TKmcRz+gGr8oTXHcy/vBDhLWrLsna+oT1E7BIF5QKgFUIT3lFdKycvVrDo4gtfuDYHLrBKBFGrVrMLVrapqTozwkZspAQY5U+BWj0Chj+I9Bay69cEJfkiBW+Pm2HZEzXCnhCEG+nsbOPh8tXfYMRe5+Ye3MtJ75KnbizfVPnHNQupYXixgpO6HuRC/jqGUYyXRjz090XjY2DMi8oAMrjVPtkuILHdlpyhKHPvjUJDNsMZxaQfMb
Lc+DLD7Ij00z5AAB5GDQEJg4iP/7NNFDTVV3SF9OkxUEXf/B91A1EYLCDOLJYW1G8wLGkTV0EzgoFZwZU4UIEHxsI3yFczuItks3F4d7VV+VJ4cARfuoVFMXDNOdui8hiU46JCOOUhE5M7B2jGKtjetUfQep5S0kWAip9Ee2cQ7exRNsFaWtp509Ox1fEim5FA+zoUiSMvRg8PRtYlbvrAGdAl3lyRRdCASdQl9A1qtIty9lKvChbOkBIoAKux2Kv248zh3m4C8u3DmWbnQVEQ9E9hIogb+VpSwjjfg1uHE5BPOrDduPnPN4P2eGTtWO1gYAIu7Yl6edY0f9wsRGp8SdINjLjYG4M7cJxYQOR/pRy/m7pIXYrbq27a9XDYzN9/uFK6IQKL1piFOizB7662qrcesCbJQrqCjTjIaZI9tbZ4LxSMcBgmNkoh7Hh87dkiP6bDm12kMfO/0P/r0xjtvpE0HQRhf5KM7dOp1xPwt02eH5waAkkSzws06DznfSbSn1nPO4XihKs9fxAlE3WawMADMPKWEwDoK9YpYyw02NU35kIMjAu1uGE4Ts4zdoNvuYiJqHSc90zDvcFffencf/qmkanuByfokFZugtt3dTElYOo27KiQRDJDt6Xl7uaz5XHf95S/+9n/8H/7TH7/9rtjp5frkTuBkhlp2TpmZi7qZdmRgZ2YnOEENRU2ra0c2cnchYbLkFrOX1kVOqwTwIRUNe7WY9ygxEdGuNSdOfQhHGGnmFvBLHChi/StgJmklIk4CiJnV3pe0qYUWiYxu2BasbualKLOP2Qax1QqtppJJ0pJSYqdL0W+//fDtXz7l5eH01l0ycVZzrVWWWzPin+BFveTneIV6VHX+f+fWwBtHuNxmZsa4KZNxd5vuF3NH4Yq7eSxAjEX0IwwXWuzIe8337GHuxuFzzmC8Z/7UrBFmLXAnl2Z/ckjgwc7MnMI/6fTXNRwAGE0CVEB2u2J3p95YYh1Y78gzDAeM2wgpvzWhunKawqutJuKw6txd3UKaDGZzI6HwPWU4P0MFhh3QzcTZG2wnMTuE81bOP7RHbuWdYBKgNKgGj6g/ldLGGESZIkBmMFOM2akNfoYC8tKjMwPpWA9gkyYY+zMf5NiQmOkc7D3e5u7tRkB4odTOui2SmRdpuAjdsmEiB7sbW2QYnAB2D1g2BrcSI2epDq/WxF9rcoQTRDKzi7iZVSt98dzB5FR1rzVad/RAVY5TZmJmMQLAxO60bVspG7kuqyQmRnXYvkvKnFnSNNMi9oMoGlduGANCIA4g26ZRoFVVtPkkaoU5z8hDTM6coqsw8vgSE+ccw3QI2WAdNo2ZVYt5dReYuzhMAWMhEQGzD39jjLg2CLcewga7bxYgkE3/EZxuoVeIwrs5MEiCX8yn4LxR88SmmF3nvUlS3BjK5GDp1nxHrxn0NnuDg/y4jWwK3mgGHxO7N5PN4xxHASRHwOm488xcPlnJdFsoPwm7PmTMelkys3AOe2AYCiENcs5ETnaT4QykL2twz8vBEsQFrUSp8fotFc0y6kYazKEi6ADlZyYzIxiTxikbufZkWxtcjgr4kmldl3Vdz5RGtjxu3M9MVLW2kScUyE/uXsbcNgcBDTDSjRGFUM1YNoK7uPuuffZJHxvT5G03keMakPHNiuo9ZsOWmu4APnop72OQwfh3wVZqoboAOZDQpN5eJ6LwDHWojNAbquQdwxbd1ifq1c5QMxC3TD71yEJ/Z/MoxiNs27YsC0HNLHGOQGFK6eUCU7gdjx8eda27OajFrRoiowib1cE+06ndhA5nAWVTJnAode+OQbz/ztnrO8bj2e+0+EyQr0Ou40xntp2vWZWMZZtZlD4N3Nr2bzKywGHnnqK3GFx0qzHbXQJzl4jQCq2DSOm6F3ePcbMvLy/v3r378OFp2zbmBgmuqteyu/uyLOu6Pjw8iMjSr4ZdSaTUAlIRO6AkzN6qu5yjUlQAM3UzN2fJ9/sQO+YNFuLur3dmyfSAP7Gl85vbYQ0i7IGD+cjQ7a1xAa2Co9YyvmI2tDLn1ycYawKB+PC5m5C5q26d6Am3pBKHLjBEs9lw+7pkJiayG1i+m7/2qMeAihnvHOUDZsa9NOJ+Jw/ZM6wXu8vH+FQdOu/YUBCCVi0xHpaGz+wtgB4jPFLOFHV37IC5gZiihg/mXpUI28uVSNzpt7/5/S//P7/69tsfymaVad+smDLrtld1W4w5p1KKqscsKyJyeCjQqrKXFg4z88isJEnwsiZJTHmRU+ZlodMiKSUruyqX4qVYtf4sTCfllGL6DiGJO+/7vm2llqa81oDDgBqRe8oi4SYSSdFaSlEnZo7q6l581zRp9PIMzRu2dJjK3oEsjAIWJ4PytdTf/P47x5svyvrmi4e8PBAnMxccQfOfEDT/1C66pT38pPCMMUgtrNnkM1MYzK4dhe7QAjfQBGgfBLVKUSJMxREMqjfYchgy6MaemZhRbnu2x88/KbJoCkHOVyxgRCHn+6SwyVS19j+rqmqz8qP0Gk3pOrW62eO+REQk7iUqP5lTNBUMOKOhhIZkUXVvPWk0dRhaw2YsdTiWBnetUTWnWmqt6p7pqOohooCpHJ2Q7cEomtPszr20hp/WngjDN+j+6k0MlTmlpA23KqC3R4QU0YXSn0vMaimllHKWLtfC28gJtcWTmmXT9HrbvRAl6DomWv1EZN/3QWSGBvk1iKALxONFDsucaDTXtQmKvtNkiBNRFlFguzaFJHxzOYFYUnhx5KpOiSWly+XCo7GnY3eAqPSiPpHMzICZm4Jkye5ezV0LYhYZCRGlyPESgwGGmutWTIv5/kbPp1WE3GG8IXIRD29OaB1cMLgTg6NOLKxtMnIiSWFvGWqtDje00SbR7ZIyBwg+sLh3WlIlopRWr2oE6fPm3A4lFH7SHMI3qx1t4gbMYM2ilGogFHdVTYRaa+KVmYXYSkOzECBLShykbh615nT0AlEjkhs/cfw6GDgAn+5E2ABWCTIfiFjhGqQYF9HrFsbNVfW1Q+jeM6WvzNA5cjz4K1Y3tPWdDPLuztFkxFRVY1HViFXDzG/ROGi4DZx9srFiASJE5qXWjmKHmKLuCqsBHHLM85mtO2a2Yd5NS72b/EYQp1bP7O6B0t8IgMAiogVQEIRVBCHazFQSNxgndmbOWd6+fXjz5s2nD5930ecRI/eG3Gv7Xi4v277vqj1CZ2ZbIQcRO3p6z1sJKcydhqHr1c2NzMoIQCY6fBJm7hkF4sCPFBBRjc7Gti9G7tGU41p1RLAjDcJgAtHhCA36YT7qPGnSSVGE75VU1aiVfkTWAQCRMxNL0HwYK7GvTfUohqXrAnI2gIJPRWTUclNPNPUSgMNKtjYNlWulQJplbm3hRELE7nBvgDRezR2qblZi6jT1TOl4LuspmllTjF11d7MbcI5BcBGgHJmxcSgI4G32Jv9ZAKhpvH9mhP5v9VeGAm4dD7q9xjunmzTncKzT7GCoXgS4gilVrrVW89zXQxyAtAyrZB49lrFMd7cOognn6+X69PTj+/cf371798O7D8/Pz7XWlNKb
N28eHh5U9enpadu2lNLpdHp4eEgpret6Pp/XdV2WJU5zeWynEMAWGWyJiEw4ERyIiRTUmnYBTglRSjds1yDFaIm93YeQe5O0vIkEtbQBM/zw2CRl9E1GOF3eUgdBBDT8zwkQYsiru29v11QG0m4Yd7izUKcMxrgn9UoBPx4TQhSl3XHf8YYWver360UpkOHHTp3Z8RVzYD1oLM5lDnwAYEc0PZuEvcc8pSDC+hyACDwVI6CvPx5f9QaniqdEwvx1bRuplZ+CQG4sCwCyEiILRKTK0uR/JB+ESB2qnpgArPn04f3LL37xt7/5ze+ul3J92b8vu4gkWfdaajUWUdD15eruauajk7xnOKvzphbTa6PNTAiZLLGvmR9Oy2lJSWhNOJ98WaiWXErZ2Heg1obLRySyLCHomBMlAXBhY92uxRI8C8dATrM2WkBVnYWYAd6rUaUKY4Kvbc62dcfDzMLgGR+MmPVh8qlXd9Kq5pRXyjlZ/vaP788npJVOn7yR/JDSyVq4ZyLGoCL807tGD+G4vGe/ZxoDsO07eiESZ2ngCMRu+/ggME1jVPUJwAJBt8I+Tffp9OwEeL3fP5tAwrwLVppsMOsVFrMi8NsKFL+t5PrJTTgSlb1s0MxaC34J/6bppGbtDRFmZfecmmoEhwYVEYKoXkNqxJZt20bkOWfrMOXbdQPgFs3YwhxpliYZRcQM27Ytec05xxRjUy86CjOaEFmWpbqxIjzYCIFEvi4YIADHI8rIgqenp1DqzFxrNdMwGVlkaOVgjLAGdBrMPW+rGWq1UgqccxoHw1E8MGRWrQZgXVfq5V7omxyPkHOOrrllWVS3y3YdpsZhbbfWQsRTqGq1w4CmFh4wd49hU8y0LAsRaU/TuWtgJyxL6gG8NlB7TWtKidxLKWaWc25CP4YEpiWeXXKb46RmwrIu2cxK9PtFsk7twK0CiCjnNSR+hxJt0pzAkAAvNTMXibOrZm2R7uGzKbHD8OH5qdT89vFhleRO163knF+u13VdU07mxFwBBVirOYt6hEEZOPT6MHybd02Nmfd9NzO1Em2hOYtZjUZWdkQusNG83bQPze4HEQkRWCLxsiyrMvRFY+tK2YjTw8PDw8OD1p2ZrSA/tEF58MhoLbFXe9nDhDXbtNRSNtWHJaXL3lHXMMYKU611EQ64wJSFmU0rgNPplHZS6jguE0R+J3IOtB4GWe+JD6K9sTjN5ywQZoyKjrw/pExQ2uhBGlZOv5pUFZFo7CilaKnX6zUF3s5UI8c9bBEfY06EmIeelpyIKEkudR9fFywc91+WZGYx6i3nfL1eAaTMWuqS8romMzudTuSIYU0PDw8vly2CSuggQzAHB9Rde4qiOxEF1IG7u7e4SRJRLW4eKSXAElNiT8mKV3dLlMjNvIp4PmXVsp6Wt28fHx8fU2IReXx8/OTTN+Iansm+79frVoqmlJb1dD4/mmLf96en55eXl+s13qJMpNUAixAPolOXmUDVvTXuDogvN3djoiQ8rC5mZsK6rr0BpqqqoiQ7XBq/vciRc7ZuN6zrejqdPMDKI6rdcpNt9KI3UJnRaRkisQKwBCbKOWfKzA3KuFZd1jTij8zY9x0CESllE5GUWjwlsqbErd81CQcN7OUKIEvKOb88f4TUF1zC+gmKOp/P1+uVc/UWEafr9aqqIm3m++VyeXx8bKYnJSLKuRFzzjnnxT0arWmI6Nicua91UL5NuB1jT2jqrRpsdaALdHk1h2YmtWgjqD+7lzOCzvjSuOfg0LEkm5B74lNjnmGoxaEi0au4jbaYYWkoXCnSvOBWj04R5HYdeCplLymlvZRaTURK0Q/vny6Xy48fPjw/XZ6fn7et7HsNhUVEjvzxafvw8Xq5XPZ9Tymdz6xWQPXl5UOtNfR4nOPpdHp4m06n07rm0+n02eeffvHFF8Kp1h0ws5qXBBdoIUhapGHXBGYpy+GnNQl2K99EEJA7E+xKBFdCNIWphW54DenW/BBvkNltP0dp6G1V5zC5hmdFd26hCNTCf5Ocfde2yePEw1GPSr+Z6EJNHIhccHfhNCgLBjiIU08qOjORl2DMMD9qrQywSC1TxnKi7fAf2nxRgIi2bdNaRSQONOTSMFG4VR1H8dORjm6LnNIhLcbKh+6YqXdUNjZt2PNa3AfWm1lEp4jICVodvg1irvtOFAOTq6TUXGtmV4V6WhacH07Xen0p3/7h+6+/+uMfvn13ebqoenVSQ9UCgDgVM2wbgFIrM4uQO9e69+ZSL5wMrMQwWxKt62lNlMkeFvn08fxwSgw9Cb99c17XTECSTy+Xy75nJ9RaL5cNQF6WCAJWC93qYMrKUjjazNbEn7x9XJe07/u2be6+FQ2/2wkL50W4NMHuEMpCrgar7fTNukljIf2oV9kUc5EMcXdX8lLrxXYjoVq/f/f8w8fv6JQvL/tnkCWf4OY4qLcRSRwQ7q846v/sPYf+6qu8E9ZYlPlczhjR6vZOa0HGKdUZrPGqfCDo8HQ+j/fF/8wMVjmlaKILSujxaAKBWlLDrEU3AKB0/gpK79zry7KM4EtTx242NcEBoKnUs5q2aG9KwwbrN+t1H70/uZpyz3MMDTJstphsgUlYuXsvXeuaLPA2mDmx1FTUvbk3bSNcvTUZh87TVsp5A5UzHsRHZcV0NsPp2rZteLohblJKtVYwRdtvR94jZq4eWrNBgKaUhbO6xgD6UCFDsoMa6CV6Jx71PAOTSxtmlTD1TM9bM9tGAV4ytjL0oKqu6xrPJZLDLXGVLsCtunl1YWVV9QbiikjUugcwQKBEDBNK3exA2gDQa3AOqj1ka86ZB1aKV1WFec6S8yqJCOZmKedlWbZdmxGjupVivX4gRsxHcY6ItMB/Rz2N59RAdNFu38CY2dwEFL49Ea18Dikc5mYU0JpZktyJLxikRXEjdeaAhbNtqlqpwnzPqQ+0iMJqddXdIG6UswOc0uIutWj10RjTo6VNWISVH7nfxGwdsPFofAIZyAS0SApGzSxMMXksTpwzZ1y3QXs4eNXHiymLiHjEW1sFvwDNHAxZsCxsfqwz3mlmA7zBeyCnx3Ju8sAtZzj0NICAtkSYL6HUj0YEDPRMtVa63BNuY5zDTZps8OM0YTxeZ28FFaM+fsgs3GZI/M6+meuQuLmjzjwcv/G8nenuw1dDIDS+M8xfASCa9Nw9EXNuZrGIOHRYF9K7RrdtAxP1XtCGYUJO5pFZip5+8YZQ1asJDxikCErVuhNBEpidyZn5fM7n89kvz3Q+hTwJ40aEchYiMq+AZZSF82ldHldZSIm97Kp1Ny0wYzJhLFnKdgUgzG8eH5Jw4suzG7kxLVstqkpgM6OQLqrhb8ORhIUlLWts76VeeiiEaIptXS4X3B59He0EEzFQU/hwNUYb0xIBuOBfpuYuAA2caRxNbys7RGgIWFMK5CmyYLdqFsVX2bzGaQ6SCXHER95J3dv3DQJp1G4eIFJmFtAmw+Mdkj+34sNGdSmlbT9i542GjVpzDoZWPipKxs/x5qGGZw6iTvfeeylHoXu8EgG4sTnjT4MdfArozow5h3i
bPpIE3ESgoxMyxhW4twVHwSf3xOOddvYb/zP0rFtUpHNBg//xom7qHCG1HhUyVY9RPUkopXJ1UFKtLy9XprRr/e7HH7/9w3fv3r17uW7bVswMfji913fbKBkFiDhVFeH0/XcfX15egsxU9eXlJZzVP//nb+Lg3rx5+Bf/4l9otc+/+AyC5fyI/Vr2SrUCSImBBGItxXs597AHeOrJORR9FJ+LULfqvBt8NoN5vpJOr+sfmNluUf5ef3zsP/XSJB+ZwJGaGD/PnySiwEchohgEAkCtdTSFcu3C+SfxwOavDs+107mPT8y0503VOYPInMy9qtoRoAzKHAnMY0unBwTQahjmR2Fw132N4Pl+u+LXOUAflhuFk5wTvPdides2KsgbH3LznNttSZoCNkDAnN2qq9NlE8mXl+df//offv33v3n/4cUVtarlTAY1c/cawZcANjczIqXo43CP19xpJRERUiZeBOdMD4uchD9/+/DF2/Pb8yLQxPRwWnIS15qy1PVU61JKqVX0vKgZkZhZUYu0ZOCog6pnkiUDOJ1Onz6uy7LUhcsaGZFk8GqoapWc4JnEMqtmZa4izpyI3C06oU/LgpinNPF+NU0u7uZmkEAw8K1q1frZ+Xzd1W3743fvf/f7P3zys//t+YsVhhu6/mkWAe6KJv8pXEQC6Rh7g66meFDzgDqAmZdCPcDERxWGozRQjW7eOABxnwIcN7LF55bCrg6oVy7Mb+vy/waLYSiOkeUa72yPABpCb3zdzLB3zJvQI3+hsYc0TJKyCGkd3SaBigsHMzNJDwgl5lJ2ACAIUW2tH615iUW67BvDi/oDx7plKK6m5Lrghnf16ZSobnutFc4x1QCAurXi6Y7EHUsqpaiVsS+vD378jxnMrWdyvJejHgYOcBiHfb/aPoTzP14fP8cW+uGruLoNh9AaAjgi7xfvr1rMEjmjgbvU+c7jCH+SdAa9hkuwrBHXDtEJEcnMiegaUk0tKlqnUKgdfRqq7gOFtiO/WcTgyKqW0kB0RMRh3lx9AEhLjhPkhgTYJgG4NWd7pt2mwO5j3kEaZE7XUuypllIez+uyLMy4XjdVV6WUkoENpiDVwHt1ayHcjn5npm7cOpVoKF3rEA61VgFBzfrzesv7sZubG9SQenDEPOb4zpqSmWG9f2+aqaWqrjDCvu+JBWSRvq5mHt2nrceYgt6YOYZ9U/deAs7xlaDq/kmLE5mZOWtiNubEAZnbMxh0DJiSBm00yL5tiHqNhMHgjOhsjpRjM1a8jT6OP6NlvdVH0Qi1HaejDH+45MH9jVPa2kRGEmOQBEWAoM9FbAUbGK4ydYGAW4JRi8mwagHl2s4XCvMBtTJk4ugYFFBKydlhxuqq2i3nFj4UQIhLNE2CHA0Xyl0BclMWEkZmBmnOcj7xm8f8+T/7y+h6CgIM04WZ97L1MvKNyJN4EidUM9r3675fAZZEgKQkOctmJdytdV2ajQdb1vz0tLtrANalRCnnE1pCviEONDxXhSpAWYgITEELFuXOITVmPTHcwJR4Em3wBtjh6NiewZ8jbnUTonK6napEUZP1St6StaWEgOVIgA0AboruDOaezjK4RktkjNB4bS8yqOMKhr9yk8TDBKvdxIIeMm0mQvQmyxD01I3KIRl4Ht3xKmg4RPEkzY4WCZ8ia4fBepuEmRW/TXA18/vHU7h7d/z87iPzr4PZx9pomhcSS4rqiVFMMRy2vRQRiZQpE5NE+lQatzCbaq1ubebstl3E7PJ0edn33RQ//PDDl1/+9tvv/vjZZ1+oeqm27/u27dvWkvNal1HLk1KqRlqxpfr+/fvwBiNuXRWlulb99d/95nQ6reuaEv/ww7v379//63/9r//8z3+2pl1jPidIEoMZqjGnqNOJzxf1aqNBvU3+35ZyDWk8tvTuvHzkGiYDYz64cV7AYSe8vo7X/QCYmZd39+bxvf2liBT2TP30UP3vXRf0tbV3DZpuor1JTBFpOcnOJ9TTKN5NYbsFNzp6+25tytvFu98jygAHiBqrHXpnPgWblh0KBcw4zmtYXAQALOq9taedF6gTd0TpiQJhFcyp1uq7ueVv/vDH//iffv4PX/6+Fl/XR5gF6qmp1u4RdMPazYzdIE1TGMXI4CJMSWRJOGf55Jw+eVgel/SzT86fPK5vzpmtsumSJAu7EzgRZVXdNqo1AbBwROFFfdtw3UuNwdeLJEoLrwI6nZa352VZkq8oJeovkrkXtb3aXnUrXLRqtSpSq9dEpCIpAl5SK5acVJsmgYQ8iZNidXU4ibEgcA+1+L5I+fACXL/5+ru//dU//Ff/8v/8s3+1Qm/tk2Eh4FUJ5j+di3r+8O5FTGQZasPMHE5dvxCRdHgFH1m+mzBfC8/haLhtsexx88iTd9kedhH3eFm7ZsVNdCijSMZRbyzEnQSI773Fhf1TimOwUtJG7hq9N1FQ1PmwrVuYhRiCRVKgR1IXtyMajSmoyQ0AI1zHmBvmMBqij3pBC3PKOSfJrZCpOYnRZdKlEh+6zazUKu4Uk9ijUHMWwUOgcIdSHYo59g7dMThcI4AcI5M+TituUjv2BncMtPii6/UacWjVIrIwMyUTSUXVOXJizeoNnEFmdpDCQwJKB99PCWN5w99zd7qFYx7nLb3n092EWYRSWon8fD47lB255Q+51nrdXqJUsn+knVeU+wKI6rXxgF3cH8OLYia0tsHxCKRvhQUmBBEFjCimrJFIdvfr9Uq9TiY84fYeNOvU4BY97ERCMJOcGG77Fn3bEsVpLDCYUzWIu9dqWhtIsqHfd6J16u6OD3pg709UoYSOOmBeCcRxdrgp0YGPvo6mH7n7Nimluu+11m3bVuZodMw5+2ULo7dWNbMkJBHdMDcYOZjIIe4NKkZEGMYCEXGLzLACIDfyY7xvzDcdhvhM6mPPmZkqnG40sUzodiNv5u7CR/2nN/isG6NWptrR1+Q3vneg7d1ZPHfLs6nCbbB3EGGttXfR3URA5ls1wdLmYMHMvB5YO4f4I2Mn77NMvbn6qlrWlIMOWURAzjBV92JmTgyAvcMkti8FAEW4O9bGOsJZaEnImbOwpOW08OM5PZzwxWdvcs5EKKWYQQREprUkttPDA2AvL3K5XGDVdSdba7G6F5ivaxKJInlhQmK67lp0t6zuDi9ZeM0nSetoeY1jXdc15/z8/BwRKzPUWrdtv+611iKAqpUyTDWyNksuWxsc0+y6LqoRZVcNL97cqQVKIwKoHa2Rmql0E2UkOgBq+pFFP6EeitAi1MHM4Da7pc3SDKQQwEHJuUa5QytlROtEEkAk7Xt5RSHmPvoXjmBKPJW2ycshxzxa32uts0L1bvJF7IYOO2BmtJ8Iow46nzli8CN6M8LYolGoST12MxSNT/BLfnuN98/a7W4B45VxIvN9cAskM95mZqVsPuXNunxQUiIQCzslETblWnUv27t3HyLwwTmZaSnlen3Z9/3yzFGLm3P+8PT8i1/84ne/+505bbsBVM1KKZfLtu+7ujFz2UqplpKmlER02+sLX2MN5sTEplC1Wmzfqrura06w7M/P16
enl+tld6eIbD48npclSaTw1WHOPR5xt2NzUnf8qYtAu9/MOPrXSC2vjmA+R8kZXX1zh7sc8vPuFuM/3iu+Gr29IrPxjRxIMIdt1upgUY75fj4lNKQHDmgS4xReMfEw5QeZtwFr3mOBjmiIBLdMnZmFHpkJJspwtQN9eQ/Nzo/A7ERsHfUXQFS4EBrW3bz5gyaH5+kD/iAqzDGepak1kICJBr6aR1M1N1ucKKaquDtqpahuVQfoxx/f/d3f//Y3v/vmstWczk4J7E6upjVaagOYtkf5EyHMSSZi9xS9v1JPS3o4rY9renNePj2vn39yfntKp0RvTsuayXbzqkvGmhOxVAWcnCXTYtXMrJqrcCklAZSEXDd4JWWShSlBWPCwpseFl4XJRRMAlEIGGKiob3t5uW7PFy1mlak4NnYIcmamBPMdEYY2ITYmQtTHeBhUSOQxe8uUAx46p5fLBXpZV7x/9/TrX3/5b3/3zb/63++UHyDWSeeerv/E9Y9rUMWNVO8YOURkVmHezFoAgTIwo4kewoQBRDSfe2Ed3ToOiL4lp+bwNO4cA8V6JuAWxXfwMCZ28Jb16S4lNVeQejn3YB/vb5tF3Eha3D4+Zsl/vBkIlNH7NQGthSmWYx0BvwePbFhi47oVf9xx1eOLp6RZf4Z5KYd8oSmOG1P+Oo7ouq6llFKiDwHjW8aDjQWklMiOxN1xopExoFbA2VpcXunLoYO7SAq0TAlO6b2eYxOPaVrgxMyb9axIh3gBHOQpcTU3b14Zcwr/99Tr7+Mpmr/nxq+22AggJAp7Fzkv67LkLD3WSzAb4rjUbdu27XKVtLiRGzGlmITTHgGqWrVOSTyCw8seG0Kurd5jRLjZj0c3C7hVTFnNVt3hXqOqVtqEyTZfJL6l1t2ncDX3Qiwt+7pmEXKrUCtFt22zUt98+oYrTKvWjmPGKa/5aOAMQBgid69uY7pTEFFUzrXnax4dB4LsGFzp3uYYJWaCWIxjaHx1ZGVFhHqnr5lFIoTiMB0ppZhGxMw5L0tOXnXjLYqp0VIlRwk0w9khxImgjIAvm1jUyTFqEgQ0xESnLushlRaOsYC6bS2Do9LsxuihQEbttIvJVoiwjnSVPzhihIfuLKpZVs68fMduY6QHWkxrZCdoFnnuRGN6io3ZkjJEB9AwaXJuc2XG5NYYPxChuuhNVdWUOLoy7uQME7F2cTS+cV4zPDbfW5K0ZT6XREtODyuvp/Rwzufz8nBecxZmV91HYCVldvdSyvl8Vi0RQjqdTilJ4GSsnMO1O5/PKS2XyyV6Qsx136/7vkeepNbiAAjnhyyyAqj7FsEdkcLsbx6XlFLOK3PSatu2Xa97KeXD9VpKuV72Uqu6Uc+Wm1UAWcJMSOHgEZGWPXDSWuMzt2Ekg4kIzTX0vkXjT8E+psRy71oA7YhrraRppJY9fL6bos1ml6J3ebXgHRt5Y+34ZgSYpAe2Wbs6oi8PPo1LWq+4uXsSyTmH3As5MDRRZwGL7sihXIdDO+45LfiGdF/9ekRqvadBQqrfaV/vhvtPvj6uO0YbbxhLpW6vY+qPoCnHcq+qAcBqbZCDZmZWuZfYSMpEYkrqMLPrZfvh3fv3757+4be/Oa0Pj2/fPL45n89nMF5eXp6enp7fHw3533zzzS9/9esPHz588sknf//lb9b1nJasquExgqOsOlc3NzUFu0mtGwBgXVczo4pAsdn2rdQCIKdUiprt7urQDx+efv+7r/d9P5/PKaWcc63GBouxSedT8v3uwUWk98jdQCwMuTfLq7FXcw/bOE0ArWT69gjmA5p/nn89PuLTPIlXcbfmHdqNsL2Trj7qS197m7eLfn2T3tcUFde3didh5EoOOu8EjClsNBmBPV86ccG84E6MPu9hUyhoyMOT0Lj/FbesRB3jYPxJVYngFuijIfCZnJ26ORenJhKOaxKGiFm9Xq6/+ru///nPf/Hh6Xk9vTHIy65upFzDMBpKM4SdgFgoMTPMTYl8EUkpvTnR+Zw/e3N+8/jw5pTfnpdPHs+Pp4S6PyyShI0WSyQiKYswJ43iBTrnk6pv25bUsKTnZydVLDGscN8Kq6oJ5ZQyy+mcH1ZeF2GX8CtsEYWroyquSZKwELYN5iKumwiJnlJyElMh82upTEwsBtLIDToRsStBiIxqrdUKi64snDMZgWhZTtd9+/3v/vDrX//D/+H/9Mc//6/+zURg+FM+4T/+etFerTi9clfcDKCd/iETmrCN7HS0BHe0qQn71wCKyQhHsr1hRR0sgBGdmQRFU6zeFGsXzjcjrME0sllR4udT1Om1uLCp28KnC1MGqH1vbzJPwzL+SYUXDqGqqlpANcbicm6To+OzR3bvPv7ajLnhd8XupZREcqB6RFdISguRjh429Pyku6uWWJsI0HvnIsjdPbTW69+gNUnmAO3QmtwL32fZFA14r0mhf0tk4Zp6MKsAB5TggHRvuxmHd7N7AUtLzMxJsFcfk9nQBsWMmjcaRT6Em/nssQlMoSliw3POp3Vd17VXfDi5Skoi7O61bNHGllKiXv3i3YaLX9WKNVg5ibCQAxrRSgBtbEIDlQVgpao7C7glbJ3YgYSOTGNmBNF2Xq6qSEYpxw6H4y2cjVo3gvdcsbQxid4Sfo2XXCSntDw/X8Z7Ym+X0yo5uVHMo4yUuDPBnQ1zHyaFZzW4xQ69ElIViDFKIFDLARITM7mHBemmg4C6YR19fdE1thBsL0X7dDP31sEYysum6jmiQ5VyX2Hzab2GqxT15bHA8RS3dDk9Ti+8mQ63zeekAzbKZ6HADkVUet9rX4+RhHTcc3xL1IdGZCOA4ZppMFYCGgHD17bnzE2zTOoMZW0Woh9RJBwBpoNVW2BiXaRD0XhLvLsQm08GSqgktejtHDspxJIziHOpIjIKnjDtYVgSw38hImZkIoIvCefT+vbN8vjm9PZxXRZholKLBp6t24A0AJBSiiJtZn7z5k3Yr+7+8PBmf/s2JJUbCcir7nvdL1erhdzJNUnilGutbgq3nE7rutqZ911K3cyMXCVLznw6SZLEzqprAKO/edlfrtePHz++vLzsVd3dwACpGUkSyWAGWN209RIHlmc7aOnVya24gZpkU3VwBN3uYa8BmM09EuPiVnV/+C3HPge1D+MhKD6CBal3BAW9qDacqvheVTW7QdREBx5Dd9vurMnBaGjpGTOr1mtGGnFC+0SSm8LL+XlGzJGmcvRJ1LT3cK/CmBXieOf8L4AxCHReLU2DwoejOyvWO85CN7LHY45vYUpBkrcPxekY5qne6+JUlShAxbyold3fvf/4+9//4fdf/+HLL3+b8rqeT2/ePLz59JPTadn3/en5A+3r4+Pjvu9f/+Gbr7766sP7JyMu6ntVYnVwMd2rmTm8GlxY3F2dxUWIo8xARJ4uTzHfKBRiXmQ9ZRF5//5HMGKM5L7r09Pzd9/94O6//+JrEWGhZUkpn8hIrbBqzxL5Yb/0U583CpNAHgc9/ztv8qHi3fkWdXO8QUsZHx+HOIyTG35pfuDxSoSfMWQ44EwTXRw5gWjcR6cMV+XJp
ENt3cCUZfrq9+O+LjjS8X7ni6arChVBLdbqPoSqMZLEnO/YjGwQ4ERSOz6OTUohV7CnKRAN2cvTvMn8Veen/ae2EVEwuC6rU74IJQpivHDqAU6RbNnJTNozWEoRJzKcxsYYe1K1+IOcyUmInafjBFJfpl/+C4SICBqiUcBbJpfTsaUUGoe2SyIlMYvR/CCQCmLG8HFOQ5TSkF3sKEBYC5CRPCh7KVoBxu4bbJNhMWC+Xu7p6o9Vw+q9wNIMEIEWF2ygamqMwBPDa5/pl59FoHklqYKaErzcN72y/bZd/x7a+/vv3i7Xb125dffv2PtdYqpaje9recMrLvO/ZWL5uQtNZ6O0Aog9hAI1xUhcndW9s9eb97UwblbJ79I3qPpvDLZm1j9is8ZDc/mjVqwRHMbeIt3ZyJShXyeObW7diofn16fbluCCM3FXz9ssE6yGvtIuzRW2vGDZcrAGUkRri1eH8/brGTaAt00BHoIb1bgAzBPdUzQggSUId2EmLq3u1SuVY1MzA14E/ff63GIlKq9PYGoKqi0ZfrK3UnJmTkrxStmZnWQmcqZBBlRMusPZgLnKK1cIgWgNCdlBCBlVYDEXl+E2e9yuPdo+fMtGwbHlmK4XcSWCQe/T+2dBMz/pwqetHe8nB380ThUevoYaNRqMxkmt+dzkeae6bN+ixuhxMqE1ozEclwKT1oosgoxDyUJTHVNJnM3L1o5ay7Gwgk2XQTaJRzNTMRCyfQnN58cuMjmAAwKNLlZQbfqWgQAXYgILOPjALoCIDt9ecnfeK3j++3Y7e4CEuRYi00PMJKb1flV7Wv1b7I/mUL+IeDg9CDHBROsB4RZB39ELiAatHn58vr0/N24TErK2MVj+M42m3XZixEVGDSWmutuUV4lEI4ejejIlzUOFlTHExB6L0dvh++g1qpIgKZDdgaqrRJSP9Aez/qtex7v+24PGndNOtS5BTm5ImMcyXO18QGMkfrF2e4x/cPFfpaioh8sxdBlKJP5n58b75/wf63/r7hCK5Wq0OsI7lpClSfdL99/N320+2Xb//Z/+X/9J//V//Zu/8q5UroBWALGCgCGgdTBz2dBkGcw5hPceP6k09W32wniYjsRSQigk/eaL7Pk0f6IQCcButMMLqFDYa/caJ1OgeIREUC5hE9Bnv3kBsQkHRi+cmshJsZr+wYjUt1s5WizAtS4sgpBkjFwIADUQohevY/MyngRBAhemQgjxFGzpr2Z4jl5yJhhOVKTeZOVRElGh2e5mZuWHN9QV5YHNR7j97GArNObpoazSy5KwZndE/z49ZaK/WScWStFZJx5yhbzZsc7zUtXzb9fwpeASQQKx7TTg+5n/PGbG0fZYHMc3wqq+J+qDy+iLS2R05WnQ+x1tHjxzwCo7uCUyUZ6sbMIKg40QDSScuk6P1wnfkEWttHxDJxKZTTgQW1VmbtbqV4n2jdrFdkLpaZax05thb3R3EO6Kdk3Dl2R6pAOCKIzzIBjM6c/DpH+ExacSVlCmIXMgonyu62SBVGFAQwUxHKmWZMVbNFEMFEVaRuuhWtUmuRrcqlaM4t3EpRVX2uzHy73V5eXr5+/fry8hLmR7tlhTlzbzkWchRAxuodEUXdkp1fe++LlHX4QKOiijEzhjholvu9t0bMTB4hYhwN/Xq9Pl1F35gch3uYm0XmsFprNw8lkxwe4C2ZskSkN6/1IlJyCKGzOJG7b9u2qhzMnH1fSwAWQC5tOeaEboyGqN5ngeX5+TntwWAVSkypxxKtBYZZYdWiYjpLHaY/wfeNcOoh+V+5fVLE65N/aYCsrQnjc+dPmafz9mnt45RXO9/IeTkA5xFAE1Y0V+vKeK0vulF+sY7ZKpS9fyWIVRwIeJxAR3kuTZRm+LZtz69PWdbo1m63m7snAuJ+ivTeInv5rPfOwPVyKTTqNmd4pLszg0Hfv3//+PjYtsybemsGjwRqpaInj5kJDiIaA7hmATNXx7YVZr1ctxS/UwXNCJ4zNmTMR0F3I4+kFigsq0lmYMK37TA73DI7TtwxIYvZtr0xv76+fv/L30CiZSulVIse45nnrbXWjmZZPCfqiPuMtUmrQTQ7Ns9vXDGy/uf85Se5ys3dEeSzny6ba8wsqAeah2SxhCbYDGvOG0NEeutB8H5AevidSnrbNjs4Zj2w9763/TiOCISP19p77116d8xB2ziX5ZNygh7U71qqfUHrT7VBPs1YwqwNLrVwNoKrxb3W7bxYePKJr7xYfn3VZDhrLH4yjR6YTS/5yvZ9v91uCdDIajDAxMrZtc4STomlz8GnEeGtk+q2bcSD7oWZhYJCKaDKfT9cUESJoqiGm1n/+HhPq3cpW/cws8N6Za7CI0XOCAtmCEAhLMF+XEp5fn76+cvry/O1gFVwqUqUPd5RqlbhiGje3H0/aPScMwmo934p9fZ0fNx2dRwGWOQAFyMukA9zZyLhrVS9llIKM+CRfYkzQnWeftjoZ+YC5n3fv3379vX11cxKKenkwx3+UFI+6zvMIbFEBB+QQWZexb0ZOs7f11fNzuK0pIjcH3aLWBHgOvvZIsDPn8wDzpUep6IKg8CUsMn19Xlwl5xrt7CsSblBZIf7Y88CMyfbTdBpfOLsKgpicLrT9/WSpnZ58fclBmgdIDWcMKvr3sdJZ6fGveoChD/aoEFtczJw+Qvz/r5///U9a3fCYhG9HUAwBRETKKsUdSvMPHqucl1HclOf6kXMtZRr3bZartftUrevX7dEWGRg4A5v/bge27Z5t94tUzA5orn3br1FMIGYIcIyuyH8hBqrwqg1PAczhqiWUoikh/Xm6b5++/UNwvWot26lFKJI3lHbP1R127ZSanT7+NgTDra3Gz0KW+qWum3R21wUrKq11oxAzGzve4eQg4KD4L35Hk/X68f3t//yv/h3/4//5r/9u59+t7s7N4XEGMFFYAKB7nSgS8HFD9CGhxcNjMQIHjGQqUFP3/jEL4rJ+TJFAZlAoBkHRu4QyfO0NPbELwDofdFhnvq5gPq46PK7K1W3NMLyi6z30+cDQTmO9oNgR0QGhDS1xLrle0VkLdWx3SOsdT08pxhgOjAxy4BxH7kZwJgNdj7gKmnmt+5jJ4YRitGBlkcxb713jjvS9G6rcjzFCVO+LuV8rcSUI1YSnpSoLRY9ux0j5SxyPsI4CBMI2TfFcXdbT95bTzRCYh1b74Az13UL64GuZ0dE0wVxInJ9QISeN5r6fQF+mLmIsKqZnzO46R+01pTLUnZDPN1h1sOP47jdbsfeu1sSTAAgKXkQPrFXr1c4LuzslAwWbMgD/RK5G7PQaClMYRXVImYIJ+tOxjRq3wziCAILCSMKkzAXYeaRTlAGAwqqRZ62ei36dN2q6tOl1qpauIgMpJlSRBTR63b58vJ6uVxa34m3JNVIxKCILNCvQAAo4ziOVL611lq1tUZDTBNSy2bs7mhhCA8Cy3wIFkHCUNUSkOZUPKR/eX9+fXkqH83dKL2xPsYPHLemNbSOKCpth6qylG+//Pn5+bmoNvMYsxB1NUEtNSBzDixOOQKcbNsSewAJhh
vSosKnrok0DBjK3yjIZ7VwKJQ7b5Odj58bTjmXs9r617c4oTFPov6wlPDo7cTJqT2vhfVXfiQtWNeTGguna6MZ4H26pHO8h+EhjUvlOU76x/0Tc76SZ0J6t+J5OkZah3xrhr7+at22bXt9fWVmIhzHsQ8K5eGUlFLczVrPOSUR1HtvjFK4lIuEJVwn4Z2JpY8wZWbm79+/e++/+93vtiJpbJhRVNNGCkYqvmrpffRFLwXYe49JT8KTGqr3/vb29uXrK+DwcHjEGLARQaVwu/XUe8/XeHp6+vXtPcxTV1eVy+Wy937cPsystQaiYMnlkAGhEv/+p5//+X/6j2YG6qXWAH//eM/wD8CYjtP9Lp/nqIam7ZkPHpNfcVimhIH53chlUfF840u6psEKYDY/uMPMB+3zgw4fiwsshTEDSKe1Lqh5qFY7bBnUpH0/L7QZqQ4VT6Q0AMmYRneQtZxNsk86fjsxg68lwI/80ssQ0L8s/5lIWiuI75XDgdDDo8I5HyESGE8jQHV3wDL3lP+c8b8TtVSfpZQMCJn1drv13kvZCsvBu6o+Pz+VmXJ1934kNp+UAYARBC/EpRQh924RsakWESJSplCVih4ISm6M4X5RIBk4lPgVfLnUr1+//vz1p8tWhFBVrlvpx+7RgdCS8wxtcwXQLpJOm2STj/v1Ulvvv3x/88Cthx4u3bsHSMG8f7QeBosIK3zJQabuBsoJRui9g1mZ4H4ch1JdonUcx/fvgzFYRCAEy1F1M/oCkPMw8hXMVosh0vmuM17yMYd68I0+Iiw+/c6TCpWmos49Um7oNNTq/MVla+g0FwcTMkojfLrDL+PHYOkuiiBypsF6DwzXbj0ZWu4j7sdGOoGPJmhkZPwHF+iHjX4rA57a/9N6iXsP1cnsnrznzGfGwucOb3xEq+8ft+/fvx/HQVKFpPdMx7gQEQUDtdbnp8u1bozMpadvRkQCjLg9PQdlvV6vr8/X62UrpRSW5220k0SEG5y8Oanq9Xpte4vY3T2KqCikQ3r3BngEm1m3EC8MZREf5Pk9vDOzEicmxlR63/e+M2sQt8Peb7f32+379/fMdtHslcjWjJ9fVUS+fPnyu9/9rkptrdF93DAREWcToOTIDSuq5hbTXalz27btYx/izSyFWIg1qLMJgd3/63//7/8P/9m/paNdtmsPckpSqKR2QPK+5Mt4fOtLEvLz34gP5yvOBMX4hHJO8SN30PxCYI6tX7Gfh2eyfUlWlsTlnH8nT51PPjiK43FGNCYpy3JshtD6mAYCjzitlLxWnvNvMInWx7emJJ8dd58tGGcrMI3t3T6uFfHJH6M5bmR9ssI3Sq817qspImxWPh/dyHuMqr33Zj3TVfMUTDpE/GjtOA4BqbJIvR+aSU6ddamSkrTgfLnpfCTzXgYGrfXT8r7bRXd38js2fXSy0ic/9fx0IqKHt2YZpmYmhmYW33LSoFuYZUY/1206st2NJtY8+X6IiEgQPS+JwCwkIBrMYQ/qO1Ottda9HT67LzAalyOCmIctH4fNItXtdrvdjuNY2YwYTtR9Wy7L+jkOwinr40VmE+ddCgGR4dAkB10EeTcDuXXAGcZizJ7VPwYpSEkYLsRVWDVqCRGRwvlDhYRwLfW6lap62cqmcrlsW9V0i7Noth9HKeXl+lRrLUL9uCHi+XLN+SqqWlSWcx/mFKjKytdaNd/ReC8jwxbu8MEhHU6gCEFYUICyPYBCAiA4MzSIGCFmJM9Ply/Pz/Lnb4VBLN2IQHO691QKRAkhYFISzXxVloBSZfps8FjLYb0CcAQsHcElumCi3nvvWQlfqkEmu0wmSuahZqNO21NcGRwjWSj0yEBAp4KhPU4wyxXCLP6YTv7Xt4jAby2l397zwQx/jgbPP9du521Fh0uD2xxsPeV5rEeLYYpi9B+OXImWhzaViPDo7gGmoJUaiNmf5t2Ngs2M745CNnp1976On1PX9v3j5eUlAx7MfsgYFeNu1hPRRCAKIx/sNSo0Eqi1lFJEVQhHC7POXLz3bdt++vKS76tupajWqiJCHk4kSqpDKuABvwtYHj8mqUA+mdbax8eHmVUVYg73zIilshSAISIgLrX6T1++fnzc3m8HkJP7oCLMlFl5cnP3Yagtk9aFiL58+fL1p59++eWXfd+3qR5ba+/v79dLnRnrEaOGsDmIhJmTRMHM3C2DW2ZGMIbXzQDIQT6nhmD0tH56p3NpcIyBq3kKmmr5xAR+MnurdB8RwiykFMZskRj6IWlhZjQC+0hAh0gOIvPJ5jdH1IomZjlOU8iJODhgWLudr3mlYz8tHJ/YTtzl8D48CndcwANkJk5GOj9fiIN12KEQBmSUp6vEswUmZ0+PC1jtPdkFdzt2JnOgNWvm4VQrpf9XWFBKqvXL5WIfbzn/0SKQRQyGqpRRxncBQC4qUoqIPJfSrPfWjXm7bLZtt6PdjqNcqplFJ0RweDJgVy0/1+3Ly+tPP/30fN0YBLdS5VK0M8I1wjJwnZ0g9LQ9uftEi3R3EmLVGu5GfDVcWn+79f0wYyaiA3y0tnvPFEBwLD1ctYwyVD7kiN576Ohw9iTegGd9tfe++KUidfSczn6P35gp7hS1vniDmBPgdX7vv7k9qF+aHbOL8JMIzHm1CYA6i9yDezB96MjBxnwvpo3NBxnPWZM/liLpFJTd1bvMh0CnJM45wZdpy4QaIYLcmQeJ5vke07eVkwLIv/aZe83D0YTkxA92LbXBvNMf7315+SMaHOBq0uZmEaRFXCwIyZViDs6emSjKl62KUFhzcQDR89piGeSkIqxaLpet1lq0qooQt333ZBHHmBvRWrMerbfuZuEBBoGYRZWY2QZXYiqr6BxFOFCFPcekMrk7O/KRkAaBPaK77x/7t7f3b7/8+v6RJ3EbnHlBzNfr9XK57G83M7tenr///fvf//GPWSbJEYiecb6waMJMnJwz78zMiOHZyhxvqNblOIBQ4cqFCeIoT+Ifx+//+Hd/fHr5n/7f/9/vf/3lv/q//9/065ceFpn5SSgvOcMJv4k7ui+KUT/87IPck2jJoXWShR/4RU+MpoEAjJNclHxVj0DO468hXKbwOPHom80JOIhIyqjcYYx9xkCCLK8Gy15HLGZRotGSy6N5NdfGCAHADDM6laNWsNv6vUf99Mt9fS2bcpb29ftaymufXHrLZok8sPJGjFGKvfu9IZ+IeeymMVET7t7DJYhpMrhgMHThRC1gEX4KYpY9I2HGPZF5X/DuzUymFcciZys5YHB4IWubzVHm7gaoqgacaMy2Pp00j5PRIIDjOFhQ58x0maQ456cpk0UmPWye2L/eB0mAraEOwKgSDPG7nzEf3CJdoMmcwcw9/Hhr421BggfpPHNprR0DeUWiQiTpe9wOmw7QveIUP0Dy1j8VvMBanGnTiHAXVYS5x4jWCZGE+3CmYInCUKGiUoQYVEBCpMQqXISKUN2kiEC4qNRaN9WielGpWqowcVTVrcpWNA1ikh/WIskEMAP+xsRXra3vzCQMzkniyFLqePLCRCCBRAQzEYJYAo4IEQ7OZm81G
GtYhAAW5OmgAQzOOTIBCoYROdHrtb6+PF3Le9ihUiKoOStxzMmQvYFhksBUHgT2Ty/PianLAeLHcSDGEMIlZmtFLQEYRg6jOfihcn5qrDezZK/h2YbU6V5DEBEWATEhSimi1HuIUNagVLV3X9DTUZJyEyVVVWEVcXc7DvzWdlYWOBUJ6WTLH6Rr7v/jX3/c+ezFrq/Mkkss1bl2yQuhGLJLJ6oe9JXVftCA6+AsSI4JZlaV1kcAmWfMHkqafl6amnvyWOCt+wl3muv9OA7MChgz86wyEUeEMwQAAzbLPinYm9S0lCJiEezOTN69t53gddOfv75u2/b9+zeKuGzXkjV38+6NQYm+2/d93/fwQRuoLNu2PT095ZXkI1LV69OFPgjA7f2jCNcisdhrBwi0B1hYjt7h/vry8vb2/vHxTwgzb2SIMJlZDmZGRHTzQIt9yqzVWr9+/fr29vbt+y+G2C7XwnKL9vHxtlVNrwUgZRailuxq9FC5XVImy6WbL3+8oFnYx7ncTXdRISIm9oFCHTmRFA5i9shqA3Aa0buaIqw7B1F262HwSPsgcemO0Gkveu/uXUTMWgaNZrZm5WWT/1nh5oMjlhytjpMxjumgLxOwoCI+w4D1lZjR4/KwYwrV/Pmg9tcFZMJoGSyfhSD/AUSUR8iA4bi1iFgV5oTJuXvrDnQH2mG9e3fs+17KVkUjgomylH3cPvrbdxFSEQCFqWjZalVlb53gREL5VplrLbXWV9GPvd2we4CEAqikVyZm7jM4oBBlbFu91O33V3z9+vK7ry+liJtxqKrUImbi3gOWF2wxvZmi7h4GM3OhMLecxGsdLB1SjsbMRdrRPQhbqelFGCHHfKhq5paYORH7cYrne+8DW9RaoFXl4zj+8pe//PGPv3+Rp4TlL8fDsnI4A8J7QEW0yI3x4Njdy4OftpUvXhI19ifG7NAeKNf8esaWj8jJH7fBQTt3GD6PR0wSxTEeI2JBEpZflHHkTJhm7TkByffr/F+0DvfdmCgeyBKJKGhMWVo7f+KHv+c7zyHfWn2n2O/kI80w8cFlmjhbcC2Xp5fn6/V6dNv3Pljnw9ydzVmiCDGD0M9js62HJc/n9BKrlm3bikhSg7lx54DwyOE7TUhFi0CmGhEMSfw5EYtAuFn6CwHjCOJgplJEpIo0wEEU++5kAmLhX/f36/W5Sv3l+/d//NM///Vv3yO4bFc73Jx6S7GTKlXr03Z9RRzW+7dv37x3b/7HP/7x+emJKFprgDMAiDCSrIGEbq1xoIrmnT7IjHmYOwjmAUtVVbWWTf/+py/vf/7n/8+f/3z56fWw/l//t/+NvDxnMzmNLJ4DIb8xh5AGN8xJZpfYALhHKfddsjkwA798xT5F3X2GiAEHcpRLzj8Mmufi9DvuX0WE8QzJ0qQSUU5OXlZmXK6MEWY0WwCWoMVMLZ2vPyJH03pM8nBmrOzdWgnr5lfmbskzEQF8B6k9/nWZkvnXmZeZWJKY1cX8XCfuEsv+zrxMvoux1iaMa0w8A9MaCZUhZu89ySFrrRE2HXqbCxY4uYCOHNvwoArz592qnf40LJw8hI55qIzLLcL7zD1PakE8wmTTDIsScvCC9zBy4rTTTy+v3Q5zjsdqZA+f3OITWYE8MFk4uYRngwqHE+j09OcIrES33htg8siE5OfI+yVh9KmSOCKilI35PZiSOTvCV3/LerD4YVtPeH1S9JSx8zu8yt0RiHDhVGEkFBFRmZhJ1VVJBUUo62JVuRAVZhUSoq3wVlEKB0iVrpW3olvRmqEJszKXIpdaVBVz1gERilRlgbmjVxZice+97cIJnSEJ4ggRdg9HcM6H8bCe6pSF0wAggiEzUUhBcASUuAAdZBHdAShRCIEis/zZqc4ger1evj5fXrZt/7iZGQXCez8I5N29Xmqa/2CoVJCjd3Oo1vfj3bttygFCRNYo7rJ9qq3bnFAyXvGnhCsSQTDeTi6iIotboucsGmYupayxM0GxLFoSSyRwt5RyHEdm0CSt+CCnNvekiv5f3s4qA6ciYUSsz0+r9TGTdPI7cdfXn8E8j+dKwryHLX2UqmX5yuupJqJiHTNtfi5SszvdORFlH1Ep5e24ufvy3mn6dqoKXprEDOGTNXupi1JKBmmeLPjzbSZRNHHy5pOICMG9h3daxV43nmDOiGitee+N2S0iwqy9XJ+eni69H94tZ2wyM8P3vnu31RcehPwKRmYqKO7Td3rv2cVQtZh2Ivr+/dullqIXudPJMhHt+85aidha70e7FH15fvrzn8XMrPWk5GeB8GjOoaHbyW2YvX3fL5eLFs5Y4na7sahIUeV9D0/Mm3eEJl8uwGYdRJFYm/nwl3VcNNw5tSHxzflwsVoszMysR/dTzwwmgeIq0S/pyrnBOdzu5AjKsONm0Y2rwJ3pjrGhiQJgZj8xVJk3kQlJGKYkfV5DKCiI0ql4gLrcY9RTS+SnhU/38Ozz8sEp3osZHy7f4lOAd15TJ08d9ywSCxERJOKeEAGQQMfWursneCwPq6rdjSzcY5FFMbB/fDBgFBToEWnfe+9X78JK7sJQ0UuVqsyMIFYSVRXiTHJlcqR2q9f6tElv3qx3x6UKX+rRmjG7IwMAJb5e6tPT9e9f9cuXl9fnS0T0luyFoOjCCW4qDnh0jzGLwiXDdU5ijyFFoAgLUgdlq7wy3Y5mjl97VwnWzQguRJN4chXrlkgACNzHpeSzSprfX3755e3t7XKtdavjLa9QRAadIiIeWGFWVneVbnmlpx62k1v44PmsgOcsTlmBZJH4YSA1ftgEA8h9F79U94ux5vT1s5jhB1Rn5MJw936vNqz9+ZG8PfVzyqefMiAZBK1ZTWMtTFcqL0DKRA/lVU0/7dMTG18PrOadOdc3ln893sWpbZKISOXydH1+ftZSLDIZNH13N8BVuRQuEowIGKkShCOCzR0CcPYIiCaWUkf3aTZ7mpXNuvXe931/f//+/v6eXbt0mk21FjgRCcNHNjBEoxS9XOrTdQMTCzx67/04CEmIqly8RMT7/v7x8ZEM+bVeL08vKruZt+4RBKai2/V6vV6uX55+33u/vd0+Pj7+4R/+wXr/4x//+PJ0ba0RRREGjKkkHTqzwBzMRKFCbTEmTEWHnFJNTESl6KalVPlPfv+H3z0/v//jn57K9vbLL/8v0H/5f/yvLy9PIHd0R7Z5OcEZHrgnBAn0m/HhozAPPyQiQKN5/vRXz7A2Z/KuT/L9Z20wL9vdQZ5ERec8Apw8uwJWcGWZxXvI3dBqSZ0DDJZx4RhJfwbFGFJLY1rRrLO7u8/oIE89vYxM6MyowV1P+LWz/qfH8O9xrT38sra7zKdjmdHKaGAa6i73MUIZliULg3PSQIQSBqvBREKbrSl2uoZSoIeTU8BnCXLkLN09JT+hcXdlxASm8Ehd7KeZvCu7z8znoGgogqxPEqeC0elw9N5VNcP1cf/DVxvgzPSofJI3fhpJ6XP4YyeUUrioEgM0x/3ds3TnmJOIcCp7DkrHWZYkmQwoGI5Oa03mJA8kcUImTlmvl0spYzaue0tvVbhk
CTsixgDGUyo6z3WWDyKKcNxdQwIwyP2Sj4iFJbthnMHEYI6iVKpuVYpEcofC41qkiChLIQhDlS5VSiERVZZa+VK4FtlUVVmYEyRXa44fHJO783EpsXt3NxvsYBTmIqppHd3IMwNHHkgWejODdRAJszAiPCITTMtykKdCiEYkAvaApHXL8e/ZIx9hBGGqTJdNXy71Dz///Pb9+9utMSitliOyU2BIQlB34z5iSouRqXX3CKT+Dh/B9tlwzj6uAGeJkz69LExuoSXM7iNJknstbUtTkDy6I8BjDbKM1kqc7HSalp4jBHpPfnMZTdK/0Zvxr2/rds4OQUwgwSeX9LzDWiBnh+MssfktJmYaAwYw88FEIxO2vh4zGhxPmDwvAIEI65ar73zG4Zcw67pCkcIjFeUysxRnA7y8vTwaTyopZsoUTCll20r23WZ7Q3gUYSHKMtwgXwVkLvbmhgDcsn6oLNtWyONyuYjS/vEOcuUaYZRz9zzbu7kIAxBVVzUzxxiSGREq8vz0lEQyEcYs27YNLdQ7cWRxGBjgDOFC6O7OvGIkvtbt+fpkbfeS+Wy+1O1jb+bRj1bThxMd5YHeb7dbQqOfn5+/7Me39xzGSLVWN+u99V5670aLxIVUBI9NC2mxlht6ZxsDGJT4iHyFd729BGAm5O+fI4iYKFlPfObjMpl7D+/TuRl2Lsl1zIgG2w0llTJm0OgtKHIoRk8cOU3f7NStFMO5jOELZwwL46CcDzZnFY2uwtYSSnSfH3jS0/f19UmHnNdX3nvvxrNj9mzsP2HIl5EC7r410Z3Bb9/3kSGdbdvjWyQEIXIggaY5dYAAtNbyBXm3sJY3cq2cAH8hFKVrLYXZ3UtVAalQKZol9DIc3nyz2mpvXXo4kbBIa2SdAeQ4ZiG+Xq/Pz89/+KIvL8+1FmvtANPo00yadc2OtObi7iRcShae87FIKoFRMIvnpE4s3QuLCm9Feu+/WARgRImNMQp3cycW6b1jlvgigolrKZd6yedca70+lculunt2drg7mHNK5gzu1yzQod/PbkMWE3JVjypijFRffIr3Zgw5ZMUHMwyI4IEz0Cs/JFq9RmfFi1OKnOegofvy/KGmR0Qw/83juDvTQ61mGKgf5tzGY8A2rcj4b+0/VxkFxofLatgMC9JM0j14zsX3OR25roAmlG4e+zGoBjIlS6c/g0UEBLFhU5iZWmtMEW7KdN3KZSvKLAQuIlxW9Z4m89wCD8PckmjGRj7rn3uCPj4+Pj5uH2/7vgtFKVq1sKBIsoUlwJSJSHNpc5BTKVqL5FRbwIXQe6+6H8qECiJmeXp5vt2O94/9yzO9Pr2SbExCJFquZtF9eoMEEVGpF/5uZrfX2y+//PL+9vbt2zdVpfC0CLPaOV7Q0DwToimTGCJ/r+pFFGGK2ASvT9eXp6u+8h+/vl6ZpPWNy/Fxa798025pvA2eyjMT+oQ7J+KUkgfhneLzIGPpD2RyPld/JBsijRrgigBx+p3XwHryiNGuELhT4g3JCcYJgT+XodNEvkTEiazE3Sa+5v4tAsAiK5TNA2FpiemzzaL+aEM93/yyCz7JwB4F+44ji9OG33D5xp8GghLACbECjLQsAOIg8EpuBtMaYR1B812ERiQlj7CKSHGPZAkY8aF1R0a6waKqOhKuMYPOiIw7/LRNwAHRZMvJYImIRElI0q0Z0qnCOKFGQQCHSEQIYU1BPPdVj+c7cnLR+0FE23YlonbLXpGSvfWfvmVmnQkLgArcCzWzl6mg9Ohr/yLCdw0zwm5yZx59iUhcq46RnRQEJoIQ9ZS3BTnIMybJAjGp1Fpr90ke4JwrYcllPLCVjGxB320aIF6ynNevLFWZyBHG7iyhJEWlFLlsdN20FhJyuLHHVkphKSxKEEZVqlVrkefKPPo9eCtSilQtSZsxp69xJghyIRSm66UCtbUmGeAJ11qt9bzVAJwCQohAWFZdmEIYzFSYEN57n7n6YdjCzHuzdlD0IAmWkczItg0OciN3ImQFUokr0yb8n/zu65//+R9bawQOYift4UQYnaZEMeL5AA0s9FavHr3tB/qo/X7st63UMyPWeYGdTexdtCZJVyZHlskc0zXhIpqOms9BcBGRw2yZRZW3bXP3fd9ba4nWc/e0PnnhJ+1ggP6Ye/7XtziVB8+3EPf07W8ckGax9H6QHwqDZw8VxEE0q31DQ01M/Wf/+CTqzmPKHJl5zoFkphnOdXe33s1MaiEWRwxcIYFBfdGKYF0JmNM1lXzmAMBkZr0fWX3N3sLMeHj0GcBHVc1SVnQDggIw5zroWLqAhQnk1imiVnq+bq01LUyB4zhqrUllkVciIkBcak021DZvX7JJQbBtG8Dbtr2+fFFVd2eVCEp5ePv+t3QpotMYmEbERKWU5kHgIqTUDSiiz9fLL29v3k22IC01vAg1673tWgsBQpx1FjNr++FPHhHX58uX+HL4wCmoqqoMTjwzIwpzBrEyQc15ZRzXO12F8YhkdngIbCJCIgjG/LmwdhIh3B3ruc84IDNA3e4sRzMHP4jUALjBNJiFSYPu1LgLo661iIgBrY0ludJtEWuo/fA3pojmbXEakQV1ycvICau5ouPkIq8g85MtX99dOyxzybPZ47ya8qTn3FDeu3lTVTDPrPRYZ8krKz1uuy0lk/xAiWUxs27Rezcbq68fDR61SHTb970oPz8/czQOMLGwXKs8VS1MZlAh70bmTFFKUU7m/7hes9c0ROrThR3Re+9u11q9D+OYYefz9fLycn15oqeLqvKBhPqO8Zg52DbLPkfvzTozqYqb5TSFlAV3T3zvy/O1WxzmzCbDhKE3+QlyuB1uB2BZIHC3nry+6PDU0SRUtro9XStqay16bKVcr1dhpK8yKlGpfyKngNKdDWLKKoAEA0TvOfYBc0ZIdhL+qE8xw62zRwGAIjI4Py8Qv2OyHlbNUqESE7JBOEEkVsbhTlU11pSPvHw2UOHkTREziOle1gDmRNZ1AcMTAzHf2WRi9HrloR5qGsNLnJHzyKnQyP9FoLfGU9dj1Ibu1nYcfy2BlYcSmr1qc2m05nSipF7/jab9tu+7u6vWIOp2JD8FM0rRUoQFoqEsPgNUIlkrmDz6fpCHtz4UFDwpIf5///H70fa2H70fYU2Fnp+21yd5v30Xggo9XQrjqQoIyu4i14CJU/BEo4iIUK1XImqtHbU+O2VihYg+nIrUl6vEBiIhyRIOl626gZnpxFoHgAO99+ulPj89/e1vf3v//nbcPj4+6uvzVaTUWmsREcrOFFEuAe/Dp0lrWFjG1F8R5R4gClfh5618fX7SZ8JxiPCXqvb+3Y7j969ftCgQ6eHN/5zyk7i3GEwM50ORMEakNDMCsOmlZDToERnmxdzJR+YOAJzvYaEvsCjIFUo0zjXw0kjpJJ9KmAdiP4h0ra4cA7Igx8IMEaQJIxLcZ5Au9wU/WLQhpSwx854iWc7IL91LUNO9n7ZvtCCK327rOGen65wi5FktQ9bAUgam15H/dHjC7uTUu5ENBSktONkX5GB6MzuOI+MWM7roFUCWio6bH8e
RcPKj982su/feB1kQkaeT1Ht20wahRIWP7nxVZWuXyyU7ImqtHv3orXBZ0+GJyD2SpaS1dsSR05NzDiZlbGCmtZhZtAYMPiURiRhTlc+5OmQR4NRzglm0vVwuVYv7qEmTyqXUJD5tzbJe391ZUEpJgm9mThRWPkfRAbJNY5D7X6UACEKzXqgex6ECp4yciYRJJR19BDOzTefm/f09KCd/sIwZymOAgczWzZX8ytuRUc1PIDzcXZi3WrxbVVaBgJhERZQREbVoKVKrioRZB8XTVp8v19v7x2UrT5erEocfQrhc9brV4m+1SK1lAIZFapGUtm1TlZyaOOKE3jvX0vZdZlIC7qIqkYRQznPCR98PZt60tOYAmDIPPWxM2BjEmaKYj3qrlQDsFhROpETKmgBgb42ZPXr0IOLC0q0j+vNTfb7Rf/rH37XWvn3sSnqznYgZYmYu6E5MIVKyG+T6/JJDLwlyuTyhOHrjOQMghUdV0zshomY9r9DnsOmZxBiL9oGnhImLUu+1KlDzi5sWUdpoS/u0XS6s5ehjBkNr7WnbUpJtULAW83a0WybRi9DlciFCa41mrcAnj4WZuUNURwH/rnKHzokTueVZc6UknxjA7h4ATriFcyB3Mpx3bRIRBs/xILfbbdOchBtaC0VoZl6JWCWL6pwtPUmVl5FO+gluZlZKFaXL5XJ8vGdd1Mz2o4Voumvp5xbRrdSI3dxE2SevhrtnJSQiSpHeeztaKeLuz8/PAS+lXK/X1DaX6+Xt7c3dL7Watarl2D9U6efXnxGJLNrc/TgOgiVhizKLyOWyRcT1enX3j4+PbSvZsrRtWz4uVV2XkQUmVe3NReRyuUTEvjeBQFiUkt4tb+26XYjo65cnVU2wsYiYdXe0tkNUKFrvrRkzh0V+5X2/ifC2bVIKhF+aBd32PpjrWu8k2lp7enpS1X40wL99+1Yv14yvLtftr3/96+VyMbN9v13b7oS6KQuVUmxvfZKO8qRKT3GVMiCvABjwCHcvIvtxpPWU8KB7OWVFlb333oy4iqhbmFneLwVKLbCYwplpRDBnDzMlipgh4WEIjni/fdTLFoHW+mEDWcrMx3Hsx0fdEmCSUzeQBugcyEUMKEwSFRCNrENG8pkOoNOAIj6xRi1bZnbvGV7Wd1momO1hub+qrvGDfQySGoTvmT/FKX2zTnF2CIAIeCqHt7c3cLlcLpfLJXOU87vk7t1tghjC3RlBQkzhrXs6spfr8+V6GThq31SrirILixAE4CrMRQlFqZSylZrjKBLSCKCHkXst9KQXM5NLMvESB1T1ernUKtcnBVnrHRS1SqYYk+08TfzeW7mIBnczULD1ImU4CSwitTd/u30gyK0riVwqM1hICN/7G4M34cQUdxATjJnZ4VQ2jYgMkrWqhb/vt6iv27aJcHopRz/++PvfJ+MUM8dxmJlWQQSYwwMiSUSbqckBaM6wMD3hfJvHISKkiozbAYzS/JyIvehkmeVUguA5IH4o4dlD+CkgXP8Mm3+lAUVLOXEzmh2qD/q8Vpi5Wfjj/IkIt8EzEfNi1tJYVzuS4BR9QjcHAFsmHefE52PWEplZitqc2HbXGBkJLQxJSjvC5xHumK9ZYAxz0jtGfTzVvDBB1vzHY8liDhEK997f39+P4yZSArAMQd2yCyqT0ao5ss+It4wiRAhg7x5h5sHM+/FRa32uT/u+/+M//uOf//zX1tr/+H6ppahegqUdfhF5/fqH//Tf/N3t+y8f3/92e/v15q0KKgc6AeDKSszCojIsSKkvT0/5iPtxvL/fCC4gYWJWN9m9DXfPPfpOUkqt1A5mUuYiKAVBk3+uRTaib0X/8Lufj5fn6EYU27Zdt+2yFRFSYuJghLe+bVfjDnOftZOMYcxbmAmxkXPEUy1fn5+uVaIf16pXVT2ciZ4v2/PTBXCYuQIjL5HB4SimxojwlyhOJTY89rQYcfJJnAIEAq9KYHqK3TAhHkhcaCCZ87NKNzl9CQCNvBchFvMpaNCiIMJTx87R36lvmZkRiVpNYTbrvEpfwEoxMEvWiM/RLTPbmFuYtzw6zDOFl7zZMdFYQ+37nX7CwqPnW7hTIXo28SU4wgeE9WQafC3DrENkaubeUrgY0ZgBCJCOqKoyad4hAJFBcqGc9BQiPjFyZtZpZLDSrtCyfxlT00TOzpTPuCW+l/5Xi931es1G/zRyidhN3v2YLSUJEPKchEBMzjZvW4iRVezBbCk2eWDzDHECuMcJmERz+1T16G5klCfvvTcR8lilCyBZzzlmda7HRLqmVreckUW+Rsllut36eAgOgjBzljfNkUZoPz5y6BmzOoxzMBoNQOrUpQGAScGfkaLr+ik8WU855Z/GVQmFEFUhIVeOIlxVGPF6vYhSKVwLMTEjRIjCny41mWCUAa/KUWsppbzoVVlUE+QgyqwEDmfWAhYGmHPFNTem4Jwdlil5AMxG0eFFdK7uwakVRGG8t4+MqFMDBJiSOASGECaqpWDW3ACIchALkREjOY3cs8MMLCQpDaQem3CU8tOVfrmW5ws3VzgF62BGJEREto2rBrMGxXEc+7GTB4Mkpxo6VHglkBiav2c+aFAoLoG/q6sHL80QPvl1n8oDyxPIAQE889QYWf9ZjTFfEwtTcn22Wi0TGxEiyc2eUroEB8wjbZufT72zqnU0e0kecBqxMilnMTs5Cp8k8NMn57v7Da9lLsMMipT44c9EHv18hJi56oUsWN525gWJOSJ8cj/yqYZJkZD+ob573LtA59liLsbbqEhgoilWZ5dbGIK7EleVUor1cBujd0illFK0FOVRPBcutT5daipfIU5fGdPKwQOgII7MQzkxJCsseUEZctB0kfiOY3QiHM0izF0aRUR0a60382DY0lHp9ilDC19rKapwZ6Aw1arauLt5t6BwD1WSnClPfi5TvLy8ZGxZa3X3ImQHeu+YkzNTRAk1oWWcqyFmLdQA4mxA8LzVTFcViQhGQh/uMvPx8ZGFPmbuzawfqZvBz0NE7zrxc+F6CqcQASNZNGKqYRSFiSImZzxxMHO23WbrRBaKzS5AlFJFq1mEM8DBAcjysQmfDUrKicyXhVUGiYiIjOg+qXR/RAStWzinLM/LzU8ElXQCji55psmvQKsFA6GqYD1a2/c9w8tSCnchMhImGxz6mYCLCBWpogQ3UkEipWHtJiJFalVWdiEU5SoFEUlhnnZtK7xtoqqN2GOgGc2sjbZbFvcqYxYusxbRbau1VqcuIkxjoqZM1hwiVmWDcbZmhZt3ABcmZSKi7hTmBoAGa87IpgfTmGkBCr/06LlgHIbIGQIAsoM9y7L5SCmYYsTeHnQcx3YZxZbBNcWc/EVQzkhvvZRVm6JZQTtLZ97UQI1OIui7gzFlIBbe7LdQG+Ngn/75Yy1ipf18NVMNSOqDZvZJMe19aMvM0kyJYpbTiG3KDuIIVGLEvS6Rz4eEMOcoJqm5r5iNOZjWOIq1CtxdTsp/6S6WuVLymgmgGaDOvB6dQKR4tDjjmZAnSiE+Jy7djqMU2Y8PdxfG0f1S6vf9m8OzQKeFk8wiwm
IQWI2NV4XULcKF1Vr/p1//6du3b3/7299+/f5+HMcH/5tb70JdyKrUcr3o5VlEv3z5qbArhaIXFoInpHgyRQcbBRHHaFVQpihyuVxenxsRjafrVEg9Hczkfezd3d+A7ZIJ6+DCqiwDuhW9az63WqpeN8YzD+JQ3UYLPVIfDslVDXPACeLhNJPCl8vlrX1Ev3HEy/Xp59eXa9Foh5BdWDZV7B+3261X/fkPv8eXV4xy3Kx/wYCOKe75IGPkqIOggYSAnuU5AGTwP4eZjPUSMAoPyqVEk13G19yIU4UwuwboPr47l0P4zNcMHCcAwb2INFw+QrI6jaXuETR0u5mRhyfIJZCZHT5BQiKCQKqKCbaiE/0EzZk0A1EzcyJxwpKczQSfphBjZQPBMUc5xOO2AkiaX4+Zj87PbS1YMzcYIniYAzopKE386LjujANBFoP2OsOwyCCEBwkseBQnlmdMJ8bOWUAbF3079lX2SS8kpvd6XtUxY84C6Zy8K4N3MF+Z5UDVeelH7wr07r0fREGU6xnrqhYCJ5YSCSaIWWceKKChklYwCSHKzkZacMHMWJ9tf8qBT365Pj3XPGnfPe3T4vU69t7juO3t49gjIpvmEMgZ0Cw8nR9f/sHKWKR8f3pQLJQDvQAag9sR2UGrrIWiEKrEtZIyX1RYoha6bFpLETIhLizKUnWrqsokVESoFiqlPF+IiHL2RBEt004UHYMoiIf7xZ2EQaIrrUgBJjAosXMjb5C6NSG2zEU0C7xxQnJHJOnXsKbunnnciKhaPGDEANuCrLj7jM3C0b2HO4er0MtT+fJcX5/qrZs3WLiBSQZQqPU9S3NSiQMOymlaRSTMyQweLk5E3Y3jPrw4LV9q5CU8ZpbJnBSS8YaEeWY69n1/2Z7u9xj3xTxT48NHSeVONNhrF5R/KYVSKqZnSYmZOZn8mJotQzAfOY7PKZJsK8FvbWul/Pj5+RTrgPQQW54DyOxuvOd9iUhpZAByZ3dPXRmLxA/IgjqSBZuwGHQiLIc3pioU0WUnVJXdldmwQj7KceiZjnRC74dZXdDxfClvb2+LdYM5h+dBRNyNY9yjFL5cLpdrbTebnIdcyggIVXnbyrZtGn27lHLZdLSsopRStpqYAnJP/s2I6HDySHTAymFRjCDe3TkA73CNk25UXS3N5u7dskJ3f78x+nZcRDYtT5lspgBCWbZa637srbe+B9hBjCoiRdRntcS89d2fnp6O42itM/P7+6/P16t5O45DqlZRIrK2I3iF6HGyIuNaPTJI5Ozn9JkgjoiTATuL1gz1zcyAQNwp1zAaLDHCTkQ8jv8BkOxcNEXIzLKASUTM1JnhA3IpWeci1AoddEUYj7S3ACIkH2MWOMet8RgF8uNa4EX6evJBYwbnPy6f8+/ncJFOjVsxzYCZLfnEYzw5pwqteXFZWo9uPZ/At2/f/vSnP+UoEZu97jg4xgz6AYaiUduhcBKEshDCrZcqtdZr3VSVCQhnmLAUYQpQuBAJI/EgDBAHR4JXwUxsAUAU7pkx0VRlqrptZVONGORMzJwMWxFh3hKISBQkEFZGZB1YeixNaDYIC0iKu2efXXAocUigiFW+XuRo5hZGicMRyyc/+/RWPJbb8HN6eO/m9bqV4zhy3ItnSDRCAsfotX6I/e6i+CMtXATMzp7bknkQxSnIfxAPupMfng/mp0DlvAkJMDCZQ2anJl9ixtMXAcZsDDwK8yPqbbQnjNhv2Fxfyp+ZQUhwuy3+j5m1WZotY8Jl+zLYO6e/xzpdic51WfMF3fXzYisltslecw/IZwzzsL6Q4yIprCFo//hAGDG1fS91NDCOPMLkCctSDMHgPtKScEpWIs8uYtr3/U9/+tO3b98jom7KgmJq1gJea/35y/Mffv7y+59/fn6+kO2Fv7xcS2VUCUYkmdh00HOqC4CEI0Su915sr/Xao3c3RDhVugiBMfJxHbgdh0fve3d38yPImZGGiZl7XJiVSylKl0u5lEvm/d09y7Eg5zHmChHx0Y/eD4oQUlWt4Mvlcr1e9X3PjBfBL0Wvl1qU2kdXDjFjt977Ea1cn/7u3/0bXGqWrobfHDZfSmrUjNBivjKesE8OBCWgZNTlOHmGTwLpgAsixtCx/GSsEEYAFLAxbzBTxhmjnICpuaIelucgHh2o6fSZTu3K473EifGE/L6EMedwZhQEIiSLYawc6wCrz4eA3rvO0HHVsUTujYs/RoMxg8b1IYFtEtKsxeunAuZcy7Rcx6JDttfxk//3Ick4HiaYWY/jSADmTMNIRnuzxDepvTzSo7dHdqn1lO10yvN2u92Q+UuW5HrxCZmIRVMxb4yIkPB0c0z/e1lPMKl+ThS1iUEf3ljCEJjX8TGjweVHEhGS7TPVDTPfiyRCRJ4vAJxZRD0RVZ9eWOo4ArDaRWmWiXt4THfIrO/dMmuLkbsON3OCmRFsXuTnIsz6nYjWmHKiYBZmInBECFMQEKZERbAJKqNwPFW5blS11FpE6Lrp5apFIRgzgqtuAhZiZVIhVVZlFRbJ2dZaRDctIpKvp0yGIaaQDGIdwai10OqmoElwKvLx/r6eGDOrciJAysaz5XK8Mnf3blQouz5pglHdXUClaHcLJ4ev/oSsF01JSB+qA1Dip41/etafv1z3bsf3nbs5Q0RVudZKHvu+mwVyaKZ5KZJJHwSaOZmZaBKIfVp1KyD0Obs0Ht8UgDNMc+QO7uVrixgtCkva05mTOeu8d0/i5plYuRvFPHxEuHczcU4nx3EaWbOWY0T8mEo+L9v8//nDiIcphfEY/p293k/bJ4fmk3MhCRBJHZ8ZwAk4jgjMZuiIiN7STchLy7KPz7GQotz2jrkwU4TcjWLwGB1YvQH5WHxknD975J5DgTOC8tFRk9nofHwOhICEpTAri4voNqb0JVRhxbHMXLgMh5WQWCML5OT54fAnb3vaqlkSPD1Sgg/nfgX/DKS3HUDZ6pSlAALOSSWoUozTS+04jR7aqhIRRChAAs6xx8TWuhNnf8a4YDPr1tqRSLmn51egvr9/A/w4jiJiZq0dopdSRCiObqKSYcU0e5+jnfihIDYQktMNHKLFtG2biPTDZ5Mti4hwbbMP6CxsRGSn+SJn8pp81+7OlEQyCCeMcVDss9m0lCLCo2ADp+xmFwHczM2DWRGaSQOMRLdHBLl+dtlPSaJ1nWuZL+40PNpHPoFIf2PJzM+XDNwdjh+eMzNTZr0zLPQAvLsR0ce+/+Uvf/nTn/707du3fd97N1bhrkQ57YppLBgul0thIsDdwFJUmNn7UZ9KrWk1hImStAnwUi5wo+BFt0VhSdkXEczEYBLIYNlBxGhBz+YOVS1FVDlcNWfs0Apc7hoPyJZqAeDCYAo/8gmoKmsREbBKxPvHHhGOQFDAiIaDtam4dwlUUhHZAUqcWfLisqgq4CRzhJVH4r5utxuLV+VT46VlHJg1HSIKxKr4fVKCSxJoxirpnWUZ+/4GR935ntr+tIJW7fFf2j4r4RlA5t/OHbxDMuPhW/cip98vYBTiEu5HmUCRkRPvex7Ep8F1RJiXUjxdk
JHfn6YKdzcpaAyhWeedzI0PIo2TnAeP4dOxjny+0wQmnB/yORpc904+C0WkRfv7cb3UqnJ0J6JuR93UOxgmOtjOHOn78njdKd7MTOhw783d2357e3t7f39nxtPT63a9RIQeXyKiKr88XX//9eWnr19er/VSgM5eGL4pOYWHdwoDQuVqloCIMfs6PHtPdHC9OCfvdGEGOFCUqaqk4+HuFh5hx3G0vrfGvR8g58nLUEsVkVLKpWgVVhmU7NlsTxwUJETKMqg4s8MCICalhDnrVtXMBKhVvRtTiLsiWPCiuoEpLAT6fP36n/7d7//dv4V30ADFUAQjlWykpp1VugzKRn0vAtnjN0t5BGBwhA5DCaLAGJWB+XlkspEmVHLhR9KdAO7ux8zdxEmWOLyf/nRfVICPgmUeZ6YyU6KE2HmKMTE4CFDM9HrEMuIjB5dEIksyT+vxrNjPQr4CFmZm1gVSOBcGMln4aeGcA5N82mvrvZPcuzPGDsQQrF5lkXJfgIBO80OrJWktyKQBdPcIZ4pscv1kw/J3P8WgK2BNt2mbPVEjbIsH8pyEdSVSbtC+wZOXbz3Ku1OYCBNmba2NQYIJm7w/1umpdKLt8THNq2WCMDt394X+gy03ZOTzT989FU88wFimK8Z4KxfJASCwOQzjbB6YCbDWeu8958lbDx+TA+4NYGf5WF4j7hm4+fqJRIiT+9gZROSGQFHeCj1VvZS4KJ4u+nrdLkUv22tRqpuWCkQza0qiIk9bDadM52c0WJWYWbjINM/Jt5HvWEAIkIcKZ6ZJChkLzfcic75C3sxl24jIva+RSpIYtZVxmaKcknO5PLnvZxFSqVW1Vkcna04xBm0TkbJaOyhbs6iTEREJBzNfIl5ftj/89PrR/a33mzuBtaonT0lRVW0eMAsKDkVJJz5kjBUZGfocKY7ZIzH12lw5k5V3PaiIcMKnVz/3ze7cB2LSCHMn4uDRS5x/dWZJFPiSW5oFJRHBxDv13kkUcAxi1M+1iDH0Y67WCCRd1Jnf5aytPi3q9c9z9PJJFH/8yl0h0Gd66fMDidMnS/JnYo3uoT75aMie/boR0cMKSQ6tdgM9TqM5X1jQ6JKfOWALH28to9AMCEGOSD8e7h7dACQUwswSercVzdB93WAEciiw1C3ArQ9AFE8aIZ67R8zyFgBgE/UxC+m+6vvRMolrZmYNkEwPMdAHP4oBCEJOGOUgKQqzJNjwQZHSAQiIVUnIEZ6zOxki1HqOKOUMpyLCWt+PvswYEZVSVh5kzMEDMSN70vy2s97t1pLt9cxpZjpWdpOZ6ZRYWe8o/5pc7a21xeMYiyb0zu2cvKNY9BhTSU5r7W7eyRM6Nci93B0xiAEfbCGw7ziOo7Xd3QAXJUQiDwmUdMQETJIMZkb5UcLnjSS4I28/faGH9O1aXEuf/1ak99mk5ucr0bn0SW7DYpGk7YhIHj9aVY4UPxGptRK8U3hBAtFTIWTT1vVy8Xa042BmLbyVWiQTQHueSJSKaFGuoiokQiQCDw4WRhXJyahBGaijDFY9J45USTL3GfaBRYilXsscI4mwsUwRtW6qbEzUGzP36AGnIKk1H4gUJZIgckeznh3Fy+MToLObCVNwgAKiTKIGeFgEhwxjl2rWM7AL6x0isl22ReNpc0bxEsjlBzCNoTEA6FQiZmao0idlOLq443OMN4UGn3JyP6RXltDG5BTAo8pNtZ7uyuhqIMIsWSBLMJ/Czrzs0WnwicOWsmpE51EB0xFJft1Uwilgi8U+s19ToB/ENeuEMQsAazHKzBNaksydSWXmMllZJ55ukrA8AMjvVaP7k0kW5Puj5ijK/+7f/dvrdfv+l4/npy8ftwZmLhzOWK8zr6poghTCe6IEiILDm/fjOL5///79+3eIfvny5eXL6+XypKq/pxdVvW71er1etlKVEIbWVLkZ9qO3diC8Fnl6erperzBubW8HuXdmyngszL2bd6OkMBfhIGYNQlCIipZLLuGUBGI+jsM8WaNahPGcplOuT5q5Ndac1YwwNwdAEAITIxGmQBgi7Q5FMMG6W+vnVBcFtlKeLvWy1eulQun3W33aLptWIrodx5d/80f83e/A4YTEBs3BYQ64w3iVB9crIiH4rCjmdu4hDIw5JYGB7/TsSV/2l8LnwsySV0QsYrD8LiyCyOe06RUX+FoXD3KzEuj5+2ksygjwViYl+3JjLOrsH8aEgOLe6T1GvYxlPZ03nwSka12fawZne4HPF5itUnOx353DB6DKtJh39WWTlmx8GJzlTR7kbXdW/NxhYPyZ0yTKb10lVv/gWUOtA2U0WGsFuawemDvY/g57ExEmqKpKWX/Nc68n0lvHBIDN+eccjIlEvZeVADgoffHhRMZ9fNynZ7qudn0es0rr7jHXwPnpL+0WEzixjiZENh+3mUkZ129mggmZSMeImDBKwNbD0URgFsGJbcO9y4ooggZH7Z1OJk/oqxNXOUSyeAKioOSDCyijqmyVXy/6dOGvl/L8VLdaClSESmWRoOAQVaZStq2UcGAMq2BNJLpQ4Y0ZRaSICgsT08i+jMHiQqwJWFQioj2MmTKxNN7+eK356pkmqUCMZst9CthYPBFh4csMmxmTbnWEQCqtp1MLibGSuai01iQHogUAz4n1BCj1p01fX7avt+1v7/Wt9SNYiCz6sEZa6Ei6ZiKl0cg06IiHx7lYE4e6muCZCAuUxX2+lkMeWTCk9KwdMAuk7rqEfGkEkCfwKqYnLRB3d/IltOPzUkUkRv5puQvscV9Bka2Dc53OKHG50YOc66R5T/mqk/bB43ZeMj9++OmTiPvQufvxA+kApNHKpsCkFraxEgfTRmSKei7DEZKJZrCEEahQRNgpFiLiCJtYuPvZST53rZgZ5uQAEWktMyrmPrhMtgjmIeH5LWG+XC593zHn1zPIQdk53LssbZtezkjkZI4pkPNdV0woo/MRjnvbt7snN0l681OngYiMHfAHymphAiwrYURgdrIpqPCIwgKVZmNsbuYj2OFBOZF2nbG3pHMckzlqvajq+/u7SNLeXplUS/ny8vr8/Pzt7Zac9Z+06FJfZ6U6Tk2kohGRs5zpxJS7oNdpFLqRuwcsfdEpu6lo79s6y5JeGrTPM3lxgvr7HNPava2IWjTtuC+uUWYQacz5tDMfEUQUeCiErrs+3+8ng6Iqn4Q//ymneVPng9CpsYpOXSV2giCe1jv5yYnPgD0xZ816RKjqtm1PT0/X6w3Ui8bRjxhzbgdHmhCn03lrR/RGAWUpQqrKiO3iIiJKqlqLFlUZ6CfPVKoSVKQI14EHLWYtIiRdenIKZyERKizClKWJHFfC7qVuOT0qGSeIQwb74CiJK4gYHGIBENW6jVVNEkRm0f1oh7EKJVJlUuopUEQIh0c3b3BQUbb1/s5ROsWYyjjaBa9Pm5zadxOvkYuCAUoYOYCT8/RJ9dGqFWCGfPmn2RYx4r3pn6y8/vkoY7fTglry8+MZz9/KaPBT5Em4pwk/7T+dnLFqlrmYf7pDvTQHLBERU0yiI8qJqSMhO9bj+MrpMscaoTERKGZA
yMxOcRqsiqUxxrlPoHQiukd9RO6DQgJ0B4vGgKRSjnM4LzEcH8KXf//v/ou//+Pf/enP/50qA3Ecx/VSuidZemoaJhUWDcvskET0BJ1kq1TAWt8B/PT68rs//uH69Hy5XGq9FPNt27ZtIwL8aHvb94+2H63vyE5d5VpK3S6X6/P16anf9qRd8H4LTP8zIvV/5nEQ3N0iyN25EjNrEEkcx+z2JLperxGXlX1bAWHIUUVFShbyz9MUdM2jn9IlIug+AvE1Ooh527ZL3fD9o/V9uz49X6+vz9fny8ZeX1Wv22UrxQh7/9h+/oLKEErwsDjGkD1EUKpUDwy6ixiq1QdtywDnr5/nzZFY0FR3K4dMo6EUozdl4DopUlvIQvsD4Mjl7LMYS8zMIcOdXRISEWGjRDmM1zgFgOgWPFwaIiL3O15g+B73kQonRX0yhWvwhg8vd07evsdZdApPIoJoQIv9cbzCshcn5TA+tzXIfl5DwvJ3GzRv8ywjMKFRAcRamHmWRUR2VzQRM7UVZ30RuNff7tpqSdtKZwI5YXZ88eiHzyGE7p65gIeDq4jfS1LWKaP9LN5Oo5lkU7RCOBus0CIix3FERCmFWXxyuH3KxZ5vEIBP6Td3Avr0iujky04DPNRxXiH47vH/+Ch4ThyefYm0Aj5Vle4IBxMHBS+M/t2xyIefWpWZTwk85tWiGT0TFu6diIuQUGHYy6ZPl/p8rc9X+XItX562562ohDiBXCMK57VzFVXVIuIOWHKZQoVrBnQUIlJktO3nOtYc+Leg/AAmECvmHL+TNFOE7XtfPhARqyrI0ZyTUJZ1rhEnIuGyaNyZVPiu6N3beh1MlEo2xwxkriCljZnFw0FCIRKXqpdLfX6qv7bue7j3ZA2NCctu5m5GWtjN3WUgwUFEPgav3ZdGHt/8AUS6tqQlkFoqUXoSwGAyjJmhkVMP6rqG0bs7488hb6nixWiO0cuD3KGVESykki2INi9vxKVmLiR0GltPjxWGf2WjqfUeDOppja9f1s/zyhrvi8DTK1h75oOkBRMgAi/VOW6ZpkJYZRb6wf0qpVDvD4+ROLs8+NQwxrOrORtd1nW6ew5+PzM65ufZstt7T4qkiJC8Cx8oLNFBL2bZLYCICI/ebh8iUniWi4mZWUl778m2Lzw8wsxK3rqtWQU9QuZIjDWqJK/t/sq2u75OpZEnaq3NHn0QUfCUFpgogdg4GFFAqsbcqsIgjkGMlLHQ4HMuegGIaNu2y+Xy/fv3uulWt+frkwfptv3000+/+93v/vrt7Wh3x24lyD4lOJlWm/hMQ8x/5usWkYSxYDKwuUW3fKH3GtqDUPm98XIZtrOs0szHtdZ6rH4nXm73rN5j5KEkmwXG+g3bmQrQAUYwcVJuRJxM3lm/3S3dyXouaV+3wKdOy9XT/+nWMIHEPolk1jWv867fiYhnSth99BAyE7NkYH+5XF5fX5+enkR+SVKHlPNt21TrWEpJN+UWk7UrJZ8R9XIpJVS1avJ4MzPTgD6MNN/ATU8gTNXSKcxM8q8UifTR2biRYLYxlBKUTbQhAs+iak5P0t6PMIIHIxQCIR9dMzKL5WgWvTfrEYSEcuT7GAQcEd29sFB4dOsgsDhkvTI3m5k+MzdHpDC01o7jYKDUqqqXy+Wnn366XC6r6Cqx3uN0wX7YYkKFlwCMNPtjtLP2X3J1FoaISPYzTAmYDtm92f7TT54ZlLX/b17hOpHZHDsxrcbSlhlKER6rcJih5snvYmbDoK9YiBPPAtFpLZxu8LNB+STz6/aTVAajo3sSWa9meM4OGRBRPHJix/0NPSQrKcK9/f53P/2H/+v/+c9/+/inf/4GIPN9K+6lwSOnzGzWmRUEd2YK5oERqlpenp6fn/H1y8855j6zZpfj19IO9ve9t33fe/Mcu3Icrdb6/Pz88vo1ERalFBaNuGUZCazmB8btLs2WdsQ1u8WCpAy+U6JkRQ439HF/QkI8xuSuZsi/lCIyKOvHqgUAh7IwMQJh7km+O5syJtI4AOQSECUAAlIWJmptx6VupWxgBUWYXKri8vt/+28g1CgeQjoP4lUTNCKZuehcAkkNmq2DPF2FMZySiIAHixmwAeIdFcG7e7BOyLz6ZPNn6nDGqIhnhKJEAhxwpzX5c0oP+uRcmay2mNSZODtyU/3ydEJ41gw/r827lZxu/MkWrCs/V/79BGahxxBxHAGffbmzfUmdbI/KZxUz5hJ7SDDl1zNAYxYi1j7G9VnMRpT1XMalMPGkJzRrLNt6E+uXiEitGkmzFjynbjzUTFTVrLXj6M2nCf8EBMLygD3xMJHN5XfkDJ0AjSR66/tgH6lVlOGcJ3qwvsFrAtunRqn1RHSNgYIAYzghM/vJj1HVDKowS8Oq6gksmlfV4TRYW0NEciaheFwul+7obmDJ7P756d1lMZMZnOO2Y4r7gN8wM7pz9jwGAGcuwiQhLy8vr5u+PJXni75c5elaLoWF4rW+mDWPpgVaSIgTcVqFw+d8eY7MtxbljGTolGng0QYmSIZcjKXAAKnmtLQVqGdHe8JLlq5PDyy13roRd8y3PPBjpRSVamyrFtR7Zzfrk1tixDy2jCkzIVBKCQSSVp2jupci10t9fn5+OdCj7d2fnp5671kRYubo5h5mls0sKSQ5vnk4AVJ4kvZmXBEeZrzIYOhOiZGMIEPCVxCS73T597kNrgLmfIkx1iSxDAHr00pNSOpY7TTakS1iwB2ZOPMs6Xad5Xkp97XmiVKdntXoQ9BIUz9+UrX0+OH6Zfkonz2VhwjxLNufXSIAjlE5GXvCktEu3EstdyeYR0KklCJHRmNDt0hmf+AiIuEyG4hHpDGHQ5ZSmNGsrztLlbuu0ObWrJvgOI60w713OPadL1WyeG9mYe4IM2ud+n6ruYkSkbKo1k3d3TMgjBxIPOnb2u24q/uT/N+f1RSYIUUYZVIzcxqGn0d+gZVl+XMQiIib8Jj35yoEFu1diEIVwQgsaXT3CGLOGRU1h3A8Pz//5S9/qaUoS621dVfVl5eXL1++XC6Xbjs/zhlbCnnk/md9Y93OkfokewFk4O3lxKIcgx3UmFUL22reWPKDiAieI8KmlynZ7LekNyfgtdZyeRFl3ZhXGMkCEez7SNZkU3Ep0rtHks8NisNYy4ceA8514zGNer4+OY0Q7L2dHIV7dLcqfvT4fFaOdq2Cszdw2m0sBC0cEdbT7ocQy8z/qqpyeXp6qrVGMga1SES5SLkrIbibaZqzUooKhVs3ZxIRsxtzAtmHsGmyZkdIUmcTy4QLIYKFxZVyqKwQCyszC3RiAufiVRJh5lt2As+cL83ywVZrKWJmvSfYBwgPp+BsYaHkl7bJ5G6WEKG7vllqE0DOiSJRz2a91PljfLKfuB/o+fnp27dvlo5IlFrry8vLH//4x+v1WrYtc0yQgTQgQqwC1En70RzDs0K48epF6BGytDRanzT3SxiG2PzQ2j0EgHlFlesnABju4dvpREvw1kUOlesDxpWkbBilNsaqZM5rnF9oeWSPSaiGYB5vl5ljJXHO3zod6Cy9ceKE9/B
wX6TipyognS1mCsNQL9OpIKIzaGI+oodWo3Gop4u9t1rrf/gP/+G/++//9N/99/9PLtcvX14/bm/r+Hyi98iNmcFM8CxsKJNcaimFhGu5tNZu+84spRS1by7i7h/70VpzkEgB+I+//30p5fL0dH16KaVEVuqYe+9YPFVxVy+qCsuxOzY0M4iEneY4hMJFOJxbMzt6bwm4wCxdkzsxcy1aSxER6+yEgQl0ilmhWb2Iqpo0uvCEWWUFK8n9hn3P3OW+77/+Es+qLz9dxMmsObi+vvx01X/zn/9b1GqrdAGM0C/fC0ARSZMRnmNRUmTuVd/ZSQjAEYM0CEDigWN4FEMTMo9U4Fol6acSQFDAgyJGxDhGFy45WrKC07SkWblJJqGZy/NYKNcqYw7QsNQTipXLfwrz8s1iySdRhvGZNiEz09UNaHc5p5kZWZKQyN9938+rJl+ZWUc8BI2nxX/PiqbPOa6kKACLM2STzsphPqLZmkeVDjt001Kk+c33bnZouXBJHSdKtffulpUcptKCwCqeo40QWTCNE21OmJu5E4tI4bIfe7RRdhdmkhphDBcRitj7frSj1ipaStX9MA+PnN6gPFUxSNgJzQxMQRQEi6QZ3YoUwMOkR8DFw9777fVlwszc5zj1XISVrCg4gAgSqtetdO37x7uIEKMDSlq4UFDfuyHSkvnt2FtndgSZ+uXyBCfqrFE0CpkATCb1cjWz3oKpilg6H5VELtfn69Ptdvvbt197b7UUYjB7G+RvQTn/MqAMlQLzMCfywYnHnRAq0stP3Z09REzMOfrG/Kyo/fbl5eWnjb484euLbupMrYhQqbVsRCQUQqyjjSQuUkPsiAPwy+UikpAtvhQV4rBovZOy1OrmH7fbdbswCCAHM7EylyKqajuq1sCgBM+5Atbas14AD3MPr4rq5G7W7FoKgxksRVBKFmeMLKSY9dabqopS7x1+UPTtcrVw2/ewXkVUvVE380YGFjeyAIzFnYIMRH65hH2h452+X+gb4WYkTZ7++r2XiK1DrR2tGcle9Lvbz3FNFJY5JHH7zICrN5WKcK3CHcd+YyJ2b81aM1WtpVjvTKjJvtAtWttqfQ8cx5F4j2Ze6yUijqO3ZiJSdFOpPVy15lxKLSosq2b19nb7uPWX55/226/v7+9hrhzs5v1WCAfCY6BGu5sFmiWE2MiPynCJiL13Yq1m4CDRzJaBrIu7+3249tmaisigo5upivx8shzT5O0cX/RpYAYYcgS5tMkW3uHMRbv3p1qbHSgXt36t28vz9fvbN3auTMXateqv7cOOg4lYpHdEhIg6Obh051pKb9ijCUkE9v0oToTY39/j62ts/Pb2DqKi6hbMjG7RDWZObNwZhCJvx+0rwijK08Xs/8/Yn/dIkhz5gqBcqmbuEZFXHSSb3ezedyzeG8z3/xyDBXaAXQze9PTBbjZZlZUZEe5mqioi84eoqptHFt+ugyhGRrib2yG3/OQnlVPaWv3l5fnp8VHBf/n5l4/vnz58+vT5p5+u1+s547o+IIKiJkJDMOaG7blUIhLKu1NtFgW5ZKl+TQ8Pp6zStCDouuK6OO0bmJ7X5ZSYwAWww+uAr6183V+58bIsDFy3KyKKZPVWqitAQkaQWmttCAD75dUcHckxVYdiHot4cs6MsCRJCFaL7pqFEmd8ktbKVnezhiSEJORrXi67taLVYH3IwMvrdvmyPa/ros/uSUTYDH76/OX08PT47kPZLo5MwouwWgOnx2VJiK47yTnCj2PqEjOMRISB68eg7QkPxwzITGQMrtW4xFxJWouaMyl4AyPB1qoWE0kOWq0u6/JSNqfk5k1tZbHmaA3AMSUHVVRHo8ZrEkJ0K5xEQZeFm1cD0KqS2cxaNeakhtsVsgBTbm2/XH85b+uSPghKgwLQHAp4Qj8jLsQOdDW6Eq96YMkexRef0a1H28gD8Q6qEJiuGVXH/0+4BADMSh8iKujcDuxupew4Ci6zazDojgDBvREACvdejLsCWG2Y8+n1uhuU5fR4fveECZzqRV/dHsE9syBRa7bvuxkgyWW/ppwzUy27a82JTosw6EpE7hlsEVozoysxZEYMuj/ixIh9EwYBQhZHhAQpJwYAb5VIzr0465082tTQGRmAFs6orqCODmAgJkKcxNwbGBFQShhDQ86ukOAKwa6hgOYZuQlUM2SuXkqpnBMy7nsppQC65HNKS8bSqjbdjJhkwcTVrLa2LAu4l1LLVlrZz+dzWZgzb9t1WZJZK2X/8bc//P73vzudl1b2qM9y7ck/MQMaCUM8aXcYj4Y64WsP6SKrAESAYOkb7YiezKtIipDZeyzeE2RAvsVoDojMIoxorfXFV4NdiYiQGbRNsJ872KD2nY0RGICxKFszJ3dX9SACgCCX0kLEgTeZe4DiI8YQ6AlmhsR1t9aaSHDz9DJHr6OoAzixR/zet7TRDL1oosaGIjgQ1NZXBXRNcQBzQ4gGtbuDGyLmNQGAWdPmUwlHiOtICObkAkiAPHkoAeiyfT6//7T/8vy//Pe//ed/+Yd/+qf/4z9+urRWWvVtQ1nPzE9kCVsTUTEAADYjEhZyRwYEkcS5B0jaAACsNmvWzCv+0RuoEVHinJZTEjmfTuf1tCyLEBIR6U7UKJFaK3sF39yUEDiRkoCpFq1b3bms68ro4mgEsc6UibSSOJt5AmRmBSVva8aKFvdfTdEhS87sTIb0AA1MkRCZMHp/wNBai5auZIK+rl0NULY9CKkK0NWpyGLaEDQppG3Ty/V9Xuz5pe1L/vTj8+cv76DJeeVP7z/Xr9/9l//8+P0HeH1elxV4AyBABQYXCO57BCcsjuChH0FBCuhOhHGzKdgGemHsQPbCwTuKvcNGbOgxL+tIkfI1dyBoOLoXEJmhRzcb1W9ZFoFZVXeHIKmingoO5jdEEQib2jRy5qEyDGYEmCXdBU7MbAYjf55lDiSM6jL0MlRMowORxHkSWpQ7iLnPeFiLZVQ4YIOtVBn6iBAYVUV3pj5VeSsi6bFRCYiY05lJVdXNAUAgAc4xy3An2A/rbgqmFvve3L3sTSb245aPEtFYgoF9WXsD96CvVWyD+WC29Yd1sDoL3kS3acWDtTLsVVWL2gMiMhe4dS1H9Shg1gZEt46E9YnnO9TBMJ4Gg9KQWWgQWyNisET4aODsrdcRrZcStRSvdXd3c+1N3kMpN7NEeSqAlFWbmSXOM5juHzEzRHevtVhMVA7cIyI6wimvVXUrNR55aeq1tdZcMiESdu5jb2oOzZTxVmlARuxFWY7Vf4CG6uiV3QVcUB4fTo8Py9PDej5xzpLFAVpK4vbqREIcGB7ujitMnRFXAGMRYoj5NLBOekEA7hjs+YwkIuhOQfQUM8F9Bdl4ZAgAse014GhlTJMjQOfDSEtWczA3uDHN0m1836KmEr0amwg6ZGRBMzM3AERyRHez5sExYGYKboDuvp5WwFbaaVmua8o5W2rULNXSR2FUtbkBsogslKzZKHACIDi6glMMqf11+M2M9nw0/aIqf5P1e2DPaPdNuJcN13gnRTYWTozQU2KGahaQaI
x73Upco4A0Cj86y1e/+rJbJfhW3j6e9vFPAMcK1P/shb/2jYeAGJHIxuAWjks+dDgJkeY5zJZL71EIuTUz09oA0vHm98BpTvMerBkd6ItwVPX6B9VqKcGbElP1l8tl33cAiHZi8CAm4duuyDg4KKJpa6UqQKk1t6sCtFKSeQXXfYeLMIPnhNZaKzWJrOnGmBp18agiB+RT1QFarAZqTWszgL02C/RjtasjAaE5Vid1QCaiqqpgWoRPa86EkjNiMEiCiCyEquRAZUzvBJwSJS/LEirmaqWUXsXjzgXQ29SjfMbMhNK8d/Zyznuzo/jNZzEbcW9kgJl7+dc6dXe8LZ7X3ABJSACGyLMWO0UogJ+zqgr3jeiugAAMRgeqp5gGmI3fmC2Muv+kXWFmVQUH7DCBXn6Oriy4mitPt3hoD/o3ZsFHYDFxyPfu6a53MZ03ACD6TAiPekRjGPvNLZ3F3cMAEkb3BhFxMCF5zCOx7NX60urWWuukIHGeDA5u0QyZ9w07pkdS8AsASoTf7sHwZ2izixWcQ2AQD59itubAvdSfMvo8LvSSORh6uIU4+0haIpLvwBUzB9yaAri5Vo0eEDX3ZnC5burIaUHA2speWm01TqIHVa1Va40ZLHjuuzGppQSPaK1127aHp3dCtO9bq7W1HLd93/fHpzNzZ+6BMdgWFxWDOPFccVyqqjMzMHcEWtDWjFgi3gOHDvDtWSMCAE/0xDeQsJulO8A9uhYMWp1p4hD7DPVRbCZWzc2ODYf4q00s2Nzk5O6jlXcknjm+RsiLxz/N05snc4vo7tdmTEWINULHDyKiEwa9fJw2M8OgH58ztzcTMZr5M+WOCLqLKLPve865Nfrhhx9SSsuSBJbLtUzXE90UZmIy7TFIf1AciDDXIDFqbqpKGCTJ/TLDxWRZ1pwTpyXllJJgzA90jHq4WDNLKbWwxq2aWayEDcwU3Xa20QwSHGL8zmJLxYxs4685Z1cDgBnuSpLj/UGHIN7prRoAOCwJd3cm6pBsj800g/wCDFzXZfnuu+9I/fryfHn5elpWl9HSTPnjp0+wLJASCIPpxEZ6rPaG6M1HG+sm6r2f13+6/XAEKvOg+7w96L8S0Ryt6/jVbUhkfnxGOHiQHIDY2ATuzkgw8KJTaAEAWvMDmLOf7bhN89uPSotEcnc+cSlvtdvNcCCn+LCGPp6yqg6ivZutQETiw4G9oyeO6hOZ10SiIh8BQTC/QvhwoENCK/Mrjw4ex5DirE32oyOKyPV6nU3JmZ7y2CcDg0Xj6ESnX5sPZg5ZwY2Z52AlHYcR7g+7Q3TY8FYPuzOOZoYWOTYx86T/tlF863oPCmDusZMWWyuxyQfQzYI1tJEwoAFCjJqoKo5NxMFBkljcvc8XqhmbmUUnr5SCiBYLm7tljbFBLK3WWg2AUzYzAyfhOJsoa4C79pVbYJ1wCpH7/pJuQ62ZG5miNzZL7KeUHk/84WF5d14fH05LxpyZyJGYWVBfmFlYRCTzALwCMG6IDqkhuuQasQQiJkj9oTAxglkjxFhH6MHxawaI6k6ADapzFDK7WHZb4O4ONWgkSACxqbKQ5OSleTTlwWMlChMbc8wQhrSomltDMGEsDRCTMO16NfeY/kEzQEMicHR0ZCY0JAznIeN1C558WBUmIhpYM8CAXPcqBvSYEA0hkQPGPDTiRJPPAx615o2re/PygedmTnMQ1waBfpSozKdL7mFT7P8cLtnnRyYAe2aADHxX0ApFpr9iPo9+9IA7wvvR2aO+/2pK/OZtb47vfoTzTGjEZE92Jvi1i4K4OdPh2bC5b75xzoa54ZyjGxHa0Tf0sJuZhVmNENFTEpHW6uvra8Tua8pmtl2upRRm7vQCLEwUPtRVGzqam3nsJzBT02oGrVY03HZtKujuoGUzZkxMmsWq1aWelgVgReQoYmBTRKRx7dW0VcMI1tVKKaVpM1DV0nTf902vKWdgUTNDYklsbFBaK6qahFUfnk5rEkaEAsBmjizSM3DsnasOFjqt59PpBGj7vpdSUPExP4iIEzZ1pB40I/YhAREhWarpzLWwz67fagpTEY7qNiWBcwI1732N2wONiIS+WX6Jd68eI+AIQImIYg39CAGngtBgjjuKgXswdSFCUKLcag2R9iOiO/CgQAZEwDHoEqrNdwWgefw3IcL80zw+HHLCo1+/c3MAs4T85vdwHwJN9ayT2KOT3gkAoHspBTm5+bZt1+u1dPpWK6XEvpxatanO/OLd0wOauimORRThHhGZkBMlCtqWyKID+2QG6GB9n5wAMlFQgiMYQzSgsM/TAswBLEQnGRWxDsqyKAFRuL8GPp9mJ5QIeBYgntRBVRuAqhdre2nXUj//8oWISKgzYROzZD0QhJZS9maNOQEDUQMXyWFYWql9HoGInEikVjLVlNLT09OyLPtePVCayATVDaJlOR/G8YngiJ3mn8B9xigkckOtHbRmys/xid/ec/jtCCJueHs6ICr5DfI/qDbuS37T2ro7yW3PIR5KOQCdhGYafJubNu5sb3/NkxnKbsc7gDgwpd/kw/Mgs4Q076TfyA9vN+b4ZgCIKBXu1HxMM95rS/y0rMu+leX0aGo/fvd9zuLu2nY3BTcHNW/mzZ3Dh/R9yTimRcIRgwCYkDCC1sLMy5KiHSKJrCkALZKWtIoIQdj6Tp0WTEvuDmqgNsuX0X2IAP/ofImEqFfD3Z1uWNZOf0VEQgzWTY0xwIGkgG7llMP8Fx543d/0aWKJzGA37GSWVoMULS/pu08fHvL67/+idd8/PD563aIgmM6nH3/8EZIAOlgddYMo9kRH0A827A15TGC9O4kXAN6jv2HklkPjRp/w8Ke7oOtg8H2WSG560YcC+1+PlnyKSuykNbPY+TGjrDbIh++k7hthhlnZr4e2VmjcYdx9WozpttyBmZAIR2vh6FLnV9PsneBbXrd4vx1mT45W5SYEcbXjtkVJV1VDAGjSH8Kt/n2n5L3skZI1jfpi3J11XY52ZJb5IyqlOaB88HwHZzOJr6IC9NZfwkjbVPUYVCKi282eunvP6QCos7wcboEZUZ+FU1WPYdAbjKfNJ43oasKcU5Ky72btjZzBYAch6kOyEVwSUQtgRsfL3RIGJJ/zimamZsQATFXbZSvXfbfwuUAIfYesmTUzq6PBTYwsHmvV+s0EjAKGemtXdAMzQRWEk9DTyh8e0sPCD6vkBCl4NgAQGJgWRiJI5CKeGJlQkBBnk4qRPPfl1zEHwqqKDiIMANYMERZhcDXVvje0+2312NuMUcpyJCSKhRzxOBSC3B+haWOgnPPUtNsdw17BnVJODtJtFxZVZgIgN3ZwoETuDgVZwNEAwuc4S88NCDtuLeyAglZr1ppFqVucmVTUvbVWtLMkR4TprgQQ1Lvx9DmGruiWWsCvvfBgZL/907xMPyR+3V4Pg9S5hEb3TwcVDQ7KBICxxKkPjBE6ICITR+R3q6q8KZjdv+a1wBvLeF85m7//awc62Mfbm+O64Jsh5uN9YO6qhH3Wd1babqfXbVdthYsQEyR3j27edetbKNzxiKcXkVq9N
9MNVD0szPSU8xYxs5lu2yWqxSmlpiU6JyklAHJHU0D0TlOpzRwWSapVVUWMEIWwtKLNykXdlQdaHt1EZEnsmjWzuxMw+t5aIwJySIc4jEiaetVmBrVWB9xUt600c3Xf9/J6vbyU6+lklERVkWldwN3Vreybu6vEpKvmlNaUF0raGjNHHTFqw0TEbJIIEVNKOee97ZEQEiOtT0SkvTE22wIux8SpkYhwL1ve1fLh4JyOgj2V4igz4XFV1bhzdZmZqqmqqdbqiLos72OI4qYvZmaOjkzEEzwZRiiCrp5SdXKgWlWbIzJEIkhCxA4EIHigGJnqYD4ywKNsH7LNo6bMNxy97xv5P57h/Ovx53nT3B2RjjfJf0157+6wd+EBv311ZO9JxJq21lqppWzbdimlNWV336uWUgIW4e6tNeZ3iKANj1fhCGhATuAEFjtxIAadu71RQEIkoBiXJY69XgQEaATIyCIiHB0MiByMiAyCxEjRJsgKiG8s1q1VGAClGHmKANEoV9OqsNdW1La9fH15fXm+fHl5DlzUsqT375/ev3tCXkw3dw/afXQvZQfJmiqomHv1mlKKmzljr4AJxITKeVkfHh4B4PPnz999/LDmBXOnhnajkXocTNsxsY+Bj0EdNM3dmyd4lJ9vfx8SGe/oudmvOhQiHBQvcy+fu8+lf36Mj/3+NZi3ImMbTZtYWH9rsk0tfrNZd17y8dLMLCDTIYrThhyV4lgomVEi9EL/7WaOk4zg5O1NhoPfmd8O9ykrHA6FiADs3kO4p6env/nt7/7yl5fL9Rr5VUzomzUndLxtACfmoNATZuZYDgEpS8rijZl5XRZ33fd9zYuJoTkBA5hpReAgpwIEpAAdubu11lor1sbo+KgjByJ9SflQ/SS/ZXQmIj63FkNsgMNlWaCH9c1H0ZmImlU/AI7mXo6qN14MPIw3q1o79F0BDF1dLSc+n/LK6eGUP717d/nycHn+GrPfoT7Lsnz6/rvof1U1nHcPFGL3B+LYMdhJRcfLIegM3L2zy0T01YFpAIEg7UYde3AG8E31EL6xzAMOefeHMOU9ABhbAKCfBAAA2ihPj7xu/ncK0vA4o6OY0k09EcHcD+sP8DAZ6IOL8eg64VYQJx8Z5vyKY/v92yudqjyE/XbOqop4d/52v7huvm7ChrcdYe5+6y9PT6n3yn+0Wce39YLxOMt931V1AkGPF09jyzYRjYlPj4/HDzjyaTNDZERFxEEgTvP9EKbkkF9O0YeuyT1TN7Ockh148KLsPesuMXox4I4ezX0AwOCVAjNrsXkSrO/OLk19LHNstTrMnAFDthiJkVJK5h43IFYdaHQLiQy8maubATVrvVm85NtTckfrXTOznjI2iy2e2neqtApuAsoESegk+elE70/pcYVz8pWcQBkk9iUiyYJrN/5OYETAyBy7B4lIUd0dPbk7KqETMXrUCh0AzF0J0LyBY+xEJSIm8vibuRvZje0wyruAiA6DfRTBwNUNHNSNgACmgg39cxfmWqsGBcXgKjQzNDLApmYGMZSqvc5N6t4L3sTk5OTqnbNUOpGq4OiSq4IaVtAAQ7hhMNbDNIXoOJgwmZEYEB0BY+vizcEf1PLo3t4I/OzoIjDR9JEAHQ/Z2Wgj/53D3BHRllJarDZHRBxQchwzSD7aOHZTzDex6f/kdcxpj4bjV0KTnhD++jHnJb95v7mNQKZTN3QX7waDGlubq2rKmFICt9iwBHAD5rXWhNgPcUkICyKOCKcnq5FWTab2/jist6kxdg2bq6o1BQFSwL72grRUI5ScSREAgmmWSADIIFJzCsYEAnT2VquqqkiSxc1a2fd9r2WWFRMzR7dCUF7rlmufpSiliKCIpMzB6BaoEKJYxNJUfW8NAGrT3VptpubXvbxs+1XVuVFrtVYkqLUGNVc3quD7jtHbfPfwiCknh1hJE6nWkVk+EQshmNZaW63ofYFN3M6b/JhZU5SehOecV/fHx8fT6QR/+am1ehQ5H7E1MyNyeCMcm7c9YBjmPFQIRtrW5c16BcQ0FtOhpVkgiEwweIacOuSCQphiBaCZBUMBEaHfFhveJNOJSQhFnbS5NiilTxCUUkopWRwASykUWA1DdKPBfUlE2m7rH2ZkAPcu9qh3b5TijRc/3rf4/YzZbmHK/OEb5QKAlJa+QgNuN8rdAxih6oKUc2ZmU22tIXJrqloRUTInyeEERQjM3JgqKd0FbAQ0uBYcHaPui8joQaDtBJ4QGF2oL0Il6uy12Ndc3QUM7u6AZqDayJsNegYwdiUAUPCyVyA0YiB2REdSq82hAJTSLpfLZSu1WSn1+fV6uVz2WmqtpWwsdN1rUTufV0TMRHmR9ZT5mcFdVakpUKWcW2211l69dQcIljt1s1IKRzrjcL3sdd+uv/vt6bxkYUAGMCKyN4nHLBxEUER3/bE3pvj49KeZOj7W2x3rjPB3pXkwxyMPRBxNDdQwS/wGR9R9tOezBjlPeHSKEIBh9DPjWxAcB4niPP9Y8zDPZIrcG9mex/9V+Z//PQbW1gPobi7ogNMbEd1NfOLmm1lKyzif+dX3zyW6DhBoZtJWmBmqieSH8/q//i//7Z/++Y+XvdVa0TlludHhBvHVbYQBGTuHK6CzUEosSCogxCmxmdcKBMYIwOxq2hSAY+gqscR4Dri5W1NtdW+1mvZI12cvyKnbwDtifAZo7l5rH+mcjjuefWKBCI4D+RnhHYDWFrerC8AoYiySmtsoytw6Zs1Q3QIe1qyCxlxaWxK+ezif85IQT4neP6z7y+e2vdJwGe8/fjh9fAeEIEQAzY2hF6Pj/6DDwHRE8upIcPgngKOzjZzQIQBmPoWlB5NHmpo37cH7RZQId7IXL8JgDOuo7Pv3jK1FCHGHw4iFnPfSBr8t0kXJG47yBgAISARmkSzAoTLIMev7TXFnKoUP1sDQgjerp+zAdkNExHcJ4fQSb7Tyjf25+RS+9R5ud8F9IhplWh8c1XScMZxZKSVqydF0EZFW+uaiaEQxUtSSPcyWA4SGAxz3y48MDmfMPE90hOAd6VcOIG7vxpYcjTsuhJGpd7EQDClm9iL+xv4FAJNuddTAwsRoL+Apddw2IIJa1d0QANCIwQGss1q12VLw0csJAaq1SlpwbMU4wjaYO4rGorZBGHipoCh0BDWE2LZM3XBPexSS4dB5ds3JzaHnA+7uYC0aheQNhYXwnPkpp8eFPpzzKWMiA7RYl06EzIh2Au2dlIaMwcWIRCkhckfaqZiZa3Inw93d0PuHwKy5W9O+wDS86VAMM3MiMBDvoPa45601H7hMHavzPCg3dewX8nCK7qDuHpyQwcEVn2o1RlIXUPVmwRiPnWkcGYXQNYqaLOCkqmSm2DHZmWOxIoKbqbtRgWZtt7qbGWJKJIasRQEsNqUSOKATQiImQHJAArabRuBg9Z2iO3+w2/RsuJNbZx9BiOLmcPd8hKpqY/bdR7nXzNww8qV5KFXVGR8c1Nyw+/9eHxmOnIgcf62Ydm8abrbg1wpINxPzV5LMY1wbN2K8/2hl3Ad7lanO8QOAWx2HDvQA
M1ZwNWQJtxp0LKbu7rVWAA4ParEspLdz3a33AHE0pZn7RmwACIANE0fwkYgxLyFvQr0lHl9NkpkSAMyJ9LiWgJmpKgM6Vm0tUvd4BoMEPyMRg21awLxVi0alCEiidV2AMrYwsyJuAEGOhaXVZlrVS9W9aWlaql73cq2tEZaqgFbr7qa17sIIAOu6EibApKqqxZGIhZflUdSAQE09oBAQ9RdQCybxUsrIwGFZFgp0Lnjgyn1sCAQAIY5FFIjw/t27x8fHY9A2I9ppw9/I4Ywg6aYuw91id3J9rY/1IyBQnJuq9nF56iCfCGy7KjAggII2VTRg6DPxFpTvA49tZuQRRZGqqYL2AduusDaYIb0/vhgftr7b2vt/j5pyuxK8XdFRI46/PHruGVW8+XibK7COXZ1f60DGy4KsoSOsbu8xM1ettRLh+XxelsXdtbYKFFmQiCSWQBwFKps5obuJuDUDV9AEHDQU4AgKHjsE+xVFj8JiR33olKtJiqkTiNRDh+lZlkVVW6vu3oH68SBALbZxN2sT1+NkCK15AzdyI6qgeyl7bQX8crl8+fL89fm11toMWmulFHXDgAntrX3+sjf94ftP7969C3qC0+m0LIkvbG6qaqWclqUBtFIDTxE+HYO/yFutFYXBvLXmapRS1wIHAAckIO/rsMejxUl/GBvLIocZ7RcIw6dKOIggoc9OhamcvTi4xXb9pr6xruEQZ3UbANAOn8J57Jtrjj/RATB281ZhrRHBDdxUfUacUZGDwdEQyl72djPLh1RzJJa3bYG9Q1AiMYgzG4tP++pXGOFYX+zssW2AbseJ4wf79EwDphrOcyCiQ8Hk3nm5w2x1EiFwSuzNEYkZ//CHv12XzKAITmgMg7zWxd0VHZENx/IMUIA4DVvzKokYEBtGcV1E+HwG82CPdFIzE5LMEpXBmD9vBrU1a01L1VKBJLpDMyFkZqY0HxNRp/GInlsAwRCZiRMxIhogE5tpEKHTYHiGTsx+yx8mpKh3xSzm1eMpdGRpNTczkB6GOSgCIHor+5rw3dNpSZgY3z2dX7+sXYeJjOnTD9/D+QTQALMDepyYj5qIG6AhgLp3OmgncAUgRwWngIU5ADlbX1REwT4MgLEYEHtWGRGz32WDaNTLmCMIOeRb02biLXXErh0+/mkOODJYHANlQbc75C3+gMfa3ygDebtxL2H0bhExgIQHs39zB8cayjjgMdt6Y+f9/jULMTp6Wv1tB7MwDzIdMQBMAtib+tz22d1e5i0crsS3Hc/eR/HG3bs9NeN56p22+NZFDIlMKZkF34yOCgveXL53Tzy/KMb6j9fWhbs2hSHLjgbuqjF7Hh+fExeIwZ54vH09D0fsC8dhRNuBJ/TJvgUxOxF2qbkrHcagTSMvAyJKxN4G4TtzqRXNZZEB2CMLDJ75bPJ41BdiM1gU6y0Iw0hE1BURU+osDrXTOL6tNzshMIGpOcTov7upAbOAu4Kg0Lquj4+PT4/np8flvKRVEKy5GzkFcJVMtREAEqIBgpMBgaMhaDPmYPZFREYUksTMCGVOuKIDMntrpVV1C6x8Zj8KSVJQ0RANHuPRqgpgUXFRb9EYcvNSC+kBGnqYIjCzVqrWzomsg1TGFcyagzOio2tr4C7E0Xhk71y07ljIVV1jFpyFmSOvQ0M3c8c2yIVjlDA8lXuD2O/tAGjoRAQsGCBWHJshLDq2h0UpA2jei6YxkIajqjIj45gpndiJKaizHDhfUXIhakREKECNJJrtd9kmIpp6g74dxD2mW2eEHUb07fqHo1H49pdHl3+MaN8Yqf+fL3cfEQfMBZvz6o5WEgZSHQZy3QFnlghHS9rnIm53qQuMOktPS+xgJXtQM3kyHCJJEGIWidG7cNeqqrUVwFaLx4Z0gGgeuKEHoMBVmypoFAvWlCOJAoAlNfQUhkKbtxJnb/tWWrPTmltPGillylnUXcFVMecsOTsycgJQ91K17bVtpW57Lc2b+lbbVoPMBQJiICIImITWJJI4sBiZiVKnnN3L9vxMlIEYIGoCTAjsfUelBp3Mvu/WmqqqKwPNwnPOmVOGUTVDxJzz4/nhtJ7cdV3XyDF4rAmZj2mWEY/ubT5fZJ5VwNsLDsuXxkAvIROJOcaagwiWfK5yDX01VzUiwlHKjRM2ABnrDzuzJZD3yX/okyMQoOMChzKc97ZbCrRddAeP8jyHoI5ue6Ja5q2YV/atfs1fHu3eNH1jxdbbaOD4m5v7d6/W0IPAcsxpIQLAXjpRCjO/e3w8LauZNS17U9XbpvW9lLheRCTqQXzp7C8yww5zVYWmJpy6WwUENMLeCjTT5g7g1S1m3CMjciJnj9vbt4CAD5MYW2WaR8dEPRwKoSADp7xV29UqqqJdWnm+XF8vlx3K9Xr98svz15fn1noMMDiBMIkI867Nni+cs6TF2TinxJQlCXEgU8xMayNHb7q9Xszb+XxW1cgACSxROq8LEe3Xsu/7w+OJORFKmH5kgth3Gm2NYz3e+6wpTbr8N0W0g9E7yowfWm1Hq3jUIICxcf5oEseQfmgcaC/4H+UNRrbEB27bN6cBt65UDyIHGOSWE8LQa7qHi38rojByyElpc1SNbzXo9qd71ZgC3eO78REaiNN5BBxMLXcaF4I6v8gMu/kBsCZIp3UhMILmbQd1N/Q24DlOGA/XsKGRgaMjggihEwsyOCJIYnJAhMR8WhZtLfK08PCZJW5BN4ZgaO5qk8fFTHHUGmecMMzUm4QQI6YiRI9FCNa3yNBhz/bx+ZoZaHAg2ZucUL071w7NQwglN6C5DBAHPCoJLkmE+OP79+dTRtAl8dMpiwi7pEXglH/zu99ATuCm4K3Hr4c+3ujp3ugM0MFjtw95D9wRwBGcARR7RSPiprA2UQ5BBwMbJe6AlRocKGgI4SgDBwEbUcMou7g7RnNkjnW535BW5p2TLz5sZmYSq1lu8nX7650971EPmKofeCUwpt/n1vj4zts+uU4AfDw+3nNVwEFnEVG/mV2P0ziUYuHNn968HwCQwNotIp0lIXcXVYVDAhrvmIFa0EtGoKmqtda0BNQTopRr45VzrnXvGZHfsAdzmnk+lPh9sCn2KlFs8MDdLAhToq8c6KCA9rkqAjQe25+iIwsAIhLzP+6u2sfR3d1UO63IsGUUvb5oV7QGpoIxUuJE3GoZdy3O+kbkGjE95yWlVGoFgJxzrFLEw2dAzQdlLQkTCjCQswGq2ZLXFdAMDDZ3ZJb+qWCpbdagBRxu3plo9aDXTm6N4o6K4giESinl0/n8+PDw+Pj4uDAZurqpae0FIVcAay6I6EhChGiK7sDgULdXkRz7rOMpC0lKiVosCTRVjUVqFUmrbtfdzBi4shFR5P7untX6gsSYcl6AGV0thIfMwRQcEMlUayno0ovxfbqpB3/1GjbTzVybRouMmYPPlror9KoN0VOSqorgAETYi+XsZqYoCSiJ1SxL4iyUwr62ZuLkiCQxniSmVvc9zw3p3SCiBFt/0PjdiIMsIsU5fDLcZE9f54wE3qNfYp8M31ik4gjQmqkqIBI5Ao25KZM2eGU
8TSUf3QwmIujdRUTqNL+zDzYLNPrX+oMHjPvNOnzTjji68L96oNsb3oYFARDAe8j/EV4eWhmKT6M45wYNm7uH1OFoKddaKxOgE1FKaffJLnCzdL2hinNOmEYdEEspqtr2wplJLfoDkliErKmjYxB1mEtiItLmlizclyKYai07mibBZVlOp9PpdELEfa9Esq5GiQjFqm1bdQWGpEUvl4sp1FbcrVRcFmlNVGutlfNJJOEoqEe20tz2Wi7X7bI1A2hGpaoBkAiiARojprxk4SXhw/l0Oi0BjCQSSeKC17JrLS+qVImZRIImMrhsIHi6cxYQ6tlgbVWLqzEzODCiiCQRO+TVcb0pJbh2f9ZaQ8ZpsY+O6igSURPUgCyKvCkqjHTobiEvYhBWe3C9DK5dG80VYg62YgsRo8j7oTIzKJk1i548qPXGb761yR0RiDkJ54I9cYqTr7UiuCwkkszQTcAYnI5LDqew2ZGx9l5B5h2Y+jVf851H1/tregRvDnX84PyBg/3cATFIvvpfs6T41iXn83ldloUiNVZAgJTSsiyTSaXvSu1kZRBFTGWtSkY9dqwGpKBKSAagjAwdVU/qnQfLmrdWYxlmpwklXoCaW3l5NTO1PscLox6Eou7oClZ7pZeZENNe6VrstVkBKO4vW/n5+eXr8+vz62vQge61U6cCgKs/PCQ1I0AmQcSq9evzKwD85v27hR4QkQiYMRk7UQOs2855UbPL5SKJTqeTq10uFwYk5iXl83lFh+fnZxFcc+zhYuijR3ePZMSWvSPXrysW38+FENghx3BvQvGv1N2O8eIUMHRAOpBnRlsM/RZfIlq9LZqfpxf/7p2gmFDyMe8/oK3TfhIhkfgotU/5dEf8K8J80OIR4w7xxoOs4iG5nT9P8xIHTHQcIRmqjX228Hh/5kGgXx2M+9/fdHg8x+TTzSuhgFtOvDDlhA/r+svnrwhGKEF0jBjckIbOAGAKZi2oRxGRCaL5HWYVABCMBZcswMK9WOXRGDSzuhdwQyTvczUO5uiETtu+pVGrnPRppZSckvuvlJNaNbcbVUcPMxyQ2Ad39OjBECJ21VaoHVPWLWgncYxaKrhrzDQ2PD2CO5IAIQM4wpK4JP708T0Bfnz/cFqSttLKFcFy4lVO6+MDPDw8/fA9LALaHMBBPXYJdgynBa4tnkfIC/S/dmoZnHWHIEG9achBz4Iu1t094quRDfZX9OSjo3kY5+uSYMf+IUyKF7sroNxkxb2NkgePLF1VmdM8yE0xEXGAwoa+HMXvvvICPuNDGNkF3ErbgIFUDBSVO4496pMV73jAKa79aIaHcxiovcM/1aPDzzOdGfqLh0vvKgkA4u6EWEp5eDidTqe67zxeRIeJvXuqmIjAUkrxs4js+x7wWTwQvhNRLPFT1dZKay3WJxDdqPPpwFBqZoEBK8UV7WFdRCScSpxxKWVZFmbe9x2Z3T3nxd1LqQAgTIR9fCvgYsMoDAky27Y9M6UcgGdHcmZWawDQWgne830vMU5cSmla+7Zx95l51lrdOs7BzJYsMbYBAFdrwYgAbgy4qTrCmnOz9vr6fL3uAESIddvcnZHMa0ysgMUIS1+GkVIKdmxyC7sQaH5bTiklIgVmWR+X83tM+WXbl4ds0Vt337Ytm6V1cTVdmqpqaeTAnBILOajq9XIxM+G8ruvL9sXdU0rrumLb4w4nFsnZDBpYWh8UNgIA871quW46xl6r11xcRJgADRsqJiYga17aFYVTSgRYda+17rXEbPOojRlO1kLEsFBHG8fMrUYA2tydBHPOQNBaWXJGYXNUVQcllsA3FeK6beuynPLiqlGfM41oBpAJFFtre1ODHG3tYNkh9CUvj6flvErIfUxqUUchIjOHAMQcVy3QWmMmd4voOeRtWZatlm0riHi5XD6dz9FTKnWrtZJDSolFEGMHgwvZrMgiYtUWbLQpJVXdaxFmdzSAEIlSiqynbjIc00A3RRNga5uaccrNjt4UZonH9C7r88PrqCk3S/FNk3BagP5z763AjHIQwc0cPKVEPskbqnpfud6aLnkJSycpbdumqoSMDlFeNTNv2ggYAQBSSkywyTVOLGKaaP+0fSewJQXzpLXmOWcRLtpUdV3XJGKqpZTH5dxaW5el9bVygohMSERrysVLIOHD0IskQjQLq5IBCyLmvOb1HHN3T49nIjKEtEhrtub19evrdtlB6Zdfvr579+7y8orApe5Rwqq1lipAbBKFWhSRvC5Ye+80EtdwSy9b3bdGnJbzCay8e/fu3ePDelrWTGDKQo+nNZZkuLssa9krgNEa/M/GvDS37bJxwbycgKVWq7UyiVBWrft1r7Uixh3U9bSqm6vxyp//8pd931NK27aFcb5cLgC0bRsivnv37vXZpseK4sj0NNGemg4s2lCFaBIPQ+9MmLq6uzYbg3AYHSb3BtjCme37LpK1GqE0NwdvVQk81p8CIrillKxBhGKMPIXcQWutOS9VO/w1zrC1pmqIuK5rzjlcGGFSr6UUAEMg6MNUvfmJndgfZtnlrfxD35kU/xzNt7vJCx8z83EOIrKuKwxKajkUdGauuCyLDf+IeGueu7N5g6GXM/90t9aqNzWC0+n04/eflmWp9WektZ9DHA0wAtCaMqMkkXDicfxS2is0Q8inJS0ZESAxCWrVvWlmaqZbLQydhPy0rIBZY5QJIPB3DioMN48/wYSI7l61xeCoGwgm5KU51M2q22tpL6VcVa9mr9v205fXLy/P+5U4iTpedm2tpZQIUbWS8LpmA9xrQ1Awz6rmuO87p2VdT58+fHi57L88X9SKoZwfTnvZixohEvDD6Zxz/vLlC1sxa+uaz+vHp6enVi7b5QIAHz6+i6W9nHOsveiBjQ9smJmptlr7/bebXswADoh8yIn7Xd/gm6hxdA5HKugzkh2/J79Rzsx4lAdiEInAvbWGh5ZaR0yNuBoO8TGOMmVERnBgMR2uwcw83BCMhTS9Ukxy+LlP34QWPOR1moUZB8IopswYcr4AMQge4/1xSnYglRnSC3HJeTm5mbY2aWwCq0wxOOA+mkChHRrPhUJxCYX9YVlM69PD6Vp2dw/SREJBZDdAIXNHN2SKCfJ93wkVYXEiAmdwcF9SZqR931deHDwxiwhYP5qIZJbmVmvtlfFxK6IAp6qu6AaMpMQBV2mt5ZxtjFgj4r7vQj0bLKW8vr6u67quZzPY6+vAKPVtcihIhLV0ZJz0wnffpcbkCo6hoy1MIgJxKYVF0D0oLSLUeXh4WFI+r6dzTmTetAnhhw/vEUDyAkyPH99DlJrICYCAo/1HQEDa+7QQ1U4KbvrIcBANgQe7qAHQAE8NLYg+YeRCgIDkQWnf5wm/oRiNEAXHKhw3MHNrOGoxU4TQ1N2B7uvdOrpVFhN6DOEXD3zFR7OMozrTC/EphZzF8g8A8L6qoJOl9wRH+qDvDLR8rOQxc2aa9n/24TW29dziz95RmD6uC/9ICEMfiWgO3NlhP2FcRJAV9QvpBFR4VDqLXQkz2YP7Jp4PANh8D41WtQ98mqqS9veo9ktS1RnjcpxEED
NAcM1ipBZgjgBj7BAJUIgbcWR0ZtiauQcBdG+bJEQRIbMigojNTA90qx6R6f2Q8bwF0wgCoCmMxXUj8I3ZFsRw6swppWQGMUnvdLs58Qp2U0S08WgVvLQqsbo+anshwd4T0bJvWiuRmBo4CmAM6AEoIJATIhioN60Y3afowtn0Coi4715RlYCt/fnz1wUB2vndSpl1oUD2JkRvQF4Nm7dYOxPo8trQIYkskp5fnkVEqL6WC3XuJXT3BznlvMd9yHkJc5ZzNllba62WUpqqB+MOAm7PF0ZaluXhtLhZ3S/CvAiLiIJxc1CzygDQWmmm5jzl1cYsmRvG1jUYTXZVdUciSiTuPn2GIGSRGEoZpJ9RbkFFVIXwymEehFhr2y+7E5khUFgONx8JmAc1kQVFahbKiUVIANCt76GI7uRwZDM+8/sMKqzAUfBgxKBBZBfQuFCiIDNIKaE6BiBkkG1wZxa1BMCUAgzaS/sdeBmbV7s8TFM1zRZ2qDYfIoC3fYxv9QK+QQHNYLe/PwJQvDvIX3uF6ZiGwsyEmZEAKOdsWiKUyTkD3L70eGRmJsYZR3ZO1ujUsgIIM4uQ8+ARHUurLNaiqPYsNGS/1lKKmZkzdJMJiUUSJeLY2RsnIEjDjypYQzcmJE7rmpdlFVkk55xzTmvUv3Yre315vWyl2bKehaSUVrf94eGh1M2hmTVVV1UgbNXaogYOw27DEOCUUqlq1sretM49cvk3v/3E6K21/XJNdF6XJSXmvJxzZ5nrLbUtGLB024puG6C714TCVgHQHVJmGwO9AICu5hB1YrMO7InKgpmhGyINhjC7XC6Xy+X19XXfd6Jlpjrzhk/fOZ84jFYhisQgiQ82ERgBMREhiUgGaISJEIgEkN8I2Ig7e00Qkc3AXR0ckVpr5ITOzoPtrI/vkFWIZmDT2pQBQERUY9Km3S52DqOMgnE4fEQzUGvtFrweFGc42i428686FqbNPHn+cDxC+HVVRaH5z3lXAWDbNh+Z5DGdSCkTCHLs6W3TZyEiI0lyoKTNcs6CpFqB1pSSENdaSzMAIBRTMANtTmCtWW0GDkgsIpkZidRtayqE5CaUQZIDuBAoO5oCtAHwTgjohobogO67NdoVyQWptaatIKKkFPGMmTWOLgEhsrOAc2m6FX2+7K9VX0p52faXUr5eLs+X12up0IhLcvfS1My9adSMqjqV1qoxqjAQeGtWSpGnlcBMW87548f3Vf2X59ei5XK5sojkJYLvn376KYJvSsAohLht2y8//+xe371/+vTpQzwnMAME8AEkc4DouBxq/POZ4hvA8H01bQoAHkrq8/1Tm+aOxzdagOZAHt/bWUl+Db7B2Dey+NBB9Du5Qhhx4Wyf9682BHpzPlMmpxl/c25T/edHSqxuJwREJ9TpepiGH+o5ar8bxDOknF/XPEiqfN7Y6XzbGAX6tj/ZO43Es/AUQ0VdVa2BE7oK4cMir6+xSajt+162tG0tJyZ3GpxlZE7shGTeYucngXvTqtYJXRySJJn7oh1iATsiSrDKtY5WaK21qq2qNlO/0ad5Jxw1M4sefsjkXjvrNREJYa1V1c1g27bn59eHh+3p/TszMwUAjZkjEcHm8bi7zRnRKQEYOAMjQkQOZtbmPScGIIVOPhThEzosSTKxqrbawHVZFszg7uvDuTCtD2dYMoCrW4UCwJ3gBpTdoDNfRGljxAEIEDOZGJOZOjSqP8+bqAMAWLjFaG0DgJlTXwN2H8z07P8mzTAqBTAAyeQGfuPROerUbZ5tSDmOT4XIzcLfrGWYGR7YSSYEoEdfI8ab7aI+Q+Nd4mflYlyFRwY6LfybIO2NayC+I6MKWbrXym5neqQ6oC7uDmATuTn9NY6AJ05YmPnGFDxuDwD09tTktDkM6th4Ya9c4iwC2TBVNtY74gCkjdPtasBjhfTxansoNnbaVFOACApv3CTTv7qPGZHbM4D7TsWdge5ek1OUCtws4JpxwjkH22fwCYTGomoT7kM47v32BEU79rW9/RJizqd/xUE8AcBdwZprs1bJDL1qaUGVsbeSgSFa+QhOrODVDKxpIWvNVRWC8RoQgRxyzuxGoHvZ//3Pvzx//vlP/54/Peb/9v/4wynDOUsSXhIDigGB+2ujupfW3Bu4ErguJkaLkaFkJ2pa0ZGBW23btl1Ag5t7FO0kpXReT4+PT+7onGFNrLDv++Vy2bbtQRjADCUlcnMrRZjxtO77NZauSiIKxn0AACitICJyT6tataLNzGJ4EJGAByGnIzqoadxnYAYRZ8KUWVDNeMJIwtJ1fgpHErOKTqflTI7bZbPzohYsNh4jlA4zSusdjCkhBAigbmqGRG83pfFh8BV7HcTc/ZgQTpXGsUZi6sLxUCIyNxzPzjDi4ByDrkGqWq3agHgFQ6mxR92x1EaDvzuCDAxfOOLI6WfHuX1jFv96lng85TGN0P85g4O/9gore7MJiDjYVlXVa8VYGGN6DAUO3h1mYVVVgyAcOskRAXjk1S7CjCwEBUYAodPKmxkHORCiEEeQ7oSBUEgpLZKY2bQCAJqjOWVABFdTK66VEZgEmU+nh7ysKa8pryxizpdiZvZ8fb68vLZSy7Uw0UM+Ifq7d49ffvmFmTLL3umkQNVa83t7NReCg5vGHK1pI4fEkmXJkqzUZraVKwCYeTufOKddQZAkkbsnwSWvgmxmrmpG+34tuhNBcjIAdAWglFJpplqJRYLwWVWD8XzVJBkG34yrgWtaT6fTKWCtLy8v1+v1+vKqtQEl7zFd7AJ1CEQ4EwAQg3uQtDqSz9nP/jAOewh7SaX/NVxz/IsAwPsAT8jfzZKHPzMzAzU0kghEbupGHYWOZS/WDNGXJeWc67WUspVSpvwfVdiBojgJfRXbOGEwq3U+KTxkwnQY4+HDSOEM9eyQM7xxc37ImZmo72g8+Czv2XKvDeP9YP+9eiL2ii0jogAisKb0cDo/PDxkFgUMyop926o6c0rCiLjvO3kizEH/HlPWDqjIqmaXnUsRxGJrUwcAFlRkM4i1hYpYrdbqq4Q5J3TDiLu0BtGA1qqqiRGpLx+vZrUqohEJsjiRNnu51q+X/Y9/+ksxf63tZdtftn1vWk0dkR0D5j0vn0a/0czMG0QZdBQ0wz631rLI+6d3n7+8tlId8Onh6fT0tCyn6759/fr1y5cvAUt2NRIkorpfP1+fU+LTkn/z448xOw7ugLd0hUbc3O9/UMj0aPKuoNZ/vicK6gY03JbdVOPuv79WbrtNEh7eTAd0HR6iWABAos42YW6/kjca4jDod19zywmPsdO8hKkvMxk7CHY/RimFBuZrtjsAIBrRR/8SdzWxfHMoH+HH4dRmoKWz0widv2fkNmEqADFm4gjAHZEEU4JaOpev+5rl4Xw6vWwxquOOrVlpZkqIrh6dLUfyGcqZWStlOa2qamBrTgGzSiK9ANTUBDHYI8ZVRC0yvF/vGag7dgE+UJf3DuEtH3ACJ2ZgZmzWGDJ5SqmU8uXL87ZXB8o5ozgAtKbMLILmUGvLxACg4HzLDBAdDQEIo0Oo7p1rBGOtSLQso
2do3blDL2NBZtPZzptMAxoTuRhgGpmqT0B504t+QNdtIR71uN6PtNhro/xyVxuMc6gaPdKzBmQVc4Tq/d6obOWcTkR7Lu/Zuzes2KHbICg+Oc39mb553pEOiVgg9ng0AROqg7VkrvS2Bm7VYM9SnkgHslQ4551YXtm0cAhgAKnBclhjCwy9/+cs/+/Nffv/dRzMTg10MCJYQRex8ueRSDmtqjobaJtXMas1aq4ipgIgaUi7l9fVy2S5mCCLhEJ4eHh9Pp9PheFwPKaVIbFoP62Fd1/bkombCZmtMAFAYS4EsuUgFK4dIcjy8T49xjSFFH7ECJkzk807V69pMzRpaqSIAk/fCiohZM/IxLp+fX//H//Yv3334EQKDmIjwYTGpplZ9YlPHCQfAum2DeGa69ZF3s5HQSJTbXnj2ioEQEUSVFNt8LhjFRAEQ5yTkcFRiBG2hWL+dlIKtspT5FpfL7xtj60OhEBixllJrRZEY2B05qyK9nQG9t7ZfJExlyY0RvD9wkvmqimp1avSF7jTSlG8c63AT8iBrFGNW4VpxU/pLDQZsdmYprl69DGQwOyKm2PJq7pdp71wbDNIc486Ao1lj5KJFy+y4hWVZLpcLIqpcqzfdtm4uEAXT4q9UDJfVUZUYsZcTwNUI8G+YOQRCxBBCyWZm0t1HIPP0iGoVQWobLCbq0xSwFz/M3O6rg1436lr5ChlwDYJqTw+6T8jMAARGKE04ppS2bSvbrirEgKbUMYXUFNTAqoEnCVREKlbtwJE+H8CLFck7sTowvaoB2dyyYmbMEdt4HNLax1MCEyEzUei9i4AV0cSg6D/+8fOf/fTrdHyUuqsAQhUpNWcxMCQ1VaHYWwcVAEA7lH8L/XqRMVqTmz751MxAvLsZoXWnQ62ZRgM9mhNKIHbQUu/PNAMzrTXv+4YkIjmXXQ1ChBAp5/Pnz3B6SPqS3ZsC1FL2mst5PzMAXoKAkan1QKOZpVIFLOdaShEA5jUFprhWgJ//9Ju//PM/g/prkB0lPj6c1nWNR1pTMrOad0I0UzIIkc7nMzCb6xUOBJEwUnA2MDMg9fmoplVKKVuVXTgX3MWySNGmtITaxLvRiq1AYOCDgF0FOx12rUgEaKRt6nsTD2YEhIYtIphNhQBjjC97FRHqF0CmEBiARIrXmSQOIVCIjHwd4Dn0351uG2p1tjFGgIeZ0YtVkIu6cdMLjQzuAKl+6JgthiHi3/5qtm+++P34SWcHBsHBpKrezmYAV0t3vo6ID33qyB/dwmiDRn3YzoS4peAE1tqhHfKbmaX3/YK3QxCioiIrwF5yShE7zjUAlFIICgAEdtBLUy8EUS1SAzEQFqmwt2lMwASEQBpSPKyn9Xhw9g8hvLy81FqRaVkW5KhAVosBfnrdin28ZDseP2MHD4wxhsDn83k/Xy7bbhSX4+N6ekqHQ4hu1gMyIROHEFOK60Ix7LWUTx9TSjEycShVPr88Rwy0t6ELxLyYZVVEFJGiUlSoT631/6782AzcEOOSDDGrFTMhULSihl6oXKsJAPIYDH1HFT9EV65BqEOOD5K4E+8wl3iYOpG4tnAY9xB8akx13maYEzve4WLQUMRBwThGMhJt89+doraSCU9mVdVjxCYirWvOs+rDxAEzA1OBN3Dq48m7jmr+59z0243da07Pwwd3VpRN3S8wNczPhvu8XNR7RebA67Cz+5XHr9pnUwBsEFzT82vPKgEink6HP/3Tn/3VX/3Vb371D446gx2hbmm1PF3AARqCKlz27fX8er7sxDEd1vV4gsC5SrlciqiZoagSUFEiUSRAQ7Xn8x5CMMKqJoC56F4u55z//rf/x8fnz5ctp8MaVqyAKAKInGJAOBBxEKCihsXIRD5+flnTlutRjnY6riFQBFDVfa9FqoJBLyIQMAaIMXIIx+Px8fHx8emJ1hXQExX3uW7sKJ3XfXHCMDBRb0tp519dv1aSYWZXmlYDU+ilcfOgLDDzVLDXusJw47txDL1ibuYjT1fO9OO3aTeza1tOk/jYyomh4Y9eSW6+8t27OxoF9IB+VYfPBvX/YRvKSL28lppuw9mZpSkPeccXcIuF6DVv1gu4xmmzMT2uSdTACMcbaYdjdVvZ6/q8UcrvINPYM0ABax7F3ZgKtx9BDWPMWwlmITIz/8mf/OQv/uIv/t//9D9br0iquexVPGIkBufLvpgmSCF0a5iCEuxFVFWLlNJDh0DI9O7x4XQ6PR4fDsuyxBSJIxICrGlhQCu1+f3UUotweWVmZCAVK7nUjcFOh0Na2JiATBFUq5p3LAIdPJdgVlGhIqIYKlDJmaiVtnZbBhDxfLn86OuvvvmTnx6PR0wRYgTTkjO54zQWH1rmwDv5bcjIbpOMkvVBSW0qSaerQKRtfwkIHanFNaypWYclTxQYycBo1GGqQRUHr72W/Pa0PPYq7hGdmeUqduYaMhkMGFA7fRJcAW/sbdIPgFuPrs0XB4DEwYOKMOk+Zp4RiW/l7fUWNhr0pgJ1nNATdCrVxp4DdJXt4DRX5lIppcyKBm/biUclpjaQwvZ43EFlZntPVYObTUwRvqTgfcazUbcwWpzgmrUcdT7TflzxgkU8nggEwUygdSECEXjtgWgxs1prKTtWBNCQkqpCB8kBAAcpbRZYD4sOEFEtVVWtinGXh0xIVFWCSItaqc1P2PcVCN096qa+2YiFmJk/p0oF9LrZXs4EhmbMgTCgg1JCK69l5mpsSIZmCIEj0QhcoSkCASJ7AQMQKyDFCJzUSgX7+999+82Hr57eP22fv1UFMJFSTdQMjNHcB2neCvYtNEQDYkISMEYlIhMdATwwA9HadqS9LABU02hIPn1LGwiHGQDUWjVnj0VRUbls55fX8+t2yWVXrcQxxqAo234WLXhWM00pLcviCatS98vlsqZlF7nkHELAnr9l5lhyjBEockIRraL1fAbOFNPpsH7z1fvf/sPy+rLhQY+HeDwe1iOfHo5oUPMWiEMgVAPUTwxEpABmKKZmxRShAqVErg29NFesFClZdtGL6KVCrh0nyZm2DWZAM9A+un1wl3mvoJo7/3Crp133dtmhiD4juKoENAsExgFYPYlp6FPRSwAjgpxzjNHamPpuYtqU7+9h/jsL8hp7QwQw5oBOF0Sj/7/Zo6Pazm6eGb50jLvcGaPj/Ldibnz+oWPc2h/JcARQPPIEdCtP735LLdjdTAciisTQGjJhOJbzIxEFMbXe7MEU1QtmwKpKrer2rBpU1cuWHx5OcT1IuZDCui4Du9zLvwUBRKr6bAk5rgtVynUPVJZlWWJCRGYDgBiWh4cHRyc30BACBg4hLLBUBaBSVcVMwTid9grffX59vmS3fhgphEAMl5fXnDdiXA7H49PjejyGtHCoiKhA6H3SUUOK67oa4fl83nIWMMWIrcJKEi8uJBUhhLBUoR0BIKWEF0TEQ1pSSlKrV4s9HFcf94AxGYdMIGDAJIqoikyALXhvAtha8W+corv/ztQLAC7CzbDX/3rMTlMMflqtXhEH6Dg3zU4wVQVsGDbu3ssE2kZEPinXu4OaI9cFeyP
j3itCbXCCx1yDtK7GBvJORiEErTeU7JwGhleYtH7AbWZmPA90o2Reh7FWo/DEr3+niYeDN1/h7UX8GBbDCBVJr0mZmentRQCb/Kc2n6pHtbEuy/KLX/yi5vz9H75/fX0tAhSwiBCgxlhqk5ZK4BMFay7Pz8+fP39WsOPxeHx4oBjyXl8ul1IKA6eUCOwQk/c2iJEa5goRAbkgchHZS3m+nD89vzy/vn78/EmBltPD+/dPp8eHVn2DaCaAFOJC0SgtGCIvRUTUPqraed9UVa2aCWiNMcbUQtL+7tLfXc0k5/P5/Ho5r6/LQTUsC/QqpLciCImsF2oSESC26jUicM9tInJf7qtf16Tudeg83ArMIaUdOBE6pij2EN7YuDuROH8ehGFm0Ftd25N0PK07+hmXnS817jJISzrKJTN74/f0AM2QGLmagYN/95zjfZvB1nfk7km0t0VZ97iwx9RU6xzrHIXW/vyz5B9qcbzvdeOG1Ydt4ETr9WojawFGOggAwEik1B1QEx9qze/fv/9X/+pf/epXv/qnP3yLrdkYailERBhEBK8Zy4CefELpas7cGaOYkjFHAbEHiEtMTARqWqogKSATKAKqVWlmGxlpqTVn2rcsUlWqagVRxrjGuKZzMQERADMhROsY4qUaoI5uEew1gUVqpMjk3ULY8NqQq+Zf/OIXP//5z8OytMSCN010fwSgQcKYWevbnvZxFkfz+o8TVMWjJdD7OMFD8C7ozLiXWjsU3N2VYdRB9F2+CxMkH7A7ttjRQb3v1MxM3W1EbwcGtGmcSSsIn+j2tkQcAED0GhDUURoAQOF2Pp5PRfZ0Qj95MktuUoszV9ptKHD8dWZw7sgm1gMxY2usY6BM17lxjEcLrrQp8T04Pqab4tQyDRBERjSFKDBQ06My2Vu+jX3VrkkMB+lXVW9O67sIqlUVzcSMq7osBQCPuAsBI5pXuBCBKjKagZheIQGbO9iAXoiZY4xaqv9l+NOllAG47GMIhwHtfVXUstUtwyAiMcaGN9iipN2yNMVWUtzUSdtIac9j1kcpOWx9h58DAEMyvY3UGpmiopqC1xqCmJKKIJEKswoIGoBQSIJBUQ35H//pu9//8fsPjw/io5OIOEYEMNECpA3qSchBhL2IEyz0sOC0/VdLiIisjfoYIrIbTISKhGZi6nUo/gq11rzXLFVEcpXnl+fPL6/7XmJKT08HCvzyfK61fvr0kZk/5fO+7yklL+NcT8dlSbnaLjtv+/MlrzGBmReyppRWoJgwJaqGe5FcqyEhUVjWEKtISRz2SABaaz6fXzgkrTGFAITHJR0PCyKC6RKCC69c65ZzVREw02rCwBAIEGmXUlRL1WpQAStgBauAasZgAF7U3huTbpNjOIKrLQ3XvQ4KQ3x4CpEBEC14ORo2xFImUKTikJdAbqKXUnJGQkthaHf13ihsMJn9yrOQelNc1BiYyEx88Nr4laqKw+21J8Qh88wMgMzuMx6Nfbqg+WeskPm4O+3+JwMM5NZJmH/l5Or1jbOVgNfjKkxdG7VJaF5TMdIy3l6o48lJVbwcEUMTVl44UKQSAYKKCpp+fn159/5hSYmgEkXRqi9DWCNzBAAzcFTYUkqJkaiaGYS4Mntdd61bTCszU+AhzUVkCVETKKCUAkTMHOICAB7Sy1s9X4q/XfCXirzvGxGt6/r4cDoejyExkDlSlJkQEQYCJYrEKYYQDCDEaCC5llqrqhCRVDLwQVlARHvOFAMifgihSM05byWvpThIr5nC6ciMyBE4bGa7qIUUl9X2jMRMHGNc15U9yKUN5QUmPTdI9C2pvCEn6mHat45T23KaysnADR3HGhAd1mEIIXBEI9VmjlqPjvnFzKxWQZ2M5qnMDLTFXK9alklBsJmMzWwBROtjjWaC92+8bWMIfCLyTorRoH9H6vam1HPmjhEnHtT+dhm7/DEEQEKiFo0a6nuUA93yKfYiIkAkA3V0YkeLGI+HZB8+fIA///Nf/Nmvf/WrX50/fkY05ogGodazGQOaRWEDqTnn/bK9vH4+n8+HxydeDpSWXPV1z6973fdca2XAFOLpcBBDg1DVNiyIyKbMrEC5lpfXy6eX548vr+ftktZlXdfju4fD01NIySF+Tc2RvQ2JgYHMIHAo1SyEuO/bdnnd94tpBiuBnkJgb9tm5qpa21iX1OrpzRoop784E6SINX9BfHVitdGMemvY3Rlh3dnoY9N+wPWad/96o25NodvKb3oO26E2Kutuvveq1El4dke03ct6fHBcc/Av9HzyeM47RfOWhsdnug4Qvzp7d+fP38x2sPamLAAQ0YYf3nsZzKyU4kOG75hunDYrC/+v194PThlvNIugeek8oOO/mL8vRVKKACiSL5dLSuvPfvbTf/2v//W/+/j/yJdcqyIyMnhDGQAgmIKpoaojxKqKVRXseeUQAhHE0BTxcq6RWErdcym4lRA3RkZaQuSAPpmaiMxkL3nf90OpIlJELND6sB6PBwycS9n3vYKAdw1wCMyOgqtWEciADEC0gXqIGlGgVkDF2sZiMREdj8dv/uSnh6cnMIVam+fGPEUoOta3mgFQx3y8boEBA/pARZdlnYScIczMR726HTVodYBHXpU9AWi5mZs3CAbpKszhi8dEnPN/3Ym4cqLzLPTnG/zSgteDgFtoRqa5i35B7gAQ3W90la/6JfjQt//EHstwoV2ljtecpYp173eIHf/njCbtQXZEdPOmX+eGhWddQ0Q+kcjv0gVRc3n8m2Bm7mV6oQ4AAF2NMO3outPv7z/P/Ea92P16V6sKsbleDWFfAPq8B0AKmFKIJZpZiK1F0CbwiRhjiBRCKFVErhEjf4zD4eA5U38G7X3SKaWm9QERsXq0TBVtLIchKECbisaBzMRuX7AaEFHtznRcEjMj2l4EiImlNgiD1p9digihNgfAilQwx772OpPGL6pajU3FCmXly5bNLB7Sx5dPv/mHf/zmw7tHZFWhwAsHRKx7bvehQCRdkA3141EEF9Bze7pjqbtRdQPr2lxlZaYGiAJgqjBGZmSzLZd9y+d9O5+382U/Ho+Pj+/SujBFjs/Pz88+v55Unp+fG9swffjw4euvv+4phcq0Jw6gJlJSCIfDgTEwF8QXMRURI1jXdTms+/lS7fJ62SkG719FxBB0WaGUAlpNVGJRjYTgjr2/OcfAKeRSs1Qz23WPsA6veMtaRI1YgURRwAFDzDw3BybgU9TFOg5nEwZDMoxP7cNV+zIgIjBRRFhiJBCHNI7MjCC4aXE0bwigSkPdWo94emEiEhEjIRIH0j6h3t4k9G2KkHXldzUvfOOuMgWv6b5JvMIPHfYleTo+3ElhmyJYd38dwn1UN0EXgqbe1XtFZfTIYpvvNLmCAWl3UJkqhiCEpZTSQ05MkeDaDH33/H53VfVYiUslcCB7T5kCA9Wc6/PLy+tli3y0hhhHRcWkmBkgO2ayEAAoMXigUAwYKS7LejymZVXVnDcvx88516qBWA0cptz7YliNzUJckETAMGdEVfSko5hZ5BBT3XcDk4eH47vHh9NpSQs5mnU1BV
BDA4YAJELMzJE40IopRCqlVFVFUwQ1zfsFAKypT+XKSVNKaffWQS77vud1BwARr2VaOUaOoRjkUrYqFgIShrgIIlMAgMBMBoysBHyNDN7kAd6aX+M0YmYkUkEw7yANwetEaIQRCSORj5dH6vBLNiUlPGYHt6pORKCPm6qNxNSBo2ut3KPZI1THzLWKASIyUfAcY4+VNlzgJj8drAWIQrq1MAAAzSClpKqel+1WUEt3zNzaCfDa7zQ0ZteSV1TtWbEOFT5+1Yz7XgsN4CM0uk8yLfgwuBHRlKwhurOHX6EhuCL0vE0p1XOMT09P/+Z//B9ez8+/Oj/vNUdVolCkIiLHBZDVcM/l9fW8vZ5LKQaEMUGIFbiYCgYMEaqUvW617EWLaMly2fMSU+KAACiVmI1wz/Xz68vreasqECKlhZeV0qLExVRNjZAR4nIQEVGoPspeW18gp5hMpWaPhtRazJQJvDyvqvqHEEIuJcSYcz6cjo+Pj+/fv394eFAEqxXVB//dSLBZkuAtMAwiQtvu666NnwEAduyWmUe+eNCUG7xerUn1ezFNgFVncGkAAGxgjNeiOGhJA1VV5pv5YzOHfvF5cEq+jVlzIgLosB3qdfiuNe/GHr41f+cvR9hovp0OmMduvo7WKTelmFn1mpDwBLt9ycEeJ8w9zDCZ1PML4hRbubuI/wQI4/FQtk1VQyTVuq7rX/3VX/31r37997/5+33fj8cHZt5ydelEymYoIpWg9YaZQ2eCt/ErtByVJ3uWSJEYEVQERPO2ZxOrsqTgcn1U2Hnnc0E9HA7LmozJUWe2Wj6/vgh6up4CBeCg7NExU/ChaewxN+0h0xgTEyOQR4jMgJkA6Onp9Pj4CExWBAy0VgMLIeY+h9BLP7mRJiBf5/tN1T66Ho9jO+3W/2mizHrMvW29V770LejTsGZHFHpQ5kolb60OuY58GBQ+RyjMruJOVUMDW28RsqtDqFM+f/IMHagYelIL1X9kpnVcU6v0u98MYxwnzEbU/C5EhFOKb35T/zzqqGePbDiKzA6FrdoztDB5gK5eO+DLEGvqoAwppaYp+qr6xQNTRMSq4nAvIuo620tDx+vNrNUeugNK+bblnMezAgBe07GKPcIAAJ42bNWXJoAWooPLYOuanuzgeZlGO/5MKNh7AEb8bJTgj+zzrFPHoao9Bt2IL8ZogiNc125qYPHarD+JrWuNaOtfcmzuPkRIAMhMRclrsYGJAoD253P9TYrkM6qlmgIiL9/+8ePzy+X9jx60nh3/mcGhU1HBkGhMvDAzQABTASSrAJG5jXtkuulGU9VmSwGAGVRT1RgWo1qBxsL1pAEKWCl62etl23NRNUxpfXj8cHp4YOa4LsvxeDgcL5fL58+f91KNoxpWM6j48Xkr+j0zHw4HrYImjK19NjGvmyowIroTIFLNJKTXlNL5fCYiEUMmNdvzBoQhLfu+n8/nyEiAKVDmTAhkkFJQBECKTBgYiTVDKQVQ0KphA3kXkaomBmpUAcSw9mUhMwLLKABgYDf6qhM5WZu/CTeOoVuFiGRMHJgTU4xEio5QjKaBCUBNK/NiZohX6SAiBVrNvfPPnRDxqAr07ginOu1Ihi1v34PLRYuqmrklZEQU+uCHhojsPmG/uP6AU3jH3T90jL/Op90pgPHluO84396YDv6B3twTEVtjan+Fzv7Dym+CnomZWfpyDTdj/NOZtLkgITKaVUUqey2vr68pYAxA0CoSZ9vaxY7UqlYDcZFqZpgSELnMFBGKIa4LA27bpqqcCAFFdV1XjhgXTaVetgx4Oe9Zaw1IyIEMBKmU0ubSGtRSUoA1xcMhrksMrivAZQmAQ/UCc6gcOWrw0I8CCGhMIS2x1rrvl4u3Kw/9ChYgtoHtISBizvu+74h4uVxeX18fH9KRCFVzlT0XAfCQMsXkM/g8vqZVUggBCfVm34cOG0bkTADQumsBbrnLzGp1ee5o1UboPUpmaHdnqqrqtf8NXMVqVQUplZLH2UxNW8TtlhSdbIb6qEUpNLvNHXLwwgpAL57pGqFFT0Zo5o6G3eefia1JFbq6ysMUHtp6rNiVX8i6heLX7/wF0Dpchk+Caqa12nU2NDSF3p/qZl9ah1q7YdM4rZC2mxoeVhsmBSL+t//dn39++ZRz/t3vfrdvNZp37oOZVVMrcr7sz59ft20jhhhTUdtKNapVzZgpJaxmXInY1M573S6fPr+GQ1pSCEwUCYmCIuw5v5zPey0hLsu6PLx/vyzLsi7EjAzMAQ2QWhVAqVrUtr1spbiXUktR05AiQQIVRKyStw15Wd/aKn4sy3I8HmOM1dTUEBFELLZqoDu6NTNmHnaDTqH6O3ENXR5Bd8lgbHH31uZ9mY1au3Ms30TlXDjMQ2Xbfbscb8nxKQFiZibq5tb8UmSAo3P06pLdxCP8w5yRAL4ykZn1yo8rrAVcAxlfyHLMRQTj/PkBhmUMkx08+OVOGpuZwy/dLRFMfYnzT+7ea1zfeg8hkuFYwfZGDFKcO9Z1LVlKKe/fv//v//IvXz+/nvcMDU9RhlBTn8BpYhZjAiCk4Fi87kOrqjXn1uiUjVNaUooxESCYaKm17FbFDCpWaz45pcMCsDzGdDidKPDLdnm97BfRgpbVFAGJOUQICX34OKp5mYMLE8WKagZiqmCRXUxhK8ZHUwEBS4eVmUEVmSEQdCCTsQU4iu8AHVhh3sd5+2BiIhgRCoNx/rUDFoCQAIGb0LdejwWtKmOQeqvpUK0tc4jX6/f/63s3HmnmvjmlbGZVS+cg7w6ETkXUOn5tXBrAjLj3MFgTBNe3a9t/pfYRcbg5YcTmbh9ypt7ZdRrcNygW+pSj8XauuVTVC/i193C6KTiT/axurrTRL+XSRXv5pIiE4S9NPH/rRE26fCgPVz9DC7bv7ToHYzxT41EGMxxR2HEpEaEgiMiAVUutxq0B97qUpRRRCCFAx1FtP/QmkL3kWkop2DFpEL3rvR+3niQiwhTkHj4hdsEwv+ys2sfKMHMI1qal94wcITFHZq61Wp/Pg9waOnsdETowjAXzfD0ihhBDXERLqbCuh/P5ebtk5ggVVFWKutWo5tgJbd2qCTfU3h4grzWlEJndjB4F92NzB5k6aAOy9il75kVEOZdt28BCVdn3/XLZRJQ5nB4ORHw6nR4eHhXBQw4InOKKFF5ezg+P5GDyyFRrveRsWYoRqKGaaQGpCBA5vF6qhIUAQ2BiKOWy7edaCwOq1YeHh6fH94fjaduzwi4Kl21zyPgl8hLTYYm11hgYQ6CYQFVUAZGQmY2Zi9SFGEBFGkyOz2zfRQtAFa3aM+GmCEpzyAMMZ+HV5SCAsudbWrehS5MmLJkwhRgDoDeGkrUZCQTUB+6pthk7TpY5i2DTbWIG02QUVTUacHYOyWieJJ8kUuM+PzojaDOsKZBpbcXeOJCfZmEEXzoGedzJ9FHJ88/85It/ajEIVeoOg9YrOk77+WwA3YoOItI+ZJynMsIwAMRdrk0dLO156PpPEalGAqb9GRF9JCogUq310/NnJH3/cGICVaWA7IPX7Vr7UU1Bt
QKQmrFFiyLycj67bD4eklljI0djQiS/BxFFYgUsVZkjYhkBGlAjwMjBxzgj2tPDkQOcDuthTSEiozhemRL0eA6ZKSA6+N7r6yvFwEaWDREPh0UsAllteb/m2zC3GBbHsCxL6FgFzFzK/vz86dtv6fHd03o47QAVKa6rxHiRqhxEBAIDQBzT2bUihrtNHB9m4hlfegGzApoI1CoSa60VHM17apid4nXNwvBWwF7RgNg7p12/Apihy/yeDIfxW5fSJleI2qFHoQdcXSmMe+Gw5idq1I74Pz8b9q4Bl+3uno3vvWt/eA5DGc+fZ/pE0lERMBYTp5DtWMmxXKPLYz7TzBBvxqP3Z+IeSyEAbTLGY+utIyPM9z0cDv/m3/wPRPT/+n/+z7/9+99Vc4BWqqhoVErZzufLnkVlDUuKq1KoClarmQOyoRFTTAHJRGvOuVbZiwpkzgCQHNgcrIjtpQgYM3FaYlo5MBCKqYo1Q6dUEXNE7r3KtpdLztWHXZkRYSBc15VQI3u52Q4hDgHCzAOcYzmsDw8PDw8PnDrYW4wAkCW7FTALvbG80N0SxGtp2djrq1XXUXaxT5u0Uag5+2m3nsn4PPrxQGRAQtitxJ7FLLXyOz8Hbyi5H8OgHFezifAmYmun3RldM0fPpOhqDoG0r4y7lNZjGXf3HbcbzzY/pz/UfIL02WDwRhnNimym2HGm9cDNOHPs0durATQtbrMOAiDCy+Xi/QIAwMyXy05E//2/+lf/+IdvP7+eVaAUzwdwrdUQa2tJIFUFjI1wWr8DkgtwU6mmWvIupkpqGOKSwpoWXg8gh/2yIflsdEgpeYGbqq4hGEKWWqRe8la2C6SAMRgahoQUkYKYl/iBofO4iYD3EBqIS0QAEkBCxzW0URzRlrRWiAFcfsZYamk9hAYDdcWJYJQ0ExGQW16ektHGL9bbVKwP7uyybYKwAOg8h4Ms8Sq4rhm5iYoGTcLEHYwE3v81CXA3eVu7rzVwELOeh+qEQd3BRDUOAayPQJxod5b/1L1H66Asfv7sLs10OEju7pnH9yIypkPPvDbES0cz+UIlaotFwk2oEW51B9w2qxMR9njrvu/te7xBYQiq6i372OFTx6JNa8fdEQrNuTI0uqYvzZuADUIIIleAZhsOocdZJ+aMkV2Gch/+M/MzNH3ZHwOMiMLUH+j62OzGHPS+QW8+WJbFXVkvo/PTvPFmLK4ZuLIUkZyz1uIFId6N7ZRlvWtl7DH1ih3rlaJVjQko3JTUMsX2di18ambennnTVEqB47Lue7nk/fT4eM5y2XMpJSGrllH60nJqyABtwiFdTRBQtSKaUmB2h7o1TyIoUbyj0cakRKqdQQBUddvL+bKbllrr5XJxPPT1cHx4eEgphZAent77dXLOIoJMx+MxrQ9E9HreSikhxVIK0kZEl9czIoJDF1dhxIoZEfm0ihTddoNayl72i2omotPxENJ6enpkDoj8+fPn8+v26dOnd++WC6JIJKJatIaKYNyHelURZMIYMTApB0kQdhVQKUhtTqSq1qoaTMHhZ7q1BMh4owzsthgG0RDBu7UREdUAsXQLwEAQgwc7YghYa5vQBVpr9UrlyJQvxUQZVdHlUqPWWisRIodePcpeql1vI6/QLRu8PYjQriWU11g2IoKBiFgr2m+kNsXVvuwQzopz/uc0wPRezX/xOv2WnZe7MPUcHfbCJO1JziFtZ5vJhi0OqpFtOlwKMbNKnXiqx7MQfE6Udeu/1ipSWrdhk11aVHItW94/f64IsgYOvHgVvqN4AUCIxA3kYCGILkARfRqe7fsOAACUIm3b5pNsTLTmgkgxxvxcgAIQF9Fa+wRUMxYtSEICnq8xIQMk+/DhEUyPp3WJgckAFdSRL9ncrQLxTigXQbXW42ENIez7Xms1hBSTmRHCtm05Z3PKCeg1CiNQ6oo2RnZP5g/f/lOu5fhUIcR4eliWQ01pq1JKEcKYKHBMKQUiUJNcYAkzfd6R053mczmDE+UM2uC7gwIim4FMNO5+vCGoKI8Rnv1GzAERc93tVtFitzirtfIYxauNSEQhJAdRa0qXGIxAb5/fevVpr1Wb3xER13Wd32j8la699DYzgk3e4HD/RIT4PtMyq7N53fy/o1fCr0PMTthmLdHR16BJDGtl5A25pwe0cV1XH0U1vBEiIqPL5eXnP/95rfVv/tNv/vCP/1RKLaXkXFOIIial7jmbWQgprctyWO1woD6DR9z7Z0rLQUUIG6QjigFBEau1vuzPhoDAGFiJOcaQlhgXYDJiQ8JAAQHJpFQzFNFaa6m65/183p7P51wKABwOh8CEEZcUD+tyiIH9V1Pk19cw52xme8lfffWVT/hoTpeq1UqxGRU09VojInHH40b07Dyomgh2R3Emdevd+9gpxybLBKaM1ni2O6cRuy0RU8DJXbzjrPZ5ek5or6KD3ca7D3ZQVZp0gN2YdldW7XTbZL7HTeQ274fdfhWRrsxGEabd3Xf+4VsTuZuIHTWk179QH8dCdF1e7MVZPt7gjt+xWzjS53zOEZlxvk4lo1Xy24v4W2/bdnh4ALH9sjNFB4r/xc9//s033/zDP/zu08fn1n9IpGbBA/QqJOLNBSEEr1hDszmdMrhMVS+XS8a9LhEPdlhTDCE+PCAoem4wJSLKZS85l8tly7un8tbjSfdtNyWDtB4wBo7JCKt3yKuYWUztTQfUKoepateoRzeuEShv3nFMFwoBQrCSPYXejB9PF4mq+ny8vilvxvCMBR/EWaUGAOtSzrq3Y7W6z2yDTfxD9+tm8jYzDlMJ9MxQ4m1u5HzqL++OHNk1QjGsDp7Rg0c/PPQcTk+KjhtJNzOIiPgauQMw0w4QQtjnIIOXmnVmuQEJG1/O0p74hgjvSAUne0lESik+gntwEFObQDi+HDlA/6Gfrx2kE9HcexoaBKndyN20YHaJLAGhVggQo8YEgZM9VzXMCCkhVqlmiLQYkCGtx5OI7PtmaMAIjERYioYYUkr7bmYMGgnATBCt7LXsBIZkCcysAVhboKhmda8ABJZIgSF47bOaRg6BOF/OZnZcVlUV2Zk5BUY1FAGrAUwoklniFZgYGA1TWGqtpIRCFIjA/TFAxD0Xo1KtAFtwqaGiCNkMu1YlQgBSFDMDRlGyKjHyYVnU6r7vVgubWlEFDMaJk9SKGMiSFrKKVhCFAYkwMEd3UxUBEFM6EFJRIMXIqwLE7bmcv9tNOYV/vHz71bvlsn1i+zHIZgGey+cXveAai+xY9hMd99pjYECgrZJI0I6HRRVqUWIWAx9hF+IyJCIRhBDIoFbNVfJ2BgreJKxac6lb3jfZwWH9bQ+R3r9/fHp6cqDUwxoRckoxS7W6p6OSWoJQX85SDWIpIHu+QKkRTASlFjUoVS+5qELgSIHN0ORbM/HCMwRjxBDXEGhZD4fDcQmR0N49pkAHk5f99SIlCiqylSKfzucs9Xg8ChHWmpZoSHvdUbPLtWUhFqugYVmysTA9n881hnRgA841G1vEcBbZlRQomy2a2qxtQkMVUJ9Oo1bVkAAJMBEGJA4EAKsaqJgq
iiYtB7AT08oYI/v8CkIgBogoihthDFjB1LQQM0AECIGXlFAyYyYSqdlAlW0Xo8MqFyEzUGGUBWrUnHMt+xbDKoBZQtGoEkCqFQkKidat5FKyMdABi9RaRNkCppbhNHBciUoVAPi+Nvaqru5MHJgsjLtjtk5mEQYAFawakHkXO5lUARFTI0wpsamAcQwYHOBRzAxQ0xK2nIlIRJZlUVUpGSkwo3EQJDUTRCbatk9EBUxAoBTbshyWWFWKZmRTkaoKgIEPidcU11I/WZvF5VYaG3KVanRUgn/8fH5WrWs42FIqlVLe6XMIJCK6l3BIAUMGc9dKFM4ipGEJayAFq2sKuOy7iQEW1L1KyVoEVHC7VKkIgBwDYZsCDACgl5CiByeXw1prVtWH0wFDOK7r6XgkJDIKyMEAKkjAkncvyC8l7/uOAZdlefrwPufMjE/vjq+vr2AlxRhDOp7WbUvbtolUJmIEBmQtB1twz5AihnAGLSHoT75e352+f7XfIx7S6d3TezmsFzWqvKS1bhdGCWVHo1pzQRIOwolQh/rxuQx4W1Hirki39nSxihxUFOOiwFkjhBOGgmS11rqfySSwGahRQOKouUA9BHi9nIEWKRI4IBEQihRVI/Yx9AQGKBA5oGAIIRKR7poLVSKxYAyGvJ4YQ9lKrqgGj0/vXz8utXw0egE6c7C82/LwpEWzfKJQGBFwQVjMSKkalZgWG727oh4lQqKiBXqp89D9iAgQtA2KazmT5m80R3EYB55tIg/QqqqKQ0UiMQd2bOqbuO+UITEwUMFugYMZjJ6xzrmtxQUJexWrNz+7kWdFBQOLqtSMiMimIGJ1Ob37w3efTk9f/+xf/Nn/9u9/VYv5aKhioABVNYsA0XI4pOOJUiIUJhIArWqgqAhIQFKLIqI59JdodqPEQNPXjEAEzLYEPBzj0+l4PMX3x4hoPhNGRHKRWqsZfvcsqrpt5+eXT5fLq5mGSJFtRQpIh3h4Oi3H45HdchIFKEXFDJEIKFW0TaECPp0eJS70cMqIZrIsS62VlwSSXUGWqYOOmE3lasBZ8y1VFZXGXuCU5UiLhxjEzIgRvaujFrFrOGDYfGpK7KD16vuJiMRIHK6Wbt9N8c5nvnaVS6uDIEQEEQL0YRjXB2Ke7WBENGqmNoMLIunzBh2RoiCTijJzTJEt7Puea2Fm5BiYtRpaZiRTERUMaWBDtihbo3DumSTtpv5wAB3gRKsYqo/jAkBDiCF43bIBoH/uJD+6dfxSTBREtnklxx6NGP3YqTmjiJOP3aXT7AleFV+Wz3Gl7fJClogXVWIiEXm97P/23/7bnPO/+3f/jlg5JB9giCYqVVuw115rjS3xYJEZ0ZCE2AJrtb2cL98WYOaIgRTSljbVdYuMHJlD4BhjMNS651pEipguyBZS0fKybZw4nNYjY1GJiRRU6sUMTJUUU3dpRKSqIhmHQMGAUEEVPUKsouiABcSBAHULQRPTUq1gBMUMW2VDKxooAiKoVsliVX2cqUXkABw9ntI8cGITAyMcGb/umZBV7z8HZkBP5hRVjYeDQzXYqBNBMLMYotUyyjqwIvkFFW/a/HxQEBEEv4b4aBS3S8hFoM/wHB6SO3VIVxxRAAeU8A4LBABGn10uvVAxIAEiMUEIYGbTVGSnnAHo0trH9JrZG6cBeKrsWtnhLhkRWa/5JyT0Hh8yRJxrW5gRQAHUs83uufk7eUMHTC0/MGUF0YVXK167Vi7EuAwQGjMjbOi+qnbVItpgUYoDPM7aZXi61JsUa71iAamqT0X0iv+rDOrMScDMxBTIANrkUytlYybkK/yJqhKaAVjPQZmZ5OxwcK7SeqoEJne6h5263nUYcRg5wxvvGbiFVNvkDJq6jLg13qhWkZYAv3abFPMag56q9viZY5BaJQueP0fpPWB9Vf2pSmmPEdmXvoE7E4ZAkSkTMigU0ddL+fhyfiI1k9dSX2uJi7cgghm2pUYspRBCjDFG9qZFVVVUpIa2PCSxk4XzHRGnFEII5/MFzLxLTtU8/LDve9n24/F4fDgxBp90SQSHHgautZZa5mjf6XTa9x0LhWBApVYpJVcFkeJXJgNDa+l5hEvOiMjQ8q7HNa1pSYEfjiszXy4XH0D/9PREREtYPn4+55xFyhPisixuGQMcmb1YwESFiKp5mxBUyVUUIlUE7dFHRAwcYrSKaopcKokMZdZ4aZrOMHbZOpowEQUOjFSlgICDUhFeE/QOnAFgnnshDIj5Spxwk3ZrtAGAHQZGVUVMnJn7VNPaBW5KCeA8FN4ge0DMOVepqmogZtjieKohWEMGRvUYX/vdfylD+F/88p8/rip24kr/RlVNqjfGMbNjHcoMZXwfSLvZjrfKG69NXHbV8U0ltdOYKLSQcS9VwBaNVoBa68fvP1exw+HAkVJK75KPNDBtIT9XVWBmgBaQYozLsjCKVAOAEJIqqJiIlSKvr9t2qbnox48vtTjiK4cQkiOREhHlVBIirsdDSmuMsdZKHJe0ImLJVQl9yJ45SBK2CGWtdd/3UkpMjGYxhjUmiuRFSmZGhGZE4LeLANGBlEkMzIooleKleFIiiqTD4d2yxlOotTLHZVmoJ95JKcSovRyjGVj8BXr4L1JIX8Kb4h9D8GG4NuKpYCqC9kPk2X84ohVtHLZnP2gkiccJLrQNZFjkvoCIB+jUqL02pO7VB9KqGoDDToEXPM2K9o5/m2CdctojejKYdBw6qPM2c4K90Zf5ar++vem4Nb3paRzaef7m7udDI99dbfqnjVuHEAKnb7758U9+8uPf/Obvct5E7OHw6NlFREwpnU6nw+EAAHvOzD4MrdcQAjqDXxeqdTybIEFYIjORIuhhjY9Ph9MhUQOqaB27Oec9Vx/H/HyBy+VyyZdctn2/oOmHD49fffXVw8PRVJjZ6+sCUa1VSKxW15XE7Jh6I1+UUkqx+x5qoAJ4BbG4iXzdAnr5CW57SHe0aGCQmtlU1tSWdKw/fGHZv0jbMGUOZ/oZKmYcNz+cnhNus3/zlae7X3nkzrS9o0z/1UhTY4/y+PVLKVfD6YrKziI3KNY3ohuI6Mo483Ld6bVZyGsv2/b04J2OuCNmnIJT2O29ccF5iXgag3G7Fd4i2BQY9Hz+uq5m8rOf/fRf/It/8Zvf/F0VYwo55+SlFjaQFHE8cPAUDl1RJWOMCQmAvFoFajGzgpV8DmHlpBI1Alk1VcemYQYEUiNTZTAAZGLGLeeOR+FJvCbQtIjfi9ht1Kbxu4cMMGSRqRGVUs7nrZRCC6qpmjqRR09dmYGHSADFq4s9F2cCYEjE3Q5pcEpGN6m2wVZm0GuCmj9TKxACEVID3lAXvlOJZtsvN8OYoefbzQxVwQBECumQwyICLbFGRDRGFF59TjPi+87e8ZqNVod/6mpCempx6JQegBhkNsS1qhKF+flnNrTxmtN/75jUrwId7utODtyLKYA5ndlZo+kI/8bRd2eWISIinjWIXcupJAyuFropuRl9nWNpqA9dGEJh3ollWdTqCIQPp0hVPdXKzGTX/c45pxSZEAmtT4xo2m5ksQG8+cf
6XGN/JEVQBL4OYkWjK1sPBQBTYyQMnQ2svRqNiBjBrC09NuF7DXqZWYzBjSQ0cJOIiNTQtY6OKdt0ZYBupF6L+5kxxjiqHWb5yxyZI4kAsQns1b7/fPnd7z/Sj05Z5DXnXe0Uw8qLpy7NjWmiUooapJQoBuiFHdWUoc1q9+J+tQqoyG1FBIxBiSgs0RRq1VxyrpL3csmXUnYMjIGXZUkpxbh4Ubu7nR7BYMAYQqQmZ1kMiYirKqAj2ZgVtZyzeZwUkYGZmQgN4UirAzEjwLKkp9PD6bgGwhQoRU7M6MhOgHWt+XQMW8k5b68bIh4OByPwiWGlFAzYNCghAIjVNvQMwUUCqNNtYLbadDoH0BBCUi2irR4SVQEIWqMIIiIZt0YbA1AfY+Fz4AjMyBQFEdytnWvfPa7gwqILhRs8XgFTNxEm1PirsBBv/Bsw+t5DGBwSCq5llVf72qC1RQWEKsAGpBpsuIJ3RwdfenPcqcwvfp6PL2YO2xM5H02kDs1SBFXVCckJur9xjbjfKPsbkYpX9+/6qP0bt8XdVrYpJnhTL9ScT0IzU0DmAESXc97++JHT8/F4fPfunQUEIJ9oUKu4tOPRmNiexEIgwhiZCNlAiSDFmMlyrh8/frps9Y/fnkMAREbgGKMJuRe0rMicETHuZS8CADlviPh4OmgVQvMkuZYqpQLArruI7PmSS2Hm4/F4ejjknN+9e1rXlGLigI/HQyml1AwAYsl6oz4iIiiREXiwGsTAZ+vZZV9CiOthOR70cgFvLSEfGQzAFICqWTUVqXstRSqxGo72Jd9qArgjj3uqUFWa6tVmNTmLQRERA0BNKX6Zqm4VqnVP0z1DuNVKbnJgSzu00IQHLokIiZdliTES9UwFaAgBSBCto240M2c2iGdoHKdDekNpo8TaeX9QTTea7701b7y/e1NthaNXohusgZN7MC/gP3PMBsowKUZouX3TO0kCmntSf/Inf/KLX/zi7//+tzlnRFKriA4EC8wUAptpzvn18kpEQAEA3KnV7gK1BxM1VCIiphCoFlySl33D4bCcTmsgy3l7eckisu/7tm2lKgCIWK11LyHvl3y5bPliZg+Py1dfffXTb34SAuWyS5+yNWijtJJYRENVBQGs1QjB5HBclmVhAhVQrYiodjP+cVxk9u3HsvtpNtHtD8nMgVQx78Lbk3GKJkz64iZk8Pb6d9eZfzIuMjs8888H2dAEXNFslR4RMLsJgogId0CLcalh2Iyfz3zdnwdn4dBc6CnuYD52iMIX33cY3HcREGa6u5ffrjfEMk5Gs/Ve36E1Bi+MHb9/bKZWfUAALgxIyYAI1nX95S9/+enT88ePn//w+++QmKhZWWM9q2qtSCQcMLTmPQAgBA6caOFes21mbMTE0Yiq6KaV0UQwg3qxkutQ1opMFsgADbWAalVDKFIBwAjZAhEZqICaStl3CiGOrntAaHM1midg6rhS4v5VKeVv//ZvhfRnv/wZByp7sYiEbIQmBqAIBkyOJqNavY3wdjHBXWgA8Ax5pzkCAIrkYQDwhg7nGiITAQNUHXAI1D3DdsUmYDv1Wtc8V5kIDqs1zC1sizuUwj3lA4DeYyI074BDc+QmpAif13oNr8wkOrM/Tj1Z8AZ3apDcUE93onh4KGNVh+ukrSiXWv3xZCONZZl/C5Mo8H/mfC2xhm6j3hmBflm3xMKtEm0T/xxrVRzWoucW/K08UT4c5XEPERloMeN7cidgyiWKCIDB3MrVNZ/f2kV8DAszo0Gt7f1FHbbeAw3GzNoGPFx3vUmNHocYQlyYsCVOFAMNh9BJR6p5nwkjAV69QeqzqhBRFQGAMBgZYUAHg7rd8nl9x047vRLRqP29eoxkABA0gKEqgBAQV9FP5/q3v/9+iWkJ5Zyrki4PjLwwmCmGtCA7hg17Wg8AAjrozqRpCEG8adX87t7/6jLLTGKMtUjV/bJdtr2YYQjh4eEhrYtP2MHAHgP2LmXm1IgshABhtF5YFaRAtF0uFyI6Ho9A+HK+hNgQOptMbOOjkdbTukTn4TXGp8fT4XAgMDQ5HpYlRJVayl5LReB1XZ/ewev5+XJ5/fT8eVmWZY0xxr1kALCCAGAgDAYADjYQA9GIQilVU1E1orznKuAQJO7amZkg+IicRofY8n6E3vgEZEpt3Hwb/ktEoBYCGgKbY9U2wRqAAIwZRcFnwYkIQAscdJZDERT29RsRM3AfEG4dLZxqk4gI+pTOIfLQRwaBkQGaAjalCGYwGnYBFMQMrLVJwT9/zJT8/8cxxEIXr1e7BxGJGRGwXkdOwcQ+s9DEll20mbmw4/QQ0Ri8O6TNbCrBJFh7rLNfHBAMzQwIiaNxVgQVPedCly0fYoTAQGZSpGKzzjUy5mpVJedca104pMAxsQvYGIIKnkn2rTw/v55fRQRi5MAJkVNalmUxAxNDDg5CsJfz5bwrmNRsZs+HVUsltDUtBJq33aow86U8lwL7DiHC118fHk6nFMO+bVuMKbJoCRgPh2OIDBdj5mpJtOZSulWHGEIKUauPpyQkBOJS1Uo1rkYqpui5aryC7RK1GQUeLiciZEIgGaV0wDiFQvoe3RPPuOC8xaoa19gmXzdtamr4zxOnzUenU5d1I/QxmCUEEjFUGUUlrt1qrWnBdfWwFPlwEQYE3KHFINrlB5DvkOez4esTa+cH6+94w7/zcbc+7ZsxOWa82cT4MKn26RbXpqCZ7G2y2sdvb9bs+j244mhy8saecC2/PTwev/nJj5clSi4xJlNblrD4sB8199y2bcu1IKIhMgeOgVCth58BkEwVFAB6yzOZCTHHGGMAYt32V8n7tp9Nqkc5SxEKfFhPy0I5V1ENxxOYqOzH4+EX/+Ln33zzdUrBQFS8a5sc98bjth5YIApiClVN2KB4EOp0OEYmUAMvwwRQ0TFgejbaACDECFMg/062XBd82prpD9cvsf8Tu+g1a3PBryaEwUhZkINbgKm1ymPo17l7BucW73V8a37M2zoTwIgy2G1AAack5+wQzoQ322zDNJrXzVrHzQ0Fzo8B0OT2+BV0n3zcaC74xJ7lm/5pXexcnx8RO9yuhwWHUgCRbD17P3PNLc9eH5iAwAiRzZxO1FVnLhJCev/+/V/8xV/84fffvTzvL8/nFBfJZwBs7XA95khECVIBMUPGVlvn210jtlxYMKBgMRiiERdRRctaQEFBG9QmwrlKSokTI5OYyJ4VVMBiSojo80ja/BlDIChVHNamzTUm9HxMX0ZWs2rq8+J9Gf76V//pj58+hjX9+GffFDVQXAKrGHhrtQFoVfAmdk//t6grdqte+wwGxOsEb9fgRtZV+SB+BDNkBuhk04U2MUufD3mN/FqDRsUB/eVZSgAA4E7G7AkTj9l4ZruP+xjrDwA+6nyi7R6vH121t3J+2GNzYGKmnzv+Uikzn3a+uKK2jl/xNG5+pkCVa5nAEOlEjptzA6pkdg3Pzhw0c+4cQB9+ptm1Ox0nVBQRDaPIUzvCsyojAjN78FIdQqYRAdQ6xgCOmE0Dm/HBmn5XmTO/XpgBan2SKSKmlLxu1a
884rtmXdu5427RTMTQVK3FBAypY/tWAr2KKhed0rEZva3C/TpArRM8AHmsk4iu80YCI/kgFjMjDCFQCEFv5SBhAPC3MICWmpiamK9DVCYqGc6AjB1Vq2YO14no1WGkMR6M7WXff/9xi/HjGnWXc1rs+KSnlcwM1OISVQ1IU0q11lKyqsISE5E7zNJRnRyRJ/SpksAKHZJRFb0QyGfdiAjFcFqPKaVdBBrsRIxrZGjYP1fSBIRW1dYobF1X1+VVgZmLVK2fvTjESInI4VKaWOe0LMsSAjOvSzwej0sMjMa8xkBo5giZyLSu67JGIaZIl8vl+fn1+0/fPcjDuix/NDkej8nbh82Hm1v14YrFihmzVKMimnMppVSIfioieUdY5IAGGbND6jialllD8fYQBCMxEQOyhwtFzcBBQQkRgNnQK9kVYC9ZjUJgAKBQQiBTHNOfXVACQMd4ADAyLzx1Op/ggO3WL/LAuSfYbWRCsLn3iGRWRUoppapVUd9AQuxQme4Hvs0W3hx3Mm7+8p//4d0x8WITr/4h5xzNG3eRSsuizNJq/ATavLjaDfQbg3sqUip2PfStQPS18vq3jtRPyMQUMLCJVrFcFZApRAPIAs/nfX/AZbHIwaz7Bu1igGiIZiognvanSCyKAGxGtbqvKGDELIcFAwcAkFKFGBdMMRKgBjSzGD06VgiJ46qqTdsiq4JUy9lxHShxPB34cDg8PT395Cc//vDhQ87b9x+/q1ZVq1ZUJjBlpEBMCNUAHI/Ta06uth2KWLGcQmAKcV1TOsa0FCYOAZGBWpagqlopnKKZAXHA4PUCzKwDrdUGBseNtTc28yozHQsagQEEzMOw6il5gAEHbeYGQfihiMWdInR08PFlq/CcKBg9MoJsZsVBEzCEFPd9D1FTCtAbOUSEEasImJjbC8ZgAuRc024x84KZT7mwORfdSfdqdHqW0om8l4fcc9M1N3XT+HFtnbpbhGHazkkP6I7rfPIX/zm+HECOc3TSzKQKgORc1/X4ox+9f3p62l437z9ZlmWNKUY+v7yWsnsJrvpUJAPEjG0ofAQmMwvoXQYZzFSgF5VE0S2XYoa5qEoxKR6aOx6P6/oIAMvheDqdVODl5QV12/d9DXx4/+Hrb77+k5/8eD2kbdscjXnUK3r5MSIjBYd3NoTaMuXGYDHGdU0AgCYMAOogsQodbnFs7hySuydmRJ0yxsPBszHGALGrEpt/+8VNvDthyM270AD8gBxu3xCB6h3xwC3a5yQnDRFGlT53EEEzG4Nq7m49fzm+d5vKOi9Pl4ee1Bnu2Y3b5gsM2EYoMbPU0Qd0VXzDcp1+1Q55g3rtPxwZUb3tDhzjnYZh7Ywjt/PKcWSJxcuAcQohmhmKFCIypa+//uZ/+p/+7aeP53//7/8PREb0yuFGNmTBezzN0EOHKUR3nlVB1YyDgSiZiJrWWhBEVSTGSErmPS+mjgklZovUKCVIRAYzq1aBAJgoRa8x0j5521EMQ0hefD4O7qDKwwgBNATwmlIE/vTp2+9fPp4+vKN1efrwLsUl150BvSMGUEWrqjbUdR+e3KirUQeAdbmtTeQTAPXizObFXYMHAK3E9EquTrG9jattencIAeaWl+nLBsxmzpVEhGrqWN/dCBn00+ikBVyu5OP/52tIvaoWe30fYyOYVhM7ATjNFIjXl7t3yQbjjCzReCrr5twIwbTrKIbE44d9J+9zjzCJJpr61KaH0TYU4Oq6O0HfoN1QB5VhB2obu+L9kY5F08I/0GPqoG5dMCeXBe7fzx16Lp0HY7e/Unur6TQKAQGBuZXbeQ9brRUh82mB1pcoXhftnoUieG+8ogEChWYX5txW3FldHENQREyR+FpCYK1ox09j5kjsOL/jHAT0NKTLlMZp146Oa/zMJQD2fAWLMOPQqWMjO4f48N/inlWbbdl30d1XULcJkSjsJi/F/vPvvl8XU3s9PvDTh/zuaGwgKmgdnTUhM5d6JXoXYqogIP0ZPLlVB25qG1TIIOJ9kkYBF4wUU4iEZPuWD8u6ruthOYYQTDGEkGIadKmqpibWBqFUscMhxBiPx2NYKgID4b5fQghV1cxL3pmI3C4P6cDMgdiz0IHQra4lLWaqoiFSigcA3bZt37IhcAyP796JmZS9SE0Y95KjRDRqcAwCiAaERFhqqWoQTBhEpNRaVI2B3DwHQsBFsTIjYsxBarXZ2vOxb2ZWxRgIicF724HQCMyDdy6JyP1BADNUsVawgMCFiBZEDCFEQWXfIwBHGQX2hk8EBfDpUICIDFhcn0GrhrfeW3jXO3GjmxGgR1Wtz8kwUDMeI+xctAMAoL2xSO+Pt9bA/6nD3OsehkUPa5VSSCWGzmtTcfWI/o5duLPJhnRTVZ8xwsw4JfMRcX6v8fCzPG12hrbIaYwRclHAYhiJDNnxeM57PhwOh0RAwVBpzMjxygXEGCMHdOMWAEoRHwW978UU379/fzw8idj333/yKcAiXlMApeScMwgjYloiBt4BArr3m1WE3OVUM4AlHWKMp8Ph8fHrx8fHp6eH4/F4Oh3WQ6q1nB6OLy+fY2TiNkqum2gipYJaA8Yw2/e9qmClNSUpomp7ldUgxCWmlWLcSjZCYq6qYqZmQFhUSlYDRIYQr3AmjvPxpUpRGOLxjnLmvXDu8HRNKaWUUqt185RhGij8ZizosC/1LgXnfALWOv+0w6mJoIiIigjUWksFsDjoIcboIUJVrVbB56rd6nhrmC0TYd9SFCLejSvA3k8ySI6meUV3nNuUPF9zMjgdQ4PYrS/alMh4yOmCb3bk+lTzpfyjT7IZ9jd2v1fE7WlDtA8fPvzkp9/80+9/v20bI6nWENbjcQWTy+UiaiFSqaBiPvnJ+3pSWt0KdzvfVFVrQFL/J+4VEdEIbYnheFofDh9C4FrKV199dTqdVCyEEJf1fD7nnE8HK/vlkNJXX331zU++TimUUhihVCFsysUnVfQhYgRIBj62RHq5UHg8nk6HA2hVMCYyFURF0zr10c1uttyU/raVnNNW9CYChejeYEu4DZofuzMMIXuTdbzubynwlsy+lNHyw8f8Ntk4HXdm6/g5UVCtnRLccndMDhzRjVEBCIAjoXdHUTT1E8JVYqPZXeSRRkjF/91f/Ir+PxbWJoMbJ6MWu9nqIujtyoDPkZ6O8dcYw2QHX/+q10jZze6ouq3sn6uqGgIQcQwKtu1b4tOPf/zNN9/89Ne//o1K31AFd4qasWlqZlpNQMgaNoU/RjGDViZuIlo0g6iIRcQQSMCqiIIQMjH6rhTJtNcGYMUQKaYYtNUyqGeSXZJwwEgLEblxRcTkEVhyYMUKACagqj7JKURCxHU9/uP3f/wP/+FXx6d3//Lx6XhaXl5eDykpI+NoVBEwZAKwqqajfNP6XHXvEXHiUZ+z5eWO3nw2koRtUw08Cezzgc2oS+0Yo3VJhYPgdSLjKSKAiEo2OjaZ2dyLyWVZljs2bBSI0NzUHzh+SJD63+bTZskwyHXWCDZVJo8s3ECLmSmQOkQL9DANIRtek2ojDtKdr37fL1hK7fOItI7w0
1tRNqsAf57QUzfNlOz+a7cs/SXJeozhCyVtzqVDwdzptlkm2lUwXbUUInrbFUAHzOn8Yx42p+vKAgFaL0VmHwTvAEOARDFGNBWRXIujKQ4BLKY9F9wWTlXJpx2Z91QoI3jctz25YtVKvWEaAMzQw5BEE4HeUNt1cRHR8YLmnSDqJfsefmA2KeRQNwBVBYGQuCBvl+cDQlXIQc65ZrMFERFzrQOiGgACRyQww2qK6n0TWj2X4qoLq1tL2LeguS0MXrgbQuBDjHExIBFZ1nR8OC5p9apHMEPD1uVlV0TblndSU1VHNI0xxhgFDOAhEG95b20qTITBDdaUUkwHbdPA0MwkFzDjGFULAHAwogCgtWiVvOeLUhCw0+MDEP7x2z9kqY9ExJxriZaA0QAB1YNUhEBMAIrsfRHklcFghMCIxsRkaNEiciCQEkqpCODd0ABgDRoLjCggMQCBmSghojH12K83lTWfzcx/5uJeQT23DECRGNHYSzlb6JQMSQEEzJHoGvYvAZExkUl3oq4sfS1XwMniHGw9ODEgALBaVb1z/L4MjPFfc/zQ+fYDIhVvbdDxmZnBh/PiVLoA1x6eQMRc8Y2ZNW43Imn45eL7G2F9zT3aTXhMVcnlNhFRQA7EphgFTCAg0ctlPz3Y0YAQDMGaRauISMwtwOJ7wYGZFYSgQeMcjsu7d++IuBY4nf5IRm6H1KL7vl8uuwlCYFUl8FSlrcuSUtj3nVz2iphZCrSu6+l0Oizrhw+HdV2XJTIzE6LBuq7rmo6HWGs2BCI0NQRIHHIeAhxHVl8MxFQMKUSsqgaipEhiUHLda04pGUCupUkDYkXbcwak4KVTvv4itSre9ib1BefZjJv/4tpk1gLMHIKJXNqVu50A971PM8jHzUAYNzuGCzouAt7y4mEUaw7hSDN4HQozE8mIp4YQVMADDV4eQEgIDIaGBliHvgCANs38DWVat4lnQ9yNpGGmxxg7f1z5AnsO/E616+0g43lJhxy40Ts94/fFY2bGfrVZcRliywa40kcMaQmI9uHDu1/84ue//o+/+lyeEbnWXCUv6RCYU0qXy2utddtgw63Weql121QUDgcBUC/KDYxmhub9qV6vD+u6GoiU+u7p4Zd/+qfv3783ra+vr1999dWyHF5fX2sRb9EXLYn58XhMKXzzzY8PxzXnjVSWZZFLBQD0mTMIAFDFw9NOQug4N9ZAYenp6elwOPi7E6OIgnUnph9z7/ckWK67bN7A/6XUmTO1/8Buj3kjhhwbkQK8zQFiT1sNvrvbu7Fl7cvb3OA47Y5+7mTyXX6SiKrKsBHfvjW+iWjINN553Ho8yLw+7iWOR+rmKVsffzVupLfvAm/aq8aNZs/cJjdyPvyapej82G9X4E5kkS7AXltjAABkzYMAUCEA+v7T59/99g/fffddDElQ1KJqUVGHPKRWGUiqZtpEny+U1zLnfCFiRFRCQgID4wCou1SlaKhFRVEjByBSUBUTNQIJFIiAGTkE9wq6HqGYiDn0MCsiomMKNitIrwpXq6jXXnJUFRAys7gupZR/+N0//ejvfvfhxz/lsHJI6pvlXTXoWAlqBjMQRls3bMhYfdlHYNrNGAaPnvQNarrbl1cNTEd4cSbyTlIw793MDu3kKgSAfYCEB8NxCsrALRfDPTHooIeZ48YDWJe3Qzq7qw1wG48eN7rl0/G99irouzcFIER2Yh8vxcyllNtVvSHg8SXeWmjObvMthtE4YiJ+E+sTjKSVbbZbhzl2Mj7fle1gb9OHFkR03LmrBBGRlJJIQwYDV7ROAVJm1lVVACtFOJgDeTATIscYa1VCRjUGBGruvrYySI0QfIEUBZyI/Jn7o7rLAaZOstd05Rjl5928qrVKrdUAScFBU0INQuL5HmbmntYQAUI2stvV8OkfaIraC20J1UwA2ngrAwFoWcRBZNiBmD1C4BPPVAqSMRugiBQAYiQLAQ9HjVZLLQgFWZCQjYmqCRIpQClCBIGYuNccqyLZGLCuAEwUOBlIm55s4G2HHhYtpexlY47ruh4fHgEg53wMy7IsKpIvGUgZg1WpuaSUjByBqmgrWwIiShy87tQ5qpQCZqeHwwkODiSILWyK7hAShX0XM0uRVTUrMOG6hm07e7drqbn9kPHh6eGlWiklLmmxWmq9bOV4XFNK1SQuSyA2AJ8uiIZIdAis1nItYlLEUUdrRbQ2NBWCD5QBlLicOWOLIl/77NlDz0QMDezYWqacgPx9mtknBh6/8t4FVV9YScm5gwGa4WKt+N45k8G7vf1WZO4QEhFAi5yJNnhJ5hBjNNtmgeVaEc10wvVlJgNGn5jifHwb07rt8/8vHPNv/+sPl4lvj5QSSFUVR25Ub7eoAscTdGN6GGRvn8EmhxDeaA4iUmnf39we0EF350SNh8Aul01MkQmYFElEC2gAft3qXkpWc1AzaoJGQ4hIJFpFJOe8JIYYQgjr2hxMVU1pOR5Ppri9Xn76zY/2veScTaAWjQGXxGZHANr3PYTgdUTv3j0+Hk973lJKNe+XyyWXLYX49PT08HBMKQUyIgpMMXJYWmG7Gazrej5XRASCfd99QXLO3EWQr5bTEBC/XLYQEhAbsiIYUAUsRTjFZVkQeNt2EcHADs9GRNDaOtqh5pUW9+mCt0Ay/hffNwAYcU64qhJqNaJ9xqaYgYgiHr7gcF69Jr+Xw+ciopd/m7p9N9mI1NVNRzM2w2xN5Y9CGPBIXDURRRMEdSHh2FsGBnC1pO02Q9gwz7q7OFR+CG39AWCk6MeO2G3zPBFpnw8Jtw7DAMq+YwRPCOPkt8zWzNvjalrd9BMqd2B07C2XTZuLMlMIUaoej8dvvvnmdDpdLpc1JQBQ1Zh4iYd1TTFyznlZ2MGo91r2fccu6h8eHpaUYvQJqdC8QYDTu8PT04OJvr48v3///i/+mz97PD18/vz5kJb3T+/M7PWzXi4XZq551yrB6Efvnh4ej4fDUqWYVkYKRImDD14Djw4gmVlRYUDpoXxDQCTmQDGsp6O38SMCEFltUQof2OAiyFMHKioivcgcrDehqBqQId8s9VhQUJPZTp12Z+zC4IL5T3cHxzi493pxgJjSiEwNkkBsGTm8bXOdjysXIcIt6JF/HlacTUbwkMNDuYygpE4phfknfjfmqzSwSWGN3sJxEBHoFQ3epjw5Ed05nNzj8qMgYtxXOz7NrG7Gu5cyTHYcL2V2Ba2526+uYAxRDRVdRxOVakQU4uG7P/7hf/lf/r//+W/+FggP6VAcBbpWQmIKiEBUtMfrvTNpRPoIsQGuE5ohiCqgK8QiFQgNsXjaUYUdB0vbDFuH9kAyJgI1YkIxRAhM67LGGM1AVSt2UDUgMDJFx3p0eA5/MPeyGC2b8FprrTnXWuXTp5d/+O3vl9Pp53/yTdleK1aTyoQjUCAKiCMRDVMmeTRCX4t9nOZ8mouYjt+Bp5WwYYroAOwgBg6mAtYm2sNNDmlOq5AjiYKvJ/kwiYYk00IMei3Eg6ky32P3eDUzcJDBHce1395mrRujMVut88lXKtIbBpz5bibjwbD7Xiai
nUCeVAnoLbL3m4e8ufutZPAJE9dBhfNb+Jhx7DDv2KwvCrXmnGspRdWYWmeLF1v6Xb0qspYGklFr4yVXq4NdL5eLWgWA4/HIzO7tmNmyLKCoqjBJDSJG1FprSDHGOII7tdYQ2UO5rnGdWxAtEDsJue9RSvULdf9U1CGQVBVs1nbI7PloQyBmRFyWBcAkF0R258mNMz8RAFo0tFZmFhFsYwAMsZXD5VypT/ycGggNQEULoA4i8FSSu0/u95uq909ai5aBmZQqtARRFSiB07nsSwhn2cK6XPT88XwW1d1qQKxaXQGbWUKqKmSwLEtD2XLLA4mG2a0WQghIqlVEQI2IlmVhZlFAonQ4Lmn1wRXpsGbR8+VFiqIBKBsCiJqAme1qIQQk01IFDNFqraLkQ7pjjIiWcwZCn6zqFtKyJCLKOefzZXt5BSZPJue99qUj1eJorrVWN2JHaHBdjzlnJFjX9fT0eH75nGsxtKenJ7HKRhiRga0TuoLFddmkWkgClqusy/GiWKsWRdPqfikagMphiYcc932Xaq1oKlcAgBCQPe8ILf4EYN4Ih8mIEDASA/loC1OVEJiYyILDka3rUaRs24bWiEQRvMXFFHKuFLzGOvpg2RZCrAKAtdbETBDdbF2WA3OOMV5yy+GEELwzPBDGGGkvrpwFsHR3HY1aBMRxmPr8U6lXxTnLvrmRA74khe8OZzH/fHemmxqO0KhVD4FKKap6XNft5Zk4HA4HPp+ZeYnJJWCtNddaSvV9d2tYVdUEAGK4RtMdZc5r0lzIxBgNqtc2e04I0efi0GFdt7LVNlOI5hDdiNrWqmpVCM2oAp5zfbmUpydYAosaWIvlA5GZOc6sj+exJVXTh4fj6+sroC3HhRFzPiPy8RQvl50ppIgqAKqmq7/a86fzw9rwANISDktc2Y6nQyn78ZA+PCxeDds8MYLD4ajWZvgyYCBSnyMisixLzvt+2b19qtZqBloVxExNfRZkCAIMFNIh1CKlVCE7HY4xrc+XjWOIMW05myExp9T6DZgYAESt1hpTa0V4/nymsNRbAMbRJTu+7LGpJgOZSMwHexozN7DliMxcTZdlOZ+5iiAGg4ZNLdLYnzEYkjlNNlneesaaDqNYa13CkSFYEd8XkVhrVUEx3fe9FB+VxDnLqLd0ZSki234GPUiRkCqZYW93J2x3mVW4+3/Wp5BDqwi5UcOzeTF/r1OAfOY+4pGruaZrmgfeVf/VwJqcxnG1nhm4JnCGwYE9+Gg9aQy9wmPbNr8L3YGOAlwulxBKrRo5fv31j/70T3/28eNHANj288Px+Pr6Kql++PABEV9eXtTscFyJKCwpxk/blkMIgRnMDofl4XDc86a1vHt6dzgs27a9/9Hjhx+9R4PX18cUaL9sjHRICx7t06dPPtPlu++++3S+ENGyxIfjcVkWZsx5Q7Q1JmCUKqDG4CXdeLlcXs6vVSGEsItEIB8Nh4iAkmt9CunDh69CWs0kpai1hhAul9fD8TgG2Wsn7GGQjWWc9DtgCF605j1FLlsYyUNC1n0nRCQDNVO42T7o9tKUCb+GAwDAJkfo5ta3sfyrDWeNIHyXrzH9qVBi/tPdTQcteYxUR0Na57JhqJRSXN56NMcj/nqb9vGoDgDgNeE8MhU2ClXmhRqfZw6aibw34Kmb1J6HGDTv93V+HGJnLNFk6bZ/jp9Ax7kYJpwvLwMyh6pZrCKDqIhU5IXD+vKcP3+6/PWvf/O//m///vxy/vGPf7zlXXvGzO3kreSSxTuxQm/fMvPJEVVUEwcRUTNCNkRQaN1MhIMIDUByyZABAFWD93gDoBmIQktEtzaBGAJTAzvAEM/WnkfN1EGVEQmDWy9lz4iYIsfmkNmeLyEsVeV1q5et/qf//PdPH370/v37SITARfdSyxLdnLPALFbHBiEqYgBE09YUauDWcoecIAIEz07Om4tmQAR2dfVhCMEpqvKWO3phqbTwDWKIySN8w9txmrZOWoPSAhIiusfffNyeK4IO8UJE1gOYM2lRL4QcBDb0gvVgRGth60gls3GFiMuy4NQRM/h6mDp+oxCC2yZdl3jjXnO+XB/N3A1GN/oIEeB6F5xyg4Nr/IlSSoNBHMTen3fuIbyNe0HzKVWrl1lB14XjlcbnGCMzi5IvmTYi7t6nUV9BNNWRRh9s6ddjZjAdECZN0AAScWypg2s1FBGZNVQAX3iXWeMlp/e6kU1+ceaAwYiICXyCtogMJ4oA570ca+ISpBYvTUERsZFJ7imjIevHtjl7dknaYsmeU2UmxzpHBMDax8UFMbVAqlwZtNjLZXvZL2vATQTWOASxAjGYAaqqX10AvaBUAdAajGqtVdAB483AAnEIgQIvtQ/PQXe7gRXboD8zUxQAQPPe42VZai5mJrVetkspPrZwX9LJ2al6PkpRq17yq6PRLsuCaoiYcy6lmFk6JB8bVkqpXViXnVJKwMCJrdq+ba57mHmvlyqSiCnw8XgUKcjctBSCj9gw8HlhYqZiDGZuqjl+rIA5aBcRGZKZSamReE2J0lIMXgAvevEYNqeAiImDqpJ3+0GraXPeQwPz5B2IV5k6rlFVYSNBDb02CpEROUbEquaB5warFZBQQYsoYlUEqVdIiVkO4qimG1zZQ7PMPsAaJdeu6a8lEh1jxXsh/B8eoUfo7cF394IfOH7oT0Mg/DM/bHd0BlDVUTrYzYUhNz1IEULyk5uAI7LuVwx2Jmadqi+0CTwRkVHxbp4HaM+J/X+9lltVdWCRW4tBKilgQMyKL7m+XEogOgRiCIhIYLVWQwCBimAmMdKWAzGEuGBrOPddIFQtVqXuZsgIITJhZCKH5vrq8d3IMEfiGBkRwZSOacD/IKK7/cyIkUtRkaJVJACJAag0hNUWgTatIlKLlixqzBSBrKqqmld9FqlQtZRKaUnrkUI0Zo6BQ/IeXaKAdjXCumq0Irrv+77vJj6kh4rNGa1p1lzfHxfvY9dEC/RKc48h1kpFKNCU60bEBiJ2tWK/SKKzNHam9wHf6sLgikALpRTPA0D3uFwcfV8zhx4oJWNm4siACuVOCcKtDLcJIQkAnDLpljgR0c1WaGe2xYRrjPlmcWDyA0etC15TNDdLBN2PnfluVm0/xIbz6zTBxNc5VDY5nGamWh2HwDru9E9+8pO/+ZvfvD6/MvPHj9+9f/+jw+FwuVzM5PHxJNqy0wIeyjmrKqLVmsu258AEyCHEGA9piT7mSZQAAxIBW5W6Zwxh37aP33/PFJdliRyWmHxG8copJSbEimamPuhDHJRf1bTVCimQSDGE6yAQMDPDwDEuy7I8PjzFuOS8XfYSCCgtRPvlvFFaxt4N7WwdnQ/xCpDYdn94ZSOUhnR3hYYQ1oI40EbCtxn22BD0ZS42m/w0ZgYktyt6qZYZaKkmgiPV1hKXFVsXfTNJia4G4kwbQ9Je7eabPEnL/GOPLAwys/5ewyzGbpoPX26yfa86aHyezmk585neRrHfrObmc+6IGdH7HtEMGhhia3ylHtDwlyB/FZ+LeCNq3tiENwwF3sFFBgRkqKiAoCii333//Jv
//Nv/+Nd/8/t/+i4F0laC0NhntjzH2qqqa15qCJnAhOYYdmgAJCgOQ5JCFFPUqooC4v4egC1h8Y6txGFZYyBmxhYI9j7AXmziu8DUHHhrbZDmibTz61Ylm2p07IZAMcbAXAkw4Locvnv+7uV1O737cNnK8/PlR+8PQEpGU8UpDAelOV0Aw49lZvB2S2DAa5PeXMreuAkREG1ynAazAADY1XqHaWvmAM2VNjo+s1uCE1HhTPNmDX5jPElD25sKkh1V2LorNW46/Kjxp7Hd15eajt4acPUJ5wDTOM1uKwWwW8KIaEZmMsCQ/JTBm+O+0FUS9Pijtiwa4RQJSikMmTY92Jdrs83AsVl8tN3oJr924IwNGw89LIax4sO3Rgpu6APAyBHVWhkb2qeHBJxcmWC44KqttEZViUMgNiNllzhEBCEkMzEFNe2uuDuNDRtH+/75uo/Oy/bfW9nXXrDNadGxIoo9qNBopuGJUZ8tYC0jrSEkVR1L30vdZBiatwr79r5t3RTAiCi3YkSfhyqGiCiAWFSqWlBU4Jdt33IJx0fYLwCggArWx0beRqwRFc1Fo7o5YlSKawskQzNwhBUQjrE6JqGqCph5PwaMcnMrVQgoUowcnp//sJ33UkqVXGtNKR0Oh5RSrR5ERJ/xlUJQgJzr0+lJVVXq6+dnd6RjjEtKr68vThu1FETECIYgIufzOaXEMRIRYWAyt3E3OQOCGKJBWpclL4iIgaspdxMSXDC7ieMlQEhmtuV9L+LLVT1PQeSYA0i2xOWwRGSKCGxNpzrKIAIs7J5AFz1dsCCigYBY9aJhxECACCKmWAmNe0+wYl9J91mqFFUGjIGiMWFAT/V4xzVwCAhIIL3jv1HctUwCO9CRiAQaKtZaGToYEZEhYmlytUkr73wjN2loyicMcTYLuDsxNx7g7nj7/Vgo6KKfiMawE38T9Xbz257m5uCRj5LmEQK0EWeeDru1X4eAIiIXI7MyAM/uTrFwVSUVMwuEjPeOsSEU4Ne9fHrdlkQLR2M2AMBRS+TlUphFStVSm7jz5CQaECGqiVRVQTMDQgNkQAuICAiL934iM3MKMTqYWC3U5zS6TAMGQjXV7eJFNUocEA0dsV5BTaw/s1QrRWutRQ2qcoyGZiULGAUW070IEVxKfTw8rMcDIAoYxwCBS9maA6BQa1UR5taoYmBYa9lz2XYzI0S6Ttm+SXN1zee9EFfi8RPIQ8Uq0qiXRaxAcYDjThse0hHgdEeBcCu9ZwpUUyMrWkmU7dqC7xTFzByMmUW1FBUh7Y6Z12sM8U4hVO0BAoPWktgLPW5VaVMr2AdX09SMhF9yKbWHz2ftC8Pr7mDdkzENnWUA3gAs3fHpeLyrBd+fVqdGxPlF8Fq120KcnpSG7rQvS3KSRsTj8fjLX/7y17/+m4/ffTydTp+++35dj2ZWSokxhBBirABqRodlgYEPDGBVLpcLIYQQmCBftt0JWLnuuQKUnKtafr0gYgpRRL779vsQwldffbWmhQDFDA2YANAUvBpfUMwIQTQwGrJbNi3cA2hiITIAiKkqGGEwBEIK8buP3x9Ox2VJBlZKBaYKPlsIcbIyxxJd6W0y3VxvW/dwJt1+JVd+A5M7GwPwXzo8/Tiz1Xiezno3pOVdmjaEKpENoKBOb3ArKudjnFClDqtvUEtXQ9eTZ9mLkxXbv79rfG2BSDPrGUVsPRHXi1yXaLxIp8lrRejdSg63diiyu1cb/3RbEW/9fPgBph4nMDEAi1UjRsZSabvU3//h41//+u/+09/83cdPz19/9QEYoap1A7g54YQcmv8M0iLzzERMiMF7dqD5adRaFa36zAwyA0BDDA5YGIiIEgUz83KiJa4phRSICFOI6EAyzEwRGs0rE6pH9GWkXgk8oAnNq3fZoFIVwQIQ2uF0OP99/vTx+cd/8qefni/f/vH7x1PiBAYtB+XmLnTMlOs33X6uDZyPvaa0cUcHBLmhKKelDtrZToY2RkVu8igA0BoGnEv7KvXQmg7Q9i/kFcmu8ZeBqTAbEnAdD4tABF6/im2VBuHd8e9bO+SWy27eF26l7iDvcf6I3Yyl6BF/bnZjuygCICE60gy8ESbTA1+TgTNT3DGmTtnywWVm0NCIxiWwezihARV1/cEYAok4TsrN22ovMyBuJZ1j+fyFofuvbh37T71cBcjnMZgH7mutp2WJxKpaDVTUVI3AWNy8njeAmVNK+7ZfRYmrcnO8isYhs9XSHqPvnPUws4iEEXhQnEJ+12ZQBAAjdO8Ua8lCREZMVJxQm5yiKwiNWxizDO0rc2UqqSrehgs+/9oCe3eiiioYmtrrVl63YshqFBr1UBs8gGCiFSyE4D4iIXuxIGIA8JmP3sIRDMFqMUVTBDCKIRqImqpWbRUpYUk176+vl/PLa9kKKASkQPH5+ZkxLGt02/f19fV8PtdaecLuCyGk4NFle/n0GhMf0hJjTCGamYrWS8mlglIIoRZ3GMHMztulKbDAy7IsywJI276fz2dYgpe/g1oItK6rmqSUiCCE4DXWZjZachVMwRShmm57ybUAMBjVmjlGJmIkMghEkZGRjusi5YBSc84qoNT9c3ewoaFJDhJiANU+xve2/76H+0270Y9AUjbRG4UHRogkAAwNdIgImTGCo11f2edK2N2G8z3yplUzATXCFilkQyAmMzABR9u7yiZ3Or3g+R597k6y3B0/9Nc54nUjFuU2ctYDQNBlIrzJeAxtrdpygzFGz7TYrY2L2Mpr5/VxoQXIiNqWgUilLx0R+jhtGLIVyLxERBARTMwIyNFqEELain0+bw9rPMToKKNADfPDDAHVEFTAwatoy8wMLtmhh9LM1CoIgFYA5BqISsCAiK9589dfUsLDEmghxKq11DbZFRlUFUy3WkVkE4wxrofFU19qVUV8uia4iBOoVbfsTXGIiiJmYKaoYDlXESlVYiRDCEtK62qEYmpEhipV3AlXmQquDFTVu/JMDdQYkcEb52ho1VmXQEOivqHbKyX084d9MBgKehHarA7NUFUIHTLk6kRBT/4QkgdTxpeKoA1Oxt+iOWz+VzFzt2HSbo2MxQT1aiphH/lmnc6vFD7Ml359us1v2AR/322mK5e5joVeqwbT9QFgJMMnlsSZ+4ZFO3PlWMNRGnqz7ADwpkgVWv3e9QkRkXCIuBHyj1KFMPzkJz/5xS9+8Te//hszY46vr6/ffffd6XQiWva9eJAC0QAUyVKKMQZE2s+XUsrlAilEUC3bvp0vzJwWeFkWAsg5a5WaCwAcl/V0OmmREJeAHEJAsy1nUC0gvnse4ndmR8TAjAqmtbTJSebqo5VgNaAgNgCptu3lP/7q13uuf/nf/TcPDw9lz6LGIaWUpO4zxb4VjHf0rOMEadCH88Kimk6Tcua9+KIgHSfAvUC78YXuxOb8eQhV6OG2YUfeXb8/z32PXyc/VHNWI1/AZkdTBwB743HNz2O3Rur84tZdx/6lt9a3xr9hkv7QygwSnQUOdN0xVqCbAfePGuPN4PthEM6e5PiTmRlIFnAIlGICyApcKv7TP33843evf/
zu5ftP56Lmk66KVeztvmpVFIjbKDVmNgPPTyKiz1wlQO2FbEQMAFxa+5VqP5OQKHLHZm9z7BlHOQYRheBjnRWAzEysjgGAY5asP0Y1BVEWFBFCRG46uqjPABKIS80aQhDR856LwOdP5+Px5UfPJ3iIiYcs8oni1Hy3vtUjtFRry4UOGlBVAhC5NQ/65kZHAfU/TfsViPsguamQGABGbOIqxFREuEfNfJP9BLArrikS4fQr6L7o+KdTkZbSKAevGS9mllK/KBakD7oYJNS+rzf5yTvStR6DowkVb6ZDaplebwC5ESDNtLj1J+/A1YkIkeeM8bWI8qaZ/MYxHjciojDYYP5DO3eUQ7YyM0UEphZuAbjJnxLRNbs2xeOxq/NaK7SgxbXzwdMg142fbAKVjtdCwOzGPLXlENBSOcbEwWwr3tJDmIx9CUptjXZiSoZm5oCZ45m7CDDsdclrPARG16rdgOFBdmaGDWydVNUdNlVoYzkQRzJDbgCOBXpRLowEJhNiK4svpQBGhAgm1LFA2VSAA5KFBKCGcc/7x8+v5/fvU7FwGEyhqmpiojUoSRAA9rdVArwCihgiIhFTQAIl8kmpzDERE9Vtz5e873vZtm3bS4z88vLy+dOn8+e9ViCASMAYRGRdlrQ8AUDt7aMisgDv+66qy7Kg4Z4FqeXN60Vf99axZqLe4QNr3DD7CQJ2OqW0rovgy+uriGTZEbeYUgihlLJtO5RtXdcYYyCOMYYHVhNvsGbmztrXxBe4EEQQUR8E5BiSQEHBpBqQEDGDpzs0AC4cakwmKiDAHIiZozdGOvkPxQ9mY5YjexEuQDVl9zuuffM0GMx/yMjAiD7BEUDNci7IgBiIgm+liNQqV7Ng8PzEaI0vnXtUVQUZfRoStvt6JUYT1z2CTf2Sze77osD6P3WMn4yfYxfJsyi3/hY4Omcmi3ZeJZ1MXiKqt7X41zgOzHJtiKyGUTlLvSYfDXufG6IaiFowJCAzRmvgiqBmPjRJlOJe99c9v2zltGb2mREeZwIwUzTNBkTlvKOqIoQYIwGKQxtZIQMVIYOqKqWCQkVJHDAgETGhp+L3/QKgoBW73eCqCImIQcw8FawciVMIRAxVsuTWvovUmsVFZC+Sc/Ew84FTrSKgSGSie8lVBBGLqCEuh3U9HoTZCMRUq3q63leYrt1NJqVyiIxEIcQYHcrGqvgg71nDtd/StdduEoCm5n1ek15EgB65Z2aOIYRAEEzRmz9bNZqiopo1wnaDqWUg2K8WUI2YCQISe1GW9AqR7eUCIKJ7PCzgIGfdW/Ag3VBSiIpq3suNBKaOU2IDkG4m7+tbQKtqmV8WAAan3rFGJ+xrKn4sXVOaHQ4Xe0Zlzhn2iMk1u3LHszYZVYPBYcpGjj9pHwX8lp2xF9e4lhYpYng8Pvz5n//5f/jf/8Pf/e0/EKKIfPvtt+73llKqCqARA6I39Sn7iyxLydlD0SaiVUyUmZ8/bT63ts3vEWVmiBgo/uj9V4fjwkRSq4mZiNbKK4mJNZpHCi5mUR2bA1TE539WChw4EaHP1wWASJE5ikHO9T/89a//7re/22v5y7/8y8fHEzJqqbkY472QGSs8H2MBW0mb3Uo5M1UducGZHua9mI9BBndb2cBsOvLgENEzwcwPOVAfzcybMvz8a+pjuojv7LjU/OGqd7qtMtPP7JUhtgr8ceXpCtdB3rc0aQ4UN94aEVXHPOd7IrxblvkJx4vcqa35SYbZiYgOOYG9mXaYf2/v4vrCJ5OSmUItphRiFXw959/+7tuPH8+v57LtUsWqapFcLQcTM2nNxJMOgjad70o/DggGoMzYW3yAuCJDnyzVIFmoAaH58yshRg4YWL3qXikAiyn40JSOmgaEplhkBHxJzLSF++vu5dvAwRscCZkxhLBLuWzVW50QWQX2XEqRT88vSzikUwMvNQUVM1VOIwQ/lo4GSFXbFESrioC3QyhvgwVzpYOLWz+YSNtc8jonu1Shy8DBU6p6reG6FcXXwNlNZgzAdUjvyB0G5EwS/U83ebbx82GKzPQ2FmRkk+4IdTQr3v28l0Bfn2Fi3isZj6WS20C8dTt/kDcRjyC7v+AXj1HKO0SK03+YF9Ex9HwvBn3Ph4g6qIPd2qnNjJimtI/fxhQ9LGzdxmXmEBgbJpCX7oAaMGtKyeWc/y8QM5LngnzaJvT+w1rNtdcsvp0Y5ye3yRdulAQ8kiTzQndt7Xs5BHobl9eodlpB74cxwlbCFwIxEqPUq9RWVehgHo2YTGZGUFWOpxAWog2ASA20mgbwQp4lFBOgSKjbLrniMR6K7LGRoBSqoKYmRFBGyp7A50nwNbbCjv1FyF5wAOAYpKwGZnspsm3b88v5fD7Xctn3su9KAA/HdDocUlwCRZdoh8OBiE6leHtJCCFkPp/PIuJds9vlUkrxcrht256fn19eXyS32UoigvVwuVysK7DleDqdTnvJSGRmW8nbtqmhz8JGxD1/zjmfTqfjegghxJgcVTWlICK1FvFW5hbgB0RUEUUpope8lyqY0ExjjGLgoCPEZkzQ685TDDsTGmgVbknBq2NmJtajqq4MwYAARyTYl5umzLCqOrE3SS0dZBlIwESsFCEzQewtGjxqS3HSkLP67+Lmarehgompgud21EhBR78cdC+oX6nPFJ3k46Sw7w3E/8rj7a8Q0VP0N6NZGjRUYGYj1Em8es9qbzm+SupxwaEDhsyBDoFjvS0NScE6dGRP5/vJItXMdBJlqAYjGG5KoOKIMaAAUEXNaM/y/PJ6CMYPK1MgAqsOro1qQKh7MYBNRNbl0bvrAi8mVcXMFBRjXNCqFS0iqLKhMigRHdfWzF2qqKrWTESOcqFeY4SAaNjFnedeDKRWFRMA4EghRAXLRWutW6k555yrGjIzBm6dK4CqULJUlZCi1rocTumwhhSRAwTaS1a1xAkRfWAfd/1XawEAFAGgyCG5kaqtYGjWkX1fmoy9CdU7KFivg721Ie4TL9aa62Ci864y30SXzQxd7YE50CT05/G1BQBg8nmsg5YEPDqL13OmQ1UBFQFU0Y1+BVWtPv9gfubO415/0n47XkTkrn3/WuxtHftjtmAmT+Pq6flhdmN2DOPj7cPcfZiXa6zzvHrOcXOYCbpB4C4rEZtZKyRh+tnPfv4v/+W//O0//KMT6na+fPfdd8fjcV1X0eLvGyKpElRwfywtCcwYKcZoUgkwxhiQMPDxeAwhoIFXu0QOp/WwLMuyRkTM+5ZzLioi1UuUW1UoQKT7l1LVDkWmBBEA/F8iAkwJWjOLAvzx2+//P//wv35+fv70+eX/8m/+9Y9//GNju+R8ika3Zp+vw+gFnTcFAALxEPLj+3lTxlLPGzFL2kEV8CX52c68ZZb54uNeNLUL0tSl4jbYbHfOV/advXsqMyP+wuSYmTYGd/vTzUAP05s63d5rllmqD5IjCiM08/Z2gzVm0Aqbaj7HOmgH5LhjAf9JKfnuFahnde6I3x+bgEUKECAxiRlSqfbx8+W7j+fzVrcs2169nKKoCAhPYD/QtLgQkUgbs25marVWJ
ALiwDF4u4eP6hGEQMhmMXb7wefddCsgpkiMkUOM7CPXxLSa5zkdvwusjRwDM5NmwdIUZa6gFa+HtXkXAF7ButdcJXiQXAwQWQ1fXy7laYE++N0EvTwzzMFfQ1UQKTrATgyBEWBuVb1mccd+dYoEaBK/fzDzyLCZtDYmb6XqigE8FoTATIikRrgrIQH6Xe5jBzbpBd/fGCMQdcOrTekYTiY5oo/I8AljiNDTejN3NNz+N1XNKteXnRlkoBbPFp2Z1XqTrB6/FRFEHmTQ1YeqVZz1bEfHGHKAqOWo/fsQaFg+05pcQYPvuNi1wrVYRRuazcjUXbW+ZzAChqHStOPzNBkqVxAe7ZFaYbBrQOta/Ibo4Dzknl4u1W6RcLA7yj5cwRyNFxSotds2SRG4NfDMZcHTijTEPBGVVpQFb+TseP0uy2zskLtSZoaAbhwQBYDsn8daDyE4CawbtMy2WR0UyBcthECwEAYENAVVZc/uKRYpIUZRDRwpQMlWsobjSWyT9myoqtgz5l4SYNawnsxMEd0njDF6j5+qLrRADycEdgTzNjU+pVprLfl8OCzvH5fj4eHhdDqtpxQXRkc9DqfTycv5HM9dRB7g2AK0HrDMUktR1cfHx0gtxerl7LXWnHOJ6bvvvhORqvL9d5+yVCISpb1kpshEPuwvhCWEZIjH9dhX1Wqth2VdD8u+7yGw02qpFcmoUy8zC1jFWmt7ZQATMSODNmOw18SrmVlAisRLiLYuKbA7V6UURhTo3I4KRm6j0xeKy61FnbDFElygMI85CioiVQQ4mHm5aQtANHulUz6R+eh4s2uCAno6hXqZwVXajmyM+u/8u4Y+N4Uw3BtEA+Ef8OLeytDxJ/jScWdqDO6j/k3Tke0dGrfiVAWKfbnaG3XeH47x2xuNe81/tVuf4f9H2Z9/SZIk54GgHKpq7hGRVx1dfbIb6AUGAAfY5bzZnbd//bzH2TePnCWXIAGCOPvuqu6qrMrMyAh3M1U59gdRVTePrObs2kMXIj0szM3URETl+OSTvS4DTD97xxPYp6pqjybACcwg8lxqjoikZo/r9v4BDokKOTojejRHIATQQQnIzLZNzAAPh6Ukp9zaBu5ElJAggbK5YFN11ebq7rWtiBhMS5JEJDMjujcVAAi2GEcLs4aJmTyGxQAFEzJiYkqsrYlI3aTWGq14iEyUVLpx2LZ2lqqqCma13j579vzli1JKaw0ZkRM0j46vQDfN1euOEZGqIgAmRERQw4GHnG9kWjbfFUwuvmmXYJozBOnSbh1dYXOIvCpihOqHtDB7SolZgSJ5QLPyMCz0dErc3UUF1IB0tFO5uqWUAEF8dt66SHj5eS/2RIQxTftazLBr0sUJRsSBSHqqGvsfrhLf1/WKJ9Lb/zthBYOxPX6lHfjrfu25zo4MuHbTn6jJvP5ksbu+26c9w/sTAIiZWxVETCm7+vPnz//iz//HX/3yNz//l38JO/Xu3Tvro6WjCtGp9nJmxIt3xcQJCVJJzEtK7r6Uw7NnzzInFwUABCAHM5DWVES1qWrPjJgy82M9uTs6ZE7GOdFYQL+YDiIK90xEVGvsjA5epTlj0SMiI6cvv3z9X//+HzbRh/PpL/7iLz755CNmbtD2XERztS8G5Dr2nr+dzk8/xzwINqY3tj95vqB5zX0Asz/m+53h+t61oMEqMe/hib80RUVHL+KT689AcX/sP5n2E7t/fFmHeaZZDwht0HiO27gqgF8L6tNJD99q4eda7f3m+SvfuftTI/Zv6sMLznvbhSgXgafrAkqXLPGAVG0mJn7/cPr9l6+3qqLQBKoYYNTT3L2TC5pH1R3CBw4QPmOnmlW1FoUJB8gYCDQHUwRHR6aE5L0yb+gOYBgmjtwpAzgyIVNQl6nquoV9i4l7vU1ORcWNPRJYUR7s1JRgUEohhAmfNDfRis7CBgBRv43cCqeiqusqrTXDgwEEuwgRE7JZm81VU2bMDDsq6hI1xfLqzH89sZYRDV7/Kp5jCn9cxMbE5alfABDjwck9sHDjz4Ggl1n3oc5eHwEgMEocNcAYMbebWQJwGX9l0bDwgToDAI0KG14XyU11/13zq6fwxzF9uRkTIaIPQh24GGSbd9LVc4dAgevNaEZPe4uxl+2dglyM535LMrPEzCnF7SrsMCpP9AR2ycX5yiMzN2akRMWm6wOMsFg7e9vlmGvn3bREoCwiAo6Z2DVGJOjIGiCRmneyjZSIMvei3c4W9PsBdw+H78JK5NccWe7Tt7jcj5nRNLK7ScoDy4sENquF7l5KocQ+OKxmBE+U929r2lBEJEZycrXhAmHOWStpZH6g+02MhEhNFIOzDlzd3z+u9w+Pr5YlLR0COpeRKKY4tstrGqqKgEw5lro2YRFmJqPatsQZEzJxzvkIx6UcDzfH29vbkvFwODy/fX57vMs5MzA4gvWk6ePjIzMDYbxxRLRqzIzehwG8fL7EmeRBGJ1MOm9BQgKAr2tNqZS8HA6H+4f327Y54eNpPZ1OQBiDQ6K7o6qs6wrL23Vdc87B9MjMx+MREYk6jBsHxGJqgpopkiqOzDGYWZNKvKTECZyJEMxcQbmKutqSS3Q/mvm6rqfzubVG1oHUASiEgXt8InhEBIGatuTopp1clzkzcyJWvOhnKEtmQLvMg5pattP0fXoC5qY7NUWEGAzM3PqEFRhLwZxjK5jlwSEV4N6TBE+0+8knH2rr/+lvr2yudxmMX8QJEzLx5JFjAoqPvs1JBwV/IHBVFdqlw6H7SZ44A6iZRYVw5kcRyZFHvE3Dbg5OFjR0QEJAi/m6TpwQTaVKe1zt+Za3hGAaWAUicFAwNVAwIIL3949lSYgUXf7uaOLIZA5uSJiYHTI59fLU1s5ERIAMiN3cc5DvUWQQGCI1qODsXkqJV58SM6Nh711099ba2qq06JrjlErOuZ4qJXbDbds2qYZgYCLyvR/84MWLF8T8sK4JPaeEiKmkVlciYk7oHehBDtG4X01jriZ0uinIxI/a9q9mvvQh84MUGxjRACBx8ug8ufTREXwbFz+MLSNZZEDARoIDdzuouT15+6pgZo595GC8ZZFGTvv7ZOagz7mK8SJjZZZS7LRBfkaIwfR4OXkKc6gbcVSir2IARAzg7jDFl8xIzNqBGYUO30K0jotfiidT9eefwK4YtX/8ucc98XdxOC5z/pjv0s8AHUpHo8oEOH0RjJR8rTVRLqVsp42Zv//97//5n//577/44vS4EqVaVzNb1/Xm7qCq2xZTmnJOC1ECkEgC9scHcCIzV5W21cxFs2ltvT/KBB0Oh0OTTVWXJR+Pi4+0acAg55vtmTFM2gQGN3bOi5saoJi5SNRezKHWauB5aQYuIo9n/erLr9++ffv5559/8cUXf/VXf/Xpp59+/0UupdACyL204QCGFtnMK+vn3Y7xGOg839c+I/zEHu7fxf6EJwH5hyfD2FlgeBf7nMjMSwZia94M7ID3vgtE58VFLoHi1X/HoK8n8jyTINPDcfcYDDMrY9MZcP8Woz2fZd7eHgPCnD5cnLAn+8eZP8+1nUoxnb258vO0
EQ1GCrjjw+cVhiG6egtNpbWGzEgsIluT+/uHL7/8SiSJQKvRxEQpJQNvrRUoZmbeU/wzXQsASI5IUcfryS8k9MgghDUbbdtMbhaJRjcDMEYiBiKWaLcxS04AaOCq4mIFHNGZmRIDoYEHhzkOkirZFSpwiFwYKB9jtMxMXBFTFQnWeGlGRK1p45HXJnd3ClJzKlVXuIYsubvbJQ84LDwgETATRtzFMIQ5FKbng8b0iKk2gBBn4nBfeABAYLxxmzJklp1C7GC4vQgIdIUKvthwRBXh0Zfer4kXG+7uxEjMDKABOLL+bXAdX13Kv6OnbCzI6F3a6fKUZx/FxrFBeCBKdrb3SdRwybruhAr3lxqv9eJczTQZEcUyfeBk0rdGg2aWVMg0mAkaEXt2zy7ozulw+9KkrtXLgQkdwUsmYg767TAEHYFmpr0a08u9huCEo5tLmYkTu4O0pgoAadu28ONVNTrCAaC1xkRAiISckmhDRE4IhNospSRawVirGMLxeHQAdGtyBjR0ZubE+XxaMfQJAMybtdq2IABsrVVdI2SgjIlAm6BjggJW1SGhU0KRii4Erm0r5QAxnAqZE6VU1EXtjGkxY0RcSnEzVUfx47JUU3Rzl/CljMEYnBCcpOpWGyOllMHZFN3okc4NmyOgF3drSE4sYLlwW9/dmrH7oWQF+sV9e/GTzw4Pb5I5EZoLVmcEYlilHpcFrJl6ZiYgc1PVzWwh3+oZEblkSumsmonyzZ2ZVTRGOtweSkvbtmVMN+nm5eH7se8mZuSpsPTw+AhA4pogAdLWJHlLKUEOLi0BAMjCWV2lmmTipkjO4G4GpJRzLqW8OMCzu2eP6/l8fr0c9HBLm1Tg9fYFOYI6iZjBBnA+ANw9M0uLSFrPVUQPx9u8lCaOqbhTU9lqMqSFF0pBw6MGLg55uTm9fecCN+Xm/cN6SEd2VweIhiowYW5mro1S5uOSRqhgBr4d7LDAup7PZ9l01I4MzRmBibYmAO7sZpAJU0qmAgC1rbfHAyKoqqE1awZuibZViBmc2JmUt1PLNwsSQ0rGiXNGAz29t+o3+eYk9zGRgoAxL8LLBpQTmz9IPaO2Qlm1VaWb5cB8pMeaMDG1YgCO7O5Wa9vU0feMau44+lnn7jst5vQm52/3Ngu+7fhDG78AAgJRil6sBJAcg2VeTJ1ARMyRgVE8pcyCumleCjoZQs55rRvn1NpmaI5ADgnTQofihRoVQmnKzFTyw8P5rCrMZIoOXh3HGOKFmFNxEwZkaDlhPTURSSmBE2JiopIpp9VsbbZBysQJyZbKYlaBGh+r06nSRwf+6HAD9fzyeMD1fCyZIVmr6WbZtq3CBuY1bzU6X1OKiMUQlECTK7AnNHCwDO4vVRBRm7n7khIzEnhZDplRtSkoM3BQuJASyeFwQHQmJCSKCrNjEwcnMVajaiBGjg6JMSe9ISQ8vbt/eLh3UCNOy+HFZ9/5+Ps/flR9bwDHGzoWIUsZ2vZeHlo+3vKSHFHURVcEcLfbw9FUAWFbz4+P79+d3i63L0+tcR8Nf4lbxM39qiFKzWrbevkiCK0AEiCaY9ugGubSKigslu6qrOdKzEhAqCwiAWfYzm05PE9EGGQ8qwaaCIPfAN29qTuJoGcgQlMAuH88ffzpTTM0pCaORKZwKIfzuRUg31rOz1TPtYo7MmVmBmR3iLqWd8ZTBSCHnpKDkaqIET7dHVegcR+jmtEbieeOjsizr3gkKEnVwinvBZ/dPG3oPuKFdeDDIx9SuCAEgDA9iVDeHZ/ZKOzo+gAAAfRyYDQDICRk5JQzAK0ds11SSoawwFna1trpdkmAtrYHLEtbypvT+qN/8z/95P79/+vf/m+Otrw43m/vLcn2+CzlBfnGvCZOqWST5lpVKpm5inJeUia1JpuZfVIKPjw0YnPfWl1bdaZSyiNUdzeveIZUkQjQ1c9e8iGnlCknSIkImYGouXHJGzStqOgVTADFXNQzYZOKiEtKzJAQs7uulTA/e/ayQno4wze/3H5fP/93/1i//6Mf/V9/9PKTTz757LNPXzwrKWNJvBQqid1bTsTkEXlGAkVEDuXRO76GAROQgzsRO6OZ6shCA0DArJDO0FmgGTGBEwzCZwAAFHc1VAcFUABY8jMYLl3AVrv/Z6bRVm3m7hIe6lXh6zJ3gQiZOVgY4OKqIgAQyYdCBQBqRtfQNY/J9cliXo6PgmSkEQCAKPtujl8IedPGzDFdU1XVNNQiwPRd7BMz9ukITDwvDrvUpw0wcKjerDHEr2a6P24+MlnDxzUcLBVEWEFTPBSqgbrYiBIhmn6wR/4IhIlLltOzJW9iZpzp09dv337x+Qr40bmdHrezoDsGLIug8sJ3dn5k5oWyutdNwZg4S0OipYmiaUwIBHNt0hzqQzAgJAAghJxIVbf1MefsYEgQgK0IIZqayhavw8zLoTAXBVZvp7MgImUozoggomaKiIXDIqm5mFvUzwgxJUZgBEIgBzZgE9vMiEmqvn/zsCy3y+3zdHPr+RaXGyH74kv7zicflWSnx69uD5awAoAaqlliLKUgYMDBAAGJzUAimE2ZOAOQmCV47AawtdYamuecuZQIF8MygjsMji/woMk1ihOCjDTOCWo6QHZy9wQEQA0bU5zVC1TkgBp1IAj5REe3Ht8CgEmb1tUMiRmJZwIPzKOBAY3Io/GBODhazCdBiFkLmZw7AgCYRRHrQiINs5A2cqCOFMxTRERMh9ITOmYW3OMism1bikGgegHcRtyo1kSlrw8wdDwZECWiUeJ2B4iyE6RMxBgoowjsAcBdmBmpd9zAjJnRLj2EcJ0Hmrv7/nhiRGyQyAPAsiyoHfEPuzB30gkQkdOuGbTzAj09IpgdZme3WNcQC5pVsMv9x17eXxdfD852u4TIM3EL5ipCQCKSmFPiuZ3H34ahwZ4CvITR/REwMH8QFQfoEb+aX6UTwq7VWhGxlBKj4eP2TqcTLMfpeV+s9qB3x8RgoqqbW2uttnbsyGBGY8WGbqqiQehEqKoeOfLLSpg7OA8anJGrSKkj+MMEl1JSSmBOFyRVsAPEvWHOOVq8RMQUVHrW5Flecs5E6Grbtq2nEzOjw2pWOCfiaGRys5yWUpomJerUEepGblHVcfcxqAkJnbkXNumwtCbM3KqmVMqSmVgdTqeTmaVMPkZj910BrGlTcXHToYfmjolDnAAuvd5Tupg5pWIG8aZUddu21gJJcRlUamZRoiOixAQAKXHOGRKbmWsvcIVm7gE81nN/kT4Mf87ElCTkhGaWaOR9OVEa4no1Vwp3ILGJw4QZ9V109QqWsz/8OsbD68Bv/yff+uf//WN/8ZAeDCPOzMzhWoTcqapabcQ9gxVmDPzp8wK6x9s0NkNGTt1FuMqfdfwnuLujgoO6kVlMnpzPEqvHiE0mggDRe69eUDHlnNHR0Qcc3d/ev7tL1FpLZqaDv8cQgdfa1C2dUs6ZDofgAYcd1iJmfDIwJkSkUi2l5IuDO1MigIDGpSWRUXIFCqJiwXhEB6KUUsqlEGcwrU3NzHs81tfZDVW1tdYaidu2baoaN3M
8Hu/u7k6nk+XFwY63d1s9HbCY6+3t7buH+55vJXCg8KXcJ6QqI+WcMw0Om9nR8eRF+87s738VuA9CBOgkZbHsKaVaa6018kStibvf3tw02XTU2EVEnQI4ERbeRrq9Jx8Ngmc4JnUwR3kwXEzIOadUmFut9Xxe0fuXptwrIbEdouvlxggRGJzdO7Z5j5qZqkc7BAruKn6h9bMAsi8afLg+06o8UZzpRjzRwdgRAgaP7gaMGFw+/TrjTOv/i7ZM6ORtRKMsAO5qDu5qyFASmZG7Sg28DwAkJARkdVSFZiLbw+F495svfvX5716f13rICck3Ob1/OB2O6wI5xzg0TrECh8PhfIpEgam2BgZUYn0e13P4Qo7Q1GPOtrqd3p+JIIYCVVTGsOcXTygasZg5tKHpft5jh62amZhMb4GIArcf76vWunlTyFs9vX79+v3D+v50evOz+vz581cfvbi5ORwP5fnd8e7u5uaYXzx79uz2eHd3czwcClMTR0RwPz0+3NzcHI43vXAnYOaq21KOgEjgTAlSgoFu0rbBRZYUHN1lKJFBn2QLEDQ54Day/nsBQET/A8ShOw5Ff/KraH/wa64E0fYkhOuFvj/E9mkXj2h/RIp8iveU//1tTH9mfvLhdZ6Qh+HoHozxevPK80vnf6caxpV3K4bd4xsnK4d2cEqMbCmlzLyeK4Azc0oZkAO+AQBMqYk4MDg9Pj5+/ebN/cPpfG4BjIqQ42ZZYq53k3pH7BBDgMIUuI3sT9wEuCsAulFQ+8KoyV9DECOumOq/f2VEBITqtq7r5UMGdU9KUUIPbiEiGuw2bi5BgEeEHj1pABfau1nslbZVbbOcYxYuEBdszc/n84ELUnJwNSWwUo4horXWmKKBo4fNw2giszmSGSg7psHyioOwSsGt1UzceWVsUBuMfd9mYbOXo9XGXL4PnZNp93wyskSibRwiMtiJwyBceoC7zEjvyvMdgmN+xV7pfOdIzJ95sOmMl3Uhg70SXZWpDvtXbLtKo+/00boBeUIGhvMi8/b84vVd4QviV7PYNjUljhCYqUejO4/2neWXkRQTGf9EewFmzD0GX1ySRkROT+54XgGuDzMLPvdQGLPL65xPSESgcT/q7sfjMp92/54+/Mb9Ck4TAzsjxcyB25xvBbsjTohX4hXP2Dekjo/fXXOAm5EIZyCN/mT1fXgwhTMzBNF2yERrDcuBENHBTRATw7wfJUQiBINm6qZr3U6n00clExFE38tM5oVB4QgmIXbS+BYVNQQycuszUKd8EJGyDpwqRtZq4UGi4KST1wGjT11DjAxBpavTw9YCzGOmJgPDCaBiSy6lHNBiWDOX7OLeaosvampVI+dkahLuuA1dmvkV1cpIpZTEQJSYWUWbWkSDxIsOaoH+hyU56LlurfZADi6zVsQ9ap4ZEdXd3bbHGpIQcLLWWn9Gg+ilQZyudgOAQz4AYGZOOSNYKeVYMiGs6yq11+4HcF/54izCBbrZYbE9ohMRohSoDvsD3FZzQaa0j8XZk990tuVhoK9x8yH8fyAaxA8Gp87PP1Te/9PD3XswOMIVcNi2bYlMqTtiB4DFyJPLUNpxKPjsdVS/dJoJEYMxckrEI+9lZsidCKQ7QsGY5GrGsvvzi7W9dlkIaSKHmlZyQsZU6Obm5ng8gLXzeWOmA6CjF3VHQ3BxcEIzqmrnrfJpRcSSEjEw+OFwcFfQJb7aByDzYCXnbOYx6t3V0D3nXEpGdGIgAvGm1hCREpZ8yDHBsxwM4VTbVtVNmulWZW7kjh2UK41F6rZt4ArAQQR1d/s8SOXW7czM9VGWkrdty5lnv0REPkPvAqzed/FlWXLOsUvPNN7eAu9f/T7+6ad5Z3EFAPeLyzhNNHUajCtsZLfqfsGMzegXLi4XuCN2aWPEzrnuI3aqVQ4HA4Ccs2u2aA9OV6gbRkckVUXXaOAGN3dUVzNJqcTd2mjYhp1P4NcOhA9bNDMaNDCZzN+iXFeXuq7kPPnvXGQMopQLvvTixwfw+IO3Q7HlRYjSIyeP8YwSBtfdwZyJlpK2qkEqW9WkQXNvjmL41//hP/6Xv/m7//Af/sv9u7efvnqRjoXTjYNt24YAdMBl6QOHEKCUpFIUtmoiIiYsLDH205qFU+hA0SAAiLSxaM2ZO52tKic8EqW0EHpOVGL4GgdklDkldQtqx7FK/d0EO3Qu7MCImYiQHABSSqJta06ZRfzx8fFU9VQr3vG7x/PnX34JbmZSEi0Hzsw3t4fbw/Lq5fOPPvro1YuXr169+vjjj54/f17KIfGNWZLazPCQS87kpGJA0Ju6sTY3E22qmg/sPsI5R3cLU9+dQvCJWiSHkMO95Z/CjDuncDrBM/CYcjV/VlW3i1ztRcJ3Aef0m+mDLM88Ye8Bzm8cjveFPX/vv+1vaX812LlGcRrjnMd99Sezl9JH+PTkWfaqsf98PObYzhwmN3+02qtpVCYSMAKCE7gHqysCY2E9bcBsSPcP796+fXc+n1tztWDK6I3QrrZJBVci1LBRSAgOHZ7aPQF0N0CEIH5BdVAxcxS9OOKIBIixLQEADmbIfh1IgZmzMd2NmTBxxj4SScXVmmonM6m19kVACy8dsROl8OgIFZGI8wHACJqKmaWcD4eSc+6z5h2ryP3D+VgIKTU3MCMnzugxltcc+kRfQnc3hT6KVgmVAQzQ8SJpgdiHqHWrBgkpRAoY+qXMgWP8xnDDEYGRaExmn9K114IpmTaBTkQw/PYwbvP8vRTt5W3e50z2xZkzUJyRG+4C0XnN+eHA516UaK87+51iZ6Knr2VuvbbRbwy9Tz7bbQFPrj9/MLuCqY87vwpoZ9g/rzkXNryjhKMcyde0UdNRiGkBLh08HSdc3NMx1qm1JibTQO8Slj3qVVXc/ZYZo2yHiESYEkaD5Vz3/fHk4eNJGBABFWfAzT02wwsoeS733t+dizslYP5zL1XxV61tXGJwMxJRVJw4J3d0QsLojulobHGNdznTZuSJiFLKAEjDf43fI2IpRcwSUgz+wjwqFYgWfcoAPh5wa3L/8B4/Rjd0U5FmLomQmYhYWzVzcQGAHBXlQeQTPXDkBsYxuol9FLXDC0TM1DOptfaeFr2S7MsSdb9z6Ji5n9c1oCkzyo2gt5RD4pWc3CGlVLLlJs1rDwa0NZWYMSWmRIApaA4vhAHufm7nINLNKSOlGCZRq8xhO73zs2dVMOci5qd1q9IM0IEoJ1XUphLtMbGNUURu/nB6PJ/PqhpFJ1Ng5mVZbu/uhhawqq7ruq5ra207r4zIuXBmdMg5LzfHzKSqJtXdHYLJUCGGB0Xv3+htw14zi2Z3iB7xSXmiqjEuxd1Fxd1L6eRM+5zuNIh+vcfPmtHeQ7pSog/23f0P33r8od8+sarf+vlUM5wkyBhGdhQMwdX7rAUnhJhiTBgWPKCBhEE5D7NeDRfSOWhtE6mFytyt+/rEeroY8FilHQXU8J7nK3GA6DVlBHFLlG7ubp89u2Py7fFUtw3QC0I6LM2U3YhQmiGhI5np2iptKWVChEJMTDln5pKJPbIVAxdwTKUXkU
SiaMgIh8PBwXLOpSRmbFq3upoJIt4eb8IPRk5VhFwiptq2rda6tabqjjPZb9IkyoPMTMzRs8rMx9vbSuksNTK7RITAp9N6XA5Eoy9hUGAjYhPJo28+xM/hsv57s7yXkCcWG3rxFhHgwibk7u6tSVz21JqI5FxUfdu2SInOXSkmVgAEM5xHxnRmRkZ5JMBENp3XIMcLVylnYs6JWZ32MjsdMgJCwDmx0LwHhPsBJxBsh2Oc5n5nmnvltPkzJN77H0/0aNw/zf1uv4Z7J2Ouavyz9J6ry9WmVH+7x9AZyxAxGmXBRwnF1QAsEdnotk1EdnjRWju1RpRKPtTz+i8/+/U//fyX/+u//Xe/+s1vv3799pAowdkVXhyXkrhVr7USQc7sBKrGiIl5WXLFaORtKk21RTidnM1AIpsJ0Dt+xQB6PhPROKWc83I8HJZDBllyujkeUikRExqMAnKzFkgT1YhzAUDcEnZ/hlO3nEh+PB6ZuZ3WzN1SEqKqnis1j7ZABdNzFTypu6qImRDB7fF4d3f3/PnzTz/+5OOPP37+6vDy5ctPPvr45asXr54/o+WoYtIsM2ZK5KYmoIIAiVKmZN26OIA79CFuAAB9bAkCRnoqfvMt9nnqkY+ga58xnAHkpHO8+DZji5yfuHv0su5TNrG/2LUEXgTyA5mK3wYXIF5XPPaR6pRq2nG6TIndXedqXM188ForjcLF/lb358ztD3YO3vhV/J8jMDq5AqG7ubpH7nApB4lJz1o7oRlnJKziAgTIp62+eXv/eFod0MDO5zMABJBKpJ1Op8x2PCQ3paD+AFcH66ww/XkNA9VHSAiIBoHt7PV8ZghMwnDTL7hZGG5zYmdjZoQgQcYxAs6MqANxVc0dUgpfZeuvD41HtiiWz8cCmtkldxRgSPJDPtzc3CxLTilxTsxo1h4ez89uD4eSmikBKIC1+EsmdGQC6Cw4jAQIwE4ISAoBZ3TbLnIUQ54gYCiACMjuDoTee83M0SNxdiX8mIAIdkmB/ggGRIQj0Ya7zr3QiwhMIC59zV8w5XaKFl1XyKeORJl6ytg8Z1Ya5/lxA63pXlznFqA72sv9g8DgPeo3M/Yc29WQn9QJn+ywcajqEA2cSwEA0fjgu1DQR83vWl9GQLhfo7HYCE6lpNZaM+wwU70sirtP7qm5ZYqIem9+mF/mHv2rXbgD3dN5NQqaqUGkzdABmF3lqlNzn3nah9f7Vzhxj/tjrtZ+R4eLVQXo+6iAuRKISMnZ3WWMh6axTYoDdluDHX/DREoKiNipfs1FwaPVbF+KBIDwOsLQy1Zba4Foaq3lnA+HgxlkTowoZglQR5kofNqI2zgzgYvp+9OjvszkEJ20bmgAIG6gasbuTu7NVTWN7F0MEAs4WDjfjshsS2ZEjLn2AU4FQDCYGaaZ6kDscRQOQFRMCAxrfuDco/3xmoL4m5m1NtemYkQp54W5uTtiuKqTYCoEVJw855xzYubJ7DorYHGEy6uq7io6rGeEFl4U1N1X1XOTx/O2qamjgjOzOnGmeHspJUrJwEyDVVWjUzz4knGURB4fz8OZnj1CsWtkAgQiRDYXRyBKOaWcs1Qm7Li7yOpcAEt42ct9oE/JO2FAOOSAuB8pOxVhii4NUhndtzi3XfZ0Zy+m1Xhitv4Ab9TTwz8gJPjwhP/+FRAxmHjin/mwLMviJpdhWRGWRXgWkSJEggwAYigKG3OQwiAz58Q5OygRZCBO3e6bGcZobCZkwg7eiSkRwMFZmVJY3f4W/kCyzQGWZVlbLUt68eJFOab7t2/u799hrYawEj1blnAAAHxrLSUWEzMVS5xkrVJKIc75UBCxcDoshQBdNRxiREw5p5SyuZktaSFEIliWBcxT5rwkIkiNMWHXr1QAwMCbSK1trSLN3NktQAkYfkdIs5m36q01jCgup0NJzKxNELmU8oJfPdZTKQUAD4dD285lKWbRZd/JmdUUB/Yh9t6+xcq38AfuXzd8W6DIzG4APucZX9W7YJeGM3PT+ASv06xgGPTnQ66uU3uxG1k3WUSYUkrbo6SUCt/krCKyrjWazGlHtx3PGB0mKScHdjAYjKmJUweCzLjug8ZaH045jjhwcp/MR8PORHW1MtNX0CuCu8uCzNvbf93+2/cb+T7ju9vmHQC0V6EcQKPHhBEIU20bAeacSkqq6irapAGegHK+ywzv37//p5//4h//6Wf/6W/+7h9/9stv3tw/nhvmgxO+e9zQKWGW4gVJRNbVmJEOhQBSIkqYokAHRqitNR+7+dbCsmEwk3HJAYw2M06YmcLmLstyPJRSygG0LEtMIQpmvypybhLbUKRFmloVa2oeQ61yTiWXUqJZK8wkE+ScRVZr1TxXaSWpmr/e1lLKsuSUqXBKiSJxhKSyra22x7p+/W7z37xm/mUpJR3zzc3hu5999qPvf++7n336nY8++vTV85cvnr24Pd4uvKTivjVTNsOEyIzgkeGEnhSJlC9IyPpOKuBqQPcfPKaKDRt/cVLxulgxTf1e3Yg6GOSJ/4bX179sH3Sla0PqesAzqYmICABVbZ+1hIsXhFNE4dpKyADsPZFzvBBQfUsO9Fv3Jt/lUOZSkAO6MxEipV1K0d1jdpQbRv0pvqUJGhQT/ObNm9ff3J9rZySKjSkkSrZTay0NkA5RtGlFOUwHnWo8EZkJmhsggIFDUyd3jMHd4AYGOwOyh62FXjdtRBfGckRUVYl+MMZpmiIZqn4pbT2xtPFhXH/iYwmwNT1vm5mVnJdlwT5mloERID2s7WGtzEfnKIuQyImDb8tjhqTFtIvIzSEgoIIbgIQdz3ycN7Cfk5lSCo7WKR79vRcC95j8ECpCFGgLvph7APCYyYRTLi9y1ZHqO4akoLkYybspadN4uvvxeJwffusJT4Qz6me+y63s0zR74Rwv6IrJ7InEzu3A3aO/xq9mwOgFuT3ynvsL7t/y/pP9beyNAwDUWnkQ8I70BLt7Gg88Alnp3DXBkTgUFbHHNhQeeTg5PnzQiGGiR8uvUd2hhfNGB+Ijxb48/hxEvNa6bZVyCqCogis4IDATQQ8Ir21XBHgy3tbOh6bd+g7tmvkDZoLkwV4wWxGJOh/UND19g8+lQyJ9fHX4roiABsAAkQHv9i6EfpAlXl6MSvSid9rYCJzMjIFLStFCD9FtYOZ4mXoM7sFNYK7rtsnmVEpmTimZgplsWzVty1IAyId9k9He6QPp2jmKYbItH5MjI5oJQUeBi/ewNl7KbgFJB23msiwiArBFZHJuJxrl5dZaACBzzmJR/jIVZyZnR1cRWTJWV9RYltAlc9CgKWMm5IHCBkDyQz72hkOtaiBi8X4zp5g/BUMDxVRVV8Ct2WkTUXMEA2ciJyxlAaApGNpk21qTbRNxQuaclnjRiVNy4uXmSGM0orSYLofIKScCNGR2REcGZGRyQiA0iFrDtOZidjUnc3w7KDiI9gbUoRc5ZwCS7Ty0nab5btDmfuw7oqrIFe63arjebmG3wU9jBteH75pX9+c/+fz/xyOC6l5NCmcaYsq0ihuEpbYLQ2wzE4+ijOnuy3uMA
GNsnJmCOhozEhJBMMj1B4r/UWAacCz1TJJFAmhHkx2JOIhEFABEyRANMIlUd80539wcDG1d1/VcD+Rbs60oUgICVUQHFQVEAnZEdd+a5q2lVFPixIimKWi4EiLEjEhDwNYaIgYK+rAU7KgBzSlF4z2AiVQz50Q5ZzJUcFVrzWqV1tQM1MGc1AOD5GZNRETUzFRQteUUnVY55s6fTie/v7/76OPnz5/jmtb1VOsW3KV9tcMyxRhxU3dfUkqcQ90ihxUuPeFlX+zLO/75ZMucW+M+C4HDz+ugEpFw3LdtU/Xj4dYuriMSEXWj6KHg0W/Zk2FxDidz9kEo2ktDnImqiKu16CAmSq4wJexqmx9ZPHcPLi5wIuqzTFRtumIwyCouz7tLPM+ddZZELjvjbnOBXXIHRmVjbyX2e9DEm8w/FJn8T+GCXPKkhkAE00/qxZ1uxns9iqJLDz1xVIqgijFQyse4/nmj9+v21Ve///u//8f//Nd/8y8//8XrN/ebkhoZFkBYWyXzkuS4qRvmJep1kgkZfck5gP2myAkJMjMsWlxGQLjaBERQSqXklAkRExIRJMaU0pLzcsillMLpJntKKY9ahEFnfkboVJ/hOoqomgNATjM+GblaVanbtq05MzioNCrZAZoGowg1MQfFJitjEIk7WElZPIk7AElcqhrU6ufmX7//zRdv//a//eyQUoByZQABAABJREFU08tnNz/9yY9/+uMffv87n3zv00+++9knz44HyonMxAWqAWOwTQJIFPIHajTeCAU6YWTC8DpEvAqopvzYDtI2aZndr2KkIdpP0tM4Q469RxuSsf9S3yUEcXeMe75UG6D3TV0KAPsdZ96nX1e85xeZOSFO4onLJkLk0Z+zy4+Y+76y8STsfHogAkDi1CPjIL7SrrAqiokTsRMpOCKrmYga3Trh4+P563cP96c1CgSOdDjk88OZmZdD0YqBNNpaLbGR9eeKlXTsBBkBV0F1jTSTqlLKjgSIFsmxKBWYizTf5Y8QEZCAsG51mgLOaQICor0WEQ2MiDJlc/Rm+WoB+kuzkV4X11EuI0Q0xE1aa80R8mE53ByZ2REcQdQz5nWTh0fJyZaSuBydANgiuFUXcidOiQwtxks4gHGQskBPf4i28crcO68J8252F3YPeTBpqbiZ6iULj73i15jziHUIohpGFMSTUwDMLLoooxLeg5QdZDQafz6U6tnDuf+QiObn82bmb22XfJ8iOtVqXqrL/L7kuSNSmRqBF1YI3WuQX74DAeDiHF8fEWDv9StuyVzmGj6JIcfK+5U9mQ/zRFef7F6jRafv9DGV10dAGPt69Ht0b5UHBrVf52lKOAy1urmBGah6tGktOcHOfvWlIATwfAFLXITpA2t1ufknz2JmuJtcScHSg5QSjS2/G7P9Btyhs9CRVEDo6hLsBlE4Tl0IghvQpf+hD4BokE8ASKdmjv436s1pnHJKEfICOgQpawfjEjq5xWxrQgAXdxFj1mSJkAFdvbMv5JwdPbJWjoDUkydi1onaR/U1XqjBacmFmQfYWtnA1QrN8uzecLMOI4KIpZTDQcOn2c7r3GZqrbUpswGiGTBnRVACcwNTAGqiNiLS2M0djBlTSpwSMvXhqlEYRDMz9AieA0Xp8QMSL8ui7uhuBmIxm7uqal1SVdiaqicHbWqEYO4E6uCuHQVd69q2KiKtRWTCIuauKXVVDyWutZ7P5zAiIY3LcowOI8QeYahqc7vIIPZlp84Rd5lpgxgLq6oIqDlAu+NlE9HYQi6FiC6N1DfUi6MzPNr41q6jOyWfGvTEcCA+9RuefLLXfR+Qng+PD6/85LcROExNrLXqIVPvs71U7E0VEMEx8DbxxR7VYHB3N3d1URczE7cCMRCQCBCpV5ijUX/elaGlGC8EZr00oe59Pqf7bMC6PEX8kxBrW1MpNzeHtKRt22I/QM66be6YUkG0CCBVq1mCjhrHpnraKgAQATrgklaCXJEs6OyDiN+cyVSAKBmiO6CZVlNAKAF26GLfc0O+NXV3dVPx2nyrum51FT3XttZWN6nSRoOkuXuinFKaNo2Zzez+4f7L+/uPq34E+OzFM9Wma6vnLefU/xaAUzQng9qlNO3u4FM4lYjcrra6KSf7ldyb7mlyzUJbr0hW9llVGH5qb+SmKwHbx1cAYODkgBjb3mhkwITY//zVq48f7h8fTzVkOGdqFtm6SzjX526hJSQRMZdOpgM+V+BbxX5/w1MNu139YDPdr8P+Vzjc7g91No6ZkIVd3Dhr6AMa0+tfqnLxVmY0CIAg/V+Bth2hAqes4lUEwJnJDE+n0+Pj4+eP6z/9wz/+9V//f371q988nk+iUI3XprW5h51XOTI0JzXEXFpb3R3Rt21zUDgcODknBzAihJI4gWt3ksjheOjRso7BSylTjJ3goDdLlHMumQPqfJORAmQB3lTVTdXcfZNWaw3Wr+mYIXZ+qGFy1Yxdm0hKKX306sXvXr89CzCzRFmGsbZmY4hiygSQcmagpADA2RVrrWZGlCDA58SIWQAfz/Lm7cPvfvf6V7/54j/99d33Pv3oR9/77k9+/MMffvez73zy8Xc++eju5pYLyfqeiCjAgWF/QHy0lozwLxwkBAhc3j4Ve8kw7veFaT+ffLI/pu2dGweMsGgvmfFDaFs4RwAQ8+sRYQcu7N+5V1UAmF2s8S06yL32gj2bAJ9IOxHhB8Yk/ql65RbPJ1IbDu6uW/iJWu3XjTsKx/DCEOkA4EjJkBJbr2shIjma80FM3z6sb+7PTUwNxCAvxZQU9Hg8Hu9u3739stZ6WA7emhGKKccQoskj0isQECnpmERjbgZITn3iOoAnDGCZKUjrVpGIwIMgHN0jgI2CrM8CoJkgZkfAYI/CC8lc4uG+Xhazr16Y+m54B1yuShPTnA/Pnj27u7srpZM/NW2p5HOz+8czpZyrChYAWIgjg0OQCS0hKqh6JXQGDTI2cKNQREStDdhSn+oTsFJipk6OhYizTQgBEVRkF1zlmDcITj6S4PN53C+QKNg5JNEo2QlstJOETpGwHaH6zNzNM2EX9e0lasr8Xq2mTZ57ZUTa8+S93kWP0n7jmJe95Dt2aUrqg0l20GsG6O2F7qM/a+6zsdHvFWHe5H5x5joESY+NxtR5nTSVPPYg3EXGoZL9KmMt+i5lV1pqk9Cpk2QQ5971FNNC9meqgohQnxXkkbYk8t5WhDBdZJydr6FdhOE+B9ja7JK7jYoKReGcelyxP2zwmm7bBpAjTYJBIzMSikTE/a3IED6aF4DrQ7UFQSEhpUzuHQccT2FjWlxChhyEaTklaq2d64OZLceSCwMaoTMSzxc29zZG6il7a00pIyMBogcRh6qjR+c0c8bgp0EwZnYuiUIEwQGRHILvLUy/oTsDtFUBKDmAaWa27OSubrU279ybQ5KJcEy3D7WZmgAANzc3EYkBAOeUofO5OTFyRiYTUW1NTcVrrWhr1ASIwMyQIOfDsiylZE44PDx3d4zQFFw1Em+JwcxAVMF9a01ExNzMtqa1qagDUDVshgLkSIYURDLmra4K
yNF/IrW11n19MyTCaJQlorg3RDR1HNPVfETRAHBaz+6+5OKqiEAN0kqJMXc/HMx6nTA8N9+xwJmZkqkIC9BC88BLjsMvidvh5uOAymD3ES8BYXgqNOkriPZiut8X+yfj8ye77P5nv07W+re5Gk8u+y3H+CNHiKb+lPOyLNRZIgFdMXLliE7RNxiZjC5XKXoD2CID2teUyF3dIcqKI93kZjYcGgfoRtnQwNHQAIK3BhwxiDedUM0JLv6HAxBgoPBv747Pnt0WTic9afdBo6JNzMzIouIAYpDA13XFGI0DjAgpmZjH7BNVbU0TOKGjdkBkZicQVxcXyUiAYAJgJkZEHBPZmVQVHFS1bgaE5ljVtibntb5/3E61ba2et7quq4wdNJ6lLEVFUiLEzowU40N/8bvfvTvXh7r9j3/1lzkvRPT2m9PhEJw3BgM0goiuCUhERFP37bgfgMBNn4w4373s3T63P4eok2r4MP4i5C6llFLK+eEh8lnMYRD6vigiQKICFqSx9EHaAgFjLIQjOSHydKbN7NPPPr093j2eDq29c/d1XcFuQrkuab6ge0nAzFXsQhLW26s76uTirQ6/wQbvGu3axaee2shtT0yOu7v3Xg7cHbG2e42bdlV2A+ht7KHuznyMbDr0ipBrb1HbqzOGb+/u6RJXBJs6qKM7VnVENF4QWZHuH+5//vPPf/mLX//7v/38q6+++ubNa63NkZvapqaIgo7ECMREyC6mq7YX6blsD7GYEVGjOydn9CWnKFASsyHGeDTEC/dMMJCVJS25IGJCSCnllJgx4ZjXmhi80/SLqVYxdVU3x/N5e1zX8/m8qTmSqkMUglNKKUZ3YmysAUn9+NWL7376nV998dX67uTuTZqAcXYQ74ED4uy3iWREsNQG9bRBDEJQUCPAhs6IJR3Sgm721Tfvv/jdV3//T794cXv76uXzH//wB3/2Z3/20z/+ycuXL3/4yS0QARqxAZBjMwuODAMAjiDQo3eXIGCM11Y3REJ2YELYuZvrehonXjxRAEgp4WhO239OoxdmlgpnIuNDrxHgEhC6+yztTocqOBWn0zmd4ynhc6uCXfi6PwHwol++c753X7pjrECsbYPeLuF+Ib17ahwQg5UMRdvc1BAxEce+rGIG4ecoIHNmZyJMm/FprW/enx4eN9EooFEpx7pt4HRzd3tzc9Naezydbm5zpGDdvaowZSIiJndwtN79BBelJiJArFudDnMIOX4wRG4ugu+SAm44k+mcyoBNwCijgoiKSFl64EF4vbMTQh+qZ0Tk1GtWm6g5UkrL8ZCXhWO4NICIwcJN/XETeqzMLFjN7PaAi6dDyZwwERq5mqhjZjAQBgAUBHPCRABEbNKHl030r5qIEqKbIwJT3IhDRO/X+YUwuSqeUgqSANj1wsEIa/fGM3gZ93m6aJuK2KSUMk+eghe43/n5XmX4ml9zeqfz23FkXee7myfMawKA20UR9gIP0Fkk9k2J2KkxLqMKrneNi5BMrdnf1TQOe2Xc7zLzz+cJc0NMqp5SKqUEgCduutZacuf9j0S9NK21Hg4HiH5NpAgaI6ASkWVZ9mukqoGCW9tmYwiEqLZaU2JbGEfLXSQSeBCI9ceKBNB4/vni0ziBiOIr4sFqrctyBIBAbNpYmtYaMx+Px0Rca3VXpOD/UDPLTAHZ2rYtp0QxNOupZQnCPY4gc8llWZa6tXQ4BPLDRaPvDpDqdjZHETmfz94nsxMApVRErG211sqcS7n0kkUmY11bLiitcSmllCoCoIjkKnMIT0pJRMxS8I6kRKUsTSqir1LdLKUEgNG1nHNOnebdkMkJpDUPxAVRU02JxToEszU1hcPhkJibnltr5/N525q7l1Jub28PS2615tSHzqPDoSyHsqhq7anEzim8wubGOedzDSgCBt2zmIs3Az+WotpUm7sT4/Pnz29ubnptBAC6I4UDCdNqaw6UUjazda1NBJHEhR2YMyKIbO6QcgFM27ad11bFiZfH03mr4kgPpxMQI6at1q3V0EoTtd7FwTBonUISIGaoDLMw1SyIsA/EzOxqta48/tDMatWbu1s3qWdFREpclsXUWmtcMomyASIxc0lH8xbCSZSZuUprW3PrLZpTd7obPjQ27M7UBRFh5nAFw1lBJByKHUw801xO42jwtLt6by/21gT+u8fe1ux/ADUZY3biWww8IZrZ1uoxpVJKr3KnJE0o87xgKpmMHh4eECAgYf2eo27s5u6cuKp4HyoAqlpr1UNidgAAJkwMDoE19c6Oa4MMQLZtyzmrIgDGUoedVjDiPE/7zne+Iy6IkHOOnbdkOtwcVTXnlHM2acsht9YMEM1B6GxhPyUlSoRSmx8yIxCWY06cyQ21yfn8eHM4llLAfT0/ZOqcQ63qzc2NqazSlmVRtU1aKcWBRXRrstb2eK6P53Z/Op+3bd1aVVE1Dc5Rs2VZDocDMyOYSDOzV69eIeKS+mjyb7755iQtl8Of/NmfbPW0LMfb27u3r18vyyEvx3Or6pASqSonQsDz+Zzzcj6d3rx50/dX6gPWrw7sYqNzPu8wboidOZiIwvVjvrh3UdWPa7TW3DEnVlHAzMwpRcHHCYmYxDSlNJlIB3AjshvAzAi+bdu6rvGwv/jFL37wvR++fPny7dvT27dvCUsUx3LOAKuqllLCDxARNKdEgIQEjJmwACR1c7dws7pLh7j33nDXjDp9BRgkAdMbnjt3uGIppRBaRFyWJZR9RpI+4lW4TtxM/dLO8x7OtAEYDhsVCBTpDTPdB8ooMcJH3UzdITkhYGpmRIsjPTxuv/78t//17/7b3/7t3/7617+9X2OHZSVorTW1pl4NFQHcMrOD17aRmbhVrR/f3QWGYkmMCLWt6wpL5gZ+OC6HcgAACObAgAw06RU8InfPaCXBoZTMveO917eJAExFU87uXkW31qpIUxfTTe3d/f35fG5NmwpRSkwGDAAlZXdF55xz5uSgrrosC4H/9Kd/9M3DafvHX75b5Xjz/M37kyMtyCoSYSciilRVjNdUvUaKnCmDqkZ2VaojEjMghvEgQKCFMm2Grx+237/73T/+4nf/77/9l5/+9I9+/OMf/y9/9qNXr1784IefpYRgyonMcSmLSD2UBQBUPXNyx1YVQDld3NDpvYUHpbsiyfxh53Re2WEz8zEmnka32zTasbnEBUPeOi3bNYDZB8+ZOxJZ5HDDnIoIDAqcuQVA0PnufFMYbvq81QhEQ1ZVVVrvFtn/iV+yn7PNh+dT+67ugQQ4ainDG6S5RaqqWWVm4j6IS0Fjrmk6Zje0oGNAMgcTb2aWy9v3b94/rKLQ3AGzk53W1QAez6dD4u9+7/tf/f6333z9u9vz8uzuNoZeIqLzRFChg8eQNwMH8JQy7kZtISLn/shV2lyunHMpS6ywamSrLRHTaMlxdzVRVUBUa+EkpNSZ/Mb7Gq1V1O1qSp0ssLUmopFtibcvIpyPmeD4/NXN3XNEbq0hMGybu79/fEiEBlwNlpwezgIAW3M++ZIbM+ZEpfDtcTncHEVrZgdyMFGrjqBgoJ5T9yVMZl4
AmbhPgwBzBSRiYlOV2jDFOeniEGLC5ABgqjBUINhiwgvCAakdb9yfJOZoBzax7id3soxQhCl7T+QQsbd4wDW2fxrbkOFeDmFOKUXVZMaNOqbI5KVMyQeAyQSRM3uvk1mks+KfonECzjuc2uoTnrl7KOtMCjuAW2RASjYztT4dJMJyJgw/Bzw2MTToFYiEl0Dzot6zdjG3sWVZTPvAEyJCmuwy/fzWmvkFAI2jTY6IJMCBEc6lFJQhPcfjFgHhfLa9p9ErJOTowTOkgIh2AXMyc7jNYe+AKKhXaYAY+5YcBbMwLpyYUTu8Pl6+e698dsxoJHz7agDjCEyxOzgOAKfTiYhSXswMVHyMeQjuo5SS94glhhaE57l7f3h5qXF1JOqJ5ii1m7kjYaJAJ/ZkWDgBFxmYptMQkREQ3dyBzDGgXyklCN4FIDWFgV1HJHU3UQLPnKK5HxDDsN7d3d3d0VAnghEOxT9Vo0pjZsaAS8oJCRHFjQDFpFZPiAIGFg3VzIjHpeAB2/mBmdNABN3eHnPOItVcCNLwpbpOEiYkcDMzEPWI4QjBnczAQFvM6TQHp9r0vNYHd2m61nbetIrFUEg3b9qaqIqphw89MOsWtI1pqlDfkAhgpxFT00pZmNmgEZGprOta6womN4eje86EaTlkQiS2kcKcHnAXaXRwJUp66Q9mYtYBt96bpA+PvfEKgzVrRIQ4iSW3ptO6TUlz9z1JwB/6iv+/jv0uDgCMyN77GgF66SLsRmsp8Cg86L+siSC21geNJmIgzin54AMQ6E6MmZmJepSHHTER8R6GF8+pwQ4IjpQIGUfHGyJecAeIhCRqSM4JmTEjEaK411oB/NWrV8SomzJiTMkDaaIeWSSYiUM0AKubeVALAKhDFTydN0a34yFUNTyoQ2JQkdqQoEkF9O70YnSmgaqez4/RzmF2Z2bbup7PZ4EDAKnbucnDut2fTo+n7SyybbXFSFx0oJQ5hNnVA7ZKGL6XCLC6e9vEUtP7x1/84heH28O/+uEP6MVH7+7f4MiettZAzb1gsB9v1cwQuZQoIAAEAwSkvRBOSbUduOW/L71zd3C/WHJ3MAMi0tH05t5Tgp2Ab/eNO3lzD35JdMfLDG4ielxPX331lUNz327vihsAWASBgBJQQ+i6mRjJvI6Efqht/5bIKA9zRDBqejiOfdYm9sF5e3NNEDH8tukZzx+myj/5q71TPl1hIjKNzy2kK/rt4z4NvDUxs5yXcOJFBOsDIxFncAcExQTAzSktd28fzj//xS//03/5u7/+L3/z699+LuZLOQZ9nImKVBETNXEw4Cj1iwu7I6ihIwbXGJWSVJEC0OdWaz2d8PnzOyKKKCsgo2HYeVDqEYCZEuCSOSc8lDJeH/QGrBgTDejmYtJU1VHd100f1vPjum5VgmUxdD86imHoOwESGAAyOpjXbTuU4x//5I9+//r9+8+/fHx/z1gS9nY4AAQw9CgyIwCIVEQG6Ds7ImUiIBJvAFGVMUVHxPBgDBgs4i6stb79/Tdfn9rf//J3v/r7//zq1as///M/+eM/+lc/+OFnL58fRUxWOS4HJw6i16aurbphKWXm+ueDTN2ZPsm06lMah7pctMPMAtWEI4XhA5a2v8j8YWZzpi53Bw/RrE/sHNYsai9PCR2e/HNvDZ78sP8V7hDmsNt/54755JGRdtXF8T3gF5fdLoMxEACi21/jIuAA0VpBYKCGBuhIBiTiTczMz+v5/cP6cK6bGlGiBGjqqLXWm2d3ZMqSPv70s3V9fDivy7Jsrsy8XPMMB+KTEkInZazBqBk+9XxwGkWz+YImcHEuS7jQ8wSH0QJ6zY2/W5O+Ygw4vKkeOMUrVvTepeeu7s0Ac1mON3k5cE4pZ0hpeDvaFIgkF0/ABgyArUpZCCwRwCZGTU/1nDMfl1QylYQITJSIenMU6eZOxNCHvru6ocMFKex97PwIgHFU5GzyUoePlCZeozOPY+hpw+sUho255T5b+/yynnuzvD908Bfi7vhWSR7fK9M3noI6gXX7fLrthqjPVzx3kAg456W88+lo6s2CPqV6FxBejnl7tKuUDg2Nb7xGSl8jYJ9oHwCkJ1bGR4iHPaODYQhKKa2mQNntVReGezT/3LqsD2xrYsQ+ezQGuUPP1BZVBNMJau+vqmRgAiZ3G9mvC3TYzOYIpvGeLvvrkDA39xm9uLvpGA0H1slduxZFv+mwNXgh5hrL1HEYw1KOiN91SRmYSilqFnjoKNaoKgTIDZCIzFHVPdof3N0x8JsKnRbUzBw0ws14sM61BzDuhnywHahq5pKZAo0OvgNbUzD0BUs72eDrQSYgMgRxE1NxTIBhwiwYWZA45ZSyE5l7FBlKKYErC0+xVU3MhGkaXIAeOt4uh7BotbXaNkY4rWutTVRFwww5ACGnQGPeHEuI07IsnNPhUFJKxMbMy7LkHO6m9v4xc0C2jp+02iHwJG4gog41oiEgADhv+nhuZ4BW9Vy3KqoGFpzD4CKjm8zM9WJDB2RlOvoJBmexg4VDEmcO+6LhQiCBawx/E2uCDtJqKWlJ2dFlq1gdAJphlDWcwhIbosewcgST6G5JkRlSVZ8Kcq2Sl01ial8cqVNosl43h0wjNf+8//baV58W5Mle/uGx13e49lTmdUICffc1fYmJohJXVRgw8n80EPzxCucTMbPWxkyKiENVA8XtQbENgZMlBHLDIHp1a+NMM0dNQAhIaeab+uqNRj3AK5+DmAkQzAH11UcvCFCbMPOhLNuybLWigXn0lxuiAzkBuGMAmwHADBu6V3vv3uoKAFKpbVXawdX9WMjBANjtvK1NZcklEYoIj4KAmQd37iOuZhZbxfu2EZE6PKzru8fzu4fHh/PaxNYWrU1UmHKihIRgqkqMzETAElivaPlzr7UaPMK6PaxbzvmTjz56+fLZw/v3ik1VKTkAhZHk3asBgMPhkHqIzmZC+FQy9wI1l3oWLsJE74Wkv+ILF8MUbDMz6IGFmRmNv/Jus30v2PFLxEs7A5K5a/xFznw6nZo83t5yzrlVtw529egZm/trUJqKCKAgASBTTNwGddfj8TCdYNg5Ft11Q7zezi/E31OzxiNcsjNzw7Zdoveyge56g/eaRZeRzcGUYO5q43UQUYCSVN1QDRIROibiRQ03ETEHLsT5/bl+c396f/r9f/vHf/n3/+E///PPfvW4VuDsQOtJcgJVa9K0iYopuENMSUWP1k006uxfEJCulBIzmlSLWFLkdNLjcXE/IKdEGJoJAEQpKjZLzsyEDomAmRNxTtOBMzMEgFB43Sg+UkADr03fPT7ePz7cv3+s0kTEEd2VPOYWsbsnQgIkBwJ0VzAHVXMD0Z/+0U9+++U3X9+f1rcPuWQHiMWM2UIRY3dnDtFdoc9bIkQL6uhEfdc2tWCIg6gilgIAgMwAOR/WWr9+f359f7r//LRk/tmvP//TP/mjP/+LP/mTn/7ko1fPXr18WXVzIAIidCZyRxedTs5MClwU59uS5nDl8D0NCCMHNs+cBvbJvjCE6mmPenyYcq8mXew5Yu9cfXI4AWCs53
7T2d/z/Ir5SeRT5o3N/075n2eOn7XH+7DD8tl8FriOjsDpgkBx6O8LsJiDmAkQYKpi57XVKgb4uN6/e386r6Ia7wcB1Aya2LO7u3pegdInn3334fHtN69/v6ll1YWSDVfT3WOyIYAjEqEpmJkjemRPEo2lGFRb0SsO7g7Wu2M6WhICvwZDfRwvlR+IFgVz4IvJRURX8ZCcC07BprmbSzFFXRre3B4PN89LXhKXVBag5O6cUDZXUUfgrTgXTgBA27ZlsZyVmZGAGQ+QM+DapGTMmRJjSbzklDIjYn18w9D7wh2iFcsd0SlxcINbRLmMCMzwpDfLRy/k3kUZcnKdAJqw52vPxMwMeplqVsWnRD1NH+DVVrXXmpkLhpFk2WvWNN3hws2Ctu+Ao7gjkt2/x8u7c7cAUyDOsjldZ/CJyEcr6RMVG1ewJzvIE5cyPp9lzL2GwgwII9EZYUx3qnSwj6iouLODk5mF98nMAJesUn8eG5lmgIwlHglGYAnQ80yIIALMN+6GboB9AEvc4t7S9Ooh9bxs3Pa8desFgb4dBjGJiUJvqLtKG4z1Cm28ymHHY7bWYkrq9avqeh6bvEaOAxwdRJVjOzE31dYHo4sYBv97r9kHBamj6cUXDxmN99G0RsmZ3AFN3KL4ljiHByYqTsBozgkdcuFcOLIMAIA+XeleazYAjK+LxLooRZUGKOrCBp7MdFlihgszOqEOx+Xu2V0UcFIqITdExGSJGbzzXsaWEN96CE458G1ba60isq6n07rFY0rTTZqqE6YcYyUIPML1TACQEy1LLksKtnhmNgRVQlWRTUQapCYOaCpuHuMZTdRVpYmuVcyAUgaHs7SztNW41lariZoTAXb/EgADFRMaERpLmBJfHFxCTAgEDirh6XtvGgHulWJQacDm7mg+w0giUjfdpIlIEUR0USRPKYkBIkcDp2Fv5kaKNloX6irKTGZgpjnnSMFME+M7LxN2NYT459RHUge8FMRGemEaka4CAR54YsV8x1b3rcfeaO4V5EODEmxEw5ZRwAQZnKIBoDdHmYtTzL/CS2ODqmZicthEDiUDeif8gQtpR4yymI5FyDsROSAYAnKMObr4E+FqjA604ZcAjvKiiBggIyZCBzowv3z+Ivr6llxKSdx3334lJEAzB3MQB83E6jFXzQCwqZltrQEzl0Q3JZt2I7aUglSqrKrKouqQic0VB7Y8pwJEYnB+WJsIcy4lb6JmbW3t3ePp3cPj+8ft3HqFm4gT55QoePuD04tydE1g+FpzX1HV9eGx3D43ks8///xXv/rVs2d/8cknn3zz+neTGBndmBlMpwRG0oc5m9kexvOHhGTvinWpsDmSPrbPD7fAsPyu6uBCC9sTqoABykBEx6uJFO7OFG3ol1EV7h48sVWbmQEQMyq5KgBaKYv51QYJI4kJFyCGd1dk54k+kfO+Ptcp29gf927KEw9j74tM1Zu+zl6n5oeXdeiKHKZfI284mpT88XxaluX29taJVbA1qZuaWW26LAcDOG3t3cObr755+7Off/7L337x81/99s39+fXb97UZpZL4IA5mIrqZgRmqgwI6RAWMENnMgheRmVOizImZpa45ZwbaoiGTohyh5/P5UJabGyWK5nNXcFTPgO6ojgxIhJMHq8mesi/KBe7uW8wlcnQEdTit2/3D6f3jel6rBThsGLqUUi4MPV8MhI5zPpxJSqWu55vD4Y9+/JMvvny3Gq0VqkaKsbsxMbAm7EwaQzi7MIwcV+KRiaeg70AAdaLtfHZHoJRSQk55OSCLqjrau3X7519+8cXX3/zDz3/5P/zpH//ln//pv/7zP727Xe4OicnAtCQKjI+D+hCJ+PZp5XCH0t8LiersNb0YvW5+8RIQTkcwIPRTvy6/5astZn51mKb41bjIJVt0UQcngNhZrpgz5n8/tA9xNYM+YjYyLggQc1KCtQ87/LI7gYigJghgpog257PNxCMgAnQhdUAHb6MXF5EcCZ3RyAyRyqp1a64ma7XTuTUx5vT+4fG8VTN3ZHOXJlW0qTiCqFUVBzre3r38+JPH82M1bA6sniLcw7ihC6eRe0zCiWw/GTrSoOD0ID0y986n4nAhLIiKotokHwKASP7EmphI2AdDJwIkQEJAQqkyG6mGebma34aRkO6JUmyOudwshxviRdwzIiCaWUI2IvXamp/PZ3Xi5UCY2tbWtYNQuPCyLE0hZ7+7OTQnbW61kVvJVkpOzFlSgsTARECQAnGXCE0bBMECqZmiO5ITkci6N5sISIOFAfBiePcyNk8ekdhFPrvZHHPun/z5lEMfrKT7386kzL7cNxUBrhnRdoLt+8vOM/caNy/lA3oKO34XIiqlxPsZJzvARSXx+piqNN3ReRCR01XPwjQFe/bU/dEpPYkuLXwdFJtyYJRbzy90Wou5Ov1udklfJ1dVFNm/pwF8J48ewtZS6gjPbpgsHLxBCysy20/NzMH522qdcwN+QgU3jVc0aUDKMPIliOgOQBS5SQAgMECPjrXxqq5MKgAEZSAhIRO5R9UuEW+6ESCokTsRJOYcLRzxdVGRV4329C46Iyid4mKmaDZz3TNixNnMauZqhJQol5RLKXFiLHxUO3BXHDdHdzAPXg0HwCbC0TqYGD27iyko2rZtzLykTJwBSMSMjBCByQnDTY9FCH8+MYdr229vbj9qOSciysx2XBBR9UWtKzN3XVcfETISUa1buE1AGPDjwLjbqK/q8MlEbF2rErQmQO4eBTZwt9oUkbamVVQM2NnU1q2tm1ZTETcHjJHEyGyKYmgqiOBOQBhK6ISIhS/8Tsxc8iG2zMBYu3dzvCxLzpmIIuYSkYphNQQAgAmCLdiM1SNNjoilkymDX1G2IiKqCvElkwQjSZFzDl674SleYs5Rt7kyZ4Nnz8beQ11tte6tzzSFc1rxXsj3Cvstf3Ktfb5LPu1PG9cEcphFGwhMX7SMM+ecxHq11kckfLmKGoQdGp3l5IA2yvRDwd0j5GOAPnWGKJkrUrSZFYtaBBC6E03kidrAOQPYHIXAzDPZ5+DPnj0/lqWZJWLOacnFOzElMDiCoSmAASqDG8aGjRj7cuS8zMH87fvToWQVNwUDN8dnt7xkruKqTgTO3lhdFABS4EzQmFnM1/NmBinBWfGhbtta7x9P70/nh3VbawdpmDkn5pwoEVifADSZXS3gOgzMbACtxuCW/sjv3z9+/tvf/asf/ujTTz+5u7t7++ZeVfeE1tIMTKOlpOjYHRmIBhfB7ghBuApghghNeAzAHntxsS0zD3hdRUSI3rjdPzE0IbLC9PQe4k+jeBuOxcP6CMpmKhIF/0TYEem7YZc94ASAnDOgOJg5IQwsL6BcHMpo9O+rEY3cNGQSPqjn7x0FAAjIzNyn9wo4NP2y9ez/EIZy9b+iWMxePQ0EGiKmspj6w3l1R3BGYCBmTl+/O9vp9O7t+5/98lc/+/kvf/P577/86utv7h9P50rpgKlwXppobWvQJG7mgZsBTwjmEDswBpsuAhFTwj6eYb5Pv85Sqfv5vB0O681a+2JibFJelltmBgIxb
K4E1lr0OkQsQO7ezFVV1N1d2QHAAMxgq/L+9Pj+YT1v1QycAq+LjEgEmXFJWZFyosKJqAt0AMfJYNN6enj8wXe/99lnv3v7uP3m998AohGYoYW1MUAEAmUEGF03NObF9yOxgzsDmGFvNHUwCxZu86rWiGOnACIQWpTgZHj/+v2XX99//vnrX/zi83/+l1//P/6nv/xXP/r+J6/u3L2KJAZ0E3cezEZzMf2DYvLe9k5ZGsXMoQzu06ecMgYAYTbHRXykxYJ27iLD7pfhKLYjoZm1mv024Xbxg/epnHlM/+SJsuxNxAxa5j/3fYPzoWZyM9IT84u0M0pcbWSquvUgnwkjymJVcCMDOK/4uGkT35rXRuqcvLx/eFM3MSB3ENGt1dbEHZj58XwyUS4ZzO+ev3z26v6br766YdjUoIojL5kjYWlmweAg4EFuhMSqaq1F+hIRVS24r3vd5QIlmAwYAGCFZ08mDrtlUVTEOUIMvdMjDx8gxm/GWzNTILzs+9dZ3bLcLjfPyuEWOMWwO8I+fogRPCV1EBGvtXDKJWVOtda1bu5OW1rP9Z7J3W9vj8shLzkTQ0I4LHCwVAptmrMxMxMjgTM6MzI4ADI4O6CbubgLmqNHLyYCXDIL4TtjD/WfxjBMV/MAffBE+uDhhAG164s4NAJ3Scm5Gu5Prz9/hddBZu95uU7fzIvYjrk0vkiGPdnvEftU4H4jcNcmV/Hb+IoLE+lUrr027fUunk4GKdT8w2trcNHQOP8ymJ47KajPmyBCZrYRJQIAU4bRAucfOI5ElHOOWtNkT4q5CH2th6WIxvruCvSmv96UKRD7/hDf2JkRg0KD+Kr6QYDM/aW21gw6R3pEtqmHBKimw6i1VK4yYZGNFKallDgdANwnOKfzpqaUomPJsFvNkjIgEaDFROcA1AD3+ScGHd4YmygRMwfRHgAjdkNsgx0REJDcLXJheGGkHgmDJedSckmJ3MAk8oJhSmg2oCOHFskATqRUDNaw+7zHTiCd11oy5wGAqWDZmXLq60xkBDxCU0I01dgfc85RkIglCnwVE1Ei90wExlRSlweEHvyr9npyTujuOWck2rZN3Zixd29YDwgphjEwAaGYNhUXNwBpZuBqsLaaczFAiYEl0kRs3VTUiVMqnAA451QyETWVKnbeViapOS4fkMWEiMfc80mxoxyPx1D1ABGt67ptGyIeDof4lQOIyLqup/PDdrbW+o5lY2+28EwB3BQUywh4Qmd9gEWZBjBvOL6RIsIyDBZdtka/pkZ84hxcNt1oNx/4kP3+Ov+L6WJHYLc982A7nGc+MTSXK1xDLPYfQo+vAiID7q5ubCZua6thRtHcXcIAahPmQ5QHMzGAMfNxObgamOL49rBlqqomlJY9vtQdY3QqAgAyovRtIFYvEgmgE8jkcMWinolzzq7WzMyBmF48v0NENMjMzOn29hbdiADbnHwghMbokJCcDSg8IodOhhnUvqd1iy9sLfjxZWt2XIrqhog5JUVJSA4G5kSeUt62UwhnDDjcqm3v399vj+e1vn98eFi3KmCITgyIDujESAyOkZtHSIgW+huRL2jflvqAUF5ExM0d7f7+/vXr17e3N8+fP39//9hU1VxMmTkRAkCtNafSmgTNUpjKJ298SODV3hbVjKlTxGOX9YvLqKpjzuuExvU9aVfiflqODp+gv3eEDid2c1MDmaFqpHRVNXWPVs1kKM4FJzNFqNeXcSYOwkqbxYTj6/LdE+HH6zQzDNKXqS94qewZfKBQOy/wDzoiT7TPNTgwbRYi4r+HZTmfVhEhLMxUmz28e3h8OP/Hv/vZ119//atf//aLL37//vEsok1sa3r37KN1a9vWECwvC2FqKrVKU4Mg6AV0JDCInAw4RFjIRDmlwkiUwILECqFnRnyW7Vtr61pPpxWAcs6EKcAamzpFVwUAmEWNMI38IyIruIrLcF8gAQCoQVV5PK3vHx+2rYkZcibqyVwCIwImJATiVJhyiulz3uuErirNwd9+8/rTH/7xp5989KvffkmAOeWq6Kg+YmyEQLjz2iQhMQNzDogSkINDcwTAWBzFmXjtDk9/eJcedIGdhA3wQAsmbK29fnO6f/in3/729198/vv/5X/+q3/zV3/x2ScvUmFCM9scdNetc5WJ28dLMPRrGnDYV7SH2wB+5QXCt0VrU7rErgzjLv946UXfO06XkNUJ8CLAe+fYr2M5282CHnWCq37a/cOO/rer+NPMOF3uAcBhDCA1M0TuHXqGYyS28XILQUaACMCi3sRF4by1tfnj2WozteSYHFJt8Pj4KCJNVcRazOhzcCQFq7XmnHMqrdrx5u75i49ff/11MzFVbIjcEH1him7anGnsNgiUAHpSmKJ2jQAEwOijEhPAOkIaTWiRgSImtoEzpIGnNAROSERMhND51iJFf+jwrhT1RlV19H0r1LQY8d+buxc3t88PN7cpFSICIALgPvcbUmZSt5FBYOYDcUJICAqITAZUpdVav/7668PhcHNzOBzLoZTj8VibM7egniiZklPArNghMxCkRAE5UGI2zGAKDtkbIoW1UWvuTmRENLGUgNP8hiR37/dSYQLcOzN7sfdrAOd0nGDH6rxXsSFXFyjmE4dn6tfugrvyzG7HVNPJQ7M3+7W2fRH+ohp+ofDY35Kqul9iuf0zDnzY04TRfOlTxfC6sDev5sGlMdsbRkdGB9TNDXtaBCJK14PmfR8lX49amnshDugzM/sg7GZmdyPwCRruXzF6zn0XEA79B2Ymh9Fj0kH8NGEVFlUBd/dJghJaYUNcaq0BXDYzMHU1QN9zfM+b6T+IuqhDZ0XzwRatiDZasftTq5krLPnyqsbMKyJKqWhfJWcKoNMAfuC05uDeR9r5YIvJTMflwIQuej5th+NtFK8k6HaoAwKZg24X3dFMRSwe35HdMfZyBCJnVEREaSKCmzQiUkUEoAQRqE8Z0h1/OrireCgeEoV1wy4BjgSJOOa8IRgjOYRfaIhESEwmGMSwqbWG6DmRQ1JVBwIzYHIwd1JTVQ3njplBI+ozM6siIW+tiQNXVQumEdNWXVUdqSxHREyZUimcEwA0ldQUAEoGMw/WdQBIqeScD6kz1tYqRHQ8HmNCC45+7liKUsrNzc3hcCCCWmvJnBjPnAJuR53TrKlqHRQvichHv6vtTHkYDGK4WJmEnDgoflvr1UXsrbnTgaaZ0LoIp3vOpbVIjkoHiFyyuU/LfU+MxRTOvY17ogJPzv/Waz75qxBk2CWnHXeIgBHc0oX7LtFgX2SilFKmyJ44I1lM53RXExGBfERTYAqab3cXkW2tS+GoRqqa96pEBE5PeXqm4a5jFFLcNqd0e3d3c3ODYCklE0fEVy9e5Jx7/Apqpu5EaEhOhO4EwUmE6NCJ+sCJmJGSmJ+3urmta1qbnM5bSZnY+9gVdACIZnszW8rxfD4TJeYk6ohsCu9Pj6f6oKpVLfjBDYmJmPMcZebuCMyMQcKhKiXRGCN2sfUiiuTbtmHK5ZDv7+8///zz73znOy9fvKRwHonF1MyQC6KLyLPb56pns25J
xB12rsUTMcAPEq7jnNFMPrYw62RjMp1CxCjCmKoGdC12jV0tHM1s7/ECeG+gpv59ZgI42wM15wx9zBWFLmsLTDsPXq6rm7cOA+5WeDrYe2d0X+astRIRXrVKfku0PNff7HLaPMF3HvDcNXDnfz8ppwDELIw40wHNxjXl4cHUU0qE6eH9+ee//M0//P0/ff755//HP3/1/v370+Oac765uYG8qGxN7Otv3hIl4gwIbd0ANgQuiJuhOxGYe8BTY7wpOUTSlkpKS8aSnTkhIiHFWPHoZQZX1YaIprCu6z3et6bH43FZFgI0s1XjcoYADI7kjFgRpbaYiiGmKi7eqxmI0sxFpImstZ3WLYQklQxojAEQpzllLtyHEAhzI4pQ1pzcwV9/+dVy9/F3P/3O8+efv3x5riLr6j3FC5AAERyBOyUbuIEbKDkiRnsSaQf7xV8gEjAQADSpREFa3gCAIuBV9fzMKAslB4DESFjb9vXX7//3f/cf3775+uuvvvyrf/1/+elPfnB3W5iMU6egGBvKhcVq5009SUBMD+3yufcE36UqMgXSRykAdkHm/rJPpW7gBnA02E8RRexj9L5VBfbbwfzn3t7OG8PRADb1YoaIeyWaarj/rRvOPWVeGXbqY7y4QTBcikprdt60CZzPKoZbhXNzd0TOTe183mptrdVNNrEWG7o4BEkh5URE6o6cyvF4c3f36pNP9asvHEgBVFzQCCA58uhn6y44WGCJj6NraR/M4wh6cccyRfMwnLH2lAQc1EdmvWUags6Neo9At2Y7PyFaQnyAogfEAO7unh8Pt0s58uEYeLr+V+YeQWkQf6S+WRNiYaIlO5ATO1AMKQhP8LxKa1oX2bb6xl1Ebu4+Wg755ubmuJScKDMwQSJHkAyQGJacEjsyIIOZgJ3iJt3dDX1MuTfrDsVoX+3W2PfNBWMtZlaii5lf7DDilcA8kc+9TZ4SO8+c7xTGIPsP7XlEdjsdnBvWt5QZ45MnJwwViFd6mXsBPSCUvYL48CH3t7G3A5guW9IMO2EgN/dnxjm9Qmi7mgMN2mtp2lqD6yxmn+NhfV/yEZ2nlMws0uEAkEqOO+CcTFxjG0Aa9AlWSkEEFzDwaAWOWpxzCO6I+SzmtUOMWmZmtG4otQk6+IWzlIjIesDQRyzQYEbmzgnLD6czM8bn1LccCD1EBManPk1oJTMXTgoOQPHIlJOZccqIGA8bIKIv7x86vfDVO2YimByi4eHEF0lP4IGaWfDrR7UvInPiGHDnptu2tVpvbj4tJbW2iZuP9BgiMmcH9T6eys3MDRG4q0cQtHaEU38RPro6GQkBFElEojoqIjEPPJ4rJ2Jmaf1IFMVjTikFvhHRl2VBInclZyQ4nU5ukVlWREQDNEVTPpTaVJtGcBQ6Yzb1cDh0gx8EOENH4qmIOIDGObY1czE0MxUQNyDOmSzasXLmnCld5tsiYs4JkZp6axom8nA4LGxEFMDLSUoy90vVFvRcEa6E3E6DHh+GnMckw9Pp1NopIjSgRCkF+eFQn6GP2IN2RB/9Y7k1BYAY6siDiT5u33YjN6fJ9msoUdgOZ07g2UH9ghG/bPZDi59Yvb2C7//74fGHPr+cIDpSit0GkQOhx5bfWrMWqOrgl0/Nfdqc7qCYmxknRofZrA8Q7EARUnaSMqIEsIlIrbXkwwyGHQgYOCMSiV4Mt+/NIifbtrgfUQXAZVlevnxeUgaAwqlKQ/fb29tSynmkbAHgMt0eAGk6WxfeXQQ07PTrKq2pCImryVYRkTLc3dyUUkzEzDDgdk0QWZouywJUHh/PZoBE21YNV0yMlIgZnJBTSolyJloSISGAWawejt6/wAzjJDthzjmbAqg2heNySKm8ffv2q6++mpJQSnFOYlfVrcPhsG0tfptSkvYtAjC3xH3CAsY+d+WiAeCgvFPV2bgVR8gGuJa84PXGFrRP7j7Hjrk7Ihg49IETHFiiMFytba01M5VqtZ0PhwWGodt/42WPd0NAzuzA5gBOCIyYDBzg4goAgO0GQ3Wo23XaFS+cW5dv2R/7TXZe1oen+6F/AHtHJzQFY4AHQG88cgc1x4x4Pp++/Prtb3/zxT//yy/++Z9/8cXnvz+d6uPN99VTPt4CwJv357ZuzHxzc1drReBEvZNCREQ2VQVeEM0NvfcdEBEDcqC+M6eccymUE2QiouS6qhox5JyJ0MbYNxetm5ieWusWnTC5WVmO4GAIaK6mJNbQ0KG1ZuAqXnV0GCQmTObSWqtNzaypNLXgnu5ER2Dh3vS0gwp2iCMgOJgCRRoayrI0sTdv7o/ffPP9P/rTly9ffnKWL7/+2kafBbqL9YiwF5oQNOJBj3wcI4ICetCUOQAYEzljQnJI6Kam5sKAjBjA8pMaOaqBqKFByujACuZN/+6//sPr3/32q9/9+vT//Dc//fEPXr66eV7utLa98OvgwuVd+9Dek9uVBa5QpkSk3xbvTVW1gWrrEu5PHeueWDmk6cXCE8UE2Ne9u7ju4Nz7PWW4WDivEDfWZNzANWR0GpB97DT1brbJed+mw5+ctC4XO38WV9VWtTVtTc6brpvWZuYZqTTF1iiaER9O29u397dosaGoi5moqQGai5mXUrx5bVtiIObj8faz73zv89dfhoPohKoqYJwTEUutl5t0J/SImuLiIp0JEwBUJWAdY4uPxekeYGew6/8vJgc4ALfWSFWprw8zMpdSCgUCRyRGWmNkKs3CP5+dO4Q9VinLkcsSXqYP59zM+hxgcySMYpyqmlclVHBTNzABRSbivKQCTqf1vK4nFcw5cyoIAEC//uL3x+PxxYv27O7m9rjkBIkooUo9J/YDsx7LzaGkRAAGmKzPxO2PiwEx7e1sweMVEtVlTIaC4AjVyLt1jcPMcITB/AGpzJTS657hp5k7uLhu/WfZdcbtlWIWJ3FsDaFHcxDrFO84ytKRujNMjUNqQ7xSN79OoNCO+G2swGxe3W0ralOzZoly/+w2+Evju9LimhAcaTWsniglw6UJ1PXh5pA/en7b6nosZGbv1vPxeOxjJxC1SWutu0cKAtpUzEyJzL0R51xElbbN0YygohqTOCTGZzeHdWsqTVUcEDBjocho5IzEiATkRMRR0mFAFaxuIjAYJjIlopQQjd3CPQAzdyTEQy6cCmNCJyZSJVNRM3fIxwMTIzkqiYiDJ2bfkxk7oAM7uBoaSHEsSRHOUpkxGslySU5B8MiHw+H58+e3t7ei9fHx8ZPPvzidt9qsOd0/1jfvH1prJaUmXoWqMVIiD/h4Zi5wyHo6U6HWjJhMhUpp3iABsJ237bMf/vir3/6WRF4cD7fH48P5/nl+frg9yINYa8SMWgvzJhIoGVU3DkdFxTddHRE1IYY8QWYCIjrrtqTcBETWQy5EJFqbg7/dXrx48eKQTfWsm6kuVhbQQy6GzUHRSNQBPJGD8aEQooK5NAmjLSYK7uTMbAatqqoSMRADIHLOyzEmXJmJuWPivCyAuIkiU0bWta7nFYCW5fnp3Ts2JlyADXFpItq24A6zqlttDVLlsqbD2WkzvHN1EXbNrlj
7rLORau2zemK+R3bIDtpIwxtzV5H39V1YH6IU8yTP2+ruuAE+wFrP6X0H2YpIGIXUp03C4XBY19XdYyYnM3/66adgEtdpD49qjgBOhMiAiYiJ2JyrqKMoirMw5JQULBovYUahALAsS0rk7sQD2EEIp5bAM6c1qRNxToqE4iAbAjCgEyKYoDqBeYDnIDuSUgpHz92CoT0nAY8ZL6JVVZeUs3OQ6EYSoLMHY6/SuzvgZFV2AJAE7uTxlJQInLyByiEvmfKSsiHW2twUvbqtxsVA63ktjqSG4AnZ1ZsZJsRlMVBtNRMQESMlPJzkUdspZT4c6eEETmpsVbaY4EVEQJhJ0c6mfsADAt2Wm0d6JErpcBQiIHw8n8rNoazL9u7t4VDev6+s23dfPXv28tXD+XS7IAFmTozw8atXj+/entaabg645E0bMZGyS8ucpDpTSpzFLVGU2MAxaGZAkQxIAcz9sYqqHqi8e3hMaQtAOxCauRlt25mZqa3uj5c6amawG1d3cUTLhIksuZBIYnZRQCcEB26x95Mfl09EtZ4fGQ2ypKzvz+8SQypUOW3qrkaASvBYz1+/+eqz73368Wff/eXPf/Xy+fOXy916erStpUwfv3jhrumQH9d180DjGyBYR+H28GzWzCJXfUlkiLg7EyoAJmbomTZEp8SUORNQ4vARzex0OiNiShyjWJkXxBiKbsgFIRFR8EESMwGjWjjtW7tBMoQGUBdmgjurR1BmqpBqL5X7AaFycswb4gIMTTcnJzYxKeRusG0bUiMiQgBDdfMoWkKNvXe6FxPMhohAFBDxWBJERNuImRIzs2MycDPbTBw9cA6EUDjFoGI324RjZGgfG6tORInZTBicgEBVTRmsE3KWc61NnQGKWmY8EhzODd+83f63//1v/9N//tsvv3779v79Wjegj539fIogP+oMhUsys/vHGiZFEBEYEIxRHA05QRA3KyICobqaCRC7++3tLbkdFkwgtp2Px7tFV2sPKRVw2NZt55klXLKKn1ZZq52rHc6tlMLMB8CYi0ME2iCmxJCjKhhCk3beTFSBCQ1Vz7WeuztIiMCZw9OCNFqH3F0cmFPKyVNC2MBJlRU5caaciRggIOuIdv7q83/5oz/5oz/70x+9e3hz3rIIrGttVRCJUgnitFUklUUiWGJgc1FBVE6ELoSYmRCpE4CrNdeexWNOmABAuqeFS1pFpYknSqkkN3eETZwgb86n1+ubf/8Pv3xT/+//87/5y7/819+Fm09vvlxtZQcCB1BAS4yFUwDApQIAqyN4tEYUqUoMkQkhRmb3SJ82CO4QJ3RKDuSOgJCQAF2sqaqaxiBoQ0dCGBhsBHQE76PdnQiZsTXQkVxjZpEGuxa+4aACgUYEEn4oIjJnZq51A2RAUJPhMZO5iefINCEhgKMHNrK7vwTkAZ/rSfmDi1o14EQpGbhIU89cSIItODCP5Buomajqu/aDAdpv3RmwhtFf13TbttYaJ0yNyLaFH7fNxdgtuaIbkzuYuDurESkxobO7E/DN8UXi4/qdX3/55ZdN6dmz26pmTgnL/Xm7ORwjxkZwxPDRG5JzOSaHJmpqbp5SSsRIvCyL9dke3d2PFHNKjIzMvU0Q3MA6bZi5kAEviZkBkbJzAWlsZgYOxCkvRJBA3BqSFoamhrzk5ebcoBk/f/lq+fgzvL2xUiAxuGmrOGjgkBMlCvAzOJIpgcvtnfXEazeIYM293t3duQnDUUTOj6dEfHd3Z2yf3LHIqd6vp3pTx0CplIqqllJubw93cLviDQu7oareHJpI1SaMlBOUhExGJokdXRCEQRGdQCLWOchtxEOdEpYC22HNFJApF3AwA3UUADQ0e4eEAKxOVRwcEy5EvG3S3SowcHEIpgAnKnt4c8j/zF/vw6r4fF1PO7TzTMFA7lOylKhzzMSfuOXIaIRGuIOIuSsNyMw+7EREgM46uY943Xu6wd1VRWR3Y00psgjmtbUZVaaUYiQBQ8RZPZmeqIMzY3e30dIaXwDSZNu2cHkj8dzfKDEmNzNzCPse4Jk2brG1tm2bqh4zMmd1MTMdQ1Ri55BIgGHv/lbV1hqn3APcnhPpA/tGybuX3GEgXQE8aEgMfNZWA+/iPqj3zCRo7Ny3tlnOKSXwwIsCRFJ4HDOIBwB3XVKutYrq8Xh89dFHh8PhfH68v78HgHpe11ZFZFnK7e1t1IvumD/79DvN4HdffiNyWpYF0+KqiXkzsVaJSExqXb2UkhCF0C1ldncg7k2QTmLCnF+9erWua7zp82k7LofeDkSEYwRWvBp2UPHIZeac0UADghX22mHCDiOYAUcFBzV0qCRBnMyqZ9kiWUJgIkroyERE70+PLu4KTDnAqUAIhE0cydENTTtDLvYmUg9XCRWIoptLzazWsMLR8w6IUdhpIkFqh0jEnHOuomut9+8eFNyIhUDMm4irgncCsW1tj7quzi3fSMqelnVtAMBsUZSutYYcwnVbLQDUKutab25uAUAjkdMhASmwMbGhdoUEBidTEBNVjQ1Gd03Dy7JEjj/oZ3AM/0W6DKpum3jkou0CcdnnaVTVjIJ9aKQP+2BTvLikvdwx0opJ1GJp1QGYp1sGEC0xPVSLg5EIgBAJCH2AmoEcoLV2bhXIj8dj4eRAhLSd1lTyHDc8OmsNeaSvZtrMYfzzaXP2NF5TxXgcsMtGu5oiTUTfXBYzUzF3R4foK+ghyXgwJmLmhGTo4QeYWcKU0yX9NmN4TopApZRa69pqFLRKgZso3GEMeL3k6g6Hw7Nnz6x93WGuBq21XMKGXCakE1LnkrUgvAgzAnt44swsznG6MMZSzWLbTPlbBx4nxN6plToLERD2lDmBBcBhNFR07+wyDRaRmUspbta0YirRAUuI54fT69ffHI/HuJO11iXnnHNr21o3ZLK6AVD48UjepXEA+J+84v1bHsbT3QdyeERN7gEE+pZkZ3SDjF0tSHESEZm7qeKYT9X3xcE5rr0P7XIbYQ/r1kdvBRI7aqjLkrGjWi6NzQaGhjBKl4gEFvtzIFPSdAXsuhF/r8I7wc4A4IbqbijRk6/g0RzZ403rjHmMdHNz8Jglb4KD5FpdwF37INQQDmxqYvpQT4flBrk8nlqtnkq+f3z3m99+9b/+23//D//y899/+Y04aWBFyFpTgbhVDmM7No45XaDH9TYOAJuFdOsyiQCwLMuhLAk0c0qmNB58NuoPMe512pzzfJtB+h1JpcfHx4CWBGm+mYEBIrbWmNkx4F6k6qqbiOgoOXZtZIo/j+Rj7PMAwGnYW4j5YGzeezo6+ZeBA5ZS3r1//+7N21evXizLcnt306RG0SaAuO5X5IH9dcOuKGcXJDbsfMFp5Oc67MXbfZe8dwcER8yUmOjhfPqbv/mbr79+/fr167/8y3/9f/sfbp49e7YcFtcmbUMgBxSjlFK0KjsyAamBWJOq4OQOhJ4yQwchUczbNHdAB0Pz8RQ7Epe9DsJAKj7R5b1S7/cg7/7YRR8vl3II694TRwCOqO6OF4RLrGuUwHLvsbcQNPIOMZgZF4BOMGVmAIrcgNzAqo
o5ijkQq3OrhpQop8TZEUG1tdpaO62n8EXD+MdGA6YPDw/x3n2QTVgTM8Po0EYwcMdwYMDckSnQk8F6GTMGyfi73/vBurX37+5rlcxEjmaWibfziuiDKWBgeRBYa0jLfBEhIbVWvH4v3OedXrbCWexy95yzgkwZi0rptm1sBExMKZqnVI3QEKhukkv4MC5ijul4e/fsxcu0lChy7I0YDsqPuIGdIE9yjSHh3Zvo2h1av21tuqk8ZrTu9SUkYl3Xda1v374tpZRSEhciOh3OOdGyHDknjmZpN0d5XB8KlZIWJzNTdyUwJIdyjAWKnRTMmjUREXUiBe6hhCNEC8nYIgiCfcZ4mAsJEFBg8acq6GTsQ8QduPfJDrjf+OY2Md8mANhgXZrnQ49u+igOgA74GI3re/LOXjEG8LkTzSvM9zKXeqJM3T3iTxyQt/kss8IZ72iGVynCVnftrD4aBcTEnMAknKjWailLyIyqJuKIK4nBNa6IyGjG6obo4D6GV1iMi/HIGdOFRln6nAZyQLXWJEoiF2QCjj0HEcfsQXB3AaPdII5JSgkdNQKX2Lnbo740AerKueScU2LwJL6hdySqmbmTAU2HzN0NvFYBIGYSsfu379qxMvPt8Q6ZzCSdzw8PD+vjVs8NEVX1TPr8ZUNO63oybYzkrtt6url9ZgsnSmbmivmQc04Mig6MxEjWB0k5GZsZMpVSXrx48fb1a1VFg7NszFnUI7TjIKpGVOi7lA3sNOWUDa2JSJu7tarHhmHmtW4YxDCqrubuhsQJCTCXLObb1kJ+MmNtitDHCTIjYHdzVFwMbnIC6C1jowiJiNgcgnhGHZCSA5mZgrXzudbaVBAxlh4QmkhVQyADEJWqZti5WzZp5qikArip1FodNCExUjNXdxVvYJs3oMyIt8+fwQUh7Fwz5RS7XewHcy8nImI+revYyzgGOh8Oh1Rya41rAafoRy/Hw7IsRLTwZfb0zBgBwLIsqnpzcxMedqglES05h7qWfDifTj0YdvGrY+ciX+2yl81gJGAnlnUMc2dGbD3pgQnNJtkQAOBu7uDexenk7NArPO5WVfhQbsvR3QlJ2hbgpcPhCAAqrTOO5CW+Ha9oI6+Oue372FX29zzM0AA07qnt1IAEEQlj2J/bmFc7jAlM4F/n33cAG02JgDErzMxAgYIqVsE0kiOm4KKeVJnQHcIbQAYRQcbb29tlWSLOD0+0Kage8iHfPbut54duPVOCqojITG2rzJ2uGju6DlQdB4kpDqJCIiIHJyTsXc2xDSDGuIQOqQ0NnTprZoyh7ImCPBu944YI5mYdwhiSYyo8GjhFhFpz///y9adNkiRHliD4mFlE1Q4/4siMTGQCKKAaNV3V3dUz3dND0/thaWlp//IS7dcZoqHdJerumbpRVQAKCSDvuN3dzFRFhJn3A4uqmUdixygR8PCwS0VF+Hz8HnJKIjLft+F6LKXO8O0mHR5Or1++BnB1dbW72rdSAaRhJOHkMtdqteTtLueRguKd2b1F4vTDm+7n8BHrf9GYih2wJIRhpWPuLF4IdxCJu5sij8PqOHuY4qxLVr3+U9j/IOVZH6vDw+OJfF/cbGQmC8xMu7Nn6iBVkg5kXXCq6OQKj7SA43EZQl0eLpHk4aeiEoH+ZhxCWBx66w5zYSaCoSkpyIgCd5aiG6muDip2VjukLjZ5/f5YDZVlPGn78te//5u//ae/+ft//uqbV+/vT1WRtzuWxCGs12ZtldY85wys6oFd/A+PEsIIIwQcEYGDwIxxHMcxZ8oiTk4pcWKWxF4ejdbgEr4loIXjapqmSPnmaEEQB081L0aPmdWNIOqu2qppqbXWOuQz1JCZRXgYhpTW/LybkeDYiAvp9BgXwo9L8IDdZvvNq3dfff3lv/sff/Lkyc1xKnR7o7WVUpqat0AjGXOuy2zeo43ngaT9cBbu8rFGzMtuPO8QVY0BfmEh+NxaFheR4zT95osvSinfvfy+PvzZn/zkxz/+yWfbYXSjTFCz6i2ptVbcScSZk4Kc3F05aobMFLWwqgTr7i9wrU4MhnOEBmaPRgloqaqs10hrpPX4Wvx8YH3xeo+y3/jTgKimgQL7b512QQTgJSI/J8ziXeEm6qQrktzcF8KK0CtiIiEipYlYAK4Kp0TDIGkHSmO+KtWmYqfJatOqXObUGiYptdZ5LqVOa07oaofDqZTSWlnpIqEmnIkCiZmY10t+hOXr5XIBEeWcn7747O798XSYatFhF76+JSbzTsInIsHNHIYlZk0lcUbMmyDYwbQqdXO1EghTSqKqAgp6DyEGdbM2jKkZXd7EMGkACyRYHqzLmrsw3JyMmcQomQnn7f76ye2Tj9o4rtJEl+G3utEC1qXzg9cKtXe5kP6EFfUKoJQpKuYhak1EMRzEffqD3T2lVKtGos7MSYYYTHjrU855GFoWYnjKtBllm9Pt9Sdp4DwIo5UyaatGnlmqT6raWnXvrNoikjNQK2ASSQG7u0dW1SxcrkezDeSOZmbMRkRRjD/7NydJRNbtJxC4xZ6A0bL+AJbnUFTgHh+TR7+59JWXy/uBH7l84eN/Ijy2S/EOMbq1rPD5Ez+Aia7ecM0A8ch3e1o9QXhp666hmQVpO6fUkc0ElDKxIzGn1IFzqsoMVUoU0IPg3qOU0piSErdWwBJMJwvhi6vq3GpONAxDU6ulltLcvXN5L+GsLk1CiYPSzRQWCq+1CN5lXOyi2yByUTIXZuXVPbu7mVvTWpVcAROGsayUAsA5zJiPU7QOSpnv3t7d3T1cXV3t9/s61XEcb6+3m/HqdDoFHWUt7aCHw2nO4yaNu+dPnj5M9Vg0MT/cv2dmb22eJiHfbkdGmw6n/X4Da7CmqiBigMiJkId8c3NdazmdTqia3ZkkySDixIk4iSjO0o4aNzgE9wJqdFk5ICI3b231/WAOj+UxQBUdRIIMWWoxeAFADCChuuocQS2IXM1q1DAiJ90QEbFzcyCmqpwIaIuoTm+ihH3lWmvV1gUniCAMN20159FBpdTQ3Tbz0zS/v39wp6KhthzFCgXgQodp0ubGIuMgxmSkBqjNvnQIzaNmFkdlHEdJFD0HLHWUlFLERuEqyjxNZT6e5vjGc6un06nWKpKbo5RGRE+uRlUNaHvU3aN/nhK7q0hy93mezcw9qVbVIaUhReRzEUSqamWQumjv6YXzIH3EzIZo2y+HObzLagtE5DFr5nkD00W6d/HvFLlKPwVrGQzOSYiottbqnFkGZs7DOAynUhAqfIkJ7FqdkHIv/9JaP+ykj5E2XFjA5cucI84L6dn4d1qaJ8YXTF9uFPBVkEWAssyVMrN3zbCodNS4Ul8GBgC4utVWiYJzqLnFiHx4BiOoGTNvt9uU7uZZh026vr5OKdVycmjTYgju+5Jz3u/3x80mZDZ329HPRSiIsJk1Uwp1CqaBpOqSsVDPBNLC05CQA13WBUv6OnR4FTpVgDEzKPALwVO8TCWBiTw4Y9jXfL63MNxjySxsYmAujIWZr/dX3715b1rNvBW3nKqpGe4OD5998tnTp0/fv30/l1mIUuI8b
Od6d6oz0si505evHTz84EF9mIGWqtwj37PsBHiHa5AZVN0U2rxqixlyN3JyWSqdkYw16/I5j97KHYRAuC3OtFN9nNO8RZw2jqdqa9XmGQAvte/YnkSRlogs8iYCEmcGLIQ3Hh2upbqxut5Lx+/uuo7lo7/92khhkBtAxLBFOYPmcuTI9UFETuzRNks5m6FaDFsupQAjSzeAmvrX37/73//bX/+X//1vf/+H7+4f5nF7LePeQerUmgFWWyu1JjnftUv3379ktPzt/K8KB0VzAWrqDkkYUh6SCIfEyeTWhpREaEyi9VwIv3wACIJEIlomtCOOhJmpKYAUITAA52EYXFW9qmo1xZISrPhcXDSjsMzMRLCwZoPr7SCSHoEbIi8VkaY2JslJvvrDl//mvy+f/eiTb775br/fx9zpu7v7UibOgyA3bxSZVM9VaN3MPwzd1v1/GfatX1VVgwbByd2IuvmCE0qtqri63lwNT+dy+PbVq7vj8fTu1S9+8Yv//i/nn//sx09ur3gUg8JLc9eQqnNAW3yCiAQ/qcPBDmJ2B5glWXhjBCWXuBMbhePv+3RZSTNTt8sFjCgpMM1pYXFcD3V3GaoXpb/z70Oq0pa0ec0kWVJXE1hqQ/G3tNDiRzYYL3cjicEhUEy9oWuHonjLiZzYwaCchh3xTk1ORebCp5OWmZsNahwsZu/u37l7NAlj9gGAQ5fsbhjHsfMCcCMi1N7s5QuCBqDQYid6/ADiGE/FcPXk2e7u7nh3p+rODjV0PUpKiYMlq8+jGkFrrPw6aeILED2ijrVsijXcX/wpC13u9oQu0GpmxEwQYktEzDBXj9ITU9Sg8jAQs5MQDzRsN1e326tnMuyxktD0fdtNxLqZew19mZZfoKIefFZEUQjtxnA1udrhr12pKB7L9QbJYor9qxrGYKGdo526lAbvmF3sRhlGe3t33Iyy345DZuZtlqthSJazDAeYkbVWi7ZJtRFUzEUSwdmNHA5lD0Cyr2l3bAOzthLVkpODQLZUGTlu0+pNVp8uIoHxWQKebgqYGT/I6PrtlrM8xuUKf+BH1psOO5sXXPBR0wXr5+URu3RDtNir9Saux/DSNNkFFer68hRVwOhTMavD0Qm1HOjbdP1yrTUBzLKZORRq3qrBjdhFXC3gXomQlmpBLZ0OaK3XLmuH9duISEqoWpu2eW5xG7CASIko5koRAqZmTkB0eikZhfAzKUAh7GKmburGbgbvXkfCVsJBgbSKCMZJmBNTWlfEKJRQQwR5QbUZkQx5ZDM7PEwP9ycR2e10d7XPeSMybLdXsb6Ht1+WpnkY91c3anSq94lchiElq7U2a6NAkqSwnAJGlKyVAYq2SWIlbLbD9e3N77/4XazD0HkiaBhC4D0TEbfmXvpFLx6iRbwDpiRZuJym1V6bWWtY+4QATGFmClVA1Ij4Xa1DCk6WPG6yMDc1VbTWMjcRcY2JOzC7wKf7QwBXiT0IYCOAT9yHa0Ugji6K00wDeBqoLLdk5sbNlSSBuKqXqlOt2vz+eLp7OJClZkqCtB3zQjADTnM9tuoKURZzzFVLm6jZYTpg1VBZsJ2+YKwvOwY553EcB0nxtFJK0bYcGIkzP81zlL7GcRQRd9y9q5fbeBiG7Xa7yZsYNYwPPU6nWmvOOaU0zzUGxsi8tdZrsZKsVTc4UyRJABZ5knMkfXmSbS3i8jk0YebAm3XLEkGlXXjuNQRcwvjl/9n7oDqcoNH8FUBNCIMgm3udy3QqDk40DtvNOMytTdMEkrwZVZW6Xxc8wkX27382ixcgQz8H4+vxFwBCnCWpW4TGKSWtU5TlUkorIsCbIVlKfRQnkG9rsxQaEg5pSKF8SNosRDWD3oqIDN7csnvOqbmh1daMBE+e3NzeXjMjJXFPxJwSmMncx3HA1W663s+Hh9PpdL3JImI6u0sAXVprXj0Ef4iInfIynX9Z/4vQjLyDiDq2cPER3KvOaVkWk87bxJlZlto6M3JKOWfVimiFUaS63QFkJkmJAG/uFFIRnjk9f3L7xVfvdSpgBkiLpk2+vrotpUD46uqq1ja9muY6j5ucwMNmW8xBpKpFm6oyr8iWPwIJPoO8Hu+EpYYnAd/3HqaH0qmbRU6o8FCeQK0V4JhUbK1VpSA1WXfQ4kFjZm+Nv+PjIk/oIexFIsHCGUlz7htyDVD6A10kllmIEizpkmLz0u+6vLoP0o/1PAIoRcN3EBMQrXhzdxg1a7EnhCDM2qsbmoQIrG6dSgtJ1asrAAMzJ5KOt1fVU6OH+/Kb337x//3//Jf/46/+7u5hTnlraXNqkDTUZqd5MjOPRaCQTDjH68uN+yCxOVedzZkcShZNShIeUt5sh5xzdBHaXBK3cdgmYSbPm01rtuLf1vAipUSgS7Wu5aZ0qAjMZ3de8oG5hvhql6rnJCJJRJh0jcsvb0HO2ax3idcKsrvDFuhm10B1I3LXnHMw+j+9ffLlyzdvXr3+6Wef/fVf/a1Zu95vS7k9TnO5PzCbs6oqc3JYhzrROca62PB0sZ7nFtllwZGZTQtR7/B45Es9RPFogE2znnwyNyIvD6e/+fuvvv1+evnm9O9fH//kp589vd5uRhkzxo0k4ZwzJ/KmzZSZJSWzpm7uGgsXXLAAa/Po1REJQdyNzKGrpiv38tHFNujHCgB6c8ndmfPlcT5bM6KVxubRwe/Pt34sOZgDHlmMNeEBXNzW6BskHc8lIpLNYQZ3MgMRAtntaYO8Ic4gMs9Vx1ZRik/zXFuqTWpjVZrmdniYpml6N38/DAMz11pbKxyEc5JdjDkBGIYUvh69M7aornumBfUKpMtI+nI9SqX91dNnz091mst0EmdJUNLMHCyz7K4EMKkTEHVkAB75JAKHr0ZMzIg+dnfoQEx4ghBxNROJdFvUq9LcO3vBzi0iO9GqDrcGZ07MPe2UYTADsUjeDle3108+2V8/azRK76sjaA6jDRvB45JsGEE4hjvP1p3d1brEzpnNKMoiw7CZ51P4ZQLHOy83vfOsRFqYcx7yZq3Im5nxYGCRHDpvZnao7VBbmR6SUEo8SLxKhmFISXbPdL/Zbnc7GQ1t9jrBKrzVMknE/wAjCEqCfuayY2ZATDyFlh4RQpb2vLHX8qJfEB1hycQuXcCysR918PwiQ+sL9xhaAlohUhbAGmJiItduqCOGs2WUL60ic8v72wJZ77ICF+juuH2+4o0vBjSC7cJ/0KVMDnUYsUviZMkWdI6IkGqo/KlVsyzCMOfeFjAswugAzBtjoax1dSezFm0I5sAqIEmOykHMuW3GoczTNE2cJOfNVlJpVo8nYnNfux6Psm301nkPQI1wttQs3WxYF0SoqiS5jwT5YuPgOW+6yp24K8ybNi9oeTMayBzkFNuEwrPwthrmaV55ZmPrXl9fH+Y2tQcREc7jOIYswenh5fVuzOPWwafTBOf9fj9ud/eH4zRNrm2zHQU0TUcm3+02dT5Za2aAobkVM6/tVPH5z348buR4etimwRMx9Uab5LS25jkGbPqeYxImBixyLRJmgqjU+M7x5WutsWSmhlB+cTYnVwup
P8Bq06HlIXTZSMUkVpC6nwA00GAN2qxySinlcMnu6HxKQj25Skm4UlSMzCxLFDO5uZk2J6hTMz+VB05DM2/utdk81+NpPk0l0p+cBuFsZNbhdjRu9lXnqbTi7eSY5laJoWAlAOwgitFEVXMA8/G0xg2xGsMwjLV5qz0QWQnZWIi5tmbLNE1QR0TH7Xg8yoUoS5cACdhD6N1f1D6GYRDJQr2ERqZc2d2ZKGq06/b2AA7aI9vh3kePsKQ9/We3UAg4nw5yZnbiJdhdaKb8IgPrQfPypM5VCCfAcZqnq7zbjmNzI9VMtN1u9+PY9uPd3d3dw7HUiSXnnCLExWL5ACxaBB308+jYekDOPhhRwOpVL8ILD5YcERkSzxUrfhLKuMiNmZncolxFhEABiYiZE6cUfEpuTuZEkgZuxklJWOEKV7MEcE42TfM8q+PmZvf8+fNxt51bJWF2AbkE0bY1EUlDfvL82dtaTqfTYZB9TmzkRsN2IBDA7hzao+rOhLB76nDgzEMdUbi5CAFpuct9Xi7iWiIyC3h2l5MaQwKcmTgwVNJnpUxjczDCgyx3E0rOQmQcg8MaT3p2c/vkSt4flcwgXuZ5t93u97cs2cyGzfb61u/v75spcVIzGWTcbkDcbAFxPebyXkP8xc+d2bEX492ng9ALB3G953qqO2m0Cg2hwOqLJMxyCvpYFIu0ZrYoDS4TRx2Pt65tSsM60sa976Zm3lpzo4iNVBVhOyldJhLmRkutdumSuH/QzT5v4D/eJiWipqEHRkxMnZk2VkDJ3VxhTheMdt2LR4+UUpIslJU0j3tV12anpvNhvj88vH9/fzwev303/frXv/7lL3/19bff1eKb7TU4TcfirCO5OtSJOLEwQj25TpeXsAZw6x0kWuSe47SaODoxMoRSknEctuNmGBJMyVW1caLNZkgE1Zp+MH4fP6iqXKRG55gJHMUDI3M3xfI1XCJGlNQFaXop56KULiIpra2UheMB0Xvs5vSy8eLu0QXorTV3c7u+3vPL16++//bHP/v5i4+e/f6rL4dxvLnaHQ771oLiLgYZbf3alTzzoznJy/Tm8vdryLiateDPcHciRvC9OMjcrKWUnLzURkwpDQG+mTX9/ut37+9/+e237z7/7OOnt1fX18P1frjaj1fX2+dPb65v9rFLxnFk4dJCcygFyRcRkdNczd0FEnVbdmeHmdMyUPPBluBO+XQGuPISNF/mcnTx+OA3P1gNXnzBilRqOK9enAhEbBkVnIWDXZzEQU1hTgqCC0tmyQgQy3gLTuZQ0Fy1zD7XVhvVgmmux1OZZi1Fj1M5HE7TNI03BuGRgnU5iUggipuXZcy1mx33hbilA1wDo9BpBSJqXa/V3eHCRMoYtvnp008O9w9vT1+VUkYazBoNg8O9VWNRAkhCEzMHALZfftwDeKLWWpRuQJYWb+nee4DuvZ5oZgH6ZWZmoa5vEQGtiAh5JTdVVxAjkXAohxBng6e8zbvbzf7p7vajvLsu1fOZvXaJW0SAlTxZqPMS9U0edb9wBetZcHfmKGU2Zh6GodYYANYsfSNp90VROSIiSynFqMR6dphTDemgqslTrDcLu8v2+ra1UlTnolQnIgKO7JD7sh03290w5pTZN8n3m7wbNpSE0NjUoa4KtNArtsToM+e+AnAgbt6IiNF1fQEO8uqm05L4nff8ErydyTyjAkLEqufi0XpwzKzWgkfRzpo3dmd6Gd4wR3m0L+9FAQXrb+JVH7zh5T8tp/vD3iNdwPvjhWubBEBagEY9tlvunJqZICC51Npi1kkFwXvb3z2lxLIGfB2vZWYIiTi1nPNcm5lJIoT+o5/rZ8vZUHcCbFXVU63hv6OnXLQtbZmIRH1hjXEl75zT4B6aSuej85jLJ5AjwkNzKqXGJ5MLiVPrAdl2u4WzgcjhECcnEYK1Ju5etB2Op3mu6IJL+f1xii1uyzRtqC093B+2WxsbzPg0FSPejttxu/vZz/8MZOM4bsfBXOt0Amwcx5KG1trDw9GJiuthmpVwbE3hr16/GTbSSk1CDE4pXV1dMWvAjWJBoqNNRO4UMZczQ715KLTYOI4xvwHpIxYBlmutDchm0UgPEJcBmnOGc6luaI5iHgKDNqRsiZJHoxlA0Gc28UEd6kxEDiX3wLCfygRYFlFNgLfSKVhsICwpjVdUB8BN7XCaJVcDT3M7HKfDNB9Pc1UfxwT3mHqf2zxNEzOPmzFTPs6Yj20yrZBm1AhshoBsLbtqbfl1zHQcg5QIQExj5DFSu3Dbaq7WsIAxPEy5kVu3ImkcKES3o/RgOreKwldXV3kzmpmbDttNgBIhfajawSCIZErKToSgdI9peVzqVfoyuyIgB+ky73R57OM36hEiU4BKA+HmALHLQiodjuvSmpB3Pg49p21wgmqdywRtPOuTq+2ffPLJn/zo808/ev7l8e7169e/+/Kr71++UZ03497Ap2mmlOlcMDsHSb60a86fSB1W0q1P+J7VAjiRuXmLpWMSZuScZUZETsGejaVC2nEO8fckRFC31praalpJDc0UwjnlKqSMSAVjmShJGodmSkLb/fbTT58/e/Zsf3MdhXtmclczT2kwgqoKsbvv9/tys79/PT8cD+P11Sis8KotWBCYObkgQggCAymlRmQr1GSBnZhexlJAL+s0IKbijTvINBw8pdwZ1BKn4OuLuVtduo5rl4t6mFIdKaIrw0r/5Pvd5tOPns1fvzwqklOpGuSoRFRqTUPe4eo4lVev315d7VhotCHlrCBtJiI556bRhfZetowu7eNq5TlEsD+SMi0PdqeOTVCrzbSTHfIKQjMz84UchxPAHjWOR3WEpS3o7Gi0HBAzq1VPx3nIZBEmG7mTaqtlreH2c2f98BuJw13VOqkMcZ8wCC+zAKWwgHw+aBKuxzOizH7SQIB1Sgl3BjEnRkDhwCAicXNF6KOyUDaX42l+OM55sPtjeXt3//bNu1dv3r589erly9d3d3dfvb578/pdrbrZ7jPjNDd1E861VRarQaeUkmvHAcplGtPDmovEfkF0P0oSCHAwcxbOOW+GYRiyENQ0Znu343Bzvec2da7KJYYWkbVwXmulXrLpvAW2IJSYmXJKvha5eF1V6uAgrqaBdJDljCwJYVoYZaKgH1MuZ+r5HjS7BgRmORr9c918SHx7vX//7rXX8vOf/vgPf/g93DZjfv701t3vHg61VRAB5M5w1ZhvPNe0+Lx4F/twQXDAzCOjJCKzDgFxd2YKpHfEDACrm4FIJA1JUlKtpc1JdmU6ffvm4f70xe+//uZ6v7na59122O/k+mr77NnN86dPdrvNdrPZ7Ta73e5mn25ubna7XezDIbiL3dytmZMpmwXmiwW0al/12HFxK9FSXcorfQiwi/s82huXoaf7WTzz/OjaAAbrCnpBeNZfjqWS2P+6YMKZyNmZYQISIzaHusTqUN4gDyIC57mlVq2UNpc2TXqcmjY48fFUT8f5/ngsVd3dmTAgCwknJok2UEqcc9emjkQ42LyxOF8zA5PkRMJsoqqOCndiDjQKUXDKQFXhzkygwaz
tr5+++OQzPR7L4b26edUxeyQbMdOv4fOcfAGSxBryheh3LGaiLvq3bKrF7JBHoQPq7o0kEzkjqN96K7611rx1hFCw0RoJC1gaMicZd7fD7obTjtJWhh1ceSk5h7IJybmYQsE+xSyS1xQlvsuS0sez7KLtdk42loSw822aGRDHaKnBKRQXAFQwEcZtFKZqBHQdJiC51uop4Lc5TEGsYfF0nO3hdIIVho6CzSCbzB8/uxanTDxwCiCRUTGoocERAymAExvQlTWWwZeLGUIsM84XiICzj3P3BbGydgtVbU2xzob3Qj9mPVDdgPAqGb1iwRzgxF1HFAhgToRVZ1F7Wh6xndYeYK8vL58ucs4Y12+lqitN4OoLeodjSSgBkCImylBrrVUgLtIJBpnhxDAlORf7F9f4YRs0IEy8JH5eqqrKMk4Q096BxNtsNs281HoqNbLH1kpn8+dzc7ZPrC4lSGInuIHCtMZwmoM4IBIQkpggDUEgMeoKf+7uBmEJuGAKURdT8+atGbEh9FCacyI3IvE8ROCCYSSQGYr7aZ7nqepyX5bVfA2nVh6YMYzDdrsjyQCPR72a2ut3Dz/5yU9e7K+mpuZtGDdmdnc4zDYNm3Gz2YzbbSPdzKUJXVlLm/H13Zur6+2rb96nzESaEt8+uSZ6f7kvVzONhUCk2yw1M2utXO/2p9OplCJDjpJn7BhVbRFEEhhsMHWgmZMCyiBuaAYDD8mI6Hg6DElyzpFNM3NzK9owtbVk69AIhSVRnScAniVsR6s1ajBehZmbo6k7GYOMXA1TU/LqhuOp3h2Op+NcWnMiR3IEyUeby6lZvd7ePnny5N3D3ECzejFWYTDQI9QzzsMUbuHVmMBmupjdGHQWNwqduiXL7awt7n3IgolNLKA3McjTJlue70ScZBDOTClUH1tttSgzt2rTNDGlu3Z090FSoHy7ayQxmw3ExrGFmBnIKVWaZ15kc5ZySZ8xwA+wagAenzwjSkzU6QmXRxhurHR7F5Uq751zjLstmZHbJy+e/Ptf/Nmf/uizwWw+Hq52w7Mnf/Ls6e0//PNvvv7m2zpPoKyqKT2CEi0fR5cfuv51tUc9cQE5hWpLKq2by9YaVJkcSIsgU+fgWg75wtUWx42JmZ1hBjUUbWgW3P61tmot0yAs5lDz2rRE3YQ55WHYjA8PD8M43j55AuGbm5tpmjTEVFGjAeUcpUIXdidi4d3u6nh3N5cp8ONa5tNcEzqXmiSC8VrLC8Jud5cOH0U4j7ZwIwN9fYg9hYz1Ih0Z73b5EAqSZ2FmXvhZFvardTYHtObbMXToDo5p1zS4P31y8/LN23JoTA5TIlJHaXWudUhZvX7z7fe//eI3T58+3WyG5x8/39/swdKMOElKSc2Y3RluHOH1egrQq/6X4xDr5oh2PT2uS0RlKkagtakBzCB3gqqB57nUWnNmM4KrmXISgjjUz/g9EBGLRFzry0x7KWWaplev7q72krOMw1ZEzGAGDbj7BS+Ox8Sd2pDWMUyDBUoQxA57tKV96ZnY45mxZcP3qMnMzBtgQswCZjIzifAaHvIzcSbMU+Bm1bgSSqvfv3zz3cu3v/r1b+8O89v3d3cPp9NU5nk+TqWUcphaQ5JxKA3uNGz2cH44HqJ+Sq0JkSRSNTQlWExD9CTdbaFiWvSyLhLs8z1zJjKJgkGWlDgLtdZcKxiJsR03+/3OJi/WElJUxwMfsSJB1sWhxx1CbUHIFImiYFmWmO8yYjenRUJNRLhrkXYy7bUuHBGwg1Sjow5iF2Etnck5uhycJLEQHGRZEoCm7Wq/a9PpdHj/oxfPUmaGScq3N1ellPvDQ62VUg52y4CMLrYVDgixP/a8a1jGS7Plcj2D45qIU+p2DQy3PqgGmKo+LJorkobTcQ5zUtQfTk2pzNreH467Tc5vHr75/u1u803OaUh5uxs3m83Tm/Hzzz//+OOPAzhwc3W934wAhpzJm7uyqbNmlgBGOfcZ1yj7h1ECcdw1dwcsUujLoM4fP5bL7Bd+eQqYYqKbGEu5oYeyfc8/NlzsnIMB0omcBS7G4sQsA4NB2Vg0Dwapxmb25q2cTmWa51p1nss0VyKSIXPKsvGBmqgGzrfWGbXW0gfzAOScgz882pVE5AydHUDOKSUxb97qGjSjV5UdQAAgqPelLZ5AROpS5nk3Ds8/enF4//adFtOpuTezhcWp+74YmtTWxxd9MSJRbguQCzt8geUvtyNCfxcSZhZyY5glPu86ULCkmpXSxuQQSZwJuXkyypAskpunMW3z9joPOyVpCsNg3ICybuNO6bpAEMORpJREEtFSO7OlbaULtPWCyIQ59aFg79OGGty2MOtycb2B35p1oBCxuRk637HW9ymlYcjM7N7Ma6uGeu7AE5GB1PrlS9oAEDahLUFN54fDdN8OD/fzKLQd8363uRq3OY0keyPLcgTMvJlV88K9Q0zgFeFAAMMv8VaPXAAtDc+l0POIgHrVBl9NQTxC5me1imsfT9va6zqfIzPjlNe3XYtlAKayxBsLSc+aCl5+3GqFIr0Ezrh6M7/kKL0wVgZYEpHSVNVAnFKSZsTsTGqWGMfjMazGcZqJqJkLmbmYO1Ow7ECbq5aY7IpAIbQeRLJqKaWZRabXiGi72zCcJThMImmpCzwPgDFjiTO81hbGlIMWmXngJGeeYicSh6c0SEKdpqlWgoBFXCDszNH1ig3aQsEXxKAkw5PbZ7dPrmF2d3d3d/9O3dxgVVWEWLzT0DXLGUCjNCnuDtPpdAKYkgQPvrtfgqAADOOGmE3yhCQu7nS8P7x693A6HX77h6+GIbm21op0NZLWlCTR/eHw5Nnt7vrqX7763X/6z//z//X/8X//9vXL2yc7UMsDcgY1e/Zk/+TJNfM9I0bki7sPY3Lo4eGUc27mbsQ5gz0ZQv97Ph1FaLMZzMysiQhdsGCrAeaKylHHMtO5AxJSZpvLNJWc8zAkAK1aaprSolkahsN9muZe92Vn0DDkYUy1avCeBaNp3C8ATKK1OkEkGWEupTZrhtK0nOZ5rnP12oLbWxR2LGW3HQwGq5vNMFgAvdI038/F8rA7nerDcYpOSs7ZtIeeRATv43nM5AROQs5h0ogJvPRUCQZXU2ZOLARprYlQNV3m01IUaTheZR5U1HAvraJQM1W3uZaYeg0+66DPgZGItJy5lijGCbOA3EgJvAC7HWzWOknsQgIep2adz/YF1BoRcPgAaD81xOHmSzNq6sxcSsksKSVVdwYJ3L2XiMN8eIwdGqBCAPk8OayR17u3r1KrVuqbe5WcmoGtEnzI0hzcELTbSw24o4AEZLEx+nUEqWYt2mS/2W63IuSmtdZWF/1uprnU1pokpgXwdTqVqkVSYs6KQkQsTES16iYJhAceKyxvxs2Gaw1KeXEhba05lNh5KEblVN7PDzlnHvIwjqfTaZrna6ZS67Pnz3POzeOY13HMqmrQqD0Ng2h0+8iM+Pr6us6nYbf97PMfv/rmq3f3d5mfuPl2GHSeu11294WMzt1hnojR6cywdkgWq92jBIkAiT2ouUzVrBGMXKLfQf
CcOkyUmTJz9BnGQd1j4vnS1lutM5DdXT2aqL1lsB3lejt+9slH9ZvvilpK6eOPPyaieZ6HYTgd55TH+8PDl199c3d/f3V79fXLb4fN+K/+9Beb/dXDw4MDx+Pxan89N21aI2IQplZdtRJRzuNS6nZ3X7VDzCzw20SCpVvt7kHkq7YlopQGU5TSmFPKSc1SSpvNximLGnFq5sLZqPaWBQErQs88GL/MrNY6nYob1VpLQRsVsFqrqteqpj6OKUnmLAQpJWS+0pjGcpoIwiLuMU5DwzAA4TfP/HsrJrznNo+LvnFzT9Nhux3HIbWmrbmjqaKUmnOu/TkxBBLNK0/pqk6TOjmlVy/f/t0//uqX//ir716+/f0fvqE0StoYUak6hTlRbb2u0cu9tVY3SizMVOfJVAmwMieRYchMqLWt8YFczJ+oBgSu5wa+ND8BELurt6bbMd1cXW+2AwCFXV3tjvf3ifzp01tv+v7tu82YSi05j2GygoBnHEdcdInXgDh01aJXsBBOpBjtVtUs49Jc7OjQ/lUJKcXTMM9zqT4MaRxHomTe4puLCC/V4pQlWnAp5XiT1hrBq9arq50I74Zc3F+9P/zmn//xf/hP/+mTjz/6+uuvtyLMcnNz9fb9PXM6TbOTEQmzOJGqlVLiDK6+fg254uEr/cZSxY6nMXMkIbTQH3BHhlvr+8uiQGnm7prHjTVt2gBrs81a5pq2m0GNk7RpbveHJkyJJU42++lv//6319fX19fX+/3+yc3VixcfPX/25PMf/SiRZ6GUEsGqFW3GzOZDTEzEy0Okl6MGlZJqbc3cPWiFzGxqXbFpTZNsoam7HBteA1xfiN9S6ny5iAHjmKcFmBjprF5gaTtPlZK4E0seN3sH5WHjlFrV2lCrlYfpVGY3EpH373ZmVo1V3WmTtwMRgay5GplkBquHTggDQpk3tdY46UR0dE/MItIpTVXBtM5fsbPn8OBlifhp8bmGHq82XLCkkqdxtycopc3zjz+1cnr35js4V+dMGIbR4HOdnExyMrPtJhOREJIIDSnesJQiQuwXdRDm1lzVhmEwi/n0MKzRamYi4swE8bBcKZlC2adylDGxZJaRkIUS5Q3LwDk/++gTR/riD9+8fv/w0YvP/+wvNi8+++z05uuUUvOWhXfbfWtlmqYIPDgNmRlgVY0yQc6ZqTua5Uaf+/8XGh603+9D2Gm765rMAOCrZMLKknBZhyIAiV3rZG0OMQxd5pMhMfzZGZLXRy4yDNmdmlvOkmRrOVHaBXPw3aHImzIM02673e1222FzuzfV6iDJaaDRvZrPpgqQMEESALQwiUZESR7ldWtNaq3R22V2xQxfXO6iGdahE/A1D8QZF7po2OIsTrja4dVorCkfgJTyBwYHS8Z4LsqsaCzmlZX9cubIlwZjvDMv3LlmljSGCMEGmJMt1WbrLTioqoNySk7MralbBphTUN41r5H3kywTGk5OuiArHs0VxA/9AOSITy/UbwIUZAZ3xoBz2ckBxGQwEalbrTWYbVtrN0+f1VpLqUUbKBh+xR3Tw0E4x70RokVnzgTe2mme5/cPh+GbRObTfJymU5YUyAohxDBxQGfv25vFr2gp1TrtS559QkABF9sAgCAnrd6MqHGxaAdbp7tUpTmV2lqBWWwGMxMHJZrn9tW3r4d3rz/76Y/+7V/++e2zK974l1//7rOffHJ4/y/E9uyj26fPrmt98NzMmzkhNOWYTdKQxQHXpg6tDpBqFINJrTqMBcypm3k3bVh2MPXBAoCcmVgD3QZHiFK4N/XabBySMRReQ75i2Xy52FoSjiparbWUFC0ONm+wpWre3F1L3HEGEzPHQKKZldpOcz2d6lyaqgXZT6JUm9dmpk1Yx8zDkPM4cpJh2MjYhCn5kCEhBa6qhPN8jl809yMEuXws56o7hsgEmBKzE/VsMOYVo46LHt0uTcg+dojg9HMvQa4QhY/oSLRmIuIENbh7c5dgfkvBDWNm1ro2VbcXQbS7nF5bw6OuotGJClThzY3dxoU54/KiuJMifQjb8/Mo18XDnAnamrB9+uLq/u3DH37725tf/Ol8eLjabL0e54LDab6/ewsLAhnfbDa2ZN1na+j4AJ6KCzxhp/wm5j6rEJSLYU9XrUXrAlCrfVsmlMxs5UJsrRmTE8xhjmKYVZGSe2vArFpbq6YWul0s7+7eB8fPp589+fjjj69ub5h5v9+ZWVKtdVY3wDl16rDLdTMnd1I3kqw25TFfPXn6/vX3d8fj1X5XHTnaGRdewRdC/77gMZO/vO0a6RKR0NpENSYGM8E6VL43kBbKO+ZEocEmAMipQz6it9bTUBiZhtZxElcO91K1eaubq+v9brjaDrsx6dQ+ef7ixz/9yYtPP+WU1b2a1jLPVeeqD6fJhIYh3Z9O/tt/GcZtrX59fV2KHY/HNA6AuXctXQeInUnMPly6eDCvPJvWJe8AMKkuIkMk5h7eh4BqCpA2V3VDUzViNHNIlHKWaqiv79R7OOM4ClBrPRwOp9Nps4E75tlbs5QGES9zVfXWmrAv54tWx+ne1j4I9YDe1FQWUo31mbgo8a47fE0I3es8WykUUNAQnk5pFJFaazNnYBg2RDRN5Xg8jcOOaHM4Tl/84bd/89d//3f/+Kvvvn9TqnHawFJt0f32WqkqmzFTW5LS5WNhIA/miWD2oj7V6lgyvf7d7PyF+w+9VKd+rnwTC3Li7bi52m2HMUngLd3KNJNrljSO4/uHd+GI83LHI6q4/Di66JitgFJWZcfSeY2PPpfYl2BXyDxA8TyuwQ2FiwmfXmt1aB/yJFPtzZwhUOXE7hLvTHAmbHebxGLwanWT6GozsNfM/tOffP71V39oZZY8bDfjpy8++uq77+8fGiRgsx0Qvqz5H5nnudzzq9NZH5ENAoCHSqp133ERGPVVInbAUSFOQaMFam6lqc/NwSMl9gHRUlY0IyE8PByBA9G78BG73eb509unT67+u1/8/EeffvSTH318ezUKA0zuUKtJRjo/euLNKQaAe5djnQkCHkXDlw96nPeuB4S8hbsnV3JxizHRJUiFLARQAUn1kw00brf7PZE4MfFgTSuP9w/HqbRatamVpnOr2pyZ1WL0w53MQ4PaHW4xQOHu4C6qxJxEHMFjaT1KCWc65hyw3vjlmsGSo7kpXEHOAWJ3AoM8FAnQTT06xCNSHQe505A2+6v97bO5TMd7l2FTyqkejykF8JWZ4G5aO1BlTS16FcZWqZtHKHTAEgtYAON+g/pqM8jZqXNdkAhvNpssY3M0Y8WQ846HLaeRJLVqv/niy3dv716+fff2/vC7r1/dTe1fT+XFbSTuFGF5oIXD864bYM0f4OemWY/igBhuIKIIXXq4vcTPPe7qfbPOKwP4OI5mWJlImSTkNjqtMlyhRrF7ACZxFkjm0d2NIhogAqEZpV4hJE/uZmruGMabVYCuGd1P9DBXovZs/36zGa72GyJULVorSxpyJjd1bcXcnRxEwsLifKkQFvs8HNCqsthDMjPrclwcoc6lcQinslqPSxNB/EjtY83lpkUObX1V3
yrc+S/1zNvsvshIfHAY/fFjvZv+eORhPQi11lS0BZZd3at1Thk1MAsJw6p2QfsEgjVHcx9BwjAyL+Yk1GNWZmbJMFOHuyu8mefUK90LYKAHzZ7Y451DA1p0GWXvDfrV9QYCyju+1lqLaTQ2s3me3759N9UyT9VAIgJOAJkHC1Ptx6bbYHV3pmnt9gLGfYFURNA1Is/sT6p6WqBui/mLds5cStCRL5iAuAHUxGJzNDciassBMFWfilJrqs2tcYeL2G5I5VAVMII2/PwXP7/96Mk333/1+U8+52zPnl9vdxiA3T5td6x27L7lXJtE6NW4wz2RupMbSBKNlN29qAsF1wY5UTO1Ro627I+oUoSLAovwBdqnhqxIbcyltSGwoESEhWeCyDdG0cVi5tSYiCp0KnPOmQjUZfUsskF3KyFotiAu0pABtGaH01yrTnNtTeFMKeAQSRKZNbUa9c5xzGkzJJaqbsox7QkS9RgCtJAb6Rtdm2kNK7MwYi3rBkTNki80eS9P7Aeu3btoNYHIO8k3HK4GqJsrqzf1tfjqYIs7wUROZKYdEk7MLJ65637AFjWFwGBYc2u61O/7gW+tdV5ZZiOofxB59Dml5c9+pj7w5etL3D1GxYFOJ0EOZhEoqWrDPB/qfPA6FXLmBrC14qpC0FbmhnE/am2X6Z+vM9cXJunyr9EfyCRJwnuJnucQYtLLUzd/rd89MoYAUHjQWDkJM5s7IJREJBuLw6p6MW+q07wUikzVowZPtdbtdvzs889/9KMfbfeb3spgqrUxI4+DzbNIpFi6ujcHGZw8uAE5ZeYkKQ1PPsr39+/fHQ/b/c5Us3B3Tb7AS+KSFlPN3MWjlo0n7u5QYE3OjTpBjwcwnkNzIvwFQ7AKscbp86at3zsiBLm29RSUBWCHRPzBZqatNrctdLdJV/t8vd8c6v3zj26fP3+22W23+11Vm0sjEk7ihFIVU1F4a+Wb774lyHZ38/TpU5H8+tW743xytzA7RCGHuEJ5g4k+xtY6ZlgC2hkWGAiWVVroy1dTE4yR6LqF1kkOiSymfxaONV+GwYIjPH7TmpoZp8RdkNOY0zBIupAMhdMyu0JLibpDqeNQtNYYimW4Fx+47W4z/vjevjwF+6vd4lyiyVKLNoJ4LeO4HYdcSnuYKhGzjJvddir5zZs3v/yHf/qv/8df/erXXxxPZdhcba/2p7mqUS1WzZupG6mLw7siO1ZEGdwjCXTuK/xBsH7+5uvz10tYEXFYAh0WYXikFle7TWJymMNSlvk0MdHt7c1ut3v59e+3AjUM6dGyrLGRu/syxXf5oWfWH4QmR9xkk5ywaL4L3PvE1DlEBigcUEo9AiMK9x4izhZbyNG7FhFsADF/S8JQK8xiqkPKQ6aHu9cP79785PMf/TehVmdmHofh6dPb9/eH+/tDWToTxBf9PXe6RFgs67Z+zyVQPi81EXeJEcKFvbTEojGtuZJV9HfrED4WuEPNjsWkVnVv5uYyGIe6GpMTUR6uSinzNNd6cveUHl69fb/bbn79xe9+9uPP/t2//cW//sXPXjy/GXMwBbg8zld5SdptISQEWWxdkIFMePAupL6kCmeY3GOir7gfXdOo3/TIBtWQ8miKOU49hFLKOSfJOT1lTsNuN1edTrWq3R8m4jrPtaqVZuGd3EcXV6Bh7hB55qgcmoZedFUN4iliHpidxJlGt/6dwwFF3sJCArJFjHQJX882ljmYZs4xNxHBxdGW+05Ln9CDzJmd0mZ79ez5cZ5Oc5nci1pycCA03MldyAHp6ayte0YSsZGtJyXsEjkLJXcnJhECmLy3rYg95JEpJeGM2EtEwzA0UwfLOI6b27y9Ns6nqR4P5fXr1y9fvn737l11lGrvD9+7pNLqf/4Pf55zTmloWiLMSCkZdXqD5RvGRAOB3MyZOYqzl5nMevwjssUSMtWiF5FJGOGI5GMwrcJjeKQbKJZMJu6uttSaOQsJGZMNUHGzqCWqkzsn8j6SQElA7qzOBmoWdNMEgkWBXs3MvE38MI/vT9td3o4p5TG5Qd21EMBOvCD7TV29JTpztFxa/hUYsu78WJDQ2lnBI+vTVr6mS/sAgHAWmmem8IBEqFUj2cSi9rnOQ12+/LKXuBofunhcTr/jsVP74Pfxc3KQU1DhxoFQEBmBU8pZrIG4AiBhNVT1TGjmVU2oC/WsfC2BQXZid1WPrOECBdtlSjW8Aq2biGhhvPW1JbI8eheLQi10MUNEtN/vr66uSinfvHpT5jaVagpnhtdm6k5D3sR5XgKJDknVNvV6urvDpLe801x0HccUgTCbwYxoMzrQbO10m9NljTDYVpf9TgTKzmQQgy0ezgGilI3YI+PmBCZ3VqqG1KwNGxl3WTZ0++TJcTrcne4+/cnzzZaN5tvnI8+NfHKfd5u9EPHSPWB3wIU8LVSuFNmgM8ONgnA1AlBGBNaLmVNvTIldATa4W9/ZofkbJzaK6M0NijpNmSVYZx2BjXFm9tY748yuwtSJsIymRhwTz9HjXSAiknMWd5/n5u65OYCpzKfTDGd3MCWXbpFVlfII87ghDMpJcsrMfDqdjtN8VJma1qpLT+kMbrnss18ekvXn+EHAzt3DxaX1cS9f5+8fc3MvaLXloMYQcJ+TYJZYtHPbrLtQBN+dE6WlTW8mBr0oTwDOtU6qusjAdE7UeZ7HcSSioNXtqWWYg3ON1qI8wUy+0OU/6tg5u6uTM4LiwqOUF9eWE6PR8eH0s58++cWPP7veDpubfZ1O12kIBl0ze3sob+5P96cyaPWLfqC7s/PFuPyHE4z9+5lp0MZJOuerRCSMyL+YFOZmYMKZfGaF5BGYwCm+MjM7EUtm0eZ4/3CopUzTVIs26sNSBhLC048/+uSTTz755JPrm73V1tpRRE6n0zzP2+0YlGjxZVprQ87L/iCm5DFnT6REw25Ppttxe/302TcPD++naZB0NQxRs3VGTPqZGbTB+3h4sJj09yNaKiMuIO+z7D0UTNTZNViCdNFjYbGUKrqjMmutpb7TIEQOXwXM46CpmxM7yANe6Cjl6KZD5ie32/t5un16s9lvwL7b7e7u7t6/u7++vt1d36RxU+uc4HMpImJOtVbHcbPZP3/+fLvdv3zzXSktRL3UzC0q3CYSJyWiX+qSAES6DEYuOVjsBwOw4h4jSVtPQReYhQTBHHeV9GDfsfMR5r4agKtqKS1LSynt9/vr6+v37+bNZhjHMeexzDXULJIMsvC+0zoKErVbN1clcuaL+IZ4heWsRuOHxgQXIJx5ngGkxCFuVkooTSR1zNXUqkMobee5fvf1999++/3vvnr36uWbP/zhy2++e1mK5vGK0lgaHNJU56a1xVp1378OHy3L5QAUXavx8nv2bSabCxOgjw+m+uPil7vnxHDNwjmxCMENrkJCRBUuQi8+/YSES6mb3Ya7aNIZN4WLN+wmbVmZcLubFHoYcF85lUlYck6ui/BGsMuwcMqO6n2IuheSYpNzZI1LXZJoiURb40RB4t8Jx4lFpJRZyPNul8xZOMFevf7+26+/+PRf/fsnNzcvX79m
2qgqkTx5enM4Te/e32lPkrt3UDd3Hyh/sBlWU/xHoy51gz7yOIs16yS8K6N6DLYxrBdOeuNUzUxJMJfWrFUbhiFLkqWtRPtBSVyyg82sEtnkx3IYH/Du/f394cHdN5u/+OjZNbElGSIsPn9m0MoZVKu725mFzdBbe4/u6dqR+OBi16w4kO0aryQiFzg5eG7kTqBsxMzCnKqzWiq+sWqnh/Zwfzid5mZ+Os4QGfKmKlSdKGTHOk9a0/fUD3+oW8PczDuNPgVaLTImIghabiTMzD39IJfEzNxU+ZzDBHFXZ+m7cFgrFPYcSHxw/GubicjNVBuzyPZqc32bT8eH+7fwNGRJmxE6a51YOGUWFr8ohFFH1vSeChFFjhqhY3TC40MZFuqOLOgOFCJDHoedOkppBjAnli1JItnwsJuUDw+nN2/v7+/v3759O8+zg4V5s+EEmg4Pv/n1r370bOvuL168iDlGZuaUyLs68FqtiL+Ymfra5esbKDLtc0AChPC3iKz59pK38FIU4ePxaGZhAmzBkLvTKCOTW9/4xszCKXEmYaJkRq15rWstDGmQJORLtwYAhFPABGCBDwAzC+eciAh6d5rnu9Ocj/NuO1xth+0mbUYSFiZlUrgbGgI0EaRSwIqOXtM/kbwUFGLXSWizLw4RWGmuYs8sO2c9O9QhoGd8xLqkQMdByELTtT5nTSB/uBXXffvD3/zwsZ7i9aLiQ5MzccpRBhdZWklgA5kTWEgysYMTuYIFBAOpqkU+yuLLQALABgp6KyEzgyHiiaj6BgbahJBzBgyPk+8l6HnkV9YLZmYJcUIzh3GSnHMasrx/4OpJ0Njh3CwidmorMevCPSDERDT5LBcY3wjfQx4MHN+KwOIcip5Offink9MRszxmCurSF+e0gYkQsEPQ+Q6dbwAkKOYNxpSnuaVxQ+IufPP0dnO93V5vnn12q2g/+8VPvvv2D88/ujm9eY9WicvVTfaTkiuHoNGCZU+J57maGdwJ7ARxNpiripAZLcRBS5WSl5Zxv0pv1jx8KjUmcqPgWe5O3ajVpqzUiJeBsSillBokfk4S0m1sZrXFsCKcLKTqz8m56OAMYK5mZslIVUspqkYE4UxMCRJwc8A9EdwHSYO0yNHGLC4yz/NxnqaW5ubFWtAppi7+1TeQYCFKu0zgFjsX4bm6myqsgyEAmLfonrFDu7dr3PXWJU6l+8JThVBWjx5QIna4mro2NwWBGlnE9NFaJoeZqyrnDDbY2pZksKVlOjye3iHBdq6SLDiTD496P0eE4JWhdRj64nx1c0NnDpKIPQLq6VU3Y/7044/+w1/8d599/JTLJNp03j5oGzfbK6Nh3JyUvvr+TfntH5r7ZUJ4/gJn0/eh9bmMhHzxM0QEIeHkTNF8tyjzApwZRBZD2sKmfcjYe+2J3NDMKQnlYVZ7fzi2Wstcg180RogN/unHL37yk8+f3j4xs+PxGBt1u9nY0dFmZ3J2hau2hnaqpxwifrEqAcwBGeCm2+1uerg34qcff/z67Zu7w3G73VZhDvoKWhqaWO42IQaDEIycQTy4tKMJ1Jk1FgRgWCphcAwfLku6/ulqGk1jNU4ZAAMx16RQAInZOS2qXdE/JIYQvGlprYLa9c3uI8fTZzf76932apdz/v71l3fvH4bt7ubmJg/DaZrcaa41o48QTNMEyDjsb29vd9fD6TS/e/fu7u7udDopPOQfqKNGhUSpy0WAmVqLpIMCa8VO0beM/dyn0KHqaFHCao6lKNNPK1M0A43giCGu7hQcrqrh6WstZJZSGscxJoIitqPOdNI1IZnZz+MM60wdi4gTBRql8/a6Pa5s9DuyNlXWX37wr+5uhlrVzGpVJqE8kDrL4E3uHk4vX33z2999+ct/+uff/+7Ll++OrVmzsB/bqTlqATdwqtqqNjV1IoFEUBKWXC/z50iNvReloozqvanK+Qff8/Kvq5M9H2FXZhpz2g55yAmm0cpopimlLPTs2bO71y+bajPdjhvhEAA+Rzmrv/algr4mS0SUSGqv1TtBzY2JGV0PGbA+SYWe/OmFPblc84VCSEHGjE4+yqlDlxmA6SL65w6zJkKbIbk7BJuBydvD3Vty/clPP//+1UsiIrfS7OnN7fu7h8PxZE2DSImWlncEr2uj7DIMuNwDj08uL1bx8jkrtWC3hOsPnDj8+EouAmGHm7VJa63z0Ib9djsEybO1+0O1PmcDg7h5gXlR2m9O98d//M0fnBic/v2/+fNnT6/deetzfLRZjHsbUZ8NWS/qkmAoqr2rtJKvlamV1WlZjQVowxYexgFKLmLOblQqUt7kcZfzAHCpOs+11nonD7WomR0OJzMf8kY9seemZMrB1EBUnXum1GwmIqYENIBNI20Dg4gT9Yz0UnqkiohsWRJL6ixfItJas+DbeuRYyd2J+2w8Uxa4wRddN4dL56Q4H6TGLC6oBnXHMA7XT/e1zU11frBEvrAQEYwXOYf1pAgeCQv3PWNnGxety3WPBXNSzhJzp5ISC9w4peQk4zjS/nmp+jC196/v3z5M94fpNJdWzZw5ZXFMpRicJZc6zfP8y1/+EsDV1dXV9U5VS2vDY2yRe9fH6LuaundLKbVmUbUvpQgn6i2h8xPWbRSXcmklWmt0kVUuqhkCT7EEas2dKG4rJ2GBk6oFCOsMuBWnDrYy1WC7p5S4BswomNgd7uQwgmjeCW/I1Fq5O06H0/F6N95cbZ/d7piaeXErQbyVE6ckOlX8scflSb8sBtVa16TRLzK3lQvt8rUUARCMCUzMFJpkQdVJTAwneGCA3AE3J3qEFF2tYuB1L38TT7gce8aSicQ3uYQ2nO3zPFV1VKMGqYZmcDR3O05JKxKZuyZIMwVxHoYQsFMHwcxhQWZBrA6Cuy0TY05Oi+HuhtQia+eFp9TdzYLb6vERc11HttZraaoSREzMqh7D0AC0uTucOUGc2KsRJ2bRttSje3WDgqY6pcScliZGh7M6sXekeLSJyJGWiFq7WqUD7kydVFUXIAG7IyiwI3nQupBY9hW42AfBXBn8tl38pzFxzofpYTeOL3706Sc/+vRHn3+atqna8adXn//D32zaKZ9eF5aUROGzt6pJVDmJRCgJBiWeTyohselGIAgSVuZnQxDXRvtB2CmtJPwsFHLYREzCbhZUalEgjM1tSwVU1WoHVRsqAKTWTzvgzZ3ZDdyURTptH9Tc+0S2u2MupSrHOKprberupiaS4Mts3lKXZZKiyvCUU04kZIkpZ1FGrVrmOqnOytWUKDEcEuzMkYCqWyN0aKh0Wv7lFAHRson0LJwcR2mAiRZeMnbYwh1ORECvg3j4iT7p4ebmpqGfrnBzKxHJMbF7cxNHxJowV3do6wQVsTEWCsqUUsxzm1mtNTon4RVSSjFXzQt6eW2BEiP2LRGI2Tso71Fi1j/L2QEjrEGxGBzODtd2++zZf/4f/6cn15s63++E3r9/vx8HVJ2nk0K2m/Fqc8V58/rdw9ev3rp3tWVcRDOPPu4Hv6E14L5oXUYHq7+JsBtUVTL3OezldfEENe8dKKKirannYWM
sRnQqRaPKHW0xIkCcsN/vnz59vt2Od3d3Zp6HTq49DENrAxZRymiaRlcHUQwF9TrWEtM5wQjFfH9z+/Tjj7/98stZ7XA4BMwmsRBRixZ5rIwDOEtRE1GUHOPCmfh8eeTswfrY8VoR00bhs0/kmwesiNY+CQXTam9Tk4MZMgyty/r1BQRMCGNOR29ZoMDz57fX1/vdfthux+b2/v37w8NJ1be7/ThuwPeRw5VSirZx2Ko6l2LKRCJZrq52m83w5MnNPNfT6XQ8TtM0aXNdmg8hARrEZg51OIVvg63jodS11lu1IFvBGlYCUNVqKiF4ZabaZSzcvbmx9TTYXM1oCT56K6k1m6YppWTqpc4EFpFhYFWPCKY7ooudKSKSsiGtAlNmMFNiW4cCVq/pZ4js+U1Wx7/d7yJ6riUmq1POm5Q2Zno6+XcvX/7dL3/913/zj1/87suH40SSlFOx0loLE+pGlHjI6XScm2kUrDworaAARZ8qNHW9MxAGHYNHV8ScCCF1Y06Pas++KIX6ksysNR3qSBao6nbc7vab3XYzJDGHxihHNRG5vt6P4/jq1avStFXTTR/5w2PSgj96/JfMISbpNEDgvUosmE8nZmZKsrzYaqukaRzW7g26BUdP8cwcSuhNnuUo9W2G3vc1Z7ZQOBSYNWKF85D55nrf5qm18qd/8rNf/sM/wdqw3VupOcs4juNmaKfSdOHeZBYIEdns64pSb+XFAezIkXPYEe51sVfuWKqiC1iaHhEFx3YS6iygMI8x+wVfQNqKtuqoLAaMkhhwZ2ESIpRqqj1HIx7uj2XMm/vj/Hf/8C9A2m1v/nL3F7vdDnO9qMs9mgD0c7vArdeaQThHkHTR5FTVS1IZLC5pbiAiC9JGJkJ2Sko07HcOmU30BHVr1auJKVej2tyMaiN3iHBTg2rALM08FCYDckBqgABi3vnDo7Dghpxz5IGLf2zWk4TGnJk5XbB8mzXVKtKLLL04TlFJe7RjifNy2NUuHuvZZzGRZCA2aQ4iGvfX10Qk6f7tS53vpnnOwCanMQWfBC34BmZiWRLX1loHBBr1TAER6HOML5AxwJJoSJJSIuEYuY0VyDmnYTMMw9uK09Te3k2v3j28u59OpfPPseRgRBARuDUtbpJS+vbbb/f7/ccff7zd/lRE2hJv6DJiAya60MQj5lXBNaUUbW1tlkYSEaZzuhulbScHU3QPlqyGCE7McUZMlT2BLUlOQy5zA2CmUdlhEWJ3aG3FO7enLp8e9W8oXL1VqwoNrKE7qRWE1hcJoHA1FQOKOxFnkTRuOI2u7TjVUo/utM3YbmVMG0oMq26llJIf4XgvEryLrB5L3cTMony2NgnXgDNEgGjJEtcqf7AQX7oS702IHt1dmmsA5yGIZZfG47KLeBkifkCEtv65Xg49xvmnw+nY1KuTcVaSoJ00tAe3E2kmD1YWKVlSSmmgkFMThpmhmRlJnwsiMCQK4eIEOBNEUgrQS+zL8Em11pQfFQyYKaWUTINkY12L3tcBzGye5x4bLSNJAEqpoTtMDDNrpkRMEGd1Z4qGYeQcEaww+4XfYk7xk8G68XY2d9M+u0jW2y0KNTdoJ0Jzg6+CTjhfCAGAhTqZm1EfmWWLHmPCytIeOIE8Dg44ydX19e2zZ9U05dysXN1evX39/e56PN2lYWRual6n+RBbBv2+9pocEUkic4hTNQNc4BAZgLnFMEavMkWo6m4pc6sLrQKfyZSBqC9fkIUwx1VEohKk5EQSTf1ismgrkwJCZKAG1ha90xDzouAoNtfkrAbu6isMMiGWJIljPxCWXlCkryklhudMm8yZdTOkLKktI8tuXYxBRISQJLTg5fLcxkbqOoTL6RKRiOPhHPg3AMyLpHWS4PSjSucz1pWIZDl4uhR6OaiLzXoCEW8f/dooFFTTRCzEQVKq5hbJuvOKl8tZOOdSCl147CAzXbfr4nqXhDDS+LiHIUzPDMgaZ7ivBPe9QrUYBQHQ+Sei+c3Y5EESHx7u7t++vBrlcPf2QGBOc1XnVGmQYq0hpC+ZUqcHWPiGAx+LC8ioXySKEegvfG6ibWmww92hSxIV1lLAnERVbam7rKRBrTVw8uaz1VOZnSnlobWm6tEfV7A63BxiDsy1NFOwSE6DiDBqnUsptTVwIOyRMkeJfBiGpGxg718GBpCTOsYxl6Zp3IBJhvzxJy8Oh0OZp4f3d+Pouw1zktBC7PIJsabLFnRD0M0HdSU7lDXgw8SR+i2T+NFU6UEgtWYiCRZKIguULrN5I3NfVKT6LCg7ETu5wp2JEAM9nWl3zNKsetNnH318c3u13e8k893dw/39vSmXGLAZh5QywE6kilprTqOqNbNmRea5zadxHIdhuBluANSqh8PheJxKKdOp1Np8UVozV++FNCAABcZ0lggNzYnWrcoiTkV8DjqX6N/iHCwe4XyKfcnNzEyYGZH1hYDVOUsJ/7IOEXWPaBe+2SwR4Tyae7bn527kY4eK/z+P+/vDelpFMpEcp1Len96+f/jHX33xN3/7j//yu2/evT9O1UUGoWG2qg4Ng50TZy5zvX94WIgcQYnQia9AROa8pB8r7qGfOEOAqqlXWEiis7R+t6VEe169JaXsDXsAY8777eZqu0kpORqZs5uCWMCUXrx4YWbv7u5qKbjeOyFijjUAWiNmWYa5Vy8fa2K1mZtE+ziU2t3dvdYqIpndmMmZOs8Z1s0Qbw6FO5jhUKJoDHY+y/4EMjdyBNn0GeIVJ6q2AoNSTSldX+8Pp4e7d+8//8mPX3zy0avX71gwjuNc6/X+6nCaqj50UbvF4dJFwnC5By63x+X1AtAwgW68/GsEXhzg/4v3jKPsDa4RWBiDfSkrxYcoazM7TQ9qZQiRW085jQCpt1NVgERyIlalzTgw8XE+/f7Ll//06z+8+OTHf/rzZ2avVmUU9MmChfu3I4qNQjOi13HO35CWFnQEA8CjNQkfNDs4hObBBBFKBlHilDZzscOpnCat6kRZggZWhD24lkttytUWX8w9JDADe2IBnAiqG+8cXkFYs/SkHETKYWWtuapqMesVqPN9ok6hEfdCyck8GF2CnSXmR8x6+B4j30QU+nLrxa53eRB3aAC/yAhgysOw218TgDa9bz6rt9oTmNbpBOOtVo3QNXB37c2y9bP6MYpWoUCEEouIcBLmRJDmzkgimUXc/c3bu8Npfn8/3R1LNSfJ7lBHK7NWJfAwJIfNwf7vrVb79ttvv/zyy2fPnu1v9utH9xIM9/DQ++j1WWXBO5hXzGy1nGYWMK61cFm7utnlqem7KGZQ6TFJR6dNp+qsxMxJSNxQmrZgFSY3SQMLB+mrITNpL5B5W9SC+lQwEVEMW5IAxo5iMNPaeDsO47iBljof5jJ9//L9buTbq3R9nfcbYbFiXkvpurKPT7cvshzUOahXC8Ep9QtZD4tdoKzP3md5w9iKdLET+l/XqfnlxK0vWW9QPOKvK2p3XfzL7Xppsta31YUx+LLGkVqzpt6cnN2TE7Ez1Pw4nUhbJuQU6sDzZrPZbiVJFNsC3L+kDZLYQUlEATBRow
U8NQyDeXUmAYmIuWottdbdkFZTz8SJWL1lz80fOa24cAcNwxABRGydy3qkBagIbBrcdwZOqmHROnsRea88Gs4a38zJO/QkWJVShBfhoyii5o414kXBk4NryS5N4YW5DE2GHudGVBiQ6gC3GEN61NubyM7N7Pr6+tmz57XW3/72Nz/++cfXz/a73fYf/+EbEdrvt6erXb0/qNZaZ1k0QGNYCGpLqEBk8OA+EWfvIjjcp+MAsK+jtWYpBWdui+Czr6QZhb65h5rto36OGYgac7Q2yV1U21Q1CDXcESLLRGjGWhUcq+pOPaQz8MgppteZmd3gTJKGLGYmQRC6avUREdFms4HROPKYLVGLFpnVFVQpmSQLbzYDow2Z99sdM6tqYilSLg/w5b4KKvOcc8rj8Xicpmk5w2cSalVlrtp8PX5EpKD1GF8e1/U8Y2GSNFsnYwOaAxJinE8+L5WE5TT2PUbnfDW31pppBEyM3szsCeHFpzNHlk7Reb4cHbyMTrC0B53g5iu8M0YWVfVX//TPz5/s0GYvuh3HN69f7jfbuSnJeD+fpvd3SptaK9GZEg3nn4gWHrHLT++ud6FgTikR9aKMu5vbYtoN0YWDhvist2ZLaI4FOj+dCmcz42OZjqejEjkwdYYnGNjJ1dwggEM4EvvlXpCamiFnDkLReOtAEJVSmHk7XqlTYHI76JIJwDhuD/d31/sdkzv4+vbpk2fv3r5+U2LWzhamWLgvAeLqCSKoigu06BA6wohF44PICaRWrfXugSxmvZmKSLQfsagJiQjMYu5XRNwDV0giMteiDiUmTvG+7i7k03wUuCQC+ZMnt0+fPrm5uaY0fvPlN8fjUdJmmqbIl4iouQWHRFxIa82yIbh2BbXWeZ7dPUkex/Hm5ub6+toMh4fT6XSqtc3zfDz2FZ5rIyKCC3hh0bHoTIc9Xyx5bwHlnA0B6zKAetxBWCNyv0gSwohHQy5lWsA2vN1uHx5OzJwkM3MAOJdRZ7ZegO/WsqGFRVYLzgZfiycivXLxwVHyhUh99eWXf3ZSIqda6/t373735ZfffP3y17/9w1dfv/7m5VtV4WGfhIta0VpoHsetJArMS8dNhfUITWbAbA3Wadlnj7KROCAAems5+KIIHbn3g8cHgcL5ycButwvKeGtqbV7AL5RzZkrPnz8363ctQtjWmkheQ8PFn+owDGvXixeidiJqNseEs6TUqYNMl5bIojjlKpHYsERoDiAqCNZ1j+FQEc5DiEGIe9f72Qjpco1MzI4Q/IvsCeaSxNxZZDvyq1ev3rx586/+7BcfffTR6zfvYC4i8+FwfX1zdzwep3qaSrPVPsPdmYQePy73xroNfEm5W9eiOO+THtstGgOASZ++gVsHRro7Q2K8T+HLjC6IyLTNrbU22TDknLwlZk55DBdgUT0Hu9v9YRoYAnrz+u7v/+FXz59/fHPz5OOkREEsfi7WXBrqXplaGL/iy/TBtov2xfryNUJd7qATB8MRd+JQSur09u6+Nq4NpUGNzc21EblKBZhBrVlrLUlOMqSU5tNkydzdajNvrbOht+o7d6ja0lfpLX1VTTl05wEybSW00ChdrYc9uA9ify6/VHeD8XmjprPG92pt1ksONw2s/+oOM63mgSnrXP3O4iLb/VXyWu61Hk5mRi4sREtLY93wYQB3ux16D/+DI6MAiDn0RSVCCDJGsPIM1cxcmJM2O80zyb626f54Oh6rcyZJ5jCtrZoww73UednMVo7TZrN5+/btN99889Of/nTYDiml+D4RaFHvB9C52bFEL2aPQos1sXEsuXRs9YWkk87lYyKilJaRReaQLIo3MV4UNtgoMSViMbMmKcrGyolkIM5wr00rINmjCRnexRxwcBBVAA4rIAFpWPSUt8ychRmYSvHaMuf9Lh/uX9/Xw3xqp2N6+mSz30tOiZm8PmKnX0/xqtOwhi5husNdxgZbu3OXee+l9eYFXnHxTy3SFlCMUca6cXTb475c+po15lz354d2ZikXrvfKe1HaFhnS+PRustJ9lZwzEzVTqOZEIFTFMO5aazO8ROm0uZws1dPHYsPtbRrGqqQ19AONrI5jhlpmYavGUJ1tpqvtdqYKsnKqzMKU0IgpeaU+MiYyJrfmp9Ps00xFVbbuEv+RaIQ/qhpJR86ZSFQ1SapWHx4eimmPcMlTRhBOEsqYU2tN1ZSIcwJETc1slN6gg2Od52GCEAHNzQWIuNx9aQFGLdY9sPocY8ALzbpfjK0BCHZRsKtreD4nVTcXB0gR3PgxqQI4nB54I3Nmvwa2+OTjj3767MX/9v/+X9LN8OJfff6w8X/5L//l/S1fydZLy7XWqlcfXzGlYkYpVdLW2iYPTMxNRS2x1FpbOZHIOAwp7YJW0cwyswhzMpRG6kmyMzWDqjMSkZCTYwsXJhBKzA5EcNRaI6Y0iKraUnqF8NBpryj4duc2M3POuXkhogCkEsiAxDGO35YuvBKTEBN5KxVA1BqM1LwBGIINWN4mJq+SxuvtcMMYVcd5bqAkaVNn45SSMNo8bunmKkNIhFm9KqRbn46iJkirtZbGInCB55x2rLobNpkkWhVElIYsIja7ERiu1MzdxFMiEUlG7s6JoOQVqmrBguZO3mK+FNrQGpu21sRlyRiraiVGlpQHgp8cxkQ5JcmZqRLAItuBvYoVd60e2qPaSilTLdskKQ0pDYkmtoRKNjt2xoxg6QElGZPCqjYSVoIxkjDMyJSJmEBWATCcmNSjruHsLknevn6zH+j5fmtFi8/zfRMej6fcKr+7v1NKN88/trx9//b3o2xbhKeEiC16lkNIDocnJiN3jw4DCYQsZd6zJ6skfcLYhyFtjjrPpxgZnFs19ZxHgMtUF5bCxFlArkSnQQ4baa3WqaqqrcKsTJF+BOklu3MAjRrN1QzycDwRMRs3VeI0zQ2QnAYeevnSlAOKcjITEWaJhnIiA5qZeTneXm1b1erOMpDLePv5FldHG79/++Yw+YthEEcyT+BW2pBHIzaGE4qWWiYWBLAnOtLOzJDmfYL8OM/MAHEnBBJB1zMsOXPKkWkkODdTEm7qxMKJlWFGKkGRUg3mQsLOVIhImBqkuSDtjkanWcdn1zR+kvLNT178+PXrt+/f3bvRkDILbp7cjOOYhuE0F+bc1JmvptmYx1o1JZ/qQyxsMDeoldMUhUnebrdXN/n6ZrTQA5ymeZ5ba69eu9amqkaViMDs4gXY5O2kaLRVlrk1YlfyPKYJJzPzZFoq80CStDmYyTqvTKLE0lsmiZKlIrwRuiFoa1NtD+BDyg7P1lJOiUXBJ0ncqgzp9kRTym4oksx02mYS1mZ2KMe0BadcKpuCnYQGK40aInwoWswbGMJUCUW9tZbSkDi3ZuzMlGqze39qBlPc359+89uv/u5v//E3//K7u/fHwzSzJMmfWfKipXhzciII7bQRESXZmtl0qoEVF0nkBA1UIvsiAuQckc25b4bFhQEgioT23M2otSxPFzdikug1mzkoJXIib1prKznLZhyvr4ZxAEiNmosJAglA7LjaDH/y8Ytf/sPfltP9Z
8OHBfWUQ1MkjTLHNkQFX1iQuUk5p2UbO8L6/uHfyZ1mOS7H71FrB+Q4cldR0jxjhNMXbQEYUw4GKxWC6XH3/88Scff2re3kZsu91y12eiUDRUQgJvD4pqSR8/fXLS68PTL1mglNLZ2Vnfh3meYxQVUSAs7RydTqswdeHS932UXWpZPfzFCkIagbYKLgCUAMIOf1TNxxgiIjKB5AKNTz78+KMPP3l2Ps4JuqNBuR8TSEwnfd+qU4iYG2MirI7unp6eutnsvShijMyrA34CDRtsX8E/dP2u9eUr9YSDC/GWLdFe217Y+l9sH0k+5/AzD6xBAJimqe+D82rRrClWtbLe3wmZOZQ2+pyizvPcdXNM6ulTZlbSLMk/uzSpWnIb9xCVVh1v9eAWRBVKrTjwD5VX++J5v4FKPdxHDAVziHaZCNa4z/p+kVsaFs2bSv52B9z3/UHjRyuD3Rs7xCONe7OX6gtGqc2H3ELjqkPO8zhN02YzTtN0cnJyfHycUjJZezWfG4TOw0E04CK3vfGpksx9H0LfSWotIsU9S2y3zsYg3NFai73ahE+qGBWRsI+N+TWNvGl2BVqJoOzN4ahPaZmVqnocsCbJA+xMxLqV1UKrmOBP8TFvxUCCugARn0AJIrmjdYoaY7RS7uicp2bqVe9qrSr0BTuUmAIG9GAIIoNBSkkDx5TGMY1blOWCu7ySir3VElbVUCZzYGPOHJgkLe2XaVi75FJfVRkARtV+qWCsYPc7aPERt/pY3jjbgVRz1Une0DCFNzYKysonDzFwHE6u5unZ82dPXnSGqgpTOhpHjtdR9VLNUlIierK9Vr1UVdXHZY8vG4aFAIiqgD0lMwPjXs02syAH5jBK5JCXrrgX6pVZfAuw1Np56pqq9n3vY7KrcZUsJVFLmSYhsECOPgEjAAuCxN0goJb173mhEARMTEWd32WnCxiBmpqYKBEFYi6xyjTHjCpYwydmYBYVA6EHQ/2lsg6JTLkqL5mC5WYP2AkAGCJZP4D37xEREZxDUF54FrhgwnmrRPxL48Nn3336XLsv/eYXrmmbAmzC/OFf/ck/+c3fvXvv+D/86b+zy6vXF93Ja3devHn/0fn8+79+PMSP/u5n0PcvpnRtR3fnCbHrNsmgWz7fjC/evXiytq9+8fU3Hr6xWoSNzia67KkTE5lOjo6Y+fn5y2F1ZMSA5g1Q0CJAJItDWGGXa+1SShg6Xi2oJNljrg/pvYPWOI7cBwBiYGYmBvCULNO+7xQMEBShAxLTGOMsCZbHMM4M22MY79MY0K4V4xypX80Kahxx6MLJdPXi5v0NGy+WSCTr8fp4OFktj+YxpqhA1vedaIxxVE1gaJEU1AL2x8M4jjGmYRj6bmlKIoIQutD5whCCqc0TxNnDkkREAQdFjbMBIEInIqqmhCHkWTee97VYDNs0IyMRK6Km2cwwBEQ8OjnWJOI98AhgCBSCdnwFIzCsdT3algdmnnHernqZxgRIqhjNy1wYcghrguZAyCFDK+2MbV8HdUdaYeguBVFEiSF0TKxJRjMDBCQTSczefA/cFVcKe3M0w8oIZm8O1nMQEywzDQnZJ4ggUdeFahW0AjjNiRpfJu/quXO/tb6vxWzGHFKKLX+sKpRqrGyhnlDdN/595ZitDV9Zf5WX1thXhiYic5TM0BEAaI4GQEgLUZujMPfTqE+fPn/27AVzHpQ3DL1ePEeyF5cv7l/djSpnZ2dJZI54dnYmGs7uPXz+/Plkto3x/PplWPTb7fb6+vrl85fPnz+fb6YQKU7TZlqLiGr2Z/kyttutEWLgLrCIiLcWYFKE4AX0KY+d7jggojfvtQLhCnxmjkm7rnd3WN0aM+HQFwgDAhKDqSZJUMJ0tbt6lWqtltxqIZkzlAK5LAgVAIiLnz6J16mGrtvds97KLI+m8A314cjjGOf5muSs80l6IlFnMzHTCArEprxOOnRLm3C1PAK1pDzjMRAh7zzlOfCLpAJpFk/NyIsnZOzMLO3mEuWhnuVfJ6vcSsb1HNg/Wunj37TYy6UhoapeX19noihuqbpT0GjAVd4fIC3sx4vK/Qd0JFfB0iAa0QgUKQIAByMDwCxJF6vlZty+vLr8+MkTh/Z2Mw3LYxFJgACGwaWlGgCBbaFDWkxI33lqT9LFN7/01pcfPozrzWq9ubPEI5QpzgCivIq80BA63ey0JSZEE0vbyWeCORjVzJJq1w3c9RpmA29ckaKqJey6rgsDoKU0mykF7oeQZE5znCGx3JGtLjh1KL03H4U+8erjl9ff+0TO8RjuHK9FQacO1yw6KQ99WCwWDBZjlHk01R40zjHAnQCWZpm2M2MvCSUGDOZtmWC/lizrWyULDku1hW4VfXRBYOKsn4gkKt16CbwrAYCoqULYxU/qVlZ3ZP0GavuZImUrl8t68X7Rx05Td9TFna7pPxXGKMtlTwmGsLh7eo8hQOjGJAASQuio68PSi3ECr7ab666DxWIxTfHq6rLvuwcPHsxzcgbowf/T01NEfHn5ggOenL45xxEAuq6LUbrAMU6kEHwZiNSHrUSTyNgReDd4NjP0DCpkFUXaqwugMhWwAqR+RsTlcmk4IyKAudxnAvJwIioScec1EOLgYSKD0SCPVlITA/Oc5X7AlMDM+p7NKOWx1Ym7PISDgAIFb2asatZulqpKbqesSN4ZS3cTXzGlNM8xJb24uIwx3rlzp+/7eZ4XiwVql40fYqqV4ZhMlQIHymFMf+u0jf2RtxMzzwhREVHvuqHEbJCIGVFFU89skLx3Y2ZQZTADETESqCFhcOQ0cCY59KG+l7t1XFkmBkgKqO4nSvPsW9NzX3yIaF7praaSsdDMBITImBFJRecuoEh2RocQEEg0eryBchW0D2cHtYikJaFwZ/AQidquJtMMiXAcp3Faq8XgLXySjJutv+kwDBgADRjJkNwp4ZSlasiIBMZgBjkdFBfUB0Rcb9fQY0B7frlZrvoHR6fztF5iZCNGFDAwRSRi7vt+jlNtr5rSrLlnDCuQv1p5Cy2O0VAj6lliGiDmMRIAeyFEVR2GoQYeagGtqoYh242SlHYKjyFiF9xTj6kocogYfDjE1fX1xcXFy+sbQFSk7TiO42QIUpJ3K34rEuYE36whOv6J3TJ2waF6KKWqPKuyzc/n0noVSmVtS/Duq6vRBte5VRWausHWt4SNd+HgQa3CV//UUkjTnlzzqaypcj5wnByw6eqeqd/gvgPJmiLmVjm2JmrRagytpFfVy8vLsOgef/IYTuVL3357DtPN5cX9o9OXF1cA9JWvffX1u/eR6Qc/+vHJyck3fukX/+O//8tv/cavJfvRkyfXpLhdX6eZAw0msBoGCrCdrz/89NE4Xc/yha98+e2zfqDeQJUDL7o+pjhN03I1KCgDavAhe0gQCIwYKY89t50jrWRZwL4Sg8WpE5CMDclATcF9MATe2wHcuM4b1BEnhI5w6ELf933XDVFmoz7iJIKlksG52OXl5XI5AOGU4nE/mFlMs+eAZSzNjYKCoYLDE8QEKyZgkxvcbtwB3laChEYn8AmNfn4ocxoBoK
bKEJEhpxRFJk1pmqZAuZGXaO5bQ2UabwhhsVhgSfTv+36eXCfdIXPRUF/tEj5QH+vReg33FcfDkccHT2kf196kpSzYNz7rHVxytKpJfVarzkLjAmxh2N5fytiY9j6I+BmvCz4VDYuDsyW3+vr1JgfPahWyzwJOPk1JM97mhI0Y53mekUxVp2mKUTx39N7ZAw9BMPP19bV3Y/YpFjc3N8+fP7+4uFiv197MQFXHcbLG6CrqH6VmLEddhqrWYsn2jSqraV/Qr/XannY7CpyzLa1l6Ihf7t5WKIh9cPJtyNQCAahJXE2KRF1VJcDqxWyxqO7awcHM2+02WZ5XkdSdBYjkzesB1YREBFMSNFPxxDdqX7M+rnRL3oHo9qZ/DsV9/nH7dfyz5g4A4MXqkAEFsemzmrcye3p2lHJASvVoqaOl8R39FnHZEoIfIoLIiuTFjZUlMrMWz703Kq5b4HQdQW9ubh49fXbM6ej+SgWjIIKF0GHXRTOJCcPOU5BJm/J9iqnfMTOikEhg7IgNgteuo1GcZg+MI/Bi6Pq+N1NPiSeivl+gxjkqgyhCMlFJgBTNLrc3T8+f32w3UYJxcqc2eAPG0JNHHiXFGFV84JUi4vHx8Wq1evz0fBxHEUEfLXALM1uww74KhIgu2tyvlAwBvMkvmB16T9pQ8G3u3XKndt9l3792gHK3ZQSXgeN1hZUxtk60oRz1/LprrhRtN1Oeu9MvuHSI8JGDfkkrN60pH6gLc02tlpDVp9Q1VyTx4iBEVlWj3bJpP3gCjTjGoqol3d3K02VL4Ei8bqKSgGo0w9p4vPKodkOpiUZmgOqh16btuwZVmy8JtITetifU980a+NVVlQs+sAoRa1+MSmu+ALe3c+CgcQkd6CotFrkW3QI2v9d+tmR9UA0ftSwI971+9VZE5AgAxeEIOW2HN+PcflO3/mB59c+UdnN9/IaAiiVYXbZ1p7DFzWRFOefS2bVFJBFt3dAN7ml904qZ0Mim+o2IpEytaoETJjLolwwGySeiRYhRUhQEDoEAQCVHOkTEe4NUPunYUYHpttkB8puZT0Q8QC0zW/RDC7pKxa3VU1EOmkZNPoCQSs5tDYTW3fdrw1//1V9sN2NSiZK2UzQg6oIaUmAVUwQiHxBbDCfbUWyrSWBztBuMGZRW4rkOjz31WkuvvLoZdZNCCKvVarFYeMqvlXyDVr85wH7b1/DaFbYwaozyXXSiAhSKVVZxqIVafc2K2fXmTva4bxAerPD2Ufny7Utadu9/Lfvl+c354w+evv6lN5b3FnMaOIWPP/j4xw9+8s2vvMNiF8/PHz58+Oz5MxX6yq984/vf/cGv/tav69/8+KP3zxeL0+txWi1W42ZCJVEFpIj0+PnN9fh3n754+Y9/5ev3z06Jk5r0i27ebuZ5Xi2PxnEOgRCQiBiQkDr2uQ2enQtdSV3rOh6GOg0TzIy9EQ9B3zEzGgH6HDHwQgLqui4HbUBQMZoSOOZR8OmRgVd9t+i7KeGEtIgYE1ruoSfTNHLg8xcv3vrCG8vl8PLFmpboSDQshjwno5CBaa6LEjAS0Vlbm6RGkG7js6Nc7WZWVcmWjF1Lrkxnnmd3lOSU1NKmDAQiyGLovFckGSsYAiGQqOZkDmZvV+CD7Dbr0YmJEMt02F0PzNvH5yBbK64qvgXu3WldXwr2ibp+T03+douuWTzgLoO6BV1rCrb4zLSTFtAYBpVhHSy4noBNy5n2vawmohQtpObkOOFX/1l9I7wVcqzLPuByLbXmPwiBUMFMzExAJAlxSKoa5skl2fXNzeXVlcv4nj6g0kw8xjgMwzRNrnhtt9v1eu3aJxS2noxuw01xb39bdtHuRcXDCpkDrG45WwvnFuAZzo3Eqgpc/dxCsq4qQ1jTweMynpSULdtX/lT3J2R+BiZjqavRpDUhTfPc2oBGmlTQ0FSVRCxFRQAXHcQ7eVw8/Q63PVDX9z0gis8/PoseYV+KHwgUaDxN7U7dvvb2T7cX0N4W962LV37eESDiNEVEDCHM88zUYTEAmLtsCJYoaMYZUUJMKgKwGedPPn3Uy3hn+EK3pGRAwITZ8QYACKU3Y0089v9wh3vMXGqYfbOQmQkQjUxUY06f0Y67rvM2hknFT0M0ieIqR1JJIqHvxlE+eXrx6OmL9ZQ0DJ4F5DyfwZDIJ5FonFUVsl/bEPHevXvHx8ef/u13N5uNqoYud9WqQDvA6paIdq/T5di7gFjyb/eKgnY5F06z+zdv9/E2jiGi3Ioc1gtbqjzgBpWT7+5TyrNTSt4n2fWuyjYt52TmiaBVlq1WuRzGVdiuW9R7Uu22b+Qd/9vYBQCIiIfLWvSu2l1lC4RuCBEAWCkwqeoZFs9OC4fdy+46n+Uwfg1RQMMt60Nz6fi+QXibWnc02MRawUVGObnVAyvPH2VnILl3w8cUX11d+W09CW4YBgAIIXR0aMDXhVV8qKBoFwz7bNn3qEWhyijsViVby3itUfURUaRkHu3rpdXZWuNaLV45tZkZQDvj/pDF4b6D0nNQORwuvoVGZZtYnC+aE/up4pKUrBZEDBykyViuz6rbdJvh+9Ue5/eZdvVyBBATEZ3mmBJ0fUeeEsmGsiM6KzMRSlM05tKg8QCv6koWi0VmgOVozbwKrrrv9QWtMZGw1NZaKREs57drk2qgElFI07hdXyWzYbE8PV4pUAJMYgoGnDFLVRWprq9Qzl5dY8XyA6pm2JNA1igomYRM2m+05MJ6+/thGLybnM/qbffSXyDtTcfeEWdr70HDQyt+131V3fNJtydYqTeti283r6XJFhQHz/JfaxaQH60FUp91wAdtvxODgzSEEKd01B3P1/O733nva9965/UHb223l90wvPv+e//wN3/j0QcfPXrx5Ne/9auvXTx///333/iFL773+JPnjy6+9o2vgvLPfvjkjfv3ry9vCIjYiMjCAijdbDY3Vwr99gc/ff/r77zz2r0TM5iihb5bwDKlGDpkQAAyRQQl89kI5CPcALCmajBTCBRjxOz8JmYsvQRYTNEAc9cQdgZPbiASMgZRhQRRozOVoMnI+g4XPS+HMAoNYEOPsBi2SFNMUeZpmk5Xy/V6bah37989P1+LpZ56ImPmWWZEpFzCyEQgZmoglkyMNOcnt3h1e5dbNlERrKVP3RfMO17MJgkjCKKmKCJW2ZAZxiheEqCqzObGtpmpmehOcVmtVqG/ij6lMjfL3dM//ssPzM3EStfQPYtoT104QOzbAqNl6DtugO2z9vxVrTyr/AiLTUtlelVLIC3HqCy7FWb1QdZINWvUi5oZaPtir92j1iMgsucqguJ1lqbJQX01oozXpW1PiXpFBNQYnZZBBDabcbudiMhHb1YgcJnv7JYJFJ2YiNS8TpRaOZ6NLAREgtxiAXLoCMByY6YMhKpVw74h3Z5QPfT70N4ViBamt9sU94nUDuwt82xRIj9FXxGGrQ86IDrVnY+jvLTlN1N3AAERghEhAhFTUJ4AFHw6KOTuIwAUmAEIwEzJlN3XnGQnoV1L2NXKvsqH2BJ4izbta97+/dXfNiBq6ctxr
8U3h4NrhO21FcoHEP6so6iJhyqOL/02mTuxXG/Ws6ST4zOwWmTF5g0VgADMdc4qsmOahn5ZAILX6+3Hj+LJwIsvvz70HTPO82wSAYkCdQgd9wDgcz4kN9gQAAjEaKBJkL3AVNVs1qkbiEC46/rQaSepBGQ8J999WIjIFLxeqQ9ejQGIgQJZWFxPVx8/Pr/azApModPSogxVzCzFqBJTSiAJEb2lsm/ivXv3mPmjjz7y2WUHnLYVEC1soVaith4WRFBSK9Fy2kl5NFBV91EQUbK9+x9gy+0FtGz8YDdvU+XBN1Y7eSDO81wXv1wu796964MiPHlBSz3SARrXQlAsNThVGa1kRaVb43a7PT4+hkbjOsDtemG9SR5mWRiGK/pQBFYrRxoGXnuDGwBQCUqr1g0qPNB26d9YBDrt14DVLb4dZSqwNuKcn+JYjUWdhJI9x5wbvSAiKrod6F3Bpmlyg9Bv1g7UqS9IJfpXUa5995YJICIBOss0M1BDA/IxBGpoYP6NI5UWXt8kzrTY2+KeVRNur/x+T19qjat2X1ruWjNNbKfo7vHeerRAtkPP405+dV1Xt8ZKj3cRaaVm3T4oGcVYDNeqfr+SdqwGM2nvrR3bHVVS0gl1s57Xx/FOtxrnTYPVWTCbmSn4f4iEQEW+YQivdnxXA6oqlvknO2Q4lR+2pqPVensCRDQFoJ0NbGYirU4FVeyGt99+7fwZX92sk6Y4eScBimpzFB8Yp0hmapZcFEnpC1dpEorh1LLISts97zUYLDsn9QUQc1dmJ07f49VqtVzmtjEebPUitBpnqFirtnPktzhawXobfI4HlXlVtlUZQevCaVG/3TbaDynUDy0E6sl1m21fmWhpo67zgDDaXQeAOU3jdlyerhj7p+8+P+mPH6zung13795d/eE/+d2r7fXf/vS7v/Dldy7XN0npK1/9xemMX//im2endzcvNsswBMGr5+ujJR0f3d1up5dXG0AdhoEXKwAbbXj3g6dTol/9xlffvHcyyUSIoe+38XroeqJgCiJiyYnM1F0LBkTkfcCVFREDseJuoDC6IwcAkTQXrBPnXqAComDuOyBCQjYDtETJhRAqI1CgxcCr5TCpzGZdTMDdFFXTLBpVNQxdlDTP8/3X7v785x9PaVoulmHINmopACPvuI5et2pKBgOG1jvgOMlNr6AWf8xsHMcaBG6zU8zmg/3K6GRYWvm5wpqcifR9n8RMU+gUEdXUEorM3OVCuErYWjA2qZmVZFdnrLRvgf2XHVZKobDUnfvrqJiaQMN96gm30RUaedneuS1Sr6RaaaFSQaWdknN+eJ9qHUFT/duST0tH7WLqI6yoOxWSBymO7WbVC9s0gdsGfz1tB5M8fTJAdjnXMjJUM0JiKlkPqiLaNyMBXd9CJhERUx+InldoCghAHqvYhz8h7BvDUEM2TfVda4e3zBn3/ce8PwcJd4bZ4f5WMjngcgeMq/3XzNR2vLGtiMDGx0xNM4ZixuxxS2sazLabgm0r7dxdCX2YRSA2Q1A24yjZoEwxL9vlyIE12KLfAarXb9qt/593vPKe8CrPy+fcARrFpT3/YJGff6t6OTaa39XlzWY9Pnz4sO97r5xkNkQyBUTwIdp+lec+okYmkMzhCbi/3G7f++TJvbPjxXCn64YEyVIMARkNIRENRITkyEPV+6CqHnTyRUHGrmQSfAIIAQYkJXQXTEFvRMSOA4fgIyP7rrMEAmYdEy0mCefX20/PL0cB4w5DZ4Z9oL7v0zSayhxn0yQiDDveYmZd1927d2+9Xn/yySctbiO9ImJ/sIktrcVaxsxEGlTVUAEISv5tHbeMiIA+4ervOSqd/r2n2S11onWo+UGl1sOJi5lPTk7u3r3rOlKd9QIARDlQ4w0emTkl9d5ODU3l5LoKjRCC50Fst9ujoyMraeS2r/YcgLH93otIVT23K8PTH1rFdK0LqHfOHkb2u6GncBAR1lF35i7dnUt0p5HuM3xqIjAHT7nNDFEb6eYyqMvpms5wvJXoOI7TNLllaGar1UpVvaUTl+44zEy26xeCjbxu5V1FTiIC3D26/bVVb6jxZVcG3lrXHqaDfRHglwzdrsNWy7orHCob97s1TgRTVUk7qXSbgnA/M6ikgL7C1VsPb5ripkF7nwOztioPt/famuT5unHVHkuaPGIApZANgJAsMRMTgUFSUrlej0frcbVaaZp3oPC39Aw1TVDMUQDwYRkGFPbmZO60ppqkBk1DHdp3v7aHo8eBTc7M6iwF96Rz+7hqyHiH4XDneNnRg+OTo6vrm8v1BozUQJMqAhMZIQJKEeF1v7XE1toH1G1u3zA1ufJevAAAZi6/d1tVLz8+Pl4sFqvVahgGxOxNUdW+7w+g4FfJfmVVvQ80ZZeVMqGZtmnWVNA2CbUtjGDfX9KSB5cp7e1ibN99cvDcVkWzRilpqaJFU9sXPNk2FmVmjdphv7Ljpx+8OFl8+gu/9qUYrVsuvvd33//Tv/7zr//SNxb3jp/9/ONf+sVf/s7jHz49f/pPf+8fv//D9+er7e/9wW/96X/4izjinbunKuENpJjg8vL66uVlmuL1deq5//DRxZ07Fw8f3B+YoqwHhGHoGJEIjRC0zo5CFUVSImIkNCXArnN9FxZ9V9dsZjGqmgGCMaoaohFBz2SWrbOgkdAAjQwDsXEGfh9CUmOjcdGvVjqZjCD9bGOMJurl5uToznC1Wb927+HqhNI2CqSeB63oZwjZOYZA6OWLZUrsHmfH4jStqN4SaotgLZ5XzG9Zj6O6lckZBCgi8zzLPDkBdV3HPHAIBjKnpKoM2oERURh66gIAeEclZoaYVBXyjDQ3CujvUQpedRQOmLt37HFGyY2e29e8fbTMpQqAIgB3K2qdtZVVHdzzgA/Wf1t5Q02BhKcU4i3nqLf3hEaZaMVVuwZVdR/BgTSqWw9N7YQV52L7xL0XcXi53xmw9oBVBUAwNCnjbhW88/5YdZcQwmaz8XLwVmOonHqXfFPBRVmdqTa27YP0oPCASje5Fo2x0Sbrq1mjRSHuxjG1ljlkpdDTxry6nRChpggBkBULFhEBjGhv1FJdW5VYB2KsdbTVl3KFrN6hMlstDf0AgJmQSrKrkYh/0CRg6v0LLKa9PW1Vkxb9WjxsWfH/EodHn6qKVll921emBZ3u25O3ZSI0+ImI1RNse4IY4FXim5AuL6+fP794/fU3j49Pnz9/bgaqFroOraA4MiIgGpCZ2QJ6IkKISXSWxEQphRfX2/c+eTYEZr7TU08AAJZSBLBRytxwrg6Pko3myO+qByARh+Djzs1E1TdCVEBLxD53JgRiEElJVYBZlCElU+yMhudX60+evDy/mUboBImMwJAwMGDU3M/OCsru9lrl5OTk/v37T58+ffbsWeUVLWUd0FFVoA+4SkrCzD56E8BHB6HruABoat5NGwHVBJTM9rwwrRrQ7tQBor7yknpVi8zaVPdVqndeJCI+WeHOnTunp6dQNOBpmszMQzHlWu8f3qc0juNYdJscdqjY6mtg5sViMY7jPGU7sOI27YePXglkgNwZ
229cAwlVOvvJzvf8/pXpAeTUgUZPw8I1s5VZyaglq0og0PCHyqnaRZYkX1+LS/CcblN/QiJAFUnTPG5j8mDpdrt19wcRuR3oybrej5rKgD6ZY7vXWirfauZLNQ79rZOlljfWpVYp3yq3la/Wcw4cfC0m19tWFlpP8ChxPafVKIhAxHUA8QG/pru9Ju/eDZqDabDLQKxLVcsVARVnYN+t2exRxjdmdmSrQKtCvL5+/dC+ePuvH74XzGiEUCPAmKGH4C4HmqNttpIS9YHr8igLZ3TFL1t04JEncQtIdY8tt/RewxKtMe89z17J9qH0GtW2XhcNcXfbqjZALuLbc2eISDg7XZ2eHt+P8uL6+vRmG4Ev19vp6bmJl/2Aj44Xy8QzhA5LKXkL0DaHu6XwHLzOB1a5RrtUB8EmgHt8fFzrX93ud33CG6/LfhGtv8se77jFLg/4Y1WMWo2zJXgtqXrtg16FfHu3bZ91AIF6czwQ7Y0xCY3f4uDLeqtMCQEJep0BlVdh2FxdP3r/8XAU3vqFe/+3/+f/Y3mMy3snf/6d//S//pf/m3d+6RuPnj370Q++9/L5k5//9Kdf/+ovnH/86PzRi299+xs//MG7F5sX9x++9fYXvrrdpKvv/HDcyqo/GkIHNN+M1x8+evHGgxfvvHWHkOY0LUIwTQRJDTz3gAlMAcwQLQRCtBgns8A8ELGIDymCMvlPAXKzNdMcVWNAtQRqhEhMhIOCgbsWDRFRkJJKb0CiaLTobLHUhUoXE3OS9WiKISCqilhK6fhsFSVasJP7xxefXs1pChxMkNmnZil6jNB3gRAVDHLhspP9bXxud9CPrsvhQcfMdvexcUNUhojg3FOZgYiJGACjJBvHELyNFpth/S8mcXHLnKfr+hiJEALRBGAGWoqdcsbRK48DllGPWyx+x2Qz6hq1ZmbFwBar2xu2Dh1VRcaW7VY0Lu+yh9UAHht+RYJuBXv75yvfq11VvVUrDCpLbTcUGkLef8SOhPmzC+TqVZabu0B7OQCqQtdln64k17rQzAJ3FRRdNzDHYVg6t1dVvxvRrsOWxsaFBAa2tyOaw5J7KNrK48pVWnZdoUS3MmnLzal5wb1sIq8oq44e/9WVxQapWia8w4fbQv3gkgM3f/vo1qGAxaqH4pjLd5Bi9QRKST1eGxigdEcTI0S0XU7XgRb4CtQ6eJ0DNLt9fBbdHSDq7t330ekAIO2++Afa3+v6ayPp9x53sJ6K/XWXWwUREW9ubs7Pz7/5zW8eHR2/eHGBCHOMxB2A8zpG9GwHRGQiJFTT3JgrzmIETCECfPz0RRcIAF6/e7TqujhvCGS1XI4xpxx7sM9MCRAZqHgHQgiSsgvMe8x4kCog1V67kodImXm741h2lHiOWw59lJSUZ5FPnr54//Gz9ayRSSgA5OZP8zxO09gR73TmxmBGxAcPHty7d++DDz64urpyg+c2m9L98PJtfL6NGwXOjIimoqrUjPpUVYNDV1cljdvo8Qp0atD19pfYxOHd3qgcwL/v+361Wp2cnHj1IDZ1tvOcah8pN7gWi9U0xWIlHkbvzUwk++92LMh8bmEs6sErckzKta7REhEBuhG7F95p4e8OlMoioG2OdStSVNXIPGl599AdtCpHPSAr3K/fQ0TIYwzMTHxehec5USlpExEfdRNjnOdxs509C1fKVANvkOGLdIOw7pc2kbHq/Kpk0lqnLW5oOayRYu6F8Tu76uLVm4f2wz7qtlyo5HekCpnKOqrC/Epsr3GgsrydhCqn+T4aUaj6OTRUVlfYkptlv16rUey5HX2RKeX5TNB02K7wpJIOVh2gLbEgIoeQsUAFA/Yh1CKLXH0DnZFIStOsc8JF510M3TFsAEDQLDiZWRJ3QoHTV2zRtS6g2sC4b2swviITql12a0OamWXa2St/0FJe2HCJbO+EabMxwmmWNE/L5eLu0Qn1m/OLlxbFM8iYOfRDAPDaa2tqE1sPStUPDvgCYIBdC4Sc3Y8YBLylLyCRZ057NW2MsyFOMVYPLhERc/adu0eayBC90ICbrlmwL0Ftx+x3KoU0vUDqlxX09S1KiUIuqW+zVQ82CfePFibtetpCz5az1Fu1alx98ZYA/JJZJxMI2EsUTTp0Cxn13Z/+/OT14WjZffz08cPX737/3R/r/8Df/pVvnz+9gHnGWT798IPf+da3v/H1Lz978oh4+Nq33vmPf/aX7//w0Xd/8hPSYbwYT/qzk+FkfXkDx2QwPDm/+PmHHz+4M9xb0jxHNuwQjAwUABUJfA6VKXqfTFVNMSazvusMUVLyULgrWabqrL0LQZJ4/N1A0rzrvuUeSjEFMwJSNiIioZBU0AARmIg7Cozk2acuGLySAeY0LZf3IcBom9Xp8uL8akpzjwMBdqGXqNkNbYDeJBdBUQGwFkh4zw/fCyzF0C3+FMaXd1lKayI/UhLaryrErAezikYVAmHOXdocH4JlREomSZKBIZNZiimJatd12TFNRGah73hiIjQkBczOKSLYr6Ft2fErv/dFtq/mDY7bZXuxUO3udYDwB3KopXcRCVyGOzceTcQD0fvqrLl28VUJa3N1EA+1pZbYqyypS8IiKdtLqGQE1IW15mKTH7WXOVy/b9/CzLxGC/YKTsByJ6HOm3OkNGPRd8WSSQ4Dhr4TUyA0BCB0fxlh7VsrkqTHDhEVc8XH3vveymiwxjuOTcfOA0WtZU0tYOsJtovH7l1YzbDqKXNVoAKq3VMrQacKw+qDoybiVzeo9c0d4Eb9FRpB6GvrApetT2YoIghsPokOgADNuEzvRBfhLSY0+7iXg1ef1b7L/xKH9xOC/cxqKOpXe2R4Nim+7VLbb1oaaTB/J5Ios6bDxGAimuf56up6uVw5P4SSGioilptkAnPHzCFk+KSUEKnvwjSnOVkfiLC72Gy7Z5fDMCxXA3MXZ2GYl8tlQTcmQkADYEB3V5fSPsQU40FAWFU9qmYgCLz3moSmGlWIqO/6eHNl1M2Ko9nG9PHL9eOX660RUg8QkIN78pLM0zRRPxgwwI4dofcyAzg7Ozs+Pr68vJym6fj4WETc89Ia8O1RcfsAYZDy3E9RVZXcO8zfCwhAskaD5Dmk9dUOHnF7l3eb2uBqe7Rf1jtQaa86DINnJ3rWYh0lX5uLuvlXdJIdI2XKk5m9y8M8z42fYW/LtOnnsdN2FF5pP1QbRsrAUiw+C8sv6IYNMqPLfa8dKzYqeOVd4YGICKoqsTRDVgNFFfHM5IKEe50Fzcx1VGZmYlXLjc/zPCz0em0EBRMwBUCfulS3bAelMr3Z026hzOWq0PAeMyGEo6Oj1Wq1Xq/9y77va1qcmS1LV8m6fSEEd8C1KGHFQqufdT/eUHVLKZOi6y7Udz9QAypjhDbXNII3RAEjIgrcmycWAROSgamXGxupWkpK3a73j98tp9Sgu8p3SkIVKBWX8ovYni8GGkmUUvKIelG99ppuOLRbsHNp9VnrxRzDa15r+8p1DSlJSpEIBqojQCilJBzU1FAIwcC2Yxon6WnCkvPgfnWCJlPJJ4AggXlzQFTLBnatVfG
jdQD5vviamff6CVVoHPgp6q2kaTbTijP/XkvAmchCIDMMBoKGZvLw4cOXN9uLy8suz3yPiGRAhlV4I2LWRNuFQqM33LZ0valAwTwSyWPEur6b51kkDsPgJIGIKeVREzWLxvbrTMxa3u06VlZcrIkyQxOUqFXRVCZYtKLXr2oHB7XwqkpV1TXrSqgZbIqN4w2Lilkk3x5uQbHg2/GmLQ/1O9QxPgckCgDQGUJxcqulSQEjYPzRD378W6/9GjA9eXH+1jtf/ODZR4//5Pxf/3f/5htfuP+3f/2dP/hHv9sF2Gyv/82/+e/+X//2j+Vm/N0/+r1//z/9+eOPbh6edtyHwDiv13ePjq5ojjZ1wOcXL4fFSm3LIahGDIwGSWKchIF50XMgI/EO3e5tctojouVy6bCVMrKs2tVd6c9BRMR5WKdDz/vFoRv8kGFFYQio0VARxjQmIAidAo4xKQVQYqTlEs1siuPJydnzq+df/oUvnT99LlcqBCAGCF3XialZAgSiwGw585AgUKg8FEq+Qeswq5vefnYvkTNW3/e5zKXMlm0xF2OcaVekgQDE3EHpZTyNc4ruX89bnNLWkT+lVGVzLk4mRCZJmtRCN1DglBLBLge9Igk2NbHYqOD+TeUvAOBTPQGo4l7DvlHFOfbO8VHdFtCIcCoVlV3XuWes/tReUmFSn8LMPpCuNXUQsXadrbKNdllA+/pWYXzeFAGLL7MGjrA52oXV6B/ut2AOobOdWgYidWFEtBvqVSUccVlMyUZCRCQ21ShJTNUUiN2HhUwkgIBMDADzlAL365utmSEwInvPVdMSNwUGppq/wqUCXkRKRwCzfb4UQl93ueVLLW5UUFTq871T1WmaELHvQyVeP8fhUN+6stD6TS070cbvaGZJdinxre3hj6sZH3VJHj3wM51vV9WqxfDKsaOPDcTcfQKBzWye42KxADEgSlGInUXLsDimmzFJxqj2niLicZsWqdonHohhvZVv2V548BlKdpY1ecuO59R4aqtlWLesHgeC70DSVQg3YhELeHdzdKCGmhs3TUt6qmoGl5eXq9Xq/oMHP/7JTwBoGIYCHLecdjElX0PXdWKgCs76opiArvrVs5c3aR4Z4Re//PpAQUXGGHviHnf8J3Ts7yEpd1+cpmmzvYmzLBYLBpymyWk/zdFlIoLqnJi6YRhC32me64umME7zHNPAspkEV0dXV/Gjpy9GZeSB+sG0jI+TBJpyoiPgYujGcewIiajrwnq9Pjlavv76633f/+QnP6mQPFqu5nmW/QhGK74PvvF/hy5sNpukcHLn7MWLl8Ni4ZV4PmCAiBFANSVTNkRCKw4+Lb6eihUtQtYjNV0VWm6At3zWFZc827OaK85YnNY2m83Dhw+Pjo6cMEWk6wbE0es8RWSeUgiuf4OILJdLAPCYT0vOm81mHEfn/DEm1eS4cXV1dXZ2tlwciUjfZ6L2NcQYfdyOC1YPmvndArvLkgC8XEKrOlfZkesYdSOqvutJ7CI6z7GW8HnIKITgNbEO5NJcZ2zpouoGrhVU6bnjb2kXOcjgLUSRB8ebmZmb3FXDxGIxLpdLN8t91MQwDM4DicgtQ9lvY+HcIxNCUVypcdFq2nXBaJ8FAA7banY6zK1Yp62shJIFWnGvyu6+0Z+hZOpWjaLyLt+Rvu+T5YGNAEwEdfSFmZXgnnepNb9bh3uxL0REQIdJXU8rgNIcs54/R/Nqo/xeCGYqEqd5HicfzGuis0oNCULp6dg696sSm1VBBZ+2ayb+skTEtAglUieGSZQZ52QvLq5Wgx6vjkIIJooEHrbyR3Rdl0ARmbtgiqpGXdDyrPLKmr0OYJ4UXF8WEbqOU4q+TigtAAGBAvp8ZkREBiJi2DWhrHzpQBC0MhdLhmogMDUQTaL5WzVwXVA96olNuE9VsS4d6q2heltLNyf3dgOCWwK2M5bYzAA1RmXmxaJfLBaLxYAI/uI1a6iVhQevUfld+82BwG6vqvpQZZo7+ilkX03BSl3VBG3vjI0SUAX5wWrrh1YPu71UbALx9YT6uKpdtQRvZsS+ZjUTwkAGhmhGVy9vfvzDv/vyN9+MHF5cXS6OVzc327/+yXf/22//7pff/jImffTRx19958vR5v/dv/5v//v/4Y/f+/T8N//Rt/+KfvDs44u3Th/yZDjF64vn6f5JGHqTNG7ncZyPVmE5rDpIpBEBxAtLgTwtQgP0PTMTgDEjc0dEiOYhEURkdhj6XFFkDj6GIcO5pBOb2TiONashis8nDF3X2ayTzlNMoxItlgAwSRxTQmJENvNuBIYqyTRqpCVpZ8uzo+vNNRCQoTeA3GELIYAPg2ZElPmwNqlV+ltcwiajuJWvdb+o8XrueGtxgsl+0bOTelQxQiZmylwHxL3UoGBREgm7vDk6OtrOE00RCNBUNJoPVaPDSMIrj4aCdjnuuKezktmuBX8jG/ZCee0J0DiAoDCdVo6276uN6w4b/Yn2feoH5AnF+effu5Sy5rh9Ce5rQq+kTdg3Ttp1touBRrGr3cmsybFsl9HGNPKv7lzW3DYCb2mN7TLqNy0AzSzF/KeqGZlHR9Q8SZVzvkXt3sTkE29xX6Za0Wlak8NPc52g8sa6/hakRAS4yzht0eYA4W9jHeIenNvXrFpFa1wRBTOPSiFRQMzZtg3GUql88Ds7MdawNqKnMcZkyMxoJOzGjIF5iqnsQaDlxrfxBF8lWT7jTR3mFfg75PE/ITeM3bWYM0OxPZFU9lpxf6RY+6AW7dufWm9sJbeD0+rn25fXm7y8vNyO49nZGRFR8Zeh81GzipuY5ZHVqWAAAkoCJoazwmJ5OkF679OnJvMXX7/74HQB3Hs+uprHHpPOwozMLCWLqZrNRBRVxNxlBFJpAgAB+o4MIcY4pShJDZ2UCEJ/MyUcVpsE73786NnLG8EuAXUQvAmkSVJLoAAGyczHhVWEdHf3YrF4++23t9vtixcvoKTbOaWUvdvbd2sy6Oqm+7G+fHF0egJGmiKBSJpnmfphWa7C3JwN1BCD7gJWB7tzEDFut8C/aVn65+BnbQ6MJRvCSkTazBaLxfHxcd/3hzkjtxhp7YTZdb3u58u0TLj4Rs0NztrGiYiYOgAoMVeoTKyyKTzUwfY8nk30co//V87pG+rvKyJ1smt115oZs1abUJpZdvVF6vkHpGS7XIwMq+oR42JQ1ae0e5QD7wCI6JlxnkHdBl2rL8DMAu4cSdBwYH87uyUBuzI2g9DDpFm7AE/WAPQMRq8CRoC0o6pDPlDX2QopveUQaaVGidZSMVoOMdYvxWbEt9cNOhqYmfdgg8Zf6ZdVZ2g2jspzKw44+XNp7eENsSonbPY0G6UHmUeVc7bkIyJq3Ob8UUZYRcz6ABiY94ZHSoCmrIaioCpshKbqQREkRlL3aSACoqJReZYvRppiey6NmqDx9WDjAqjA8fPbfODKP6mpd6s3qXADNCRvOeu7a4AWENEn88TZV0PZkVLGBrq5CmoAhw1UbrOM+n0D5ra4ZS8U1vfh+Ph4uVw4NYpGNQm0sMYBUJ948KxbeGYVZJ+F3C
0JUZM2Bvv89wBFDki6/dC+b/2+7oE14W9sjDpq+gccWBHWeP3dZ1O1wypsXENSEACBrK6DJtN1+vAnH95/eOfe23fPr5/BMcGSv/fzH749d7/0jV989NHHy9B/7atf/ejjj3/8kx/94T/7w+s//v++3Hz6L/7VH/37//f/9OKD87dP37Q53T0+eQo29AvbbLfztNlsHiyPOHSdmWjOFkbE3NiCHS+luuu85KNyYXfaQRlm7TBP0tjJutsv5o48TwMhoABlF9p6NiGCToawUtk++vTT9x49m2ZMYkqg4KIIspKMgAva6nhy7/ji8WXUSBBMUqi4ATkpiJkBxL0sBzjTmnwVXa0YhDHupRzXvasM6wCFsFXymvAyNxNRK0DMTBEUIRmgJJco7s0dVsthM4xTFK+x3ifAzzoqYdZ1+gZCI4krMqvuRdrL4nfJBhXVKyZXHt1CDxulBPeFev2zEiMj1xXWcw4WX8kwpcTct2CvF7aKSEtfVdt7JXwOGMvtM+v9PcHGSgSm+q1ExBoV3G+UXXyIOx684xXVPNjDkwNDuiwMPcJWF4aIhrl2HBtR7eMQiUjhMBWzfZGqVezUstKsz5fBTePcPTTAV4OrrhkbngyNHXXwfXtJPc2P8qw9e4b27aKW9MrNm27gwN6VyhBSUkQTsOCSm7O/su97lbnVsZoFv8IAe+Vxm/wPfn3l97cvN8upCrZvELavebCkA2dE+1MltJaKvbmONfq0mUGG8G27F7nj8/Pz6+vrt956q+97AIhRENHIcmceSxXmiIiWArl5gABcLUYfMz/N9vj5VZxHBB2GN45OlghmhOA9pn2GbW7CUcrNVYmoC50ryoiITIhIqtYTOlEpElHUqAnmFFUMObDrq9Stx3h8enL5Yv2zDx+9uBmhPxajnFisHiGs46FfYXUz87179958882Li4uLiwtvYwgAPi9U9FDHaPduB5ayR6tlWPVhuxklwWoxIIdxO3WB4ixOyKpWhh9qIrCmzcMrcax9et3o9szKT/AzdKEDDoxNtt5isTg5OeGSxeM9Fa3mfyYLbNa0/IXi0moynnZeiarOqrrdlVrUzSHi/YxlZ61U0lsyMoNTa6hMw9+iiatYjYZVHU+bhFUf8FANYN2L/iUAcg8UAHh6cBXNUIzPaZqgKbgAHxLYdZy1oJ0i4U/38GAW1I37qc4X8QCaR+pqIpVrUM6TETGEwMUhW0HXisUW2aAoiu22tiZ65ectPhxw6VYkQSM16p+VZ7wS59vHwT6Pah90ew215ZW7+K0ZFCGapMx4qBKhPkjKENqKGO0Tq1mORdgBoJvcsclLh9LVs71Vxiv11jCAyAQ+AVrNTC2pMoABmqolgCg2J00KYowK6m2ZrS4jhqDJ1BSRQ4U0h12GWisWWzuiQuxgs1rAViRst6Y98wBL1aRO/KroZGbZ3GQKIpIExHSeFYAQea/ighD92fvGWCuzb4soMxOVopMlr0bw2qTVauGxwRACoMp++01psjH9tu3S6+pVFWDP0qsrqbBoDeWWaCtTqKh8sCX+b2u1Y9Okq4V4++KhzGevCVd1d28T5IFG0t6w3aT6r6oyEIAmM4FoIERkahC16/t5LT/7znu/eXb3eHF3mufhaHU9rv/jX//nv/zed7719W/8N//kn37ywYdvv/12/+LCQEzTsKIo2298++s/ET1//PTh6b0X189pcQ9BDKwjt1hsThORkSEZsicjEAETdYEBJc3VrxdCR0RmydOjmbsQOn/RlJI7/gFAAfIQXgJQAjAAXK0WkqmRFouBmEVkjnETQ39818btp588+enHn/7k/U/PLzZdf2TaIQsEzIUkRBgYGSToaPPq7gn2mMbUgYBiMkDLujIjARpo9hdXgLfOgooSB4hNJZm5Eu1tKj2ggvb+FQ2KSbbLvlP1JlQIxTOkXsgMZmazpBWHYRiWS0m2nqJ5QKg2/zgg/tti4zYrsVIRY3sHwn77dUDAfcXilfzIinrdGrfQCJh6WovP7Yfbd9YmcFTP9FTSVoCVx8HBfnFTQd4+CItP7ja4PkeStawWi+FU8afmF9Q7+KZio5mVb/ZWUnlC5TMHcIN9B21tSk+lEbmZJRWTQ4dofUprOlbzuD665vBgY5RWuXh7o1WqoQ4FUACepFSKQtwaBtceGkvyYC/qe9Vficj39/bWtKTaIoz/YYoArniBqQKQ6xCoaGYKxuXkruviLLB/vHJ5LbZ8zvFKem9v225ofaN6Qmvg3b7qYDF/70pu3/9gee1tzXa19/X1+75/cfH88urlF77whcVisd1uAZT7XkQAFfJACEFEMQQDnZMymFnPgRCRkJEUCJBnL/kPi/UcP3z8Yrlc3rn7INioxsgUwmAEvgYxpcCekm0AHMJyMQT20FPJbiVDQyIyMhWZUlQwVVDHMUQFQKAIGJYLWBydXz19frWxMCgSAKeUTJNKtKJoorfHoaLG5Cw+JqKHDx8eHx9/8MEH8zwvFovqvyci0EPk8SNPNtrHCjNbkW6unke1vlsRmCQjBJkjAGH2t6MaIICZ5/i92qvYOtqwjVZ9rvQ5QA8szUhaTPAblinzq+Pj4+pHizEiZsflPM9pSG6lH4TBs64p4N2AEIujujmISPVwkpOZ5SrBwhCqot/yZCjuM2ZOlto3gsJpa4CuklvVwao+2VpKXuRSQVpvqKWqSvdjiZ5vWaFXI5k+zaIeWZaXMsjMRRu9ggrr9rxNLzJ36xEba0dLXYPJYUCiRYMdiHba+GGqsP952xRsj/ae9cwDw9Lv00Ks7hTs87d6LQAopbJyyeUUFLCkszr0sPZVQlXNQq3uV82vuf2+FeYlQJ3nT1S89fxPKLWXIQTE3BakvY+WGEZFgAoBSYqEYOg1HxlcKkABQDOeIxqCmCZ1G8oIgnrAwn/OxYMCat5lFAzUVARrghfuW7O4b522Jswr+YzsN92pqOvzbCsm5EegTVNqfegA6tsYgBhAmXlSScnirOMYfb6cARSb0JsaAAO27LBFRHlVFyAAkJzCZL7bBhI49H04PT31fMIks5mpJkRjxva1D1C/hQLuq5gHNHPAFlu3N5fiomr+ObpDcSntcaLymgeU0H5/gEaVPCpXrfd/JRFKU2CNjemo+86/SiEDdRHEU3oN0XxOmTBPtBqOn713+eGDj9/6lS+E4z5q5EVIp6urzcQnx6s7dz75sz//0//4//vDf/bPnz49Z0t/+Pu/+/2fvHsnnfzqP/yV7//ld99//+npgvu01Hle6Hzv/tmiH1BNoggDAigCgE+Zz++EhD31te2SlHplT9CvORgVGjFG8VJaxAAMYAZ5vqUBIaJR3XLKY4f4KMLw/qNP/+Q//+37T6+UUEO/HrXrsEOvXSNSoMBEpADWY5J0fHKyPD2KSUwQGUkJjUA1i88GpCG8onlJddEdIHnLWFsEE5HcHn0f8epVlW15gkS+nBCRDCxpMrFQhjs5ABGAGQEgmZLANE1d35+c8JTiejNidv/HNkLSHhUPP+snAKhFU7YXk+kqtppZNWBaGVCprPWh5H2DPappablyL
moLMnGvtVIVJzUFwhppCgA+h6qloHKrHRzaLbjNN6Bw20qwlQ9AcaTV8yuB10BBVTJ8nT5npboUnKe54VfChDtmcrAY/6Bq2fea9Z49D6h32HMGawjtu+f1I0AJ7ZoZqLVPqffhZlbY7tpbu+No4F7XumwzM6gNdT9Tq6jgOmCer5RhdCvppf55sDZuOta05/t+iXrppk86B1QQ1V2KKYCZePQJCAh2HdsPGCzWzJlbL9Ui0mf9WT/TrZBmPaFl5gWZ9favt+98+1YH52NxpEIRItjaDK862hU2T0Qi8kaj3/rWt46OV1dXV4rQMYq7psCdxd4ZxW05RDVMyccOIRJS8OwRM8MwcEcpbs6vN8fnF3cfXn/xyFhz3jIRmIkiqCAjJtUoyR1nHAIxa7IUxQVPLao3s5gSQAIgA2IOwASICmgqtDo5Wp1dTvrBp8+2ScPiaIwKACrRNFmK5lIA2ZCRmCmPCMOSLoiIPnfhxYsXqoqlK6aWARXWqB+fvzVmJvP8xusPfvlbv6ZG3/nejx4/OV8ujscYiYK6AEAk6j10qQpMr7inNSH9dscBwG6biPB567QSzfOryJOzRBBxsVicnp4eHR25seQYlVI2eFLUarTgLrkg35aIvG+O13o5LiFmqDLzdrt1/3K1uEoEeCcjWuSnJq6FSNUYSzFVNlUVOW0c/RUORRuxlFKMOV8UCkv06HcLWIftNE27PnkpbbfbaZpExGN3blf4yZwrddHMkwvQXd4aNSUvuSywz62giZk7ysaJhwf99XxhdSJaxhwRRNQSZjxwn72Sus2MiCtXafGwgui2DGptRV+PKyFVIrSeVrgVsagX2v7h3ysl08rMM6oeYHgrNQ7ejko7iYp4fs7O8INgO+sxY2BKiRmrOmqljUUIgTl4+nfdxFa47MjErHBUd/SY5ik1hCY+R8yTbgn3OPB2jssIjGAKSsiIAGJmnoYtoskUQBOod+nSNO2wvIkKprQbtvFK8Yf7GkVLQdikOtdSlz3MgV3qEqIVxgCqGmKMYhhFN2PcJhgnvbpZT9NkioYIvhlmPj6iVggecpmyfy22QdV6CbzSCVGZusWyXy6Xw3JQVZEokotYPKFUBYCQQtbIXwkscJwiBAOQz+TF9SXrl62ro6J7JUhpfIetm6pCuf1wG4n9z4M0USjOpMqP6n3aNRDt+QVhfzBlC+FgPaJCiCZJUYyQjciCbm2Fq6OUPv3J0+5k8davvXU1Pgun3VUwXob/+J2/+vYvf+tf/q/+1f/5//h/+r//X/6vR3fPfvef/OOH73zlk2fPPvjk49P7d775O798fvNnWxM4v8E43btz/Oabr5+sjgJLkEBgcZ4I88g/RUsaQYwBe8hB5urRISKvqPZvPOl5tynkFMTAhLlTBSPSNE3dYhh44QNOFHKWacLuRz957z//4HuPLq6gIwsrE0IAxEAYgBASZUWZzFATKpFCh8dnxy8uLivMmRjRDMS7OzGR6+zc9JW1xiqoqjPshzKqB64lRVVF3IvM4G1JbIJou5geZc+n2W4WnCOpdyJGIuacUGNm8zwPi4U3471Zb6vLk2CHoi3Nv4ISb/3pzB/2M8cK0hUKuqXRVo5jr0rtsyYzTfcn07xyMQ7/KuCrYdP3/YGHzJ8e414b8XqEcNgx8mCbDt6iXVW1DXYvfwtiuh+u3L2LKPrwD7VcoWEAaoGKse3t2KgU7zURQts3SA6OqvxVPVXyiItmWB8C7Bciqn3mDN/ikji06lt84NzrTOveFZabb8v7RY+38dw/UJMYX89pH6qvikBWk6ZdG+yz33ZbiQiUikRCM1VxtITSYSKHXAzUq+FbadIiDBZb95VG2t97tFvZ7u/BaS24LEuf3Qu273v7/u1PeEs21Z2qaFNe7TAcdHtVdf1mpprmeX7y5Mlv//Zv379//8MPP6RQa718rdmV5feh0KHXPsdkoAgMLEhmBN0wmNkcEwp1Fl7cTD//6NEbX3vAzMCUVEmVGbkLgXCeZ9AUY0wqfZbRAsWhYmae01dfWX3rvPMjkCmICQAdHd3Rrn/v3Xff+/jTWZEM56QdoYqQJjCl0rfBiAEZUaUZraZgRHTnzp3j4+Nnz56llBTIzEIISBxjBHpFjwMsEQ9oAvK+FyuGf/jbv/Gv//f/h/PnF1dXV8+fv1gu+qhZj0LE0qYyqM6wj/wtCVQ+2dqE0PCN25v7yr2uDBaL7KvLPjo6Ojo6cveuXxtCUM2RhKqeMncF/Qgx+9DBCABijB5pgVJDQaUkrJaN3KYIaPjPAYvIJii2fqVcyipNXZ+qDsNgZiLqI0ncMJjnWQRLlM81Das1inUBVHKczMwRYbudp2nabrfznDel67jvwzB01cGN6I33c3hqnuesxMdUnZ5aIoSVyXR5rh23TFhLCxYPV1RsRMQWFW7jxgEXvS07DvxTLWK0PPk286nssUphKPLd9uX+wZYduP+SRCIy8+3LRUZmVLfSDTkAIAYCEs17XWUBkpmZz2ute7d7U9x1TbcmGuw9CmvNqjsUQghd11OpF63mvRv8vpjU5KBCrmkXU1RUEBVBVTYz0YgJkYyIjAREFSFEvrnerJbHIQQ0VDWvvMOC/yIiAIyMSByAQ0jj1hdfTVN7VbZai6stsdRtqvpSRZXayvGA9tXaBhmad7TgQ1CBpCnGOI0xASaDeUoxCpTmMRkRwKpEgH0VuUWsFnH9yxB2xhhz13XdcjmsVsuUEhQ7VVU9kZBLRe8BymIpQq1+iwPCeCXG1/W3/rD2nq1jqZ7Q+H5UX6W1WOMdaZmyXyv7bXPrVVA0nsqM/Fa3HT/12grG1rPVScdshpAsqQGAEoRgQWcl4qNwMo7jB3/3cbjf0xshpTRyPxjZPP+HP//Tl194/Pu/+3sf/vy9e2+89tZXf+F777/73e/+9aywgQ2dht/5b37n3/9//tNpx0zdw9fuv/2Ft/qO2XQIHaZkwESARMDgHjEF8Z5+2sz9bMur/C2qZesvq+imDhORmCHuRmajD9nypHnFFHU7x5++/9Fffe8H7z95OpyccL94cTPOokN3ZOoT/KRukpP3jKMFULPjs9MX9FJMDcEUicmM3QRjQEX0sHHLWFtS1CZCQk1qRItprTCuBsMBoTJjpe7dviMQYa3ybdlZqSdBIpI9DV4NYLnk1Wq1Wo03NzclhQleedhOzT3UJusHxyjEXd5aQUuC/dlBtymr/VB5maoa7uyx2yTQEleF8wE827doQfrK71vCqUe7KZXcWgbVwgf3NZL2Ju3j3PNdn1vVPkkTUgA0QAXbuVpb1QdLJo972Gru/kFhHjYHgJdtNw1psndu9yKqWnOPW85WrdwWLFKaqjW7fFgfgo3d3kLGzKBJJG43tC64Pu7gRaikPBzsGt7i8H55HTcCjTg8YLYHb3qAGA0LFTASU1IBBUME1DrIEYvP4jZqtQt+pSPgNkHV47PsSWvMeNhD3UPAtq98sKq6TbffGvaztRt8ridgC3+/Dg4p1K+V8/OnzPz6668bYghVSVUAzxsyAAOfqkydIapGVAUVREUzcXdb
6AHAlJg7U12P8ydPzh8dpTfeeOPo5JjAkszZ94SEiLEMJKCQM3pC4AFz81JOs5mFngE8H9lETAwNwFTFkJm56427Zy8uf/Lu+y+vNokGnXMmGJiAGaMZGQJpTn3CimmgrlroYrF48803N5vNJ598UhFYRIgDfO5xwCfzpgPMm/Xly/NpnI6PlgFxHDeWlLhHRG0YEVEwi+3W35Y4FWNvM6491vG5i2yvrQlKqrparbzLZcWiEIKIMTMCH+j6qrvMhUojroJzGXxXbMId2hMRIrvH079hDgckZo1/0Kdfcm5ZlACAul2KaYVzjWh5B1G3CqZpmqaJqK+ZSsyMmINFLferSos7aMZxXK/X2+1WVbuuG4bB+3/6oIgKbTce3NSpNYoAgLoXwLSicTtIW5hok5oEhdscOMVCqZOsvO7ASGiJHRrnZosVkFWRQ90MIEeY21thI03qXu+Qh17RmvH2Mg5w2OW+KhQZinW1O0D5TcQqHPyt3SBscRiblkIpttbR7vWlHG5sc3O0OFbxtsrBCo0McAsKqm7rWurK64sIYkRDL2g2iwYYY9xsxnlOsHKcBxf2RFSbGwkAUjJif0JrBN7ehRa2BzRSN9EJk5uWKNhIxlpiY43Z0mDRbo/8CF3XaUIERUwdDz2V5h+aQY8ZZRBumUAtZA9W2RJbibxpCF0/BO+kdHNzw4wckMAbk1g9cL+i7wBH26fAflShxewWm7FV4BryoyZaWLU9bmqas8596571aNGI9js0tLi+8+g3Wpf/WZLy7eDfdu+rrdV1nd2AtwZCANUEEAwAFIMFiDCO89nd058/+jD+8Cf/4Mu/9WI618VRQDy+d+/HP/vZ5fuf/pt/9b/9o3/2zy9vrt//5OM//rf/9rUH91LffXL+QsVO79z59u9/Y/3HFz32D+/fu3v3DHRWVUROaVwuFkREAYFJISkIMQBDB73jiCcEQPmvH4aK6FKAUF2GyDlOqCUHmULueq9gxB0AbOfp8vLyr//2vedXl9T12yldXL2YjfthFUU7CGIICiklz1XymmwKyNSB0tHJyqMEzMzA4Jw6Z28Ae90JYGqY9e0D9xPotQnlHeyjlYQi2OewlQVbm6oHWnEMmnncKSUmMGIGQ8SUUpxHAAghcOhjSszcL5ar1Wqz2STRruvS/Aq0/JyjlSUN00ntW0PW+fxF9tSRFjlt38bLmgHv2ZywY9a7dNxWdLU1gS0ja7eg/bMl532BtLe8dkMPIHDARuqXB5LyAFYHtTd1MXsOUVQk83nH9ct2Gf7/FjK2z6ZftcId9zs4TcHwVi1EjUy2j3Y419TuuuwDHIadErMbUZV3p6Lu/jLwllV2AD3cN1bb7+uZlcQczrXwo6WdFmLtTco3hGhIBBl0/mXGCVUF9bYouTOaX1iNWGxkLbxKxrUn3MaovU15VVk/Ni3gP+fa9lm3saIupv23/bKC0fe3rOHQJdTsxatSyMyIaL1eE8G9e/dcCdbSTqyuyOFqBsk0+CywYiiamaOYDx5Y9N2Cg06yndag8t57L7uuO7tz2nWcUhIxkwQAq8WQUgKmQNx1HXL2zApClaq2y+AAxKCgKGbmyUwU+mEYhnWSR0+ePnr8VA1C6G4m7bpBU3R2T25AuhoE5CWnBdMyu16tVq+99trTp0+fPn0aQuiHYZqmcRyHxZKZBfb2pQKQ9ofQVrqTBH/1V385xXjn7uuotjparDep73vxkU2qUOd4ESIE2O+leRs9bD9zqsWEll+9kqU4zrta3CpOTmje8dKynZZd8/O8GydT3fFWRttXbKeiau++2fnWAW7paUTkSfiIOwxsOUz5dS9//gDh2/edpknLSAm3BDxcGfwqIuYQkIiISyvOqhMyM3kzzyTX25tpmuZ5JgAfGZ+B4+3UfZettAqJkbulmaWkMYrPIvanWE4iBTMwMfMyUdobKF/f2g2GOjGPS4kTEfkgomoz1412mFSUq1/WosfbUKqGKLQ9J2lP84F9lf42OvkEUdUEuUDcu434Csm1v/ZuCOgOBQCg0o/YzNNetBjVu0rCivL1iX6rk5OTFp91ly38Cv0Ea9Ve8yKtlHEMsSIdSgnioZbOzJJIpbRjAVVLqp2ZMocSK0ZmUhUgNMKYpFweEBKiIQQmdXye59mH0+c9ihGaGGxFcistGH3xbepim1FVqVKalitVaPptb7sP1Kg+wp/WAjYgzqARdGZS1RkSdoQq0ZSQiSgAgloSRQNBCKHP3S9zn0NtW/A7gtVdVxUNrNO4AYCjO2dHRytwKSRKgGmWOFkIIfASfJyXZOej2i7oRESBgkTxD5rU212COZfJwV9ze7wQQGlO4DhayJ4oTT5/hlQ1pmilCVsdfMfAoICKAUPXd9s0tsy35cU71PTBpWBRUk+cqy0KFjrSu5AzVTE1AwqMPmKy7G7rvEFEAHMOgAghOKuVGE1CAAMw6mEVsFcwQ5sX08gjMyrqfJNO+XT83vTx9Se/9/u/H9/4adD+Di5++/f+4PyTF9i9AUevR+7+7C/+x5uj6QZeEvV0Eue1Xm7Sa1947dN/9dr3/92fTC/DzffTcLF++97dN9648+Yb9y+vnx+jHifUy/Hu6cOwvPfsers8Ow3y0sHSUQ8AqkaIXdcZYpLERCHgFGczDaEnotMpJoRplolxBkhghilourdYzuv10fJ4VrqJisu7n15s/uTP3/3+dql2J45riaOqIdsMk5ElsJRShxqAF4zGsA1R+3QSjmOME1w+eOuBHo22DTYOfVpKVGWLfYockyZI1MVhsCGEEV08uFTDPEKn60KRLqlRTNUD7gBgtnNtUo4MF5O3SZisunrBTAAwNGCfWGygqmIJS+ACkBFAzFQUsetzZrWooKpdr6el0qIf7pyeXV1dTdsx9J2kHd+vnMXMAPiA2jH7Wat4UICk6jwxNcJjF3pKSWt5npmpARF6gRBhQC8CVM8e4MBEYWe/aU7OQkQkxsqLq9QxMwRyKq0SLg8CqqnXRaX1NRx4+GBfaFUgtPWr1fjhZuiClZQkalu8qtJ+riM07WGgSdyqOSeAgwICAHcMXi4ABszUda4zqFltCasA5laKGhEhoNfMgnerwiJSW4dCx+0a/KddPjOCgWFm7aBJwmpRlke1q7MrWD7uyXNXPOzjBTMAgGTEOarm3AzKQhAIEMwQjEyRec+AxMZ2cslUgemnjdMmA8qo/uqmackj8p31guAcTs+6BGSjRdVKr3AtLjYPZwEaa/NcYLQS0mFmomyJLULnE0cnmzigm50A5GMnU0qEAQAAtfJ1aBzzrbZUqal+2f5p+16hek7VRegwYLKrXypEAcWDbi2cC27H1vCoz+JmuGj7dN1vguK6CtRiVLO2CNkASCIqXJxfnz998fWvfK1TDGKzJERi9JYCZEAKhkbGtiRXjylhFyGAxxCTMs5kkQAl0ZYoYId8d2P4o832+rHJg/5Lb9wBuR5s7Dqb1y8N4nHojBdREGERuhPFMM6JexETQBj6I7DIpmoyEA3L5WS2HpN0nUCwfkl37q1j+vhl/7fvnT+fALuFzPPCjKJ
NMXEIBjRnnFIEQ5gJ1JIsFwswERUiSHO8d+d0uRyePXsWRbphGOctIAlCMkUmSMGJw8AbVHocAL3ACAAQDdGIQNUQ7UX4wiiby/Hq9X7Q9MTSejMjroZoyixBiCOYkRIKzsKxS7025N+iFhJh7bzftKduuRw1kYFW0aweJcGARGqAQNx7uYRwz0R0//7d4rQN2+20WCwkGVAwZGACo2S6nSfuuxACIRlp1BkDqOh23CRNKnGFR7OKmQVm57nL5fL6+jqpmXk80ELwgXsWQjCZNIkmATM0QiNJSVXilFarlXPjLlDXsfNRhl7rJJ+c74YAEEcv7bY06zjO8+ydhCEMlQGLl78S7aa5IqI3d1HVzWazXq/Pz8+JaLVanZ2d+SRAP2fol5mfNwXbTJ2lmKZp3m7SPDNzH7Jv1FUCly8UsnIROu4IiZw2NTNaFY3zEAJI0jiriMa89WIKYZesm1ULIDUUneY4eq6jx67NTH3STkk+QjJicNYiIoTBCosgJlVLMXY0EKAr3OYBTANUC0hqqm6Z0E4GpWCGAIE9zGcAyXLaRUoRANzY9hfvuo7oxK1cAPfXmapaEvJMEBACCNnMRlYCVN8719LNzIwQQJLt8BwIkCErzym3QTYUSYhMRNM0pajTGFVgGBY+09IMiLgLg4h496OiS+hiMWy3Y7alFRE7MDTtxFDmNZiRAWII3PfDsht6CpQkxmlk5q4LmgCxY2NNvXan64lXMw5dQIgGidkUUeaJmbnr+hCSKZogKJD0/cL3rlryzLkFPVEdJknVjZ7Ta2tCVgm01FB/K5Kw6YIp6tmztTRAJVfbefw9AJiqBgQ2S2ZARGSkKknFc5+q/oHg/fp3GmfzSAY4lE9VhvmLhcB1FqoVj3jtxmklnQmyJnHooK1Zc/Wc3cL2hW7LRltmCk0k3QgFTE3V1AhziTxi9pSAgSkiKnj1I6Ecqpv1M+wf/qCYotHe+quDE2B3VdUerHBtP7+C7raq4V8SBERTAAQEJjR1xWnR9UY2zlOMcRmWzOnJ42f/6c/+4p3fH+6eLt5850t/+C/++fZme3V58/PHP8cjulzfAFBPixjNBLquU4HtfPPg7mt/8M9//wf/7s//5EeP3+r5uz+QNx4ufv1bX/8H3/qF+eoymjw4vW9m2+3m5OxoC/G0y2PWVdULMFrIZCZSWvpmSuTgEyYYVEFAsfDQIGbJSIyePnn243ffO7+6SamDpimi+D1NEBmQiYiVPPLu8APAxWLBiP0wHJ+dvHxyaQjU8Tynqn0yMygFCx101qm7EjNiM/W59WIehn7gb8uKndGOQZdtbFGxLrhe235/gMm4n6XWIlu9SaPhaQhhsVj4ZCdrAk3tzT/roCYbpF32K5H54HNLaNaYWPVWZrl7dH3Z5leraIxNON10R7CVwVVHdftGVqyE2/TeMoSDg14V1nP+c/tNb9Pd54ClXtJeWFei+77ben7bjRARqYke5/vvv6+nYFETIXQB4MUthX/kBrZUMiDaR7Rcy5oKQys9yhHRdlx9t+zyRqVzskAtUj+AVYveB9BuyGQPgFIaKlaOUa6F9p7tAbAXSzx4bv1caRwb2LZVA1Dy2QDMfR/MbHti7X/O0b47/BfEFW9/81/yKzUh9BZudQ2ff9u/93AIj+N4c3NzdnZ2dHQ0TaOqes6z5tgaGIKpmmsHn/t03wJBYwMzm1M8f/78k08+WXV6FDTqlpWOVycqs4JlJ4RKjBOgqFqa5uCjaYg8FNhzj8xiZoLI3A1L5kG4S1HW2+np05cXV5fzPAfuwZV+5oq0r3xfVVXJKRLMfPfu3Tt37vz0pz+VXfsroyZCVV5TD0jg/8/an3bbkh2HgVhE7J2ZZ7j3vrEmVGEgBg4gwAEgCZGiCLZEiZKs1e7VH/wX/Md6LX9x23LLbdmyraZEihQ4AMRYAAo1oKZXr95w33t3OCcz947wh9g7MjLPeUVoLecqPJx7TubOPcQ8wpwaKG5qiJ06naREDIIAaTzz4Xzk2Pl58LZpT3tbi/KzK0bgn/XTizFqvQ0dj2s03MnJiUZFVr9HiShbTEeqOcwISKiJiLaBpgsZpbVNU5sRVoc5M+farwLmPnnTZnnu5On7UoTDwlJyLo5BlTT6vu/70gVeRNp2asXuaT4z65eIqKrg5eXlMAxN01gupdcYte2EnxvX6qO6UiKqdq5yKCY5YA3v0rIxpo760SyUVFzMiMa4+m0BRxkWnJSdmES1FILZQM1UbSA6sWBHYG1KWGVXe6RQVMjq9SxTmNoUQ3X2MkBJxUxJsGQgq/hXDegWFAquMQmgBHW4Tc0MjYD7HNTFSr2ApEN5l5ouR1eaUqq1i7MxzSXmOpUBXa0EAfbRAfr+xdEUETdxzgIdEZDGlHKlBqB50aCLAmZOnHCOoZ6qG5gZ58q1eIR3mFko7AJIVFZUZBQo5TzUeR6mVJpq+NWsqxAakBEwxNhwBs5JHQ5AEXDyWdnFc/MkAljGUVnJnBgNw9i27WazsZJKUHN7zNACM3qKdthGVWXuQjXqY6DGxRJ2JMTIkxioArHBGc3zLhbAAcYwnO91gTwwZ94iYgE2NkghCs4MbHM+Kr/7mSxmpUHW7u3lMEcGJAIWHnPYYNesrq+v3/rZ23d+7YunbfP3P/hxu/m//9ZXv3rv4Qf/+S/+0/nlOXUhhNjFlsehDUJdux/6vt+t85P1mn7/j3/3u//fb73/Yb65gR98sN+Hn372s599aX0j9D2FTjiNaVg167HfM5R47iwMAkYT9bjNaaAOimEY9sxqXWCQnLNkBkFUn1mgkePVKI+v+5/8/N7rP3/vyZCTcFHbYyBGTaVjZkaGQjg0ZMVifXO3XuUxMcLdF++c/+yZFOsvKRUJgFzSGANJ+XKhuXk6jk6sJCIkDfqfghAcJz6SO+fHLBkyrq2Fj/w2OLFzXwCwgrqWvFutVupJ68fRbjP4PAZT0/iLlXp4Ppw5zoBtWo7R0+VQJR0AlvgiE6cxpCtgUy1ktud00H7AzeHI6sQrVEceWZqHYC5RydzW42Vu/RBc9QWPjEc3DasZyxOBxblM++Z0s8N9BgAFXSObBjm1aIrY8qu9s+yhjW2LZWYAV7PUv5RRW89Xh+W0maUshJBwOgpaniMYSJihoZrDRWS22yKisUYLnlqF1Gldtnsm6Pjx/dl5CczDnriq3HaPm7kAqGPryLVY8iF+LSDh6ONHH/GYdRRK/SYcvu7wwyfP8xe/mqa5urp68ODB13/rt2/fvv3hhx9MWCNa8tPeK5/8FsFiwWHmIjsiPXl29c67H9zYrn7p1TsgYT+Op5sO8shZtLa7AKdhnyEiEuMIoY0xUoA0EIhQbJquTVm4ocxC3XagNguOQrt9euvn75yfnw/DEFaNik1IKhUdnyfOoipyCOHu3bvr9fb99z8cx1HbxKlCCFpVeMLT46MhBkMBEQlEKaX9fr/f7yv9hzSRTQBAEAKQUv3oORuKTlQ1fBEnu3vTnkHUIWyoYVpEcD742dmZ5vJ4g8sCxqRqg+B0jFCbFnowzjnnMamrZyH4mf
qqN4/DkGt6syfjUgU/sYYfLujRmIuSsmEoLkHN5RuGkV08RQih67qu68i1pTVxf7fbnZ+fX1xcqPR8dnamurEyIN0TIyAwCf0lvNNVnWlqJclJ49X9UV1UFUKYI76JgirT2+ZXComIM2rs9wcqD/WKk+mW6OQTT+r9gRIRyEzJMTi3QTyxijGOzmJIRQMoa9GuiVxLQxfgdKZGO2idmio7E+KAnmYyKDJdCGtblOnmKieEEPSkKqiwKjzM4FVla4g94DAMw36/1xwKm7zfHMNfWz4AJGbkItCqBwJwkiKwqiHjmPu+H8dRpCWKDJk4M+dCBtUgi4XaQGn1OTvi7Jz//uID2z26S+O32cWsefUKa4KlbRROmoUS53qIICFnEA4UggAOQxqGkbkEkhzyM9smnGJOJgizWdqJAkDbtqvVyqwROO/m5zk3zEkYOonHLASGRXW0mUnDXl0HDjZtAy97kcdwnLf68XcaefJCm4lBHp0QkagYDuxBR7iJDsQ+2wFy7iabAFQZy27IIyNXzVBFGQEAzGOK2ESMSXjcZyJoKCLit/7sh9/8727f/fTtv/z7v/vLH32r5+ucBwoEA7BglCYwNwgYCbLkZuiv73fN6ebOjV//x7/x7b/6/r37+eXPbT7a7f/y+z/5H/7g9zft6cXT89Nt227g2fX5iHy5Rz1cdLWk+3FcNa0/U1UIU0qjcABAIA1+BmYUQAicQTJmovPL659++PjH7937+CpJs4Jc+bGQABACihCS6QVSbWlNE5smYBAIkIeUeHzh5Rfe3b6fRx7HIVIkJIJZ7VwRKdVQEZBIkUM9b23bwEySLuSSWevbzUppAYC2r3BgaXBYjvgoPvu8i8VlsKdkPdeiakqSdKu7rhtS0sl44rWY22JYvXyIMhzwfrsWIvjiBluskTBBtu2yGA8TAg4xKOdZBrKRhcOZ44G96eiO+a2zA5mNUAnOYmT7jNVAYKz0eccHB2fnj+DoKmz//TeLnVkcH9cwCiNT4GqU204aIXFkLTnFTEFomXU27VhRBZeRIGU+9X4T/vxKbZcQl5Y1wxThGdhoyKjN3x4xvdFkGgUkpf9wjDjbl5XM4mKNWO3Ei4OGA67/i1yLm+VAYftkNPR8ChysLm57Pk2YrfoTZvULLufwFTHG/X5/7969k29+8+WXX37/Xq2tMi1WlTuMoA4BwKlh9QF9MIGpWEUbEXl4/uzex48+9fKd1eo0759e7oY2BBFGFqKcEqc8MFAITVhRRIiBAjbAktKQGYiJ2nYTO2AcMYhQZtwP6eGTZ++++/5u11NNpsKDMKr5VTz0McQQKCVs2/bWrVu73e7999+vgUsEAKWhrujGzkLKAQDpyPEVvJaSFZKzFglGnCVZ/aJ6u6HkAsDsT0/clBPBAVRAqV4uzBzQXCsUQjg5OTE/Q55yBZeJZ95Ys5iM/jmOI1bDZd/3IrLZbBbCklGznHMaR3CefzssT9wsgoyIVuutnqwShJRS3w/aym8cx/1+3/c9c0lxVCKpmpg5Kk3/16ozFxcXV1dXiHhycrLdblVvVB1DcwhV6/P9pbV+qSYrknVhro0x2PWxoNphwppVmDsU5qTPrH72ayFNgjVcn9Xyjdq5kVEYEYL+VyJtBFhK270FNbbLlm9HhlUgt7P2xlk790KQsSqfMqtWXXTampZsHjkoc3AF/AVKyDq6OmRSWLudtWciHniMblPJAhObITPnPKY0JYzAvCIOM+/STlOCrYeKHASHlAVWHVi4CH0GtIjSdQ1OkS9lqjnnhNzvx/2+36xD0y5pddkc0IBEFMk+oN+fl+nk9n0V/NBgW+ZqCM8zZUyGKYLi2Fss7nq9drAH9YUsIjHlkLkBAIGY8ziMkjTcW9BMWCIikFFgAbILWlAmVwUTNaKsVp2GIkCteKPnVAOLZ0VcZG7JWNA+cfMx+SCEmUFrIQ3YJI2+2Jcm5/lttRGm/arNu70gYjNZTLW8/Ni0c861zcAM0AtqIJrVBEoZCb+9RVTCapRF1FbMxbICCBqLTNAESOO+ENm26/qdfOu//ODXfu9Ln/7yi0/zRY9Xd1+6ef74ceAGMrAEyEANZcmj9HEFN4JE7B8/e3D66ou//se/+3d/9f13P766QfFvf/rmr776udtf/IKEq4GlaZrx6iKsQs5gJkNEzNW4oufLJWfS7VjbYtNAIBkTCSBSQxgIUsr7IYfV+tFF/8O33nv/8fXQrPcQQUuxKbYLV6sMkZTjyzwCcoyhaULoQmixXXUUA4WwvrFu1y0/yRhKi3ctcUwEhYMLZxSGSUoWmejLghEa3fQwtqBZBioeelFDkknBDwtVFFAYVGFbRzJ2cgjz4ERbDSNUw+eu74WzNTXxlAWOXTLXH2CmLcx0wkNhAhx183K2MhIlQ6lWkFusYoFiE37xTAfzu7q4yoRdN21/24IWLSZswxqWmsvI5uOnvdj5Q6ryCZc4scZvwtFJkjP9lMEXb8CpFJtNw+j+oYKt7TeWm3ageKB7r92q/3hZASqx1RhpjT+xJxa6loENO+8xzLfC/o0xWg6huGCNcZxyWQ8hcKGWfwJNNrnHpBypgiYza0BBCHp/NfS498A/dB0eKDwfcfzmHK7LdmAxuB/z6E+L2zyoH97wC15c02g/+ugjRHzltVfj97+70E5FBJ1HVZy7CbG0EWeA0sJY65PXOweWVbfu95fvvPfhnZunv/TqnU2IGSmDCIjWgkiJh1EzmmIXNgMIIUDThCYKAgMlIZQYYgcZMgSRmFL66MGjH/z4zfPzcyJqYpezZOEYG5XviY5HjS4wdLvd3r179/z8/MGDB/40TV3hgxAvANBUVRtK6WtlJRLn1t5iqv+FTRAL/F3AgwnlcExXnB3ZZAGZGYOUkmw2G3QmvDrbWUEXj62eVnvSarisNFaVLpMHoFIqDfIEgEDT/X62fvm2dXqUKhmr+DsMw27Xq3qmnp9xHInCarXS0qBtG1Qb9B4IncB+v7++vk4prdfr7XZ7cnKyWq3UQIyI+jjW2ASoUo1qnqbXrbTYnhPcwZEdDZiydDiVWu1ATY0RESs2YzsgzjVqfBbnBcbk8IKFfXDG4BZAhYhY624YPC+4nj93AICAAIAyo0t6Z4DpZqhyPiOklI0aN7URO0vS/dTRxAnYHuAN/LzHck7tC3At9lNESnZcBXvdT2HU4/OYZY/UUXUTZqomMyM/LyQesNQ9ZgnYj+lq1283TYwhACNIsKQMMQJbji+EqW6tzcdjmZ2mP3EDFfveL9wfnDaJAYBSSqtmjFeQBgDRQA4iEsEoTDF2jE0SYmERQIohhqSWLUfNy4znauGCrPlzIoIYSf3vGiAKVawJtfSogTg8xyVq+LAQxL32uNhKnFUTtflM8O3ppl3kYhIM5mBO9xd/2ks9Rc45eUDE6lZiZgUanIti4jwY/hUWH+uxkZnV/SiABNNaiAgD7vd7EYjUFJoOKBkaPLk6v/7O3/7oOl5/5jdeHmX48MnHkPOqIYKQhSVCaCLTECI2m1W4fjqOmTabK7l++Vc//42bt/63f//nT+9fS
oI/+/a3Xz47+dLdm/3uKSTedKtMqV2tgCix5MxZktQSW8PQi4j1V0hpVBgWIgjEkJmT8Bi19AVDzsgSEocHF7t7jy6epZBWm+shZc7IGTgLl7QRjWQOFLTjG4uEQG0bu1VDTcSQY4yrTSej5JRpHXa8O4kgY86cBTJQMWlrBz+VcS0yGwC0LLUdhJFm3W1lEl4Q1xPwSfxYDIQRAHIe0Wk7/pQ9FVh877HJiKknhWrf6rpus9nscfBE5CgDWGBoneFCDoYFbONcyvTmKM+oDp/yP0k1ni1uqD/hYh/wOe64BaLBnEoamzRDT70BDvEIquDr8dSv1DMHmLtrFgTh6KLkgLHZDcYvF4NMC4HDLSo3U42B8b+iE6e45uv6udlPfplHgQQRwVEVRLViikgpPmlGRz8lz4HAAXA5skl8wQXiaDWjBdweop796eTU2fb6w/KA5E8QHNA6PP0H9SWuWu0R+f0Q5g/R+fDmozd4oLIFLp61exZ4AXNEgDni/LdegqCerAcPHlxeXn76059u21Zl90KOhEWbqdZDRzn+xixCOjctfSQAAPvE2+0qx/Hh+cXrb7zNaf+5T929eXYr530BiTSkmlc2jHsOQAOllLbrTYxRqAEKENt9YtjzToBDe5X4o4dPf/zmz//+hz9JKa5WLSJyToQhhCBFCGMGOpykQ5wcQrh9+/Yrr7zy+PHji4sLg20onD0fpauI6K047oZKYEvUn8Wog/2jrL4c+FR64Ni5zCFkQWn9DQbehzco+BFNCMLMMZIm9RCRzyG0qYKD2wWYGawaXlONZk/V2aiN3bWK5iSXVxbWxCm4MbtkQqgISzWxQv/c7Xb6ImZWl+B+r1pnr9qaiMTYqF9Om5yp8dRmq46KZ8+e6evW6/XJycl6vbYwhBhj13XqEtTMzxjjbrfTSFHL3VInhwbEQi1cWXeYLATUE2ezGixoF1S/yCF9kAODF1YddcF39PJZf/7gjP/aVYbi6ShthgsSaq9mZoEZz7X3knjHspcKpsebWs6HJWnsnifAXCL4fHSVWVjY+IL9anCec05pEMEqyJXXWXkOETYKpp3SEDFQcUTVjtkTOBfDrAhqXdASRyYUCssrjXAgSC0lRaRR3xkCjUPa74dhzGtG0pB5gCxMAurdZZXOCQAAncTod15DQK3gtloWiIi5OMY9PPjj9pvDzG27mvoxktphc9/32rcTXBlOPcY4ZkRqg3CfJSWu3SaCSK5B/wACWBsFYGnmBgJZqvgCoO1eC4dIadCQNvVOepOGl4ds6kbFFJaMQKCzT9j9C0pkGGiAuPjG75pHHvtJ6eAC2vwkF896pJID+sjMtRX1LAYA56LMBOuylDUXE/Az8VNaTKYkBQEGbABAPcvM3OH67PaNd979yd8O32tO6VNfuoMocUPDvqcYkIFHjm1oQtfK2MZ21987u3n3/fcfdS2OT+/F9dm/+B/+xX/9D3+ODy/eePzoWz/78a2bXz9rG+7361WbR9DORaWalnl6Kx2IIWoJLwt0TOqkAwbhIEwkyCEnAQhA9Pjp5YcPzp/2aR/iPstAAccB1WsKJX0FEaE61xggRIQ2tKsmtAECNG2bmQNRxgwEZ3dOL96+zJIJSww0FKevTnDWoNKoJDrGTwAaDustGgaZWKqW4fxYCYA8bCygRWpIgD0iEyMsMLmA/FBbLVUUS2p6PDs7C7STyroWBOJ5l3EsqdkRBpww5zH2RoPhQxwxBqDAb997ucQkID+I7r2HattYjzJ+GnSwMsNidpbRxYFKLaACLhHFXuexyRMrz/D8ZBav9nu+IE22D966ebixfihx+4yIUI30Nk9y/q7FtP25LGgUF0cuVqC1RlhYwkQhaJ0MHaNmIeoczFU+C6L2F85yucnmQ7Vx02Lf9NX+fGvWzaxTopkGFlDtBzRMsRukOr3948HVp801DRgARPJ8Ncv6OjaCrRQOKDDMsebwKbvhKD33gLT48AteC0703/Ssv1QsxkDn5+dPnz599dVXu667uLgIgUzvwWJTLp+XM0Eg849VP6F+j4hMoc8MGIWaR08v33jnAx7HgHTjpGkAOXPaDyK5qerEkJJqlRSbKChAsQkYusQ8Jtgl2MPw3kcPf/jGm++8/+HlDk7WLTNobVUVIpXSMsJRxy+7cvNt275w96U7d+68/fbbNUoQAUqSS86Z4pEqBvqHfclzb7n2OdLSlPWn+XbND4uILPx2AeSG0lDZEM3DLMFhzYL6+fGVkkHF4rZtt9utKja1BADUJUsWm3YhJrlmwhuRtFCFPK9oZbhmIqntj04sxhhj8f88DztMrtCRh2HQaag2uNvt9vvBisqosL5eb7SnovkGpWqhOWfN57y+vm7bdr1ebzab1WqFaOlhpE9lVxdHUxNtY01NNTFvQWfsJ9sEi4OjeeEA4xf+xI3Oe0bgSaWUFpHW4WAqtaKCnzvrAm12auBoy0Thj6n3flGOsCy1NQAgAaiboL4fpb0xhJSLUzQ455FOjGCaKnPS3Q2AMHm3AlFAFs45hJg1DzhYHZssgEhYDxdNXdc3Zh5ByAISuVb2ltLecFqmKR8VlSd6nsbMklNi5kQU7YBSygCAVOA5gLp7JDHImMch55xBWkbBDIDMzCqC6iZnkJKBWY/Ve4NFxDJXF+dl6GPwYNiEc3ur/WTEJxSIsrS7QiexPB4AJO72IwAlwKEfr/eaD5lHyRDCRPTLK4GIsBaBACHAie6ISPER51FFzBjjZrOxZMeyETXiyAoJmijJxXo0CU82MlfbsFFDL5EYvfDiSH3vcuNsv+xLclUWbIYTWXeNjKHuhm290Qhbi8cZT51DCCYre13R47yn3fmgKIU+NeaEeoTOjCGCKTFRjEhKHGOMFFvJIpDgmk43Z1eXz771//z73x2+/Nlfe/XxxUOhHDvOKPvUryg0sQnYUm7Carsb0oufemUY4tXl05NNu15tvvFPf+ev/8N/HhN876O3vvjktd989dV8vu+uIUqUFTAIBoo12oSZQSqo1Y1SjTHGGCKFSJCZgwhASyQMDJAYktDHj5+899HD6yHvWrnMe1pvQJhAAiJo8WOETChAiCgIAQGEQoNxFSliJtlut5fDBSTOkk9WJ3devvPx9iFccwhN3SsqDUsk5QwFsaurUOefc9aqeqqxmWAhIkPtYVrhDTU3wbQ7BUwnBC9FPQ+Biy89gHmQM5KRXTlm/XWz2eQkaoX1VsDDYReDEx3xhC/uBNCsTQQB/Q9rp9WJVCEwCGvsLYAyeL/Ags4CVRCf2USw5gAbu9IjUAOtn1s5PqcveT7qmeX8w7QWco2n0DE5Tx9C7dC12KLD/dHL61cwcZfSd2gxQyyhko741PF9A1kPLVglDK4BNp5t2NZB1alWq9Y4IiIYoHIxQICx7ToNtN48WFYRtA8hM6h5TsNacpacs4akLqDX5sAHaU7T8mssoi6wStszc6YRT3sK5kTSdsYfihzLIdd3GVW30yTClMqdRFrpmBGPuMo/+fKv84TaQ+zhJiwwWqpne/Hr0WcXDy7ms7jheXP4By+qPZSvd7ur
q6uXXnqpaRpmjhFB+U79p5C7A7RARB/2XMsR1JkgXe37lmS9PWUe7j98mocxpfSVX/tCRwKJx8QNYtM1ASSLUIwikgDGxCOPDKHFrpWIbcwJRoSL691P33rvO997fch889YtTlk7ujVNQ00cxzExa/+Ao+vNWUyGXq/XN27c2Gw2jx8/VpBWEVRYc7iYpMZflZbyKgsB8hJowfK4AIVFtYuClZoDdqw4VqnuXsdZEBADM3/63mgFB6jn4c1/L1rfggQRVSE0/oWl9YvOmZOwiVX2du8sxRrOJ6Uz32gFP000OiShSgnbttXmFH5dR2FbuZvRinEctTTofr9PaSb1rVarzWazXq/btvWcWkdIKe12u6urK73z5OSkbUs1WiJS47WiwH6/B4CmadSd6FvSk3MA+tQD+x6dfKiOUKM/dmSLkzLCeEj/j5JTYwomLddhZ/IDVluGMTV/HMysjjJ/QLqTVqnFgxnWmAo/PlWaLVUfJqJxGJQoS8oIEHGSlilMlRor1WU736btbChTnu3oDQvErJxh6lbg+YhF2DnnMCNSdvGrfuu0Z4YuDhxhZ5DyX5EMue4nEKmxRwBEamqlMI3AidXDEbSmAtTMfMLqMgERRopTMRjjVvo5VTnT5qlWFUQxGJNazNaEEJ9FgtUzHEIgahUfFc67bs21vLAwVEMwA0Akavd93262LdLVh7vLqyuhJvGovbwLiNZqbCKCOAmjAAGxcGINFA5xymgMIbRt441GIYTVaqV/qiXSggRkKiKX6oDJFtY0jVYEhtqBVFF3GIamqT1YmBUf9dlaha9YdEyM1qACqJXHdaNzLTBtyTlqMBMtnVtFigVyGpX3J4ENLvoQmkZkmRYeM5swiy/3BMKwi6q/KOdMYRYYDawarBCRpDzkAYBCaABQV7pe0f7q8sZqG5nP713+9C/eWEnzypdefJIfA0kvI65BAu92vSRsm1WOW6SIITSRVp3srx6m66ttu/3v/vtv/uCvvv3BTz/6r2/+6NWX7r52dvvyvft3N9tx5MypFmJpu66rZ5SbpjG1QYk4opBQyolQgvCYRwbU9jhM8bqHt9//8P6jJ81qS9QMuwFpj8MQQQiAkAAmoXDksWtXY7/bpf2NZtuu2tA1zUkYx/H09LTnXdu2w3V/dvtEQuYgDLlpGiJMNCQei2ksUJZZlU7P9oLzm1G1gNa6c8WSZIEiWkM1JVZIM6DS1EEbx8HqEYMClEq5CoqeDcs4JgAkCsyix80sfT88u7zcbk5v3rz55MmTZ8+eYVU58JhYgIjMBR+NYS/UHk+DAEDvNz5kwGl01utOdjp2sxFxgRKO6xdr9xs8G8vEqp4tliCc/VQ9vnhMNPuu7bNRMOUdagj3jFBvGUdZgIG/x29CmTwBIgAWCQ8JtOcUoGRO9g1quSmlSCA1aqyYm4hI+/vhvHQbACgLMXqieCS1JpBNSaq2OQyDY3WTeGGefP3TE15bO7iVaiZ5oXswNb0dx8Hvj5F3BQlf60xqG0Ylp5xLbzQTZUQmsLTkJa2IbQDpj0/pvy5f0VDdBSYi+DMy1VS/sUyesfREjQAw9JapL2r91RbJIlJ5JGBtyGknIpV9LPDL/7u4efHngs7DvLrP4T2HI/ijB4c4h/B58Oyh92lmoEkyUnVl/PSnP/3iF7/40ksv3bt3b1osi/aHEGfqMvyVyQDjcLO+sqxUILPsU26RYlydX/Xfef2tZ1dXv/alX7pzthlx6FZtt9mksZfMSLFtGghxN2QK8eTmjdisL/fpeuzbk1uPnjz8q7/73utvvjVmpNAOI2MN5xMRbRVgXRZme1Kd3qtV2/c9oYzjuNvtTk9PxyF/8MEHV1dXoemIiEGEQQhDaJiZHC0lF5dkiv0EkIws0qd+HUWZY9OMaWQNHqEQoVQbhUKUSEIIMoo6VUwggVIyrREHHgtgMPZk7RYsxG7xSIzx+vpyvV4Di0ApUnXz5s3VatXEJufcNE0IpFjWNC3BVElFR6BqbZcqdtdSLlMd/6Zp9te7XLvU6L/UNIqGOpoGrVmgJiKmlIZhUA1KS7xaoXz9XiWK/X6vh6tSR0pJK8GoeLnZbNbrjcaLxhhT6k1b2O1219fXIrLdbm/cuNE0jfkGtUaocg2NblXKoI5BVRqNLhkB5BoBC/N0ZayJQkbBLMZP65dSDVfJOV9fX+u2WHcuqARBlyzVgmZlcrxlTbdCiT8iApawT70z1wBXXaknF0p1A5llsBBJrNHRtsZsLQqJYMHrncqv4JFL9+as27Ver6+urkoEWQBNHxv7oc6fvaBrdFvXS7WkQoxxHEfTi2yLUkqQxqZpVIEXKTG9XdflXBoZKCQbgmBoOAmD9smEYRz6cSAioFIIZ+gTQOmKN9RoZ0DBQIJARIEadTaGEJClT4O4JggMuOpW45AeP3m23YTNClkEEEIIKK74jShOQUOz/Atw5XZtNwzNtZOwHYrVZFHrM7tAPJNetNWwZkHlnDNP3TsK2w3KygufjWPiJBAyp1FD/Y5YLwADAiOQlKbFHgJm9jARHtPYNM12u1mtOluPHESjUWlbOUvGBdd42l7h5Q+jg/ZrPhbZjzjrtAMug8hYoqIy1M4egMgiKqRo6oyu0xNWqMYq+8ZH/elJUxapzmg/VWORJq6VAec5hPbZYMKDhUjt2WwGaVJXIQKABKXHOP0qIrzP/T7m5u7pra6hhz979rr8BFi+9HtfeO/ZOznm2DW7q/0qbLfbk4vHF+OqbTCixBBg00lGlrTvOV3tHn/1n3z1jW14/acffOsnP7r75d89u/VC7PM1X2qksQgylB40QDSmAZP5WLIKh+MogeDmzZuXz55cXV3cvn0bgJ5e7GJ3Nqb4ozffePODjzLFUWkcCOYUQgggAQCBAYRLUI3qDRhCaGNs121cNdSIkEp4lJXXErVdF9cxX49tbJGRijJFgERCABDJebCr7UM9hAZ1hXoiVTvQ7LD0V2YGIDU224mHMOvSaY94yPGWKqmRLYvxxXkS7PR12iolxxhPT09F5Pr6WimCiEzCzxyolO5b3wJPzQ/FWS+R+92w+20VdQkTs5k2VqQoOnWZIpPNbIG8epmFDJwKTTSV85t40nMipvxy0InL+pMvYw1OBdVs76MSth9q+ldmP8lcTfWP2NHP9vzA5L94nRzzHdkFx47MCSLTG4+2YhNNXUIEpeqMAlPVu3qTHhAZxbOF2NbB3MJtc2DmQBZPNZt2CIF59tSkTjznYueMtQ0k52Hz++NxE6qpnrk0/XaQbA4KhmNOm0PUODxfo9ULTuQ/exjwIxwCDM6V20++7FnbFr92OACnY99MyIiIaRzX6zWlvNvtPrx3j4heeumlnHPTtKAeQgR0RnRPo/xBCJQYlsXq9AgQKUseGTKGBLmB+PYHD5t2lT798t2bZ8123csoQt3mZBSi2BKG9Y3TzfZGFroaGJp108Z3P7j/vR+/8c57H1zvRiBCimOWRqNnpLotCvnVwz2SQ6iCZhOpaZrt9uT27dvjOJ4/fhqoSN7CojU0/uHDEBLR9ttU3z7lholISmp
BDoSNtnCsu0dEwLWls0dwv738fJA4BHj7/vDQVdilml+9Xq9V0aK2q3aZyfCnbYEWusEnkLgFfbOFmP5jaVHVCj95gbwutOAUVCpOjVdX16oQ7nY7bflANbO667qTk5PNZtN1K6sio0rFMAzqUcw5awkZFT7RBdtrWRrT3Iy8QLXEebENnAnSjJ6TVjAnZZ5PSXG9Ji3HSq57IRzQUgDIuYSA1TwUZIac1ayJIcS27ZTPAmDOjFSeNQoslemjC22dYNYzozpbT2Y9UDFzZucRdXRbLXRUZBsE0LKIkzwjkHO1CSoAME9lF8gsI3PWZnBlu+cVQkTcrE/0Bi2hqfMZhsE0dBGRPNndcvWXiqsaqrvn+KkqY0dYko6P2mgxAdfl55odmpFSRETKWVLinCQDImchRTd9RYgAXBsAepTxCGtn5xFcI+88u9TPo2s/5lUejYxLKWlConUjNMFMB4Sa3BhzzpylT+N+6HPOggAkKKjEMwOTGrElqA4lSuoLzdeBJg9syhkAuq49PT1tmkbtRp6nogthMrDzKw+1cwjXcsMGCrYGGy2EYAZO42pSFUUAMCO033HbdI8zJrzazXqPF8Tt1wXyHLyCDP18zBI4+mj4ljNX5iGeqCr+VypQ5oJV4J7NhBClAk0gEM1d1SnRmPabVZv23J/3Z6szQX72zuV3h+9tbmzaF7rQxN0wdF3XQZf3qWmaIbcSAjMAp4a46SBhvu6HHvKT8elX/snv/PWzp//xe9/97c98kbjbdut+fKwEjoiYp6rBiMgISTgGUg1E10VSwoxEcMzCRL2E65F/fu/Bd3785nsPn8Hm7HrIQ8pd1/WpD3EdQEgkCGRgBAiAtaAbC0Fs29Wma1eNRMjElIiZBUVQYhObTdycbnYPeyCRLCzMwBIIS3lPNlpsl8mv5FK0CVBd2Ol5FRTyLO7XaH3OGUucntT/dH9C3ScIwWyNAq6lp8GVI1VL1ouImk+43W4RUQ2o8BxRAFSem1tYDNr9+LYPMKdH9tJDVlEBG2wQ8rEoshy/zA2PT1Wd/AY2PHf4eGw6JNn27+J+j8g+vNaLIOYRXYzgxzncq6O7d/j2Bf0BRxBsYjivRM8HR+Nne7h8mGkFS03p6IoW3xxe7CIaQpj5ghZk7XBPDDzUEOO/sXh+fwrMM3ruh2IXV1wnE8gV5pU5Q/U2e6nsX9vJiFOB9Ik5VTd1YjbmEnTdlx4pjgLk4vqEQ1ks/BMuPzG/LThXJD5hMosds88ZJKX0/vvvj+P4mc98xkCuqPUlRMyqp82QseIRF2MHLM5RmzRwFmRhZGkwSpDry90b7300CjTrz5/c7CRBEl7HNudEsYvdarU5azenF5f7PffdanP++Mn3f/TTH/74pxe7IQNEaig0zKMCFQCqjxehmEUQUbVUm0lQAS6npmkIY2xos9m89NJL19fXDx8+DCFgdbMDAGB43lF+8hVD27ajqh8m9jRNM0Iy+wMRWZcv85AsUCDnDA5V/ckuTvkouPqrqH8UFIPOzs7UbYXVipdzEbRSThruZLqQhfbhPAbShG97tX5jTqcQAhyoKE3TKAsGAF+6k12Uo4oWIqL+Qw0TFRHt6xhjJApaC0cbylusvjr6QoD9fm9PaRk2LWwBFeapuuM088IvylGJZYKWlJI80YihrtdOEI6Rd64FV4dhqIES07M+7gNqXL1ttamO6ByJXnODGliHiBbzic5cS/MeZiLC87ZPJgxYBo0/fajcJxwzOgCL1qMCXwMPGElTb2rH5nlSADOTTFZzgw3bN50SuShTcykTEVKxrRtk6tHHGLHUlJKRy5kGqytdiZVRS8+a9U8FwPpTWabGjoJw05REUFSBVIr9iwVzkkCQk/Tj0LYxICJgSkl7Xut4RWrHSSFcIKk/C54HlNlxG0CWja0QK67ijo2Tc9VdcaoZwaU7K+kYIhJHzoKgRXhHzsVspLQSWFgsJlkYNbDBT1r/HwA05ImImiauVisr03LIgfTSaB+ahy/DnFNO855rw3pDpUeOA80tl1R7AvovF58NDvw8DT5gUiynX73lSeaSZQgBhbFiqS2E5i7+xbnabX6jjKTiXPhmJ+Q5OBYAENQkWwQoNWZFJOfUtltIefds31H70s2XY//o3vvn//Xf//VXvvnrL37xhUADUeDEY86xazo5CRGEhzHvBcZV0zQdNZB6gD0M7z3+8Le++ftvwrf//PVv/++/+vuP+l3mbKG/Y06QBAA4lTzMnHMIGFz72jZ2FxdXTbPe3ohPrvcS4Rriux8+/Ovv//Td86s+dMJ0PewxhC4GyAlRsEoWJEAAuYomiTMLUwDqIjTIBBnzhloCIQIJQAEixpObm0u4RkRBEMbqmEFAZJgsr0UeRVS7B7OIVVAsbdULcsLBhTjFThs5AyGEALCMVrKz8wBgQK4hpp42GbB52DD6nnPW7CztNrPZbPQ4EKeVejDjGtiTa66sh3aPAuVfmb4Rp5ixixRdYI0f0Pho5lkAp6F8mFvtF5vj9MyaolYfxANh148sTjBFJ6f6LYU50h092cOdsaf8vwCzU/O/+sdtf6Zf62EevhGrpgIOTvywtgO2mTJv5Gj/Hj5+uFK7YXF/LUNn5GtpYPYjg+NSUnj2BBjkXL4iYkZZcNCF7vIvMoezQQJW+RUqmHng8eP41TnYI8CpiM5ztoOtk+3i8kDo4erobc/7c7F1i0kenxFOt9m/Cyj1VOXg7cuj9zAptQmhiDRNo1Hor7zyynq91nYgIAKABMCFCM9Sy0zc9CMvpkeYCUlVy8wIEJAQEOO2Ob+6TO99RLEBjLdvbEPEfYZmcwLtptlsOXQX+5SwwTY+vd7/7Xe+/+OfvXVxuW/Wa2DJWUB9I1oED6CYqhHBFbmxKzg81ahFQGqa5vbt20+ePLm8vDyEnLp8pRJHnI2HGwtVflBR3hMH1CjzOawWvoMAc3Lk4eGQXLCL1bR7PAwsEDaEMI4jUNCJafBkTXAgjZHTEdLIEGYjS01dMZEdXfKSldr25nW9p2kaTklcSQVlWONwRbXYo0V7qW6mapu6Ppj5+vp6v9+PY1LHoCVUhxCbplE1r6pYxQA0DMM47tWXqAXY1uv1oo6xOR48jfLEaoHjXmP3d04c6phRzAaxVC5rX2Fx734+UrMETSE01doTSbufayh+5iUNNzXVo6cJlmrcWQCbzdwED3swBjq8TUQ0d2lhs1Oj37SrMtWg0rbsqMVmcNooi1Zd2P7spXqbJVhdX19ruK9V2dHLK/OxkVzzxtGlUZhtgpm1mJmCLcAkrrPKcQiIk/8WkEVIhIrVSYJ6bogIMTAIADJIGllEQgwqrTJqha0SdU5Agsh5Soy0g1sgO86Jla3RH3SoF7iuVACgPob6UwEVQ/Da9MveFUq60ZDTbtilNJQXTzWHS7CESGkn5zgKITLM6U7TNtv1Rg1OOWeBHEJcgI4RysOgNUVmQzNwOSp+U8RVEWDOPp1jAcd4yGuPaFNom+h3X++3xtn2jX7w9M4fYSCySus6ebJi8W6j7Aq4DOVCJ796xbL+NFl0DKDBBF
wSWx0iAkJoumfX1+u4Obt9a+wHvuzXq+2LW/7o7ac/Dm+swupTv/ypy+FikNSs6fL62TbexQgZJElIzDlgDNTC+qwLPdODj5+88rmXfuUbvzH89L03nr37pTuvrK8i4GS8LGqGsOZehhBEcggBMuecAVl40+97CQ21Gxno6T69c//x3//k3Z9++HhHXQ+hHxliG4gkpS5GphABgyQSyiCkihfW6lLCFIhC4CBAzKRGoEQBOAAIY+TN6RbpgQAQSknaVyQHBIA0JNNtihUqKcjVc2fJkD3HXUCjiowhzGLxCWeuLXiOS2Fx9FJD+Iz0eI2IXZU2e1YfUcegFvXt+/7p06dqKD0cX6rkZ1zQG2VwLkn4efp/ofpOvRm1vmRWSAAq79GEusPLXgcONaDGNXkGU3bjmPi+2EyP2ofr0u/5wIO3OJ3FkhfHdzjzxSB0EOLrVzeRoIOh7KV11QIH71qs1H/2Hm9/dnbngsGIprcjigCW8pHOVlWCgIzsHIlMW+CF/VToUqg1wOaWhTBvTG+xZG3bublNu+ojR2z5KoP6wzKkM3zMrha88Q4RccX3sxYSru8VgCW0HD33BX4twHLx1CEy/v/3miDKgdniQsSFQoiIptuUDbTQ3CZeX18/evz45ZdfPjk5efjwERQGOtnLlZ/a4x7eDnGq/pEAI0DQUg0glCQAhNhSTunxxe4n73yIsfvql7/00p07nNPZzZORM8QuCV0PeXtyky93P3nzre/96MePzy9i17Xtqr++GscMADklCg2AHq2TcU1JBEYMlkBY0geU+5OcnZ2dnJy88cYbqnJwibw66rAt6LA4goUwIILDMKQmCQNRNDqWUoLK9hHDYvznkaOjl0yCU4H8wxv8496kAgCadKfIYkqdJyBUUQYc5VSBEorFeQoiZddgUNFZDcGoTtHq3lnsWKrN/aAW0zYvkN6jv+52u77vx8SWThZC6LquabrVaqWBr17nUary5MkTotJXQ4upQs3og0p8LM3MrGl4XPQqDlKqNSn0y+A6hHnufIgLUmtYYNV7La0aa0G+7OoP+fwpG8EkTxvcA4lxZKjMXZUB/xQ4QhGo8YzS2GJ2RQ1zTWQNIVjIov17CL3G/hAxpbEsUKaoGQ0WFRESmMzuJIhggR2WVagvMq+DUXXdyXSebt682bRhvV7X/UyIWtGjnAYRQWTOpaq2PmguaF1107TOjqF2VQwhJGaArJIOupYYKSVARtQI0unoRZCzsGDO0vd9SrhuGoSMIRJgKUNjuglidvmchl9esPf7rFvrj2nC04MCeHU0MJsmTmKJHnYBg6p4owjHzIwxpDSqC15IGBkBMyfQAkRlCwAAQZaUEasjIuccG+q6brXq1K6TeUREwKXipCdhjTUM4OqeztDSQ7a9EZw+ltKoQwEAIrlZLS1kXmIwYuQXAvOMFDnwKiwwk2rLHXF2nSa2BmT+2Kim9CxwUrsPyVzIsMnYQoxgMc/uLx8QWWMc9ZRENNkTEUNsL/dXGMdN1/QpB+BOujC2p9w+eXP/I/kpjfHFL97BVq7hsj1DeDIABWkRuiYx9xrHyNitNk/PLz73S1949OzpWYQ7X3z5/Y8e3l3fONkH5kZBjUIgRNUARViYVe8oK4UMAlfXA8b28nqQQXK7unf//t++/ubr7z7I7dkAq6eXQxbZbDaS+zwOJ5v1HokASYhEVF4NoUEKIoLCRBAa0t72QggNRA4jjwUXJCdJq01DTUhXKeTAwoycmQE4Q9amNIYqnpaZIWBhPLMD9TBmI5SIWSGiUqxojilLiQHnioqGQHtIMIw1288CDimEGBqpbSfU8grzy95S10emBBrz8FXFuOZLKDgZw/C7cYgR9V2zXz1TXKiOhsgLjgLVjG0MwO+GqVGOkE1EzV63ONbDKfnle6RTBczPc7GTfvmLOS8O2rZrQbv9zSwHr3MD+706HOToNOx73St7kOeuYPtXaxTV7VJiMqvYuVi+WRMXy7HBTYiprz5C9ERkHMcp4rAaPtFdC/oMLsHGNsRGO9wBdp5hsJKPrjeGf2pBfqGoOkc6Zx5enwwnbt+OjOP37XAyn3wZMbH1gsPHwxXVzzPsE2fhNToQYmxiTGnY7Xb379//0he/cOvWrQcPHpbXIYDGGM1n69dyeHYmOBAz8MgoIihAjMiCmWHcp7bZhNBc7PsPPj7/9Gf7Vz99Y7vu4grHq6tBAmAIXccUP3p4/r3v//B6yBgjUuzTmJMQIAEGJMAgklVaIR/3e3guVS1UOhBD+/LLL7dtd+/evTyVgNd9Pu4lni2w8uX6CirrLqFVpbRvjJHyFOyAiCX4RCtMqeGDjuhgiGjodxRgPC48j3zppXqaxvBrmU1EbNvWMNfuNBQzrISqBJZBqtgGVUr2JNePE2NMw2AkN9TifyCiyXtm+zMdBpzCptpgSmm/H6zezGq12m63Mbbb7Xa9XtsMU8p932uTembebrenp6fWV1BLs1jKolQ1z9eFPoRhcdYonNddMznNHAD2jcOyQoFti1RJCyG0bbtarbRKoj/KqoAtC5DqBFI1uFsJ9KJI1DRUR4FdzfzJzFHDd6kAWE2xnigVVm3QwlCJtFf5UhtEAQjTtujyNdcsUSYia66LiGoyTikF37MGS3xfDIGIYgiz0KPakU+3l3NO4zj0vYj0uV+tVl3XNW1YrVamsdOBz1z31ReUrrp9I3OpQErlkZlFgAGCfkOotK9w2DpHvbKgZMmZxwH2ex6HhtcRM1BtMOFTExfw5hWfBQLKFNs8UXgvXfjDtUEQMUZFUtV+a/WjgDlnwBlkiqCIxJSHQN04jn3f5zxCmZMwAhluMxTZgrQmjqEKgttxtTZpWSQiJc1HOml4FmgYBVOEIfkFew63IHZSvb3u4KcGFR6T/RvNBOW2+Iipw56ieSSS/1Vn4mXrxcwXU8Vjkhw6lc+mhPMygD4xb/EUFscggOZ26uuwZHsiImds1ptB0n73LAZq2k3KnHZyo7nV8dXHP3n63fz932y+vP2lLoW+OQn4KKUhpwDckEA7jJkzBAm76/Hu7Zc48+0Xbr96duP+j19/6XO3PswPXpOXAEDLRIUQCFGPqGliTknbsitC6dLGPYNgJrzqh3v3H33/jXfevv/g6QAYw56hxwgIiZGANKA+ICIDiVa1Q6oVDwGABSBQ08TQRCFgZKDQUBhEEIEIJEvOabVatV2XLjKJFQ5hkVJGsKl9z1gEtDHrPA4KnPrRUGAn4CJilTSmGDZj/0qgQ5y47GSAqr4vm4/R4glpXWicB/4FmfAEJddyZ13X5ZwXsnh9fGpjIHOOZR88omVZkhhbrzjhz77XtgTPw/fFN1g8VLMR7KLq5z9E6sVs/T6YbVIvL+V43X4xpkdDm4z/cvHe6Y1h6VP1k18ckMyb+foHfSiIzdDPx28gHFwVeGYJId644Ef2C5zzmKma9CLswj213EmYaRczVY1qOW/Fo8X+SzVp6yMq2eQ8C4jy93sjKNUiE/brInTfcoEWp4a43FVFCpnLf/4pnEuHC5Sx7cUDIfLogItn7cHn3fAJl
ZDy7SNfP3mHChRS2WJ1/P4TJJXvHfT3QPAXHWqr6Mo+fPcJF9AYpnousx79MmbiGnZz85/PTOu/fuP1ifbmBMElIAJBRAkpSHlDsKfThDoZubfbfthMezN7bf/v1f+49//N3LFy+3d++NKWOg1Xr94gXwHi4+edZ/uU+JGTlgBM7MTAE417JlUi+aR2DxH4smfMsMJ3Z5lrPl8KiThQYgNfjQoA6c7b18l3qALKTmVxDn13LM5jZ2Gae2B2iTpEZg4TEDyQ+7ZMCj8/XfN+CxC835e3A+gg2yfCMUIp9+MvljFkiD1dsw4+e7vN8o2ZN6I4XtdexiBR7shuvdrCe0m9d5Ceo02pzp7CIXDbb1bYZqmB0XokzmwTRbnUlauip2dKkT/lk7+bexBPxqeuQY8OBqCtzEsaGBSqzBJCo2ImguqZpvGlry7/JvMXhCCHYaGADEMJ6frhkodgH2rI9lIK0bzGq8CiBqR87AC76AY8JhUiCkhVPmRnUzwmw7uMW1N7+O2L1G3p6SDSf2rwBByZdDwomMhzF98MEHzPzFL37xX/7xv+bAWsNTAq21K0M7fSzz/f/LdbtEOm4WmhP94cOHfd+///77wzBst9uUUaoKWEcGmDKB2zd67mi+fwW0OIWJ0qrr+76/3t0gIiEJIosACODMO7aUS+BWZ36DLtNMwyOicZhiBZqiBpkTZ3QFDnUukpgDM9RwDUDp1NJ1UZM/G0+cZyIPIdEkHExWE1FKI9fcdQDQMrau6zCXQTAiYYSasWnpSMMwaBbDyDkJs4wAwPUwM0YEJgbqgio8JZc01yI69WJYwEphSykhkjcCJ3EXJ2eW/Yo1xQaglWNeQW/kEri8EhPCuZ7/JFVx11dY4oNJ8kaYew2EmVPOlubjl4PnsWtL5AYTlS7wgIjMoKHUuh/FOshkeEspAwGjcRHJSYimVij1zll6CDNn8WSJiEWuaoQwlxjyaMcGQsV5jL0naWaGeam2iWhESintdrv9fq9BsBBRRsR6LErOOaWMyGPOklkPM8QaESWCrgvMKefJwAYWDLqIbO1/PLV428fWl5mHpPSAjLXmX4r5h4haIiBV83/48OGPu25ICUsm10R4y23U6HBaSrcL41xrMqHk9TqPPXOU2086QgSQECjGQO4gGp6y6idCl7pxahaQVcc2YDVEHKtp7qWYMVIVHIxzDcCLGM9gxocGT/HzuTt1R/eGoh+B8/SlR2sTkcBqA+hC8jxuYHB6DEj1kNli+DXzM/JDeYNhIn22GvdCCuXPuda7pJ6josSTi8wrymSuXYGzaT3qYEGXExL0O0HJUz9ZsMIMABFGFK1qYOYx3azWIe9HAOwQPv67Fz88+8k3f+/XGFCQx2EUyUSUxiQZINJmux1uroZxf//8ZHd9nSC//fW3L18O3/m3fychw4Byk3OGDjEGefHR5QN+HRg4SegqBa5iEs7MUo4YyirbmJmRAZZtMCZ+M3Jtvl+u5lHS9QM2v97WxcvSLSwmb+Pb2vlVs2W17U2cQrkE1Z8z5mXZUbCXE2xmdOROmT3V0E8LjPMiG1nqn81WZwKEXOoUVIeLyZbm5iWc6C4/vruh7R2nH5rIGLg1Nf5q2BDmqltdl7Zox0/n1QvhkDZZ12YSU83nsaU30jKHtAOjUJHp8R5+njdlWYZY/TRl3kwCXf6/n6CXP43Y8b+ad8behViODIW59NNkP8uLsps9bfu3NOveYNXWN9eDCnUum647O9+C4EcdCiQKPSFxMRuU4AWdke+BMYCPvrE84pZVnIDF+eZylBiwNQyOXH4dm+8bCNG5WZegGskppT1+/PjZxcuHDx/2seNaMWWJZ3Ss6qmB3I9vwy7Bu+1aCpMy5i0vjTHmYVxvVm+88QYAqEHbgOFhM0K/Dfn+Kb8K/n6RcuYaIuoBfUpmqlQQESFlO99C8b+oCfSE7YFciGVqbNic87pb931vHsY0JpZC3vplKXJzrGFAhkhes5eFqrCUWsZHXruT6jhTLVwzPw+HMYSw2WwglbS3kuKOUUQYZL1e6yDFu40ANepYgQ225YWgDZBL9bLIVFtBOZv0NkoGAKJ2XvprCK1EwnlCDTghIy5l18tGrP04bI1MHAmKeeqxtnXwCPR4No7za62P5zSLcNouoAa/F3QMQjATJqO+XYSIDruS8afrgrUhTdP3CBY8K2oThoIflf9QxMiEw9h3vg5LUplCwVfpDJoswS0AgkBAnU6n2w4AZDsfEkSQFWXCzLXelTBGJApBRIZh6Ls11UK2YRhDN5bGOYSJM4gUbztIH7vVaqVvL/a5zHR7i6N4DFhFRvEbxljuHLT2ksS6QwEAQNd14k9MyQAE9+7d2263N/u9qBXdHjUJaiJ62jM7xUMIterBzFQjoaOZaBYqh8UmEo0VEVHLRo3sRASgPNxYREsq8a56rI7zpbqDziC8baiGoD0JylwVtu+Xd1oNjEeHOPuqeYV3gHkKNuXGo09clz+DWb+nMB0E6Rn7KPYAQN1vtk7lKdc82o+AiFQliIEHbk81SJbrZZg3tHh90SC0L2W+Afib/Q3M6hRU9ubaUqT6FOqriUgPz90fLu/05yll4nDSnV88e/nzv31877X7dz53xtsx4SHF3K83kAkyCfZyM8ogZyfnl5dX6361OTm5+PTy13//159dXP7dX374xtmDm6sXtIO7Z3c+fXJx8dH1A36w6VeUETKICOcc6hk4BQxCScXZxczsuNRQJDXVzaPXLhvNU6DcopkZ3pYk7V/a8FHzRnB07hcIFgZ88150mlbxvFChNJ9p8PdVvzx53Haz//co0tBtwx5Fnl/8v1wyFwrpGtrhF7EYzFfBf+9lTvO951/79xWv8CPg4vzJCQkMPE1KtJmR1tbrIiv/104rEzCeAJbeItMqPEI8AhuyAVdx2shGcbWF4qs9K6nYzbav6CFRhjRT/nzOjwegITm/lMtLuHS69TYe8xHSgjm9GUkYYN7gadbLwFaBbPt9iHJ6sgIMXUSRDCAqpMchi/Yg0dNOa2/8o5wCC/pxYEysjW6/aB6HhSvwNkpeXkumW0LoCax8CTPjsAhD4SQQQnj58uUnn3zy+uuv37t379nzF2VN5+l8R18Ex371U1v+e9ukYIbGX3A/ESWRzWbz8OHDy8vLp0+fVpV9sqKWPP4KfvePNBPx309j1jJaEbECSBYW1LRe0r5T6Jbj1TKtARIRwR1Qrpy7Xq/7bs31EAIjA2HJNS6ThLswVarbxJTxfd2vnxfXrtQmlHT8zMkwQDRLPNElDu709hBCxF5EGArHhRD02Keu7x3zTkf/OVLJAFrqnIhoDwOzglHxX1ukGELYpX1iPXTX+wWYGYN1ezY0IotwFkSgxWns3vsmC9eGF60FJwHtQe2a0/e9kYRhyeSwplaCbhX1cMJxHAOVzLKqMJS0OK3dbd2mCDFEAyO7Qy/ywGoK0jxoudy/wOmWtqzMSAHAZeoRkYg2Yi3an65AzhlYIIOAluoBqEXndGxw0s9ktp6SpVgqtyWQIOrKTJkjhb5br1b7vu8zSEqJQQ
J11EVEpEpXMUYQSnlARD2XRL0kugQ2QQLEepIFlpBG28sAXO2658Gcc0AsZuDiJ5Gab1gmQmdnZ2dnZ88uLvy5TThT/9oluG3Xdkw3axkox2SUl7fN+kZyJ0oxs7gu5waBH8VyYcF5H7EaUV4OKkyZC8sZ42H19FM9BdIbJEepEOqqLOfWPGuk7NfYHpdFAqcNaAFPs2xz9S357200cp5vWwYjbj+OPuUZzJDjwQMvW/PkjvUCt5k7uxzXUFtQwlRy3Tp9DRiap7DbKjfiG+b86YnP0IIsQCCCpUge3WZmJhaWbs0558Sc+bBZncoYbvbDVk7whr//Zz/85uarq0cRV5mHkTEirDkTY6Dx0BNhwhg2KcvVIcEmXvLl137zy08/+eTZk09pBZ2sxjGTwMfvvXh7vz/pT8a9HnYEI7MkyZmhRvBNzAgySuDSffh4Dq3fCBud25OcJyQ/jl8yQ6PRm1Fgs38Yx3lSsctWVj8HCs09R4UFVqte/VZl81AT8ZURBgN+yZ4GoUcIwsRuxuD+EU/2/oNB7gG+Ddv+A8yDnA2cwR0g6/nXM4hfXE2Jb17awO+/l8Wv6HTro2LXbjZgPJkdvbm5vBegwZVR+FHe9896IgFHew3wHqTmsw3Irisyuc7SIYSUhzIgOscWALNrU44iwvMaqglL4JrI1y/RvLZ+iTyRHAV4iUmDQZNKzJRF68mch74P3WrV9QEYmBNBMDGJWM4e1MxPElhuwLAgJE9dcswn8opHjoqp22YHtYZwiRl2qb+ebicAYPYgUkTgNI7jOEailNJHH330la985bOf/ezz5y+8KPD8btsJHeMmnG/cM7TYv7fMy8SpyNwGu8WA0vneuXPn3r17H3744cuXL0tnIFnV+O4sJRLxFQWhMxiOzsivlNG/VGXa6VScQUqhJhIzB5xJSz/4TOwcU5YQUaSUFIpr0GKvIyLA0t1RamB/HMcuribexGLTlG4r3NYsOW/yzMXjZaMZJwaYZpUDTFIXAHLOfSAACPVLswlz1QyhKHVJwcquJpkLbAgAOUk1BYMFGwEgVOWT6yF1GiqwwjzT1vTPMaeGB0W41JaJqENDG6epvw4FzLgV13nrcDg0dKLDMk7tJwzJUs6Fn1VdmTJZRbqYPx/0PA8nD7127V3As+Vwkt8MvxhK90tTSGDh0DSE6PfelYyIlInjlHxbIZlmUSbOrJG3Sj9JRKAGS8t7SQjbJIXqJCmAFUrjMtR2eyoiIQwppdVqtR8HfUPOuQOIXRdjDF0UkXHIzDVOiFkya9KyFsCnlLI2FoJKXRmEhbpJSBpu0Sn2NE+B0XMdAerpRIySGEkIEAC6LqYkoboO1+v1a6+99vjjj/VcFpZJ/SYiQESaeYUMpV5aTtvoXLzrPSFEI2ZbNeNicIJX/4wUCkH7wKi6ZBsZDcXwIOvI5DFlosF/QETvRrIBYSFM/cbmJYunSOMiG7BRZ9FZROS82vNXTJB4RjWcYk079o4QW3u7zRtUHjbJRcn2U/azw/m+SzHMPS7MzFy7gBgDeL4yxrClpXmEx97iBY1HcrMcDXk19zTP0jyBAYACBAZQWT9RBYAAiwjVc371/lUAgNyHiLhJB4pxJXJ48cmzD372wRvr1/rXAsswYgCIPACx9JQl0OX+6vz+vZe76xeHl2995rNXzy4efP71X//9r/7Fv/ouC/RMu+f7s9W9l8/Gi6fPtm9smWEcx7CexGKu1cMTiiAAzGrllaONC5q52xS8vX2Ulm4jbI9bIza7FE6Vy0SzOPOS7T1H+NuM9vz9fh1TSuqCbR737NZcnjXsS1noprBQ4BoS8n+6KUw3+zubVGq7z1e1yULrWsJpbbU8xvyw/kv7vBzW25wew54S/GX3N2/0r7bdpcHYDCFzMdhMsBGVhrcGqqP0gFV99+KisRsbadAQuX2pqqcGE6hqk+Ium6+9yBQdjzRElOaoOiGBaYKIqKliVnvTkIe9zjtZ9MPS9+HXcUaLjjDG4dB3eLLdrtc9ETBkZk45IXTT04DlWBERmJ+05FfHDz7BLC1UpqHasx75y9FewbnNTOHYgvphPW/K3LQSEQvOYwzM8v7776/X6zfffPN73/sBT34QCESZ2/OEj4LUjN/A8OqpNZziBz56/ziOAfHevXsnJyff/+73bm5uui5oJw8p8CjBuCrxY29Et5c1s1gyFwAg1Q4LVDKtbNcuSls9srGMgNMIv3DWx4irXMxsVQkx9NZWKsSywamCMXIehqHvkhoYSsNeLlnAIGfL0QihdEtuJZunfKjal81XRLRGzKwmZmY8IgkRUWprtzIUAmg8CaYUOE6lUhEA0qg0iCF0MUZhJEqI5TgE3w2VS2n9iO60ABvTaquOCs9leIBAfLCIahGyTaRZxwxsRqAZXXqbj53m2trHiyZySWTa7Keu9dTr0c6d85ATkS/WR5dN1uMKnCT0PsSGtirlz9zNYLaae4RdxYErtWAU7TmEiDgMSUQ8ESOV85uWZC01Y0vqmUZcE25X642IxBgz83a7TdcsgrEveewW+cxJMow5Z/2GKIzM2seViIbxkFKSxCKiEU8R4ZxRBEJkFybxHKFweoOQiDSFFQDYrA1CEB7HUQl+ZBbSbBRAxIcPH27feefm5oaIxEWSpLBhu9EbchqdHxwD+vsBp2x/25R5HhX3A2pQtQzHNc/TK/3mVygEhyWJGV2gCV3+ocxFOdF0TLyRvp+Y/Su376P11VPKtTiF7CiCZC6djQfSsfMJlcFMeNm/XD3fXm1qboPqt/HQetQZ7TY36AcTVeZ4ICKGiT+hOc9jSvhH071CCFb5beNwreEWZxUbYA3AXiI0iyJuI0Tnly1zV0gQRaJgRiRAs2913fVOsycBQYZhWNP2dHV6PewOu0N/0v3djz7p7ue7q/MDDSFHwkEy5ogpHRgQAj759PnJ3XMK3afPnn77619/+cnTz7718PLrz//633yYD7s7p6+9/HTYrk6fP794+PobnHJKqaN1CCFBBsI8jJk5C0/9zo5pDpXaZ+2zPC1ZYA2c0t/IL/+U4bmhfKs19VqyiORKct6M4dp8yC8HFj/WLGvcVs37a/zCpTERtnrhL3PZCHhMU1leS+zBMb5GpwD5y9959I1Gq4Vl5h0CvLzLtd+Jp3nftt7TeQhT7cQC1NZgICI+ZpYY/DZ9cdfSVmlEoseVzCuyDD9Kh3lqCzEBIFNa1Cz4TPOESazbP83zi/SyUjqcS2/bXUyqmHqnbeKoNrWyLC+D3NOkx6Qn6aqdVzlTsWRrjTgLe7oo1IRSmRvwthY25WalcL4X+IXODCGEzXa1WnUxgibe5ZwpREQsiUECACCMMgdmCRg4QiqLlWcGoSdj//gvZNhGa3EPhmYErAZ/syKeXAFa8Zhz7rs+1H7XzPzpp5/mnO/du4eI4BSREEJeAMNQz4ufT81zSgMn3ML4t8233EzhyN0AKaWuX52envZ9/+mnnw7D0PfbnLOJhAoDTVmXMP10GxjgSM7fuWRt+77KmQAAuaxCQAxyuynYDP4KkLAECafEP67naUFh1
cIX+iW7w2Bk0TkGXGqcOD2yMRv8CiK5vaxe/gblzb7vdUfTE49Kr+P6YJifrcVojEmCI2I1qFg4ZeWYnES3gmLl1XaAqoh7MKTWj9mkvEzI7iwKj2Rr928qlt4TZIYiw9h6vT7K1zF2UGumbO6IqCW44noHNOJdRBAlzFtegROVZV0YZn/68wmIjPBMtgfuDSdFQ04p54xYepwYZanoEmH/xorRiSzn1CsyWRCsUbK4yMaqoyEi+kNE/QTFaQj6JWGkgLY6emQlAIhkkeh1M6yblMGKEEQGg3YYBtsIFF2cgSDLIjXPbvDblgFMRNKceAcBCBEjVqVrHEehkHPW1s13797dbDZFu4OpVrC+d2ZKGAw439nL1rlIBEV3Ap8rDJwdom58rVOI52ev5ZxTYoDQ9+UAOj0KwxbXnBbDMACJgGAgQWAQisXe0GJWA9FoJVTmAafjKncp9GbE11mQTn/KBS/lxFVuCqSUzUOMGEQ001WwpjLqtGNHIdRTQUQIKYSAjGbfm7/Hwgi2xhMXVTeSWVa59u6zxfDigLooauOhurUAAxHMiownGsrc911J6U6ZAbuuYynp0UYEkzJE1BNpOyZVwiyua+Orr8igypIFBQiyZLPtOXMIAbDkZ7EG9wIFCpzYIsCe1k1B9EhDROachBGQAgKgoKihejgciivRulkAsAjle0QEMSa8wRWu+xwYV3wyPrl4/jd0Lz08v9fnNcIWDzAOcPWSrl7b3F3v44O8Ptn11y9ffOkrX/xvfvufvH/x7v/1v/tn3/jHX3mRLn7y729O80XPfDrGF3+zO3vz22OAHQ57HJmGLma8PpzS+kqRFTB3xDkFlE2UJKReYUQwf4x23zVW9ASMCDEGgKn/r2mFRGROdGZWX1rOxqWaTA/6opyT7T22dojAY+bkGkmJgEhAgnrAFrAgAIquHmYx8xtsExURlXHee6LdICcHRIAOA5IwZ8YkCCIZimoXVJYJoYxKM1BfofPVMW03CpP9gs59ACDCgQISCksumxUgYuEOBHMtQe07pC/yjlsv5jCQ0hIiaqdHbR6NBRoUUcdjGScd/LGHmFJxLmomFZUjqiaPY64pQ/pORCzuy6LoTP0AEFGEdPHRGTZS9TBjH92NCl/XYx64ntCF86YF9pR+oK5XYkJr2SLCmXOxnEjfrf/T7xiEAYkodp35L7yeUQkgxBCG/SElfXVg5mEo01cPtJgbGy1gaJ6dyTOqZS0AelpU0UL0XYFWZntw6d+NRF0X+2k/AyEszn7OWiZKIpzrupjGJiLM2kFeIRFrAmSj6Z0Wq6wkOvnasKj7jPVIBkQUZo04BIwRJIIEYcwpdtuf/uzD+xfDeJM6DIElpWFLASSxIAAJEOqRDAACFDTpgGuKrJ665J04AuLb1qtn3PZyKk0tGIAq1XnzBOcH0Bv8zWf7l9Nkk0sDiXM1Gu0llVdFkmPJ+wAQyDlnwJDzyBQQ4ocffHy4Hj77+bfWZ+txHA+HAwAFQB6HGDsRYS6GYeE+RAalX2VRsM0FwHho1pFCBDLrrzP+qmrezNQv+GYpepUGl4Ehi0je9D3n4e03P9uF+LOfvwOEKUPs1sl3Q3Wf86K/br30C+8CFpmaY03UWKaWQkQhCsAYiHDE/fXu/N7ddb++uroSQQqBMzDmEILUTDHwQ8/cH5VlKj3vD6PBJAAgGRFC3wEhxXBydiqQWdJqdZZzDn2/HzJQR12QnDEzc2JOFCBERMoE2EVSLQcFY4xDPgAwkRoS2rifc973fR9CHwIyp3EcBViAAYEFpkbQhKGLDCLDmHM+HHYMSLEPq/Hy8vL+qmPBzdkZaDKWQACMEFBItOUJoijmdTMFEEJBACiqZhpzPVI4lgXUNo6IDDlrbzZc5Xos1lg33AwCAZP+GQOFkIEzMxJqOxMVqFoepepxzQXoiIgw6EnuCIEiUjHqjGYhC3Zdh3ikSKGTEWNYrVYvXl5B5n67QqDMkBm77mS32zHrEhBCTGOKBJAZifTA2zyknDPn3Pe9xgBiF4mE08DMDGGFSCgETEQY7DQgIAqZRUQCdCEEoWpjEI3jQUAQQRCYmSEXJR9LBE/pXURSzkgCAajGvQUkc+0sqIyBmIU1J1l5IWgdHxWzOScBEBYgIQYgioja0UCtAKyWqbdbMIRO+wDZOdiqAENEAEgpXXz09GZ3mfJhtVpnHmKMmQ85Y9d1CFGEAbq+75A74Syc82Fk5i7E1Wq13wsAQEBElECjsKrNSCjDoEqdMmDXxa6LRJTHBCScGSEQESALZM4c+pBSykkgAyKJDHoDh5BJqxZDArzaj10fuo5iv7179/Wf/vSd9brHEhiEEJAZMx/SWM8CCcUG3h8yIiGglBNisqqdMfSM7N18xSEbgJkBQVAERUAYWKemTQpCCJB13TmGGI1kmw3GvrdfbcFNQok7oKaRVv4RE45+2za/vteEzDL0Ln845pluNkWazjUuYTF0pycbjgy2Bp6lu0VfzW10FNG1Umjw02Bv2hKg5hNX7vJ3Zlex7aemg3gk5NqnIddGXoZzZ5zM3ouIFMmwar96hNirCwFJmyppCGlm6pcGqhMCnf3cRCQK3nJ5tqlROd2eXDy5eOenP3/4pUenb5zzIQ/p0G371cn6+nrXwcnV1ZVw9+DuvW99/dfun57/7fef3F2dPvv4xWdfe+Py4Tsvf5rPz7YvX950V4ebmxs8XWsyd+KMWZhhP+4Ph0NKqSs56IzMjElkFkkwzHuCwZk/ZkYqXlX1KXNYNeBG0W+I1kja7KIQjni4TRMyYvDfwJwajVabERSYrusKteDUdpJFQKa19k/5bqj+pU4fakHFeUa7kb1Hr91vyGnkxnIhCjPCjIX9S5tnwRGnR6O5Qo27w6xAXJqRm2cbweKdo/aUeXDAkVChf3vHPGTkidBjj3DGv0Z1Dc7tRTY7oysvGP0b9csGeIOlFQL11XisKzosIjYGCfPMme1LZexLcX56rtVNhrflQjTYW36QRYrv9KxHmswozejKxiciHuHli6tA3ThqSEHTQtGtNoiI1aHBLZen89kbbwn1zKjQDdIQpH3p52jfLIX20QE9ywQ7KLJ4xxCqrQaO0URkHMeLi4sHDx6cnZ09ffqUiAQqUbnwMjj820ubz4bzZhWaG/wgy5Ryh14dpRCuzmC32z148OD+/fvPnj374IMPlqc9/ZLXUYL8BXci2BQSNNWwtTIN7J7jA972FqrFBXoDV4c4WdeWegpcdcdMe4FCoo7+0lhSynYWQrC2JbYonpH1WasctPnmnG0KjUg0kvDXMAwkNiAzcxdjqAcpl6+kbKMskHjKfagOVtPZgJkzTFnrAJDnOV8KjI5mtXxe6QouE0dNbEMVABjJKPBEpXtKzhlg5sEUEYAZpxehzQKAOQsz2Oow55SGEDB2EAjG8TCM+z7ELnD1kkyZ/OpttJgnVupCF7xqKEStLP1Wi8rMX4nYtng0c8Lj1jHyjHiMOM2B4gWviHRdxyBQD6/HekRZ5ix13dnlW2GlqEZgGq3a9kG16QsAXF9fX1xc7HY7hYuI
tttTjIGIchLBUWrbnv1+b3Cu12v1ezZSyH+2/jLgZJHORTUoWwiFTQWLTqU4T4UmJk0lbKBhgBgjdt16ve77PoRQWzLOzoozPhJGweyValtuqueTsasYMoC9tkM1QmCeaP1G7bic83QqsWdsz7qV88sOLa7mwSsZYXF8QiGj6skDp8ewc5+bUVuVLWlebeQocwtNL57OG23tIq4p70ajXk6hsz8bd2AjJhoSqXiYTdZP2Q9ia+NP6TA8IBdVwmwDQ2ZjEoOT47ao9jnnKQyCLo3Wk7hBCE4P8CtSGABm1qOfFCwucaEArwHI3EFg60VEOZcVzCihCHQikpP15uLZ9YfvfpRjfnMV1ne3MOSRD/s8nvabr33t1z57cv8k4ecePfjKr3zxyU/ef/Hup3/w9d9598OPHndP4Fn/w+c/3z1PuZfrF/Ds+YvzdU+hS5JyEqQszFBPLiIiEhJEgNZEt4mjO8TPaLVS1PEEM5OSVFPm/JjLyxACzp6Hua7frL6nOv9v8xaTAjK/uB54WsSTI1FLD6tSrMpHACT0w3rgl8TQgOqQNhvBM12MR8pfjUq9HCiLRbeidAme/WssA9UssTZr/lcR8Vn7sJAYtmp+sg2/2Ky9EMjW93yh7+p1VH6CswGaR44SgNGzVyD0J2NwlS0WOfTAO7HfMsVyXo1Y8KIGZw6UyfL0uqkFgmyyJiWacWarP6+phgWx2WhLg6EgAY87Mvx0ZktDdHOz77qbLBwojgRq7eRsb2QAEsg1OHF0+NmLllj9hddEDPOafE/VzSMyVwv8OMuRly9CqWUwczvQ7j8cDu8//vC3fvvb9+/ff/z4cQgBoG49C8x7qJbfNywPx9jKLwo4LvCYnIugSbIpId29e/e111774IMPnjx5sl6vc2JEFD6+EJ625wAf54ujIwCAT4FTok1S5DAiQluo2Jq+DU0243uWaZ7lmvKgsRRTGc2dbc96g5CIEKdzTbl0dpm1pDIulmIQcrOmBoaXCf4ej9txHDsKJhZEhJyCWxs51GnWVFJEhBK4It3K9D2eNgow3AKQXSP+JcubohxCMINQTU3E6WBqvZgZp5HZFwGKCGLJxkfnpyOMzDwckuaIxi6INqPicb87xI66fgUyCA8x8M3uAmBLqDZqFsGckx2SriBoJFXH7/oQaKot9FsWIlIJfhbJb8aDXnl+ZjURWHK+Zzp2DWleTZyepPPUgrWgInMKRc2YyjKJCISb9YJpQ5kcHxoeRMTduL+5uXn33Xfff/+D3TCU3kUYUkooyBkQGQMBEGIgiiJJRJTaN5uNagLkDnw2lBUeKSrjQjqx6DFmnuxhLqYQJ2eNsUzO2ThDOXSz2axWq5xHzgKoPtxsJUXGznogDVIwFkBXieatdEO+zsJXlxgYzWJRzVE6ciq6/eznYMjSNv009/8dpYz65axIAOdRL5gbWgZ0lXEswoWaCTWDQIkVACxiV+XaZFaVxxGWUNkbjXVNUPrb7DM5D5yh9Sjde2x4E1Rk6rQhjfoiIjJJWxsnuHbtfkY8j3+isyj8Sz3+fVKQn1cjxE3xAp4m4pHmDRU/ggk7LzWMfhqUighDFmHgCKDdumo7VsBt3+3S+Ml7n4YuvvUrb3dxNd4Mcb2+er6ThL/1rW/3Q1oNKV1d9wf5R9/4ndff/Mxf4Hee/uTi0YNH+6+G//Cvv3vn0Z2b3YunFxfbB/dx1TGnDBAYgWJHvdbWE0bEcnQY1a4yONctsOZei1M9seShtdrnEkWewpfn+cAxYXd0TOMLf3NzDzrSskdgvtz2ee4odbLeBSo8z0rJ1p7GNFQcfYtxesPdjePKYyBG8sDDQp40SPDvbb5Z0jbM+ZFdQMxvfv4VVGvKGyQ3Bp4sVDGYSwBxlbp+p/Fr5FfwqCQBAI1IezRSjT83SNbP2fWj8vi0Z62WgErFyEw+1A8zS2O2CjI7RPEoDsF5ATQr3RBylFz9s36ZDPLGY+ovT3INJsX5nv399tH3kbTpRArBHTsUKaYx58TC9awL0R1Bn+VadUaaVQvQcuhRgGFBY0duu8WW84+/gi/0S5xbcR5LR4lQnAEjlfzQSchCxoTjOL7//vt/+Ef/8MGDB8MwrFYrsDM8XeH0HO1TOkkDsNcomu8bOL3Tc4lJEdFYDQuTQM1hl9PT07t3756enn7vez/QQUZOy01tie3mS4+fV9w5/aT3F/dj0aYck852YZzzhUkMIrJaIHBaxJI8LMBrLUmoXuAOMQMnGKFmlxFR3/XMMI6HnLNgESYhYBN3sh2/0umRy5ha+ddX8XmYWURdKYioB8cBltq/VE9HwEAxRuoiVn0sjZnJiqI1OwwQJ/IwAHxXfP+9T7DyAm2cNdssa8FcVCxxARIiQgyBrBJKrFYTiovfpRdXjPXYpZRSSvvD4fT0NIQwjgfEfHLaHQ7702347JuvdZFyHoHThx/mjz7W85Ojmb5FXQI9A6Mzw5UCaIy34tZax1mZrG4KnFLSg4JoOlyg3c11Xz4qUXM9vtLoqmFAI84Zn7IASNlFSYhoFEaeAtoFqxPxzAQFESFO0R2qNV+73f7x44/+7u9+8uTJk+3peQx9Thko73aH0PFqter7deiiUn5Kqe97zeRSR4lSmp3VqVOIcYqTVWQSui7ZAKAJyQIla8+WvsAGylnqkkBE1DFZSkJlSimlMAzDOoTtdrtara6vB2YOUddo4UCBUHs0lhAtEZGVMmpFSQXRaztSQfJBYBGJMXqPMNSi4mik4HnV/7mADA1T9u+RfWiusTV7mMk7MySWhNXQ4nx5pst8bO4bs4ynQxShGlo43wj968R15/NQ6cWuf7F/I7nNbwmDv20pmKC6e+0pcRagh81jw2t7hkzjEHtLoxT6dZzDMHuLLBoPeo6FBfPzvNqQs9ViFdte3M0TJWAGAEaKGAKCEHDmO9s7Pe8vrq+evPfpdn1ydv90tep3L6WX1Q//5gf/csA7iN/6ypc3d+/cXW0Q8eXPPr35+GqLJ49e31A8+8sf/ujF7gUAvHh59XDMEfsxa2lgiDHIKCKYk5TCrZwjilabNDRp+IQa+bRVJiKZe7CM1HXvLGxf3W+esJfcZDWEnvhlbmn4dfH/NmziiQGqR6rh6JZtfTtEnNXP+Oso5Ec56OizhkNwvOwvH+o5Oh1bF8U/y6wp1FFI7EsRqW5RtAVllzhgXMPzzN6jjAPzxG/PpLgQUzV1ZDKH9M/s8rv8g+y+95IkH+tiumThozd41FVn/ywibU95EaR/2rlSdUSb3az7pfHLXBOa1Kacp/5VlqP1issT/4xc517Poz/5y7zgflFefam1bPEKhX9MjAEYMGcZhsSi2jZPqd3IwmjeYsAjDiC4xYoD+IX246uuRhSX8Rr2lBkx2M0N4zjB4vR+gVytSqgIiaFyYuYPP/yQmd98801r0eRJaAmbCRnPv0afcMzRc5vNtmT8xRtnsyGiz372s6enpz//+c81LIBFZzrehOYVL/KXUWAD+RF6q91W1B4oIY7Exew/5um7bXCYrdeR77HGBr0hp/LH+NrLN7UQlItjjIi
DGoSecpqr8khFggBM6QMza9A+LBUnIhIKiNUetHy2MXOWlPI4ah2gpoMKEY5j0uZVWLRHE+OlKltqXNsU9OWKiLOCDDyTWji580yOVcTCpLUCAGKwHjpqEGI1gUIIAJMr2R5ZhxPmNKQxpWG73QIAolAQGHMX89nZ6jNv3D87WYOMKQ99P15dXxXVIvTsusqnlLquCyEUjyqUkICaH4qZOtlpubketXo4HMRpejDfC5hTCD3ONUCWlDmp4dS4dWShORuqffM2NLHDvF6vc84j5wCWoZNz5j5MteX+35RSjFOTVQ1rD8NwdXnz7OnFxcurLBhjZIbEsup6hKAhQSIijCCUk3BOfeyVNvq+twMnmnQSvxebiaUWqW1PtoeGWCwLgw0AtBBSI4TsDnQ0lOZ64O16s9lut5vN5vr62sgmBMw5g5BazggBaJZmZczrSdrg9HgzKWohR1tQP6CtXfTuAS9f/Ms8QftzzzzFeE+eB1qOeeUNeqwWiGkqnhqMUj1x2Dd12eye2Ys8rcMxieApwH/vZ401fghOvpTcdJdj3dDukVdg+4qyKiKqYDVqsXeEWFTH8NwEb6WGnv3ye870Bhu6qyEavWwdDclye19EcMFJBUazO4Jrzeq9kswshARaso01M01IEJgCymm3Sat0c7P/5J3H49X9h2884iGf3Dk/XO0/fvz49LOvb7fr3c3V1dND3qf15uzLj774+v23//R7330xfvTt3/vmn/+rvwCCq5vd/jCeyCYxZzU3RcYhp5TUUYnQITCiBAxJJieZUUien20wJ8WGZMDw4E07rNEYmV92vxls5j7kKa1iUqkN282HBoDl+P4bTw9WxKUZ8LjYL6X6KTw9mIg0kIyoGvCarcLWvSnaMXz6yJt/RfNGOapjLVbBc58Rv6FiuaY2fb/KsJADzVvse5z7jBpU2EupZhksp+93iOZ7z7/LKSxXBKp0akwyD2pjEPpF9HNsPBr6n/IrlxvQOTt0fRvI87zTaeOHbihkQp0jhiW6lkhubvZ05YlhQt0ki9H7QMzjE+vxAAV7ApBYBEPoBBGBQhchl+bixVTCaSe6zcBbEtir6XkO6uyRJeqODljukenPRp40lFNQVP/WGzT6UFgSFI2UhAMii3z66acvX7783Oc+d3p6utvtdGPKOcti1xCnXR2d+FGQwJ3Xat/w3KUCAIsBUSeuKKQ6zpe+9CUAeO+999QnuOrXh8MBjtVsN+PPv58MV0QEqFsYNSII9X8Zirmj30liZtZIBThiaNxxXg7A3F+Dc3VI86emDaAiR5suztM72fMgl/Z4ysj5cDgcDof1aquqrTKvP5zTvYF9tMFjjOtxI400biZF7lL8a6mWIEgNQJnDRS0+IeScx3EcU7J4pggi2hlRJc6ZcNpGg2t4y/PEtOC6r5tASynFrvPTqXir8LeHxBCIOddmpWhE5KsKbfo7GhEx51FDxiQQEPuuf7m/jEQhCEEWHobDDSBvN92dO3cOh0POmVDTOorvMtZLCY+NvSj6l3LNdxURrS9NKQ3DQXuAUS1vme9iLDIp2MyspzDquuR6bOCSYWEuZAqCls0XBDTzttAhQADQxFFmZhIuhdpOJQDIwgSox/vklLXz5TAM1/vdyHmzOen7NQAdxiFQt+o3+/2ekRBHEYwMXdcrp+z3exHpum673Wo/BQA4HA4eCcYm+mX1kpRLQwL2ZyiHUExBFIeH8l+sqch1pyg7PiIeDoe+71erld+s1ZxiZgSCyaeNBhK4vdvkpKdMWwgT7F4XNSVQCQCrmZNSmh2ycXQfhblgAiER4EIQSNVhLIxlGwH/+Oz0p8ZFBM5h30zP1kPc5XdTk01Esf40073QJSnRIkDhfwXXw8Zgw7lGbk+F2noUnK5j8MDcWWJ6lczPdYRJOJaKXm/jNYPYohjSzAiEapf6mkMTtTZBexCc3rD06NQ5zFb86GVEJk53t/HBBYXAiQxFFwnClM6u7y3GfBZed6s763Pa0+FieH541nF/763X5cXw5a994R/+/m+9+fDO+++9M768fPu1h48evBFxLXH12tnJ+xdPf/bkgy9//fPvPX/34+8/ubneH8a0ZuAMQMgMOUtgYAYQikRY/JiAkmmRPm4CvVlWw95RUeib5frb/D5kBAbOoYDO8NaHvGQx7jCEL4hNwIkwT11+uZcwo78AACYaqCz8iw0wmCi51dE9UeHCvm2cFEuiMhw2QsBCVZ4UXzE1QvLNfuy96qc3yWDAeM9xMzVwCeTLGRlabJWNN8EZGD6bbvnSBm8AmoU3yQGDpNHJDACzvmwKOo43yBsLs0GgtwaPCgE/cb8KNnF05qg6qsWJYnR57A09AwC6JjrgWKZ5owcGjlGjJyqpLkIiwqkGbDZOjBGAiYhgFpyPcXVzczMkDt2q61aqSAWK5vgtN8/dKMurQeYrpOsrpgkAvqmbR8uSFGFuaTSr1kgkRwkCAFgn07CVW1bKwhcXF5988smjR4/Ozs5ubm6YWVUNsRPqsHn1VKPbgG0irkGaL3nwjGls2CBqkirlzvLl6enpW2+99f777z99+tQkMxGlW+iqAW82ctUZlmL2yHIoKrSLP7DmYeoh2lAFuEg9x5LFNHM/OMxbjC6B9KibRF/ND/daiukGFYfMtU/Jfr9frw59v9YzRZexZZOWziacqYsq3xrSIiJxgtfPy7iSiEhNjmE0cW2O+K7rGGEcx2EYVMvWyA9M2+uUuoWT83PSc/R1jSPMC0lL6MCZTj8ZhAXDc4EpgjkNUIhzOhusUgg0mw4AaLKiro/Fb2OMZyfngCOPfH11RbAex6ELcLI+2W6nA7H9vqbndoQQNAeZLFsbJnmrHgBFkba+VJtZQ6wwT5OprxDH4ygimuUIdcdsNCVbUIsE2lAyc+xOJhagEOIwDNaCa8iJqpWlcTMA0DPZm31N21lryWu1YRiA1uv1fr8fExPFvu9zliQQcs6JM3HNGhYAGscBADabVd+vtbUPAA3D4DJ6dHMsLX+6SHp4vdusi/mq4IUQAkWtjVwyJsw3I1hsZOM4dl232Zx0XZfSQETWmdlzlhfvXhv0VGHr7umQKyVYfiwiFgaf61H6IfpFbWbCx0IWdpE7jqOx9ExCeVFiDzbQq1pGtSsUzhUUP5oXKDaaAYw1g9wgKR6HesFCmPqRvYLuoaW5AufpwE/WBve6jq6EjuaLRKcxWfSQSj9ZE2Q4v3QWypNcXfJQtBZ3nEBdrOppI2ApaoT+DwQQ0jiSKygSEQFhmTROQ7VfL5mr7zBXQBFRBZNZ2lxbj9teQiXyiVSa50oltoQiESJSyIFhvB5eDs/lSUj8a9/62u9+67e6EN95953v/+1f/Orbn+/vnr087P7iz/708vrwe//FP/72t77xg49+8s7Tx1/52uc+/uETlRdGbswMuWR9FDyzQOYECXjottTUherCTR5Kx2+3/SsiboOf2ngsmdmGyrVnrOcgIvL+FNsJZJECaiQnMuMF/4qj11RbUlUlfalGfhw3VQce52bw+n1b1uVfigvB5zcJc3EBACIY0fr7Tf
LYdqUUHmJ7Jy6sEXtQf2eRlDOJRJejDrm0syyiJ5ZciWzdX+b0b4xvb5eiYbQquI5j3Vm96QtzIxudUG14bVpinjV0NVJpHHngSNFjHp07TFwzW6O6OK+ZNKpTekYnqPWeGHqDU6WHF4DinFB+Oiaagjsb139wAM82ES8Y5fY0OX+n/mtbuDFRgafaKp5JpOTqFH+mZ1iK8ZDSMIwnJ30I8Wa3x8whBFzUEpsR9YrLL25FzvGHFNTlfMvhCm6QBoyjVyM3/IfF5+rvKHDMEAtQO2YTMvPNfvf06dMHDx7UIhy2880reM1OPb3qdgDmr5vznRGkpR830oCIatktWyq8CGs31O985zuHw2G1WuWch8PYdV06EutqQfLfeHiOitl2LghAiKwHpCDUVmZg+6kACyCX4lx9uhUCLiXbfwkA7Hq6EBHUFEqeRzlMibSmMsMweGiJSE2Fvl/XoMHMMYRO54HJoakG2jQdW2+YS2MPPLsrxKKEQGY1VNQkUPBijF2xFoWZx3EEDIioyNBvRBCxlLRpsEXbNpqI8xLYNJNcT36PMdoRREU6TXMBYRaualGVrzYTqCfCi4gZCZrJqbeYkDQYIsa+j3rEgjnpiGKM/WF3eHZ5vV6t+riK2Meu67vQ99cxxmEYVLMyu2W1Wuly5Kw7QkU1BtvImKfjH4oRVW3CXNsaKR/VuF8292VRmCGbiLYqUNtDDas4L4kyYjBc+hiGEtF+v9cUUGZOqRQ0ahK14o2qL9UuXe6bm5v9fm/rKECZYUx8GBIzn5+fr1aby5vr9XodKGpeaBdXiMQMzCO5DLvD4eBHJiJNItWpxRj7vo9Ujrgw+pGq3RkeiMha8lLUdPrS3FVEhKd6Kxa1qN3myLhe9ycnJ5vNRiQr3ZbDYJxvxUjXS1ePfJMY7Jp06gjDMICl/TNTCF2MhFiM6iptgpbQg+szu1RhbVDTX7PzuNiRd5445hsANG3rsSY1+U0anAmB8yaZinc7FMvjwm6uxt6U2Kmn0+jRwWJHaTl1aiZDKx7NE2BhcQBQe1VfpPO1THGY70Y85Y6DFwdSK5hNMppMRBYIk73ql7bZkEytUXFj2NDPSs2KVeVbJ8HB7gRnuRmJZ1ddjYhEYRxHdYSrrISaeu63E6glN7boRgU6Ow3HOzvHzkXBlJIgYkQAZOYAmqOCOfP1y6tuvYoSA8eYZHwxPj88/hGn3eWzX/3mFz588k7Ku2/+xrfx/PTf/vG/+eEPf/APfvv3ujUdDtf5cH398tOH904//6uvvfvR0zSMwzBQjBnGNDIMIw643w0A2MUVKNkyAFFx+detrgroCSH+JyIahsOS4NmFwT3xMzNRaerVrL6UA20Nn8oFk6OX3BEU+jpbdBvKxgdnmesNuZ6WKc4M0OKZQk5SjuNDhMM4OL5APxecewSm7+cqS/NhSb0epUZ4Oi0TfCZttEBC7zcLzbZV3ch1o+WazlRb5Cm/cwauDpRBBYg3DKRmOYbF6aChHkBs0Hrpp6jwhD25QtwldvZgdaVPjwdUtY+Zk65LoNh1wzAQUW1XDSIC4nqLVb3fOM6XBHj8+43ZgBGXJZLd4YchBI3V25f1lGRu5Vv9r03c5LZfbs8OVc5PkEDNbiAi57KZGXIw30T8XNjlqjUEaSRqH9ilvBrfjeNIsYNjV0qpps/N5OT+5hBDf9iPmy0ChnI+NU0l+x4ePJbQ6G9ooIWqYNp0bBY5Z0N6c38zvoksqR5JG4SIgKedznjQELuwxmecLlD+0e9VdKSUAhEjxBhfvnz5t3/7t3/0R3/0q7/6qz/5yU9Wq7WeYDxy3b6b9XL2XkNCflIezmahbTRfIQZzmkHdrMexW60poGQmoq985Sur1erx48ciMgzDOI5Vh2u1qyVt+C+9PuPXZbkQBljOjIgkMIqKERpTGlM6OTkRwZvDPoaIiEkTWhY6QJVjyXOH8e8wZvL+67kVlOvx6/v93nJBlY808pZzCcHpU8MwdN0QQrfZbA77Qa0yZqkiVA/9m9poqxUkzqG52+/tYAPjcfs159z3/c3NlYK3Xq8RBWoR4/765nA4BCz6XgghdDHGKISihg0ip9x1XaQggpwyJwEQDUt2Xb/uV6u+jyFQJS1y4a/gDqBer9em1ZhU9zkjtoLMnHORn+x890SkUb7KIMGkDSKuViu1r/REAUVa13VdCKvVar/fX11dXV1drlb9ZrMehtTHHqHnvH/8wbP9dbp7doYk42G4uMYQwqrfVNMXETGECFD2QWYuTltIAEBVAcuZU5pK/vb7vQqHNDJXfzdPOWWQEqdUNDd167Akrla6yUMRJsLadRPMoz2Og1q+mrdrKoeSkMNMSTnWzMJxHHMuMasi9LTJImHmPNS81mrY7A+HQ+ZMMUguwN/c3Hz66ac3NzeI2PfrELr9OCAiSKHArluBa8KJAuv12vqL6qvnAcDpfDsR6fuVzTHXLkcAKAKeEbCqZIdhH0LAQClnAN0EQ86567pxHLMwEgpD4owJQoybzWaz2Tx69Ojjjx+nNIxDypxFJsq0HVZXnuuJbnnegcLiajYLJX7TeTQ2qHNRSzi75kBoDmLbcZsPoZ7VPgdovpPVR8BZ814uw3xjtvE9K+o0LHpulGdLwjy1aW4QpDxpqrDNqj47xdzE7bVegpv8tUGMeux1XgtBZ2Eu9wNc1EQVQswz7WT6jJBcUE4WupEtgQe1cuZsD1Pt0+O5gjHhHJ2i4HdQe13ZVKoFbggx2PyfHre2IlLlkdrkugZGPCGEDExGJDQxYZ0O8shZGDMRIwq8fLzf9hc/l8N1vjp5sN3n9Fc/+vEXfuWrj778xbe++OXPffbNkYef/fTddU9vvf76zbj/X3z9qzfj37zzzjtfe+2ku9PnzBnSNqxIiFOSlJkyScDqg5e6Us1q8txhYZ89gTV48wsnczXLqzhQDXLjQ3+nSVJ7UEnIarQaHsnVzvFv90zq4RcXoAPHYjLXt/zVgHf0+1/msok3D6ofh+bmIh6LQB69bNWWC6F/epZZAm/TnyTpPOHeVJnb4DdfFTpC8rDRPBeOraundkFWJztLF2pKZwmlaI9kyjDzNZi94afj3ztjRie9c60C8oKIiPKYvHhsRJBJPDB3Bk9GIDnHZMMmMEmM43RiCDd8lqdYjn7v1675ZimE0e0d/h7jLFErzFFCSkkkIyKGQgD6744PzDzm0spCk5Eig3ZPhdsPHvwlL5xPxuAhIryFw0zy3zamX010X3pxjc568YuuBmQ53AKUQEtEhZmFGVGKtg2CiKqNPXr0SH3bYd6s+zbg/Z9GTh5CDzNUvmt+WjIjlON5y2dmFgAUWK83n/vc51JKjx8/3u126+2m6zoQHIYBqIO52Jxho17LXaCZxRLmejdA3RlBgFErpqYtjxbY8kPB/4+3P22SJTkSA0FVNXP3iMjMd9SrKlQVUCh0N4A+eU7LkNPkCHdk/8F+oOz+wv02sitcWdkRjsgeM5wmuWyezWY3Gmig7rvekUeEu5up7gc1U1c3j8wqdFPWpfAQGeFubqamqqa3rmMBmjeav
ue/129KwFvXxdrWr9C7ywb0l9qUq7SzJLQbx/Dw4cUAuozg325TKqKUW4tU5xUXAzqllNI8m5RlRSCpJjiphXouUY6dCKZUYBLWdVNtmQaWbWi9LYdqoEqjBC7zl3ajN4eImEJIVOs0xkhEFxcXGoKuCqHOLcYYSfq+Y87quwohABBneHl7C0ABD+Pp7uMPv/osPO9jCCHA/lEIWoMki8g8p21YClHUphR6g1TVxUdXKhxM8DYpNAQkAvWsohODj6dbjRE1WVeXrHM2ad9LkrkmGVoUEiJqiR8EQBHmBJWTUACh4GFuCONRyHg4Vzmn2Yib492YZiEExJnzaZ4AQBVkIuq6QVFImFEgUMdcSsiYtUJdAp6ObLNCCH23AwDOnPLsK/ceDoeui16TVIkXXIpHtfKz/irVbE3UE1GM1HWdmn1Vd53nzJxZGEDmOVNt91KvgIjIylFAJ0sCkFlEAiBBieuwX5FXtV0MpFJbFYRa7MP2cSmJ1hzt/lhtKBzW15ZmPLLmda6UZxxYrch2MzMjCaBOEXNmDXGXnIc46OEkIJyzgBAFCgA19qfZUX2VBwE8eKETtb0YYRK5n60N6BdlN8Oaceg56h1xjXpwdnpmwmHngjD25x/xrNZvQYX5ctx6yPuZeyXWVqqv9nq73WPr3e6+vWuBwzrWWTWAZQTELDlzyYZn5nEcRRAZA3TCctEBn6YXX82v8vHd8F687H75wed/8f4Hf/C7P719/vL9zz8kgavL3T/5h//w/c8+/lf/33/96OLw0x//1kdffZ7mESemHucpHVN+FB6lufBElICIgQJI5k0Olc6/OR5s7Wa58CD1OIBrSQI2pOQ3yGAlToL3BGhfGg5gzU2txk5uXt3MYbuucgOuMBD+K124Ftq2X/pLRAAWDcerwf5ZWBNU84095cEL9Syhe1zuyu63Y3JORmjZVR9l5/nxELalmTnGdm071S0C2E+hFjloqbiqDA3Jn12UPhVcnrMHtX3pX20WR3+z8jFDdUQUM3jJasleJzy7O/ehljcB+NnyepnN/Lek6t/bQJWd99LTjg3pozXF4pp48TbbTya/CmmftorPsoLbt17bfbnvavjI2SXDuRvab6T9pmFoDSJJVSMFgRB9XVAR0U4dAqVSixB8+OGHz58/f++993a73e3dkYhSSlmqwr8OiNX4TYODR2yP+Q3L8vP3j2yB6emunJUCIHB5efnee+/d3t5+8sknUAUgzksk25ZYzmI1bFiZgdT4tkdREaEQ1EjqVgRn1ysivqeRfekJBNdqvIhoCOUWSrhuQnjWLmb3G3pzaV6/FKDesizbLwAAYIBV1FUIQSsuKcWVWC1c+LkpJ3UoTImnMeWcEUKgbs4zZ97t+q7fDf0gIjmlaUyZBTEgILt4JRNUTAHW+E+pjoTMK6bnPyhYYu1E7/m20Y5+cBrjKikJhGJXyo3ESDaBEMLFxYV6CBsnUqAUY4gTdV1QdR0AmQWp2/UD7rGLh7vbG04ZMIbYn+YZQMMdS3mYnARBZsiIiKSz5bqDeZ6Lyq1OV1MIC/e20P2IXb+qs62TQcSU0pxG1SctfiqEoG8xjLIN5RqkY95IQxhmxlgTwQip3pzyJLN0XdeF5ewm0lbvxreXyg4G8MYym3O2IjFElNKS46Z1iUzbt9nGGDUbwkKl1FteaXNFO6rvFUrj8kFJiQJ1Xd/3nVkfSkgt6rmpOjMionAR2Pq+74cYY1RNsuDRqaRuMcM0TYjCki0fFRdfQqFfNdzYikxhCS6cuzkLGghUQ8xyvxdaoqcTr6jo94vhvNK8NxgYdXkq8oxjy0f8OqmGh4mzdYVNY2gbZ3OiK86Jv9O4FREBrBi0P0vsFbQO8GhW4V9kS2iWbMzd5LCG5xKRCPgNWGiGBQI1r7aJ2av99z781b9FSdce8XvU7JdtqMGkUVD9Zfc3ki44JGkmCW6zoEoYyyaqGUPq0giZBUVybX2WEkMVNJl5t+9ub0bOgTn91Z9/9MZ73yMc/ud//r/+h3/z799549l7b7/5mz/84TAM0zT97o9+upPu//ov/tenjy9GeC2PUy/hsN+NkGKkLsWcUiCSLKjFqpFMnvMIbGdA86V+6Lq48IgNyuHmuLXv7U4Dl41sbBocp0OnIEEtjozO6ODf7t9r33hygzXm+73WMfM9Hjmb8BaXGoSpb7lX/WuG3c7qLE76wXETqm3f40Z2adbbjGYjsPO5iWtP2hCRfdOMaYbGs0CGDfD9hP1PFhl79vH7Jn/fbWdn22ycD5cqIvKSjx7Mk1AGwYLhGg/WwB/WeOLf3lS7tjs9G1/jwHllz+73FGTjbDmeJwoPbRdRWfoQGr7lDMycZInNyzlnEEbIIEk4u6Lzbp++k04oGx2jLISWHqB+yVSrrzTr9R/OvqghhO0Ixq8aJK+/1g2C1Vts1Sjsn3r58uXXX3/97rvvHg6HFy9f7Xa7B+a2Xf7DF25ECzkHK/86IlIbUwgBWBCRc766urq8vPzlL3/58uVLLeg3zzNhCCGkvDruG5557rPYmQbOA6Cgs0liUQIrwAlVkUbEDELrYMWihMvyroZRy7qti7H9MiHH7e2z1JRdb8KWKi7zEkxUsFeVAZWPY1RlSWphghVYVm93Gr7eoAqhuFoGzBxjK61SDQpTL9Y4T8AlzF4nHKt+MnNOKU1pLswZAtSOaz4RuOuGvt9pAiRzUs025ywYYU0UCyav48n9Rpc/ASuUVtKsAZYw9kPxZMZafUR/2u12AKsCFoVTcZLMLKWmiyBlkcRAGDMDimDoLi4fU5Vd83QTAuvLhYGzOgkgJcZa15RLK5M556xVZLgWazUNzXxBiGiuY0QMVHKONKQQAFKe1J3lzSUhlD4Wdj4aG1HVEXxYpg/EWDCZ2QVeVnQlRNStDxFDCEKUi4abDEsDqPLfQSBICdWPLcy8KI0K5rJBgXLSUkllkHp8pP3+YrfbEZHpgeqO9t4/dMYUn/EB0Ft7jz52fd/3fa/wgHpeJJ4VYABA2pGhlvnZ7/e7fR9jjDH0fS+SU0pCEzNbCGsIyMIihjlLjQl/3plkaKzYR2gWrrLmnJ6b8TrksCK5oLadOMsK7cUNFz7L69El/nmxmGuwsmciRiH2vRd/Pb8z9LI12CNcA2Rl49a3V7t4mWWezWIda1tA6aGpdIJFV3G5NxuB1RiujS8mX4blNntEqYUcQ9+CmlxIT2N98aPhRmByS2vVaVmH7G6Dde0VtndK6h43/AFgk2lQ6OxVf1ohqNRMF5+QREIIkDVhPAfk4fnnd8LPL4erj64/m964+P2f/vinv/vTD37xy7/4z392dz3+k3/yP/zkhz99+2f/5aMvvnh8dfj69OoC9nmex9Nd318EwGmaEIJWT+cMIYJIKbdj8Pe70yzn7K94Tnvxl6yzgbe77E8awzRPL/prcq1gg0siPTsrrkWlGrD7mWhDP6zREVot7b/i1VClx1W7wdOj1OQfrCFP4OjIw81vlv/mLB9ovrenTA/0aM/MXQye7VjWjecJfsBm+8wr5XOc/G7SOkivIS67HOBa
eRerYrNdLLriWDZbe7CBPyh/Xmu/dSZtRwrDK7VJN0ze41WzpzGugm8962gGbz57+vIwbG5odrbhPLR2jMgqx5tLkpz3TzInWQppcJFLwORLuw2EzlXhpQeCSOU+Pn/uTr8uDwqB88G9frtXNLVUVV3xHNspQ4z6wUhJ1cKldmJ5SoFWObiIfP311z/+8Y+fPXv20cef1APFjAvN4hBhWY7frIY/+DU+cDVsAWqimojEgGUJzI8fP97tdu+///7t7Wm/36meg6DCyXl5pkEkN70WyGeXYOBlBBFAUZ1w2SbfWWE9QnvoKNibSD9Hbuchg1WobahYUw9MUSkSOYACapomotj3u77vQVD9ezmLZU2jM/XamCvG6OJcTENwOrP43Cep+Zw5ZxUBAgUtl2LeqmmedBpFWXXSF68ztO0pcVGpEFaHCFSpzG+xcSdaN2oHMG5m6118R13XBer2h4FIVcFgkQWI2Pe9BvDZfpXszcQiwjWBWsXUnLMQzXOCzCI5UhDCPM/TNDGKHimc1bkq1VuoqwAAzllSShpNqg0GuVaOtcPFb59FEfsllyjTNPrkgqr+AdYkF6nHtF255p/bQWCkwVz4B6IgRiJiBIFcpfeUcznlgwBg9Nua8yzOrIA1u9szOmbOwnNOWThoCUkJCCA5A5QcwpSmGLVSC+ectR2LOM+KsXp7neJSIZDc6E6dKq6XhwurQWB0QEQR1S2kAZkxhBCo2AiGYeh6teljjDHnmZmFSER2w0EFNqIgzIhRZDEQGx2JSAxtc9EzNFgnBADZ5Ys2POqsChPtPhOD/KANZxSRGHtZXcvo4vS9yqaZ6IxMpqTuyQ/dZfd7/aeZp1TXolo4/BHr54wbBYbWBjbPFGAdPAlOK0MnpjfjoNPOwXFwD3eU4qH2M0REwmI1hM3lYSJOavQKmP/Xkt2bq6b/LovijW3Yv9TKgqMz85BLarc1bkcTEetaYQoqNC9iQClMjpmzpnkQYcgiAIQUg4igCj4UZDdc9Y+nmSnsRaZXn959vXvxw998IyQaqMvT/LO/+PMX37z8yW/89nvv/Mbzr17N0wkkP3vy+OtPXt68fHkRdiQwnU69DKfbO7Ao3JwDsTBjXAX62kJsvVRTTPXXeZ5sRxrMv28fwckN23tsI7YkIJu8RP+i+udqfxvW4OmumYk4yWMrmjTT264Iz8mgiAiwFnY3xgJx8rdUgcaoj2sorF/C5hVrkXcjH9vMPZF6LiEu/83OAPvJzgZy8RGeq3pQNKAzUvUK7RaADRDYFd3x8pZISSX04+hebxXChul5mODas7pCOTjD+bWMhB8Qa3shK1dtiKqSh+WcG1T93m03FJFgvV9bXIW1xO+Jwr5ZeOm69h2sacpuDkt59GJ/ghrV3yCAA6AAFHt8gaSQU+N+vajR7Uq3KHIfWM5+2eD8A09BuwVlB73aHAAzAgCQgCAE8CtV4x2gcxkx80cfffRHf/RHb7/99n/60//ctMPaXp5a/eVJ9eyKzo4Da35iUwIACIGZ1c6vKY4ffvhhzpBzxkAxRk3e375uS27bVzfzlxof1DyFqDnqldw0axhEXC03xKXTIwH6INuGRUBlXN6eDufOF7SMtRoYbwRC1T3YkEZx2gjNNZ2vBgouRUG3u+bpVC+vAVrUQymNLiqgLw0Pcs76OmZGF5/Zdd1ut9P+6WYJtbdAjWnnGu9n4rtxJLPipWr4tmf9WeABC472653LSmt4TtTik8Mw9H0fQ7/b9xXUXu4tur2hNC3V3cyYi7p8Zp7nsgoIICxJZsggkiEISSSMAgKQqrGA6iBYLTMEov9JThYrO9tphdXGqrDVydO6TBcA2HyoOtCIyDfZM4WN1xWDxOWoNx6dEJe4uYLwS6dcrmUpJfMCl4YD2ypCbG1JzCySmZOIlcrPIEEAYliSa5iThlwiYoy9lEp5wXp46wcRQAwhdESRKGqkrjfPI2LXdX039ENUV7BhEVEp30hRj1o1WwQiiqF3pgpFWqWIlFJKp4k5KZ2yKxCa0iJXe7yiasHxp7bFV9o8F74hq4Z2ZgXw+ogn5LaojB+rofayqZtqSw1LBcdVbQxDCL/l/suGrcC5a/vGnDNI9vDytO3n4x9sWICnmQbhFANsWOPgth/i2m8Y32xeKiK47m0lXlBer7HhVlsIeLW+2Sx/2dJCrS4oVfTRR8zLZDFUNpT96XUenztn84SqEPoprcm1YCGaDSYtVS4AICsaxAAgnDMKxj6IYJ6TCIfQSQcZA4ZhOkKAHR7nT37+6dUu8gk//eCTv/27v/WP/uF/l2d5evHG7avxr3724b7vLg97oHB1cXh+fPmELp48e3b7/Hq+nU6nE6IWzQ8iMwAAnXfH2UrxnDTgd8dvpT3lScljpr8HNpfHnOBKOtk3hmN+s8CFvTWo4qnM3W9Y9C1C53/dq8F/+x5xBTH7yejrLCE3SO4p199s0o+NaR/IBV1UY3mBmb3C7m/EEb+iLd8Irsj4dvLN5Sdgc0N3mUK4RdSzl+cPUs1VzQHgkRNqzkZjdxRnqgshQKVoqDvl8aphrbAmk7Mzb1bhfm15kd9iv+kNGLcjewq19doMxbnmEDEvxeUWidDbifwgefG8VWhbMiE+VGbmLEu5786zDSkak4RdZ4eVNa/e3u9Jr4h9DGjOU70Ny3vrK8r/qYQ1jqePP/6Ymd977739fm+BZ3Zz89oq1LYWnPs4mJdmmvnbzO1+W6kuJzET0W63/9GPfnRzc/P+++8PAxKVbkuaUh5oqeTsp/EAlZ2dDK6lNH+JgHj9UIBxZaT2DITvOSxoHVR1FuH9QuoqQ/NT4XGVu1YMLzYsrJFQKSWrziCLTihb9Cu/SpEJuS4/13rCIiJ5CceQWrXOFBKu3rkYYxdLFRyN4rO4R6gmA5Qi+JkyozdrgwHVZKx3BSJOtfoiOttHqIXuvZkMHE4WtrzkZ5IqAF03qDaojqZAnfp8dJ89CuWcq+pUGiqoEkUsIpJT8enROIpI4jzOXArIc845I0oAxCDEkYiYAVE1LgRY5EMRUP03pSWEoexILtEEuq1a20Ydm8MwWFc6gKURhboHyQJiiUIgSxpU5hlqdr1BuOHPPvG+4k9mXrEgKTaCVPcCsMZAVVvdgvBlbvOsSY+GLbaJABDCYtYHZIQgoFVWir4EkHX1IjKOo5ZgnKYJEbWrR4PV+mtKiVzRR4WJ7unxeDT6amyRiAg11zTnnNMYQhjHcZqmlCcRgeI1ESKiSRDFhkJE7f/ZGGQNIHlaolSMvho24lG6dnFa6dKsHVzOmcOW0oXN6xvGYWxO3IWurJMmdy50i6gb2fcl54RcjoqpwugK+hWyd9GVfiVeGTPhWFygs9aIA+dn8GuxNcpGLosr8QABAABJREFUvPAvMiibiEaucbmxFV4CVgVc0wKpIjitzfy5FeIr2knJIfRf+mPA/7umsUVe0c+2wQ1CRCL2kpzuWlU5ipnHAA5gliExH2wI1qPC44af1SIxVwTUWLucs7gV5ZxRADJLtZIyMGAgC/UhdXUip3KcxF189fL49PL1+Tg
F6N58+vbXzz/68sMvnjx660///X/43utXP/npb/HMn332BU7Dj3/zxz/4o+/9L//qj//jz//yjWdPb7+8PR6Psefj7d0+UUqJcECsUjYiIiWnxuPa0b1skzvm1UCwRKTUX60tpCdLQzlxItcWA/2fRlNYvUB6j3LwdQHoe+UST6fNPbLIK7U3zjnTb4ONW9T135+9bJnNnc27tNPGfUDzDG6rZTUA9NRhVNxMoFGzm59SSoHQkznUs+e+3N1zATbtJGFNmLZHxmeMkL2hzXMhG4pd8Or2QmftMjbl8cdfelsXljYbuJQdg2maFMOrcFCMETEsNkR7RNYhvjZtEbEcb3AHjYikVIyaDUVoxsV92AJrjGqAvL3T07VOyR2EgrLaDj3FGRcMMaGzSq5JQI8DbcS80Qbvv7aEUJYMC/v1UJKqEJ75/tzlkXlLSn4Q22vPcwwIqs+KU/3saKufyeBGiHPOr169QsR33nnn8vLy1atXKkn6nPYzq1479rc3wJq6t6A7u1i9jHAULS8uLt5+++1PP/30k08+6fseMXCNkWsiKitjNIH+/OQfWBc45oPuKhP2R7/P/pB2kAZK4OgXXazXlhU4ZF6BiF2Mg12OFZRQui52qGVF5lkTXWz3eZ1M6OYrxm1ERIun258lTYsXHIPqA/TzEZEQQ9/3fVUIcy2LMo5jztnkii4sIY7GnUwhDCGkNHl1ZU5i/pmCtGvrdrOiiEukhmVwCeOu67uuG4b9brfb7XY1By8ILcv3O86stfN8RVYAgDzNjFpJlVNKISUAENCehCDCWZLWrWLMIkLccfUpqdYnIlmLrKqunTmleZqmaRpTbVKtohcAhFhYtOp1qhCqgqe+2b7fnU6nUsY1lxIyfd9XIJR2fKZp62LtMznHPmxoVt9i0Y+60RYkrA0n+r5XLJuVcBYQLm20ipJJnSlgVO0dLvcPtCUjIhIuQVtSo1qIKISdiW0AMM+zDWKHvl5LlmOaQ1iCgXPO0zSxJAJUkEL1VxORCGNQc7OeqpBSykmUpu7u7sbpaHQQI/V9/2R3RbSUuPPUZKTnubT/bJjc8AovhEQuKK1JWVT/JVn0WHDHR0QIbGkCOqJoQiuEWhZWO5qDQJpZMJnGpXsmJb5OsBAtqKZj8aJeb/FsS5wUZfva7I3ebxI5VKFn2dcKC+Mv6I5/CzE3Lp+TaFwvYcEzkYK8CGj2QhAQLhUyEYgARYSTIAIRhaIrljpmeWatDk9Q+/5lBms7DqXOkM3fliaIyPryhUNVTloYQQiBSO1tDFUA9WAsjMw1wibv2WdhliXPm4IAZAHRLwGxim6o6DWX/fUHT9FDahYEKz5gkZWZOXEmohCLop5dgqLqoogYiBAgScI+UpU1+9grl0TEIWiQehbm2AFiIGI48uOhz9OrvgeA05RuDpfd7c34i//yBY/f/3/9P/7jX/ynD//uH/7e93/w+pvfOxyPX77x8of/xz/6p/nl//nffvgfHh0CwzTe0D48zt9IOHZhlAMG4Tns9y+PN/3hCo8nEQlEpnEpT7TOdaURHACIAMhYHapEwcLe0OX4hbBobugCdQyNpaphRszgWiTNOQlCFhaRLCwISISIiTMzCwjqiai7gJDTUr0W1uIjOkXd/MCEyDmjclXErAtk6OOQUsqcgaCEeUPayLliQrCtBSEsXIm1awBibdAki0WjbSyx/lM/EzOZJV3HsTtFQGSFlsbZc21L49duq9bvbUNNTfJ2YkdKyCzVj6EGJtHk9QrRlSQ6aRm9EDpNeqnG7Mo5a9nGtaBmTN/YvR1ODZ5oDgMBdiFmyCKijSLA2adyzmw1JKCEJACi1nIAADWvhBCUpSuFFk8YLSeNfsg5j2PSHey6yuFnAQiEAXFpbeoABYjADESlc6lV2U5pNuT3a98ibZlqXsnK/oBsMNw22l8eWZtvdExS6yyAJRCW3UFkxswgFIBo1sLuFELOjJSzaBARiOyG4e7uDn2/AOcVNFVKoEZlIwAi58IGwRsIEGKIeE6tbRdTfw1O1vEHv7Ae29XGDKX4iXEhgyQ4o61XLfRDQqcImcG93kOIWoZEBBAJkC4urz748KMPP/r4vR/9xv6wG6cTMx/Hk9kQAUAERQtXQhESABryJ2YlHL9fANCGOtuH5mR3c5Y+Dpwmnnm/253uTu/93h+8/ebb//yf/3NJEkMc09x1/TRO/bCfpgk77yNq7Rc2vOktRFF31G0LQ2k/j1oNwh5jBuFJ03Qll7FFSMuNBMA4dAEhjaNo0DWOIXRe7NGhSv0P10ZPagAbcM28cAKTAEQK+2GHAoSx7/qcM2FEIM4MQoQxFywq1SP1CCOMXUcAPM9jjH2I4erq6vnz5zqfnAuP0m6T+j5NuSKJajLSvq8CKJmFGYk45XEcKRQOwwwAEbGbJzym+e502/d9jFEIOAjuIyJNaU6n8Xg63s0jL55KIESRWeccAnZdl1LKeQ5h9/jpo5wk5ZyyTHOeEyNizikzCgNh6PsOlc6B6n+iGpgKdQAgLBPnkhsGMOeMiKpqHi4OwzDs9/vSaQCZMRFxhH3mnFJiAUSIEWOMkTAJpyR5nlFEJHGamBMyJ4k55XniNOYck8SkaJcxMSJYFaKKA/2AFj0okjVwVSBVPwqnNB2PR1XqRIQCq+xBgWrMZ6j5aWTtJczJkfKEJMOu2+17xEuoNj5XOwOmacaifsRxHFV5BEARnqZJVvYmO/FK8RuUDCIxxogQQLo+dLh7OY0MiTRWmmfBoDJ4Eg6ddFFF30xELCwg3a7LGeY8J05EBBlijAw8zqNwkszzOGm7URTmDGEILAlpyJI5cbFPET693F3s9qfTKQACQB8iEUUkRhKWvu+6ECXlWcZQWxxnhCiAmaECkMapiNm1FWENJ6YQQqwpWkAIAFxSjvn6+lobFym3R4Gu62Kchic9EfVDt9tfhNgLEqBKjNHS0Y2fkABUWUv/tW1qbMcLd6WQdS8AQwhAAWsDakOzQHp8Z2aO/vmt5rZ9Aa7bD7DzfW0vcdqgHWP++G9+glrtCtcKYTMlA4HUXBS7V2p4A619qfYiLwJ6S7afQyNz2L/iVPPmoHL/rjLK/MheZrWhuDobTXNo3u7djwCQ8+wB0tzsD0h/3vs7G9j6ebI/D9cAzzkTLrFw5iKjjROmGZZdWSTYhLA2E7YlO4mQbXFQRBkKEG5v7z744IPXXr8YT9PTJ8/eeON7//7f/5svvvjs7e75j//uT/7O3/5bH9x+KEy3+U4yzOP8zVev0jT3dIgUGUADPAQyEIqAEDKCAJTWiISMwAiiv4JBqOCPz1KgGr3g0czg3wgudo8PAd1Cb7tlRmjb/bG98+N4pcLjFTVy5AaR7pvMvXNzlRIeYB3NU4Yz+Z6+Jts7/fK3FOoNLvY9rvuC4kaZ9CxCqqLu7/dLM5r1b4+1/6cHqfFrP5Nyj7ShYnqppGXsqFmXH81gYnMDh2CNKadZ11Yx8LvjF8ubOjr6Cssh9C9Fp096xqj8gRZvIbvBW/5cP+P2S//Gb8Wxhy/P8M8yruZiZlWIlMzVtv0dac
RfW4y18XUPGmgQ0dmQ0e8yfvN9QxewRuwtxfnJoBM+YB2walh0c3Pz8ccf/72/9/feeOONTz75JIQwDMMqzA9JFUIAsECeZtrbSeoVwsYqBSDrKtzN/USkQVj6f8+ePZvn+ZNPPpmmab/fO2sAxBjzua1p9uJhzobrsKmzs71vd/wjIoJrvuTJChzS2skLsOTBNnzAM1WbJ5qReu00hvWO+wOCkGKMp9MppUlBp2efrx4MTu7ylEW1/lnXdSnP9qVhlPEHdbbsdru+7yEzAKjiUc5HAfcISC3RqbPtuu7i4qLrunk6HeslNWbH26aDK/TlmSFUOzszW8URnY82MVcXmbnXPNxUI+IadzmOrFpB5llEtGU8ICNiAATA0+nEzKq8SY29nOdZ23giQnM6T9OkInHjCdAA0WmaTqeThiMq8GOhgmAlTy0EN9S8TajHgVbRlBrEaxATkWEYzDCnk5mmqaT5uHI1/iiR9VWmCkvdUazmJI/SdsCdlcZN0jZxy3ZKA5t1jToZwwoTqk0wU6bUhSg13dfGyTWb11iKf0XGKlpUf5WNr13+LJnwrEKYK9XO86gYojF6XYjqiUVEgRKe2kDGGVtN1oLGS2/3+3PZ/2TlABaKg+3jC8BXIROwTo0zwvZcA+tl0PTTss8eLfxndBdsWO2Wb26PLr2oZgdpD0q9V1GqGcRYSZmA0wz1Msr0nBQcp/MkCmsG6sFSp7GwcgMmOnlX1mcGogAwFKkaDULKMsBUlwcBpZhqPM6Dy2/i9vLfe1K2eS4HwzpRs4GD/5NrVJ6ftmGOOugtIKfZMjl3/tWfan0XJIE4p/Hzz59/8vFnrz27GuLwzdevfv6Xv+y6+M7vvfna00e7t/urP7345Qcf5B6Hfn/x6NEvvvgVJwxAzJKSVhxmyTlUiHkXBLqquX5dXANv/Fki7iy0yRt7arbbXhFqlmCzO83abW7NN3Y1Mr3hsKc4f4PZF+AewvTIcB/arIb16rLIuaJ96/vrxTUqj9x19o0GZ4BWebb1+v1qSNgPYsBpiNrUGFlHkzb8Z7ubWwvO9in/oZGZDHTeEOa/Z1dvxk/AwWRZsoiklCyOxb+F19V67Ck/oL2xIUO72eQJv6GeRcBaZ8ib/ORmIfbv8qcslOUnBr/mdXazpEoDftXbO8/+2XVd3/dqfQ/3F2F6YD5n/5TNGxfi/RtpvstbGrpuZuLxDTZrt3uaHcGq+c95zjn/8pe//KM/+qN333333/7bf6uSt+fwJUapSt5+PuAQDM5xCSst2EyvQT8/mulUSNj3/fe///27u7sPPvhANw5ztnM/hMDiPYTtudmsffuuB/60Zz1R10EWdWh98+K89W/EdfqA5yFGtzaOcYOtgUlH0+WHEDKvsnhkI6gwMxKp2KohDCKkJODjzA1u6BROOyh18DlN2ZW+gBp2pNhi2leMMeVpnrMphDY3G8qgag9eXFwI4zzPp9PpdDpZwUKbocVMGm7UjhoiTqa1M8iGvbi42O12dmj6U6Pg/5xsYilNzEwEHGLO2ZoEKlsVWqBRzCUYpQoPFsoewpIdx8xzOjKX3EhVL7EqANM0HY9HBVTdu4ItVk/IZuv7znv2bqoIrSNp1ftqgXu2ZHbxqLBmGg1dF7zKixEw1GboMUaBbDgAqzQ8yK6krT7bIL9JmFD7CXuqVyUUqESEElFKue97a+2ANSja0Klq4wsbNCSfpWqMFZ30+67rSIo9gqovJ4QQsC4kEABXD6FF9okmYHRdt9/vD4fD0A+ZkyKth6dxCc9YxHXxsAnrv56vGnUgYo28qKVoJPva2ogIsFigiKhVCMll+p1lviKLOXw7M7vs2eAKVcHm8kQLa8VmO77/BizJMpkEUxDaBau0vgsRbY95Xg8Bx8Eb/LA7V7z4PA0ssqY3sZylFqihnltQGIp7Noob2784dRdxtV5c5236kZuVOoS7VwaJMTIs++gQbsEW9gHfdap2wm2vRidvIOBhAgACLFIieAFgv7sAyD//+V9dXg3ffH39erj6u3/nv3333e8/o9e/+PrzT+4+e/3pa+F9QAg85bvT8XQ3RYjMMI4nnpn7ACiZ5yCd7ayH0tazZ1Yxs2h6O5lXSDxsyeWy+os3xlSodju/Qby20i1IViGzVQg9PdrjvO533yCJrB2MD6BBc3m29R3uXJ0f+q9XUP2YZ5cMa+OW/9BA2PZiI3KJvbeZiY12lo814/g3+S9p43OzNxZC9uGgG3ObnWoNOXudEJ3TRtzRbgjDNfXfAyHX3sGGlrLJhfZI0mih6FK+m63cEo5UcdBG3s5fu1d7EBWo+s/uhjOQX6NBczV7at+w85T6VZzFnzJhQETUyoen00mzrM8eZw9c981TN+8sf/gbju9X/fDlF7v96Sw3tklKSjHGjz76aJ7nH/zgB7vdDhEtYB6KebQlNI+0zVvs+2YyHu23k7ebmRlJqOpXl5eXP/jBD66vr58/f973PcZApchIra9OZ/bFiGuLRQ3E/E/bueG5s9V4ptHOdqXNfJpd8AA8extWh1hY5w3anUV45WDKiWK1n63G5giDHnZ1tnNKmYh8UYo1dbeeIpPv9YOqeVJPHNXUzJ3FzOM4qnzsGYuIABAAqfypjrW+7w+Hw+XlZd/3t7e3Spu4zhLUeFTTjtBppHZ2k6vIqsL6oV61OKTsdjuuiYvikhjTVHhmhTADUAnHd8xchMcsAFmzy1TKta008IqIyGIBYebMxTejaphB9XQ6qXtQC6KEmifJkuzw8hvEzLZAP5rundroTYMyDcf0UnHSnUUsGyP1eN7Qgp8GIobQhcDaC7RBEv1AsGR+yeaIMcjbZ8vxcQRVVLKcFk+muuNEZJqmvu+5phCLcwU1LzIkKd/X8fWKMWr5A7M+FFkWsJySgYi0dVpBfiLqumGIXdd1Qx8vLy8vLi6GEHLOp9OdLFY2gTWrcZPRkPWFUXhuSZtUO78LiMiSG5WyyN4uI2/pQ+hFARvUD1d2jpd0HZsEuVrGzcu8odr2rDkP/NRlcxJs+Z0Hhw5vgDMdoymraiNkzmHTrwY2Z4B5gXktcoGTcpozzF/6jXFYE8iaDYZ6WG6+VMELQDM7UfOaAHERrdhJluhqG3oI63v9/npQgMN7u4jO9tcCZs61kphnN35AIx6vNG5RCCp6mL1KFm64Ovl4iZcDEeFa8UlYjvM07PbH0/zhB5998skXb7311uOL+PKbmxevvv7VF+/DFf2jf/gPXqbrP/6TP9kN/fjqlE8csJMMKZUYLQJJPAtEA5pHA1MIae0aVfr3LgKv99rMpeZ0eTry18IBHRyYOXSroh0GGXQKwBbT/P3bGzxhbne8OTy+qy64vBrOLvDhy+43SNrZ3MzCcwaAll3Yn1vPz/bxZpIeGz1aeogZ3EL4Fk9FwxO82dK/0fDK/qVzvnfPW5qbZSM9ewCaW8DIUP9U+4UeSxaDZCfrdlF2LnisQ9ehxKjGbtjyt4Yi4P5rud897p+971D41qt5kJ1HGl2rNP+67Z5ytUbTugHPfQv51mn478/+cN/330poWyQHx9agL
nA7fLOPftprGgSPGFp34JNPPvnqq6/efvvti4uLV69eCWI1Pei7QOB8sdDveG2Pm7OqFABQgBBiAGROAPDs2WuvvfH6z//iZ6fTKfTF9qcVZTMICxOcD+LwaNxMYMs/DUSekW7vbEYzgPtV5HuK2RDRdgT73GwQAKiKhU6qkSp2e3Hfn8vi5N1lPszapaDve20iL1KS9+x11YyFIsBcSoOaAMa1OYTebMYCROy6LkT02uA0TafTNI6jrQtL2siSDG/r6vv+yZMn+/1eBDVHC5ytP8Y+BIl98YzZ6sT5T2zhpG3iuu5wOAzDcDgc1DHI1RcqNbzTdMJSwWUuHjOsSri+CLV6MyxdYUtgFKa70/E0jV3XZWHMNOcE6hjIkFPinARBi1uKiNZ9tAlY5dXj8aixpsbQClQJdb2qZqsipCqQ8T2ufleu1m2ds+qW+qDUaoKqw6sjkTfehUbs2Z4CiKEx4xKFrutOp5PH/IXcNCGXARBKWRJBQko5SS7aYM4ZGHLO02ka747AHLvObA26xoBk9X6IqOSpitUNKVYAk//FdVTytL9I+46EvTXZWzdMIcw5YyBE1CI5AIBYcOxit++6buj63bDv4hBIEHGe55QmKHojIKqVtq1jZz1LYF05RtZFwsk5jRZRgYBkqdFat7I1O66kTzjH+PwNHlLormbebu/PxP/Y4dq8VH+NXbQxt8xxO1UTbhCX0nCyOb3sEXGV5biW2bT5GI/2nNfu33Jh3GQGavmWRhD0fzYAtAn4YWETlOgf91OyobxyZfc3zrpmg2zwBWlQzdWtQVepRSt9mQJjJqVmNHAi1PZc9N8rS0Knj4HDECPmBnTlewh3t6eLy6vbm/E//of/0sUBMB0Ou7/zkx8OL7tR5tefPP37v/93/viP/3XX0YuvX6XbueMLKiEqCEEyZGHQmjEettv98ltjFjUjS7O62dLs1KFaLQnWdNTstf7Ezk6DzsgizmzRjNBA3qOun4bfNd64CpvJfMdLzsnoeL8+6XFp+5PNWYqGfO/j941sJ7EnLlgXYQJHtgsirUnDM1P/lu3J5wdszBnNYj3A0zx5fmIMpDHM+csjiT3l3+V/8oUTPb2ETQH67We/ZFp7CO17Zj77LJzjco3EabflnPWE9ZP3aLzFLgPOA5jgL292bCbZbMp22Ga2EEgDtGDNln+ta8sJl/HX99jnX0sh3ELmYXLebrpH6e2duD4T7deu66Zp+uKLL375y1/+xm/+6NGjR1988cWw30MBY8UWtNEA1hD2u+/fUr95aG5n5llAV+b8ve99r+u6Dz74YExzCCGlzCKSmBGIKM2MzqD8ADDdlM6T9tmnHmaqhudehNje7wHSjC8i5NoO+Q1SCZhqfIpFIW5H88dQtRmtwuaZWSPccs7jOJo+6YbSQ6qcdNrI3g9ORFinZxEEMdIwdCGSaixENE3T8ThqWVFHaKSl0PRdXJPfuq67vLw8HA4ApJGiXKPimQsnDCGErrdjVOoBQUTjOGJVnNTTqKrgfr/XwamWl9cJ397eapyhaRQmXKWURKMKsTOg1fkXAAAICOeUjuONNlfUMMWU0niakcTbjnPO1dICFASQBSDznPI0p7mWdZmZGZApABEgCSAD4jDsUNvl9b1lD1oRS4W/qutek+m6Lues9T9V/ZumabfbqY/UIG8CtqGZ4UDVkUr23cLWyg2+5fjqgEcVOWuX1y1OmmHds8pqOzipW3gYBgaYnXgcgtZyQhEZht1utzMHlWrvelCqxivOWGBboNPQDjEAEFwzRoWq1hJr1qJVskWQsyCaOwmHYei6vu97TZSNFEyKZk63t7fjOAKycFYTQEqTcZX6IZC054Uh9pY/i2t2ypLpTCnsEnPnGUJsTmtwXePQiSn2gEdce7E/1LFe4BQD2PCsZuOXcVjrXqKIgJCWLAIAEIJa3UtEAIpqB4vmmdk5zRr+bm/RxsRWP9cuk+ZNiAGnnPg5g+PafrGyVif8S3mdCASOUaY8rocCREDEzHPW8q9CiFgKvQJacWO/a/ZeWWtWtqFnl9AASucD61Au+4CIWlaUnGPar8s2lGp9Xh+vgu7Y0/mbkmMirIekmBVEAF13pnKbhMurixev0jxzPs3/8n/7t19/9fyf/O/++7/1B7/3+pu7H+HpP/zFf/j840/feu313/r+j7749NVHv/gwn3gvEYUCdZEwyyiQiUDyympgsPVl+j18zJzmD1fcZInw2q3XYKP/IC5eQkrB3oXvGAvzm2UjNFvZgG4FMYPtxrLuKVExzb6Uc1qfPbX9CREfFpVl87MJ7v4CaGvkPDDaWSDbfDxPgLVdjddBDW4JLQ/ZvsJzLXBHCLlwcT+mXUZB/qezdlapiq59r5cp9jqUuNPLlmaL1UfyUhqX7F1UXYVbpiFrNm4wbL5vZuWnYZZIA7VU2bdBZnGXgmS7ifdiwP1XQyn2wdhX4x19eBA1foHrePTXmNJ2ZABYNIz19ddYNZ5TY2yztngOayr2EPOfz87EM2pEPJ1Of/EXf/F7v/+7b7755i9+8QtZZANToswV9hAd+auhwe0kt/fr1iBLAgaAruveeecdEfnwk48Nx2KM4zhDoECdIHvjY7P8Zob3XVtW0Py6ZbYKDX8D1hPE11VuJnZ2fD7n1KVaIyTUrgzqJDHvnLigR3SBPFKPNkQkbcATMHMKIRwOh7u7O3aFPepkyJR2h05UZ1cwJOdaN5IZgNWTEmNUD6H6asZxvru7S9NkM8kaZlnNIyKiek7f91dXV0+ePGGGm5tXGmJa305UexL4MNHG4iYiquXqtdvt1EOoN4/jaCDyka7qoPP8UJ1pbr8ZibQPDQYiAdWFcuachRlU1zJVs3T4IDrhyW+6O/6WvnN2v9X2tMxzXazucghBFUK/XvOPydps6j9Tbcxj3T7UHwsuoMmUPX+Meny0oaQ+sEFmwlrlMmO2CajAYjqMZ9FYa08YNalt4vb21qtzuFZJQihJPZqeqt/HGM2TnFJSoOk3Fgpr8CciJVUi6ruOaixYCcamxddi5x1KtS+rGhGLL1rr0+73F6oQhqoVI0rO+fb2Wk0tGhQ9z0vp+Ib/kKtnuT2LwfV8KjAJmHNWJFozbbFpYyVeZo7Lzq3lwi2nLlCmYESOLr7OuJLByK/EszwvSduvNofkGoluwQEb3u2J078LzrF1EQm1xY2sz0JehxLZl02I4FZy8vCBdb8gvTwJeSCXncsrFXELc78oRExz9iqZVAFRZ5jXVRy8WgL3nC7NK/zGeMAOw2Aho/pr41qxESxojVy0ulezYX3w2P2ejyyIZAYFKS/Qn168eHl19eg43gzD4bNPv769Pf2D//Yfffzh13/5p3/2j/73//i3f/t3f/jOuy/Hm7efvvXZXz1//dFrd598SRhACEBy5ilPEjN1Ihsrr77OQuya1UGVKc3gqgvxxccaM0ozgv2E7mA2SV25np1kfjcbqrFBwFEQOL6wZdy4OrNXW+wVyO94NUNtSbW5+YFfjfTqFyuSaUZu/pS14m33G5V5uyY7r6nH/O1yeO1K8rC1pxBx
muct0jZ7sV3plkF5KNlCuNYvsT/9ev00xHmkV+ylnnN6+PnRVGq0ZXr7KKw9bN780QhARgIG56a4n3/Wn1UptdGqZdh78OQ+eN6HdXZ/c8M62+ShQfyGqiDoE2wexuf7Rtt+fuj+X+8N56+GjZylWY/n/vvtB7vTIKCeBBH58MMPAeDNN9/c7/enaTI82SDq+U18mBbOrmv7WUSIhaF4APb73fe+970Qws3NDVaqiCHcnkbM0vWtUWM7n+ZQaObWfLOdqvFVz58fXgs4IPsxQy1C1txvZ43fGtpWw695gKGWWDTCtKPHxvFDdV13d3cXI3b94mVy+TiFZ+MS6ZCkHmoiomZ6AGDJWu9K25E3E0YMzFmVJS63MbOxkYXi9vs9Ee33+6dPnx4Oly9evHj58uU0TVB2quhFZrjBqvN7UCPi4XDY7/dXV1cXFxfqnwQn8lmioyoeVsOT62VbmfKsjBQpsLHWWj0eKEguIZfzPDODCOYs8zxbqqQmQ56OU4hLZxGbbWIWRABIzPrfnPNcezCGEFQWKea9WPqKq0Ko+xVqDy3bJh9No6q4WTP1yjkfDgcR0Ul6tCSr5siMuLgNNQRU9Xwv/zvUWlmoNY0w82zwFCFEMkeCnEuP90Gzp9NJ6dp+AoDgjOwx9pYiYfWEMIZ5PIFmDKXUQRdjLOEDWn8FCQmRAhGFGKFCaTcMCjesFQE1DMGkwSJsJCnrJYwxUlfKkB4OhxDCMAzDsO/7aI79jmCeS8/6emLmNM+7Xe+5iggyZ5GStiAu1chzA5serhK5a1lUlRMARDK5eIEqsWQRjppqaWYP23gVLHztu3oWglGOIa6NboKIcbGG0XhmZ8jhD60lCLte+pQJzTZU/b18ox05TByxidmAxR4GoWGvSjC5Fl0wUcYfBuKETgW0slfDfqgWd+Yl8FfWJyKsS+yIq7wE7gDwbFrZkyH0PM8gq3axNrIpV9llRSr2WOYAuQhGcBrdSmEr626Nx3aZLUT5kXGTJl/ZIGyIaFkH6NotkLOveI4MNWMHvPVFraeIRDTP4XQaQQInjnG4u53+P//vP3722lsB6YvPv3r3vXePr+7+9E//dLo+Pj08fk63IaEIB+mYgTFhCBRY+/koxKgGf+pMNKTEk5mdtXba+d33IDI2gS63rUF4W6y/mYim2rfNsH2h5IonJugzM54rvInViGsHgGkpmk6w6LrOZ1VRbsk1l7WQhIgiK5TQX/zeab8pWYs+tClVZXMOtWa0wQ2q+baZgL6L1jF7No4pS7BmNbCO9jSMkhp2r8g/DIOeMQhgipPXqI3/GIc1DdM22rbG2AU6y4tOwNbFzjZv9imPcvpB7cfgCNw+e+XWDlF/SHjoGRYZyhmh+Ts9YD279pqkx0aPoqWItrMPmiXbDNg6AhFp7XiPSwXBuCC5QZju0Z8Nz9FJHjagN+F5Zm6JIuT8tFCpyZ8yRSKc8uPLQa3Rb775pso95Vc3E3ACkJ+kh2GDtwshuGn7swDOXX6NVh4Q1+dpg3JbYoF1GIsHb87ZZI7tCA27A+3ckHOM8euvv76+vv7d3/3df/bP/tluv68xOGcm7wcBR60ejNv1Nivy27f6HorZaxzHrnvte9/73suXL99//307g6bMfd+zoAoM9n50eWW8KSS7BUUDBxN7PPCbGTrcWEwwJrUzs2h/WfeI52+eRTg8WQoIh1JTMR0Oh4uLCyVJEx+xinOn00mdcnydEEtCoPrH5inP87zbHXQ+mjcYY7y+vp6mScu3vHz5MqWsOoae9aXSIyPnjIRmc885AxQ7++3d6XA4fPPNN/M8P3v2bBgGLeBJgQG09+moqYM1fHFJNVSoDsOggX9Pnjy5unrMzF999ZUd0yKiCoBOW/lb3/fafo2qiog13NS8glRDQxX5VffToEqNnzTl2R+mvm/2MAxFikMB9cdiVA1QuVzmPM9lRTc3N69evTocDohhmiZyxv2cgHlGp10QUaoKiR36UK3VoTZMV+5aMgZjHIZhGAarVgi1YpDGvZuTUDeoQWzLwDK9yw5WrgU5w1IEdclIqjr84pDQx/uaHSoiAGGaJkSMsVcRuuddjNEC8gFAq1IpwuvZp+Alot1ud3d3py6+u7s7NR/M87zb7TQK9/LyMsaYZs45qz9cH0cBTlkQjscjyhhqY0ZEVM0/ur7zShGK3uadMuQxcZ2IuricaAbDYLU8EBAlxKh9LBXZ9vuhDxERQQrHON69muf50aNHOecYaZoEAA6Hkr9a8lSZEQMRaYt5YwjgDh3PNi3+EeqZQk4jQ856dtnkNd1aUSjqw+yMT42E4SUtgFXrWM+bjK17ztUcNp7B5VoPqjnP2JlzbFhjedtxbA6e+Ros2AVIFKpbF3XAdTkWv7RyjxPB9VLUp3Vv34au/PQMv42e/UGIwLDi72cUZg/evla5taX50xTOFfTzf1I18NhyyJXkyjlzSrw6kcuzOWfVBUzibBADHLac/RWrwu8B5RGgwb26ECwh+F6VAtjtdoCcUgIhAro7Hd//1Sf/8o//5B//43eub45/9S/+9Qefvv/j3/nJ/+n/8E//7/+3/+ef/atfdBAJ9MwGwggkEAKIZGFAoBgAIAvnXANLaFXYRAAEBAHI+Q2arWmh5lbt//S770HhNWQzOijE+r63Oxsq8LvvPwfX1sLzDvtcCNwN28xZod3Q431Lw/tF2Iev+57yc26+b2gfNtTnP28ntpD2hodQTTBomIkhs+cteoMZETz/JFfUyr4p8MzFLOIX4mkZnV5hBjW/I3guyMJmRWuTmc3ZBFYA2K7CA8c+nzUK+Dc2uyPuMniy82CYFawJZECnYf66OOQ3y160MIqNrtVAzA/S8PkCagQNKFIzv+euD+P7FutgjST+z+2dDy/54aWdHcETgjjtq/neEBUcJBseYj8Zvs3zfH19fX19/dZbb7355ptfP39+3zwbVuOxBe6nWX81xN68hSydG+Xp06ePHj36y7/8y9vb277vBQt6iZ5QICCEkJvtODusw7FfO5LivlXY6751x2GDTtsvPRWgyyG0yxiL6b1+l8GJZFVTjaIqdK1mKSIgyiRn86oVw41UD4mUxCQTZwGWuqDH4605bUJAkdx1pVYNO29GStx1ndUIiRSq8hMfP3786NFj1Q1Op9Pd3Z1OO2c5CyLzi6pV3URzcrW1jC/pYqUatVUtVK3Jinl66AV3lZTFEDAEAqqniZQgzzSfpnEep7u7u3EctVFnSoRYotXU7h9CQGIz9JC6/qoN2kvpWvPGXmqfh66zb4zcuHo7PUWDMyoZAwy1M4QehaYtG4jQVSMDV1G/ehpWsVFYDXDesaSGGFO0NLfNVG6LcPHSS645pVAJXG8bhuE0lkh+XX7OGQAPww6JNPCyWDrmOecMLJqHZeuFejCllLquq/uSmFlRBSpYQqUjXZ3JRTaO/hupGpIIQwhx6LWZyjB0Wu+2ix0iwmJOhBDCsOv6vp+mU93KJWwTi18nIiLyGUtfw1HtJ6ngsxkqMZKseIVABllO55hd/hI45QedMLE9xjwLs/0z4Hp
w2+Rsgz262ON2DjVrk81lt9njco8EIC4F04tQfv/IOWdgfSR4oJvLsRnHUKQR6URaldgM3uAOGxHpgiuuA2idZKriCoBASCAoLMp2PViaeYo7zLB40pby0PYlujQA04eVEiCz0Crfz7YPaKUFbUHqDWmNQuhh7iFg8refPzk/jEuad6/Wt0NWMo5xCDh/8dnX//pf/snh8nn/53/xzcvPL55cvPHG9wba/85v/PTdN//dpz//BphFgFkEWUQwQ0aQnBvWCRuLi78iBXQhnYYSWwUYHK1udw3XurrdT2Ep9OzhpiYrI6Ll13sEJ3RHu521WG1gC4Q3xWzuW/h2/LNfmoW7gcN94zRAsJn4OZ99UUPyBpPtu5rvtztigG02xe8vOrHY80NvS/IjN4Bq3t7MVqqjwE/eGIsxZE/FZ1fXDG7joFN1PJ6zC3VuLnbmeW/j2/IcWCOwoa4XQ/2r7deGW56dxrdeHix+MmeHlbVOIu7gaMaxuXVdUEO1CouWZQSOhM++aMv6PJUtQEPEc3zyPpJpFtvseMNgt882dzZM3sZk12GYXDxh82pEZM5qIb25ufnkk0/+8A//8J133vn8yy/VE25vlvVT21k10HM3f8sqGsoqeEsgAk+fPt3v93/1V39Vjm/BauskQUAJUHcAnbTTgLGZ232Iet9+3fN9eRdt/Mk2+gMUsaUpj7eIqFl2NrhZkM39C44YzUg9TVOgLoSgyg9iGMcRgOd5tCjK/e4wDMM4zjXeB3NWGbtk1mAJGowhpKqHlDNL2+Xt9/tHjx49fnylnfEQxWIyi4yUre5UEfS7EIdhr5U/tejLPM/agq/oMxAQszG6mjwZuq7HELCGUKqSrJ+1GIleZqzPOZ9OJwOLqWEmuXnOpn4kCsWhRESEJXwjV0SyiNN5nk+nu7u70+3pmEFC34W+AyJABGEINE8ZIWjNhMTFOxpqswQvPOtJYU3nvU6oszJWFmoakSk5DZ8xHLCzzI6zUCuscvUD6wjKCVX9I1o51RVsDcITBFlipESTQnUC81ywaNl6gCDVBQcEAAFCkIAZI0VAyJRFJEIMEgDh0B9eyY3kUhdHxySMakQYeQ4h7HY7LXylvm6o/K3xA6nNXedjG6rnI24UQn0ROG5mkkDfFds9xdB1XbfrVCFc2p9EtXsWBEt5IqK+74ehe/EiAUi5YZXNW3otiiyFA1Z8ZMMEYDkU0Hh4zrOIloashxEywlI2QkRW0sB9PNFfDCsflwHUH/D6oFG1OCmhAZ+H6WLb2/C+Lb/enmS0jr1uuLw96C2gzYs8HBe0PlemxUPAz9NPuHmLLdZ+3cqgthA/iJ+bIqInZiPj7LqpGEg9/2pmYhZfcdKh1APJz3/RY9flxfz2Na/GtWfJVtHAXKpCGFxfL3RxpCKC1MG6kJrSyVRD6XSBfT8cj8dPP/1s2P/9d3/4zqv/fH3YXR5fna6fv/zBW2+Pt0dMDCjKdbIwZk0nBFgoZ0EDtQ81rK1BjAZbthYjv3GwJl2pjcg9WPwcjFVJFc6U3xlk8rqM2xbUW2Jppt3shd5GRAQrVf+7XHhOfvUY+MDl8Wc7Q4+HIgKwEIJHaVvsdnWersFFlnpt2d9vnN800q1BXdwx0CzPxvc3r8avXS38wqlGtp+dqpzj9ff9mV0iPm28PTafhsM0AzYwlBqjYTM32Db7COt99zP3vxKRClrNG0XEp5p8l+vs+HCOrLZg9Heyi8gVdxgPMZ7mpIHWKSU1zKs0ABsC3256w8D9nWdB/V3We3YVHuAeXeG7UaJn7PBga6VmcK7SWM75V7/61T/4B//gzTffbCgaajU4OGdre2BiDX6iOwft2WZ1LDmGSELA8vTpU2b+1a9+NQwDEIL2EgNBLGbGDBJk9RYPwGYy9dUPQPHMdc/qWjW+An8x8N23a3jPDGzOqkXo+cI1tlzZgiYSS3WJGJ7bcWOyhJ4v8zx3nQYpiIioa0sVq+NxRCyM0QrGxBgplE0JwQ5QZuZ5mnLOwzA8efLk2bOnjx490iDGu+PN8XjUSMKUUppZPe+aWafhgpE6rSJzdXUFANM03d7e3tzcqH6bk6SUKBahPcY4DEMIUb1DXAGiQ1ls6jiOXH2YuhGm9oDjAGYr3O/3xlRVsldQYMC+77tQ7ERcQjEl5xwAx/F0e3un6u7pdLq9vdaGE6qd2ukWI5mKIiIhdIiMqNuh3qHSKosoWsJgoxDqUBblCI7ZSk10MiuPrdqjGTuvBtWus3o2Kai97ZiZHatbHUBeOBm6XU4CUIISdRWIJUhYbQQikrmkMqEsfMbQUndB16u811LDRESkplMSdV1HWGrJRCSLBfWXnZLeb1m9ixBC0J1lZo2wLTx8LduHEEhaegwhIBZ3dOgUS3odzXfBhBoQq5sSQtjt+sPhkHMiIgA8G6wLALjmfp72z8psiEgl+o+hFrLS1EfWfq01CcuOj9g83w53LlLf/vR44LHQttDmWjXdRTMJLrnfsCe41qheYwRXPAbW50Q1x6waHtadO6OKNKto2G4zJRGJzn1kKAU1x9K+t18bOdU+07o6kH3QBs12JyIyS/13aUJY4dmG3Hgsl7VMZr+aUaF51iCca5JhjJHnBBvBzv+Z19X5thBu1o5rAcLszd4lYvMHF1Oqmw7akIcMCbM6CPsuhC4K9yKSZuYMIDSe8r/44z/5p+++91u/+Tvv/eid7//gjcD06SdfTKcTaVObAMIIEDAAAAiJ0RLV0kHGffymG2KYFuFZqgd486HBNA98Dx8DhW2Px0l05gMz98KDl+KnGU39JMU7ZusczGxMUFZqr374RX6BDbY0X569PBY1GNUA1nAVNszHv6KhDvvTQ4w3Iej2auNRHsPDuUbkDSHbILlm/DckUG9AowhPs545bLmQXWY187u53QU/N1uyTcMD1lu7zkKy+bLZuy1WizMwwRoTTMgAAD0Wcq1zaG//NeVtaJbz8G1+Ps3WQ1XLoe6gLVCq+UCNcX3fa0W4LVREBM92cd1oiQtsAeDXUQjPrgXuD7He8hm/6obEZJ08jOt2bf7ttvUUQq6Jl1988UVK6a233lLrlbt5pRD6JcAGgZsXgYOnx7cG94wAsQp8jy6vvv/978/z+OWXX4YQxAZHEASEwASaCfDw9cD0/oaXPxOhSDjtTx5Vtnh+djIxRjVbgDPl2LFlPplcE33FdSZUWTZn8ZpAnSFrKZRAseu6u7uT+isQApGSjDBLyrOfmzjNCgCurq5ef/31q6srqi53bXCnk0kzp5Q0zlUzr2KMfbfbDYPWgDGdpHqoSBiJWO/X9LYY4263D7XkJrjYe2uowC70UedmLSVMBvBc12BLtb+fapjDMFAswYrAUj2OMs+z1mIZ747X19eqfJ5Op7u72zEtfbxNFQcgrczErkCDvdEOBahcVCdArpiqkVWaR6M+z8RwLZbLupcVu3QVvUd5nZGVvp1qxrhlDNohlc71/QaAGqEWKRiRoohiI6hCqCyCAoYQ8pj9Sg2BAcASYhFxGAYFqYiUQxu0jtQeoWRLEsU+RI1VplofHjZGW/
337u5O/c+IqKqg4s84TUo+vBZFEFE2LBerbrnb7UJf1PkYqetCicTB0mjXSsZGohDwcDg8ffqUiJAEETPn7AoNmhof4Hw7t4YzGAABgAgU2lBFa1RtQpI2atRnDbCRnWcDNkzKyw1l76tm4XfdYNQAyAMd1wohO/v3ehyrQ4PVLuJ9UPolVdVoWa2HUV3eSquUannSafg5w5rz2uQ3h9NKWpX15de7HYQ2nkl02uOW0WfXUN5MOx6t/QnRqJrbMWWtoqMLejQ2pF/maZYaUuyXgE4saFikbCRUL33as7bvfhwPH9tHW7hyfACAzEyr1YUQApIEUlYugl03IKZXr6af/cWv/tbf+cnQH778/BseP727mX/0w/f+8k8/ZGQEToyCkhiQIDN0m23agsszyhDO9/DwEAB3HHqweA7rNwidhmBvl6r+KX+3+hxet8e1UuE/c62L0KAEO8xELBV6PMAJFp0fHrzqyO3kG2R+eKiGL1fItMU56g0rN34z+JYpNV/6p+ylDY+y20xmorVPw+8arKnYzy2va1MtfC8vdVw8pZgCb4zX9rHBombHmyV7TyOsJRvP0nV8XreN8dSt2E7rcIBm4xo4N7wR1gRucNAblrJvDcP8azV02BKUh8m3fgYn6+ifPpUFKoNVg/F+v7++voa1iuXpDr6Ndlp6uWc533r59XqWu33X9hHPr7YfmnG2w9q7NPpunudhGF68eDGO47vvvqtlHtyAi16nIeVbdGqwaDthWAO8mbMhQKAS6vb06dMf/OCdly9f3ty8mue5G/qMotkyCKBt3hrYG9Gdndh9oNjO87tc6ET81fLvee998/GX1HjRw+Hgj1FwRwPXOnZY/T8qsquPrjK9ohSp2ubVRRVja5s+SSkFWjBqnmdEQFpon13hGRG5vLx8/Pix9bg/jcV7BrUGia2rxGYzDsPw2muvPXr0SKd6fX2tS9BBQKwHANbMwM7CZYkIyCQE8Y4X+4DVgmDfe/5psiLVulmagqgKoRWtAYAsmTmnxKpzvnr1ipmn4+nm5kZ7JKqWwrAY6LNLpUYn5oVa00FEBEEQGET/A8LQxW7ou77X6SERUPGryVL6sTBhb7m27dDPterP4g+wl+pTnlcYrvKqiFr5RqrJzMBuAMxziSDouq7rQy1Yla2xBy85EaviebJup6S7o/uea3JjbVhS1hVy6A4dYalSczj0espofiYAzPPc9R3ZiYyItmqAOSWpaaICkJnnivziEuAXLuFCVWHR1bEfYj/E2HcKphhj7ELfl5Qf1cdERCALZG3N0nXd48ePYoxadjXnXLNHtZlTIV5a5+Q3DGQr2xhbMIUfEUEYEYUXJPdPrboYw4bxbc91dpZydM4HL6n4uXrG5NH0LDtrfvJv394P7mTVnd0exg2Lb35qBpdzl2UPGtCx4pOsBfE6/nk51Us/nsA0RMoDql4sUhRgEWFWu1GnSYZbgHhs8L/a3JQvW+S0v7OxQjWjNUeyPWj3GPvwDGUL4e3g9iuuyzM2rwMAAAX3MqXy0lDs9DUCkz7+6LP/7Y//9f7Q8fTm48f71x4/ujx0KMySSRIDMyMTAANmYFyJkl4NM75pYnRhCiSVGxbdzCsMW6TyS0Z3wZqUFmA6vbR5xKOlLj/GeJ/o2BAmOsXGA7kh/+Z154dut2b1ZWPkfngQ/96z4zfoYYpiQ7aw4UtGqg3J08a/6gdEd+Bt2cJZsPgdsXvIVVXdsiBZM8lmtIY3NnM2AjwLLv8uD0A7ReyGBzBBP2seETvLcbMK2FxG5ranZi2yaUDlnCFEu8e/N32b6/u+y7Os7Qz9qrfQk5rDiU6Kqn8WsRgR1YdweXn5/PlzOxcMJRp8a8BF64wGe9A2zPbiYcJpbrMB71sarEnbP34WDkZNfje94cCPafPR1X3zzTdff/31m2++eTgcrBxrnZXxmTMLaUDnecLZFfmF2we/TCJ68uTJfr//xS9+MY4jFGOiqAcXXWQEbsSVsxzJUdMD0/nrXB7OIvJr6pUtmiGieghhvUf+gDaxMtTeDCKSUlJRGwD6Pmjco7Wlqd/3IjLPs5boPB7HaZpqXQPUCpNECIzas1tcSWrOPI7jPA8pTXd3OE3TnEa1sJQEwlyMmGYq6rru8uLy0aNHjx8/3u/3GmF4d3dnK+3iQLXjYhbu+15dCFxLaIYQBLG2fFhS/RFRFRKsFjRTiszTpQqwKjIaRqhhovv93vLBYozjnKDmCp5Op+k0atmYV69eFRfo8WTG3K7rIgVreAjuwFVXZUOViAhCpQW3EEIIFPpuN/Q7g5IwCJSoPxHonKppaqdqudl1UFQWp9tqxO7vhxXOr2QDB8aFYdZjdGUe5RoHa8tR/yqiqAHCIzBnYFqil/15oRE66svZ7/cAoOj69OlTCPTq1Y0p8+PxFGJviUhd1w2xwxo0m1KKXeFX5CLCoNa2PZ1O6mZU24fGLeveNRZeEUEu1V91N7UmzWFfcjtDCDGG0Bf8qVvKtVbRXCRMKKR3eXnZdR1PWlY0xdoi2EMekTSQuGEdHoyyFqG957ac5gAUgITonM3dshVXXKk52teIu1TPM9uJGm9kLc3g+rC36eZa5R/csWQYUNniqjCJOBFqe2zUQVoNFta5Z/qUz8Xya/RrZ+fFsrU0/27hVt9y3pia89Jrkp3VX9kobqQu3FxUEo2yv8GDzkRbQwusToAGtvXVi/5jW4OIsJlDQSbnozPLE6wtEFLliWYyBo2w7kSidGuh4X5FngE1G0FWF4fZMn4Lm4v0ySef/U//0z//J//4v/mDP/jxv/njPz4Ml59+/AkiEBIDEaCgICGTEAZJi2veMyC/KLPYgfMwe5SGcwXZG7w1ivDyvd+sAtgaEG9gN3ox2OZ1wTFsjhBHmzYNr9bqneWGc3wE15z6/8+XiNhJAxsQbRVC/d4bbux7o4sGLAb/7WjgdsrmA2sc9nsq1cLqjbu0brPRjGCTaegCKinZzsKm6hVufNF+VrjWde1FzQcbR1yVOXD07ifWPOjZ73YaDQMn1x+l4dvGtWDN67br+tZrO5MtesAan/1nv1h2ra7KfAQ02SnUsr2Hw6Hv++PxeHYm4uJZjAAfmAkggtvus7jxwHrv++D3q1m451fNzopT0sC1UtiOY49M01zjDPPz58+/+uqrn/70pyq0ubNmUQj9oXx2uxsEw7XToHlkO4L6DVRwnOf5l7/85TRNVqW5gYOB2B834MIrtoTw69or7kHpMzp/YVZh9dQDmLB9XD+EWlXfX+wbka03XS3FGqIJNYqsuikopWmek3bM6/udBkP2vaY8LXENIRT8Ka716iQ0hvDq+tX19XXO834/9H0/TVPmeZ5nzcgVEWFkZtW7tP7Ho6snr7322tXVVQhhHMfj8ajyumqGRNTFQalyHMfQaX3ExdGn94imJlax2GQeqee7pnKp+BFqQW+suYIaF6ohmqppa5ESv0eqq5xOp9vb2+PtnboEFWIpJU4TSxaRvut3uyHlqBqCHSK4yIfo82U8zzd2quqH1uPxdiuPBt6xpreZQuhPFt1i/0bbQ
ME1tLPZ1qwBhjBgB1kRkgq1M0XjLcmkKIFl1ZgU81piPGCoRpGqcJ1I1ZxtwKe6rHCUspIpcDWOvTQPWCmmxp7SWYmUG0VI+dVsNPLpfYTd0p/VBKsSbmXn3yuX/UYpQCJfVwGrgW6MHOJWhaysxP0y5C9j4hU5prSCcUu6mrgHKpSG8UqbhyqXrfGBzPMwyNUFiVbJxf3Cp1h1CN8zFG5jKMA/Vd3/e73e7IJeczonRdVzAZBAIShWgBuoDABQFFj4MgiCDGSDWjryrtrcdjiTF23bu+77eb/X6/v7m52Ww2+/2NEplWWiJIo+q6Hdt+I/uScy5TZmbqI7dKuQCgGToVOKg1WkPf9/1GHbzQpZ16C0XkcDicDg+Pjw/DeHr35vUvfvGLr7/+2s5+CBEAxtyKAuDF9UJErUJvURWUiNSWYbS7Ap88ObpIoVLpRvMQejroBQX7Fy/jXrbQW9Fs6Jk2OJ+Nd0PZ6/wI8kPeCc+DG9GZkcsFhV3PYcEg/bdXXypOlvL3LZJwOc5TLsK5dm0DLiQAu7mGw4VwXGMz4ODpT+DVN+K8QKVnYwuLu61u/ToPWz8Ou76Z6x/6hiX2K3D0qI7TahuUUlTs6Lpuv99qb9C7u5uXL1/e3d/GGN+9e/PNN98Mw3DUdN3DSfsjbTY3m67POR+Pj8Mwas1xZXIv7l9OQ0bEFy9eMJ79qxGIEQDo7u5Oc5RjrDWjH94/Pj4+Hk8PwJfePmlT+//kiYswMqauT31irjkewxNlRteig14LuNkzV8krESEuVXRs4vgCN0QEkNUq6e/7XVjsuIg0/WthO6h7up6/rNjSAm2egoZ9XuDV4hWLw4jNue2FQnB0ADHYTy8j4OxFhrqwogMGh5yLxxM/c3Ypu+TavSyeb/eLIbn90J704j5Wxnlpd+FneBVDYIU/4OjAYhc8rBouXUCxJqF+fA9hB+olOQIAkQtrsEpROC+i42eCiz/nWiU4jNWvCGbI77dscafed3/ak/IE37mK4Vcvm6fwdXT1Zh1/kYvvXS/c5iaODa2n9NQxgWYQsUH8e+1d/oeXP9sZaQPWbxWNKahIHUGk73sVnkop5/OZc95sNt9//6YhPHd9dQ/2ff+2lMPhkHM+HA73t7d933cxAUui8ObNG83N/ru/+7vvv//+n/6Tf/Lpp5+KyLfffvvll1+GED788MOHhwcA3O12yhd2u91ms5nGMk5nYez6XvPZENES27SBm5anrgtc8URwBHZxoNaI4bFlQanWuyBzZgfuQF0Fux95fV+cyKTdJozmgMrrrIoW55y1rAu1+IWqFlJUIIvI+Xw26mQqkw6Vc354eDgejzmD8l+bqroTv/vuO2VwGmeoDDrnjHSx2jPXRhRWki23JoRGTKxKpy2TiEJIBmdzvhFRoAQiAFH1QmoZ/loBTrMHjZopGnRdqG0nqJb7169KmWy9OitVDMbT0BxyU1XugFTqBjAHXbIdtNoNpVZM1a5XqN2tAMA0t5xz5kJSTDH2Vj9VCKnVG6PmmxocEirkN5tNDN1TmAmAtiIjvJ4feQQWTTMDaJNpwbeAfd+XUliqyhFbKvuUs0XE2DiXtjErfPaP+Qcsl3JxIrj5unPOsRRE8s8oqZGSD4cRAbbb7VkCIkphNSyYeUKPORExIFMQkczCXKiFNKcEoo0XCzCztmEI4Rxj7NJBbUkppZcvX+12u9vb2+1+V20BratKXSYJxSQteWHgKYQQQu1sMY218WbXdYUnAIgxbLfb7a7X5KOSccrD+/fvz+fj6XSYhhOAhIjjOL57924YhpRiVhMziRq5xDmfiShG0RzCaqkJUQ0latGw+pSNptnn2ohVmudfIRwZpJSC3LSI1onavLrULhF18M/4qO2WcS9vHyqlkJNUbFUrPIYAAQAASURBVByThMwuZc+40Oqa59OwJViItkP0Yj5QcMVt13hm35Yys0x4pPQrMtvPWqTwE/Y8ozJLuM741WcteFHZ688RWBhqodsWgjOvvzczKVENyPErVYuIBoq0mahdBACy51IeGthCk3HlILpsqBpKdeFEAJBbmVkBKMwIyRane1FKEWFtRCsrSZSpCDIAoYCABBQCJoRQcmCS1gQpS544p5Rubvvb29sXL15st9sUolKE/X5/OBy+/t2X7968PZ4ez+fz+XysZTygSyGllITL27evh2FCxJS2H314G2MsPOU8MQD1CCiQSsoEaiVgLlBIS6hv97f3d9OYp6l8/+bh9es3gVII6fHxKMC3N/chpPNhmMZCmKTgmM+73a7w8PLDD4joPAxC4SRDYCKZAEGgMKtZPWolJwQUEINKQwYuZekgMpa/RjlmRrT+bPqVlHl1uPlZiOxCfD0VvqjijYKrYXUqeRFhBfVVs8NSfyWiCic01m4/kpVY3+6j5mYAgOoIIVzap8JcnyECaRnwl/kTgkARBgQMpJipdhkBCWTI3wa8YLjeRO03ASBBpOWUtuPZLiKrmhgsrYWZVb/WyBQiDDEis+RcSiFECiQABnMMRHjRGBea4XpTlMF7Vc30CpOiSiml1EJkmtlvxBCRpBWVVYOokTVmzbUIMNuLmc3OTHszZRtrXYScL1lDzKw+Gb703AtmHDThBsxi2HqZplTLD2pRBOYiEhQGfhdEZoquJ8tcQ6AdheF6oNydln4TQDiI2DiBCEUKc/HiigGEmTUlQwCQJCABMCKIFC5MFScJoAiAHcC2j0EA9CQBQAzdOI7YCg6JSFBWSLkiJqqhAjEAIY7jxAhICBgQsZBiKcbpSu794kzZQRMRAGpCwIUF265x64NyOWikzc24WQYLCogIAQZhIhTmTd+PpxMJpNiTQBd64pTiLsAmRd7cdIA5QAGWTUgqu0SMHe3Oj9PnX339m9dvOOfbze4Xn//68O79z//oj/+LP//k7es347n8w3/4px//6Ce//s0Xr9+9/ctf/XrzwYebbr/Z3f+Df/TycDh89dVXiN1ms/nit1//5Cc/6be3U0GGWIDTZtsTHY9HmqYynBPSi9s7IppKxtRR1xdGplQwMgbAQBSZMzARFoOe0RkPQ5hfMs/PFNf62Nj0QrQAx0+xVUlpbVcY4MIHEBGxLH4LACAFq04uKVZL7nbTYRl7dVwAjuM4TVlVqeP5lFLiMiJiShrewkoAGYWC9h4H7RghIi9evNBg0VIEMSMyIpbCOU/ffvN96sJms1FXMFHMeUTEGOP5fIyRzueRAm62Lzdbbea+SalDJOba0KIURgzTNFiP1pxHXcIwjrvtnpAILx6n1F1K3yskQ6g9JIgg5yphRkQiJIJASAQkmCLHQAjak6NEAkSErgMQgZr3gRhEME8CkEoOWvf/fJ5yqeGsY47DUKZJACIAqjgXYgClYBQzoKAQYQjEiHk6Ks3JOWvMZ8485ql1nmAAmKZRKSdhEK7SvDFZIgKoVShZivZiGYZhGIaSOY+jhk2qYqwCMwXo0sawrpSSEoWAOY8AgACRRLBK4MCMglImEQERaBI2AoSAIpaKxVyYS6USu34nUDjXCE+oEaSQui6XklvlTEAExJ
iS+Ho1rsDmQm4x1SC6voWhdQphZiIg4hALhQAygcSUUoo9vEeEbrtN5+Pp1Sv65OO7777/3Xl4f3d3//j4SBH7TZymiSh03WYaSx5zv9kfDxlDjxyl0G5zczweic9q6A8pIlIpZThPE0/7zTYLMHPJMsEEgDkXZXyvX2NK6ebm5sWLlyqIxhj61CGipggKCFL1EnebXqV6QCCkRJwghRDG6Ryp2sVCwMLj2/eP0zQdD2+1rUhQ2wcSIsokERALJ0EYObJsUy8ik0iEVLgwYwwAKpIAYuHAqBmPzKDluAJBl4IkakQJSNmicEDoVFZnzgygbBwQ2OUQBpcESLVw3IUHV7eP83/5E6sEguchlHTN16G80OjmmquZRd9LnDYBdP50bg2UjUx7+usp+FP0/amv/JQWgsiSUrupenFtPY2FKPPUxcwLiBkjoZrsuyw7u/7Tbtp24FzHMPP8wnT0zBrXi6q/kjbOPGaAW46yzCNRWzYCh2qLEm003O/7Tdcz8zhkEbm7u/vw448++eQTijX+Hlg04Pv169dKMc/n43geWGqZMqWY56HkMrFoAIZ0XQwhdDGFEHIZT6cjotzc7kLop2mYpimFhLXjTYoxQrN8f/mbLx8ej9NUCGNoEZh3d3dImVmkTPv9Jt53BDiO4zBOL1/cHM7y5vtv+u02pf7tu++7tL27u5vev20+tYsfYCU01EMEqEVkr2zl1S3GJ7JDYY7b/t+FJQ8c8izeogROYwyuju+fX5ziqz955vJLEBE/wTm+wfOjXj34Vw/LwgJiT64tI4sZyjyugeViES+tsqjRAfuJzSGXS1U9ceaSNST18na09bfYophs5uvoHWganZ+VXf5sLuCD1wKJ2eVyQ0Ncb4kz+BieL2CCiCGQ5QwbHOCHoisNbh6Azz+54j41wt3e64X4BROBi9VgPjIAwKUQtNYbQAkAgAACuaYGAQCQ6Yfa7kw/h7bwaZqihUBLldSQCTT8CQERGZqiWD9cuZ5hT4sTbagoKwd+/cp0kzn2ahkGAKj9Awhb7hkej4/DcM45pz4ibZFKjF3fp8KZCHjKOYeUAhGEgPub7UcffzCezpiZAtzc7DbbbhzPmaebu/1Hn3yIMfzBT3/y269+R0Sn8+Hu1Yc/+9kfllL+8i//ElH+6I9+9uLFi9evXzNnIoiRYqQQ+lYEPyfsbm5uur7XeWqoPxGdh7E2RqaLgPEMr3uKeiwwClz5mcUDFT1aYUmzmvle0vVhR1twLsx4tEREOzX6vOYQoiv40exWME2TspKcWS0RamFPm+4iDLTam5qPl3PuukmbxWtJlZpzyJM2J9QozRgphJBzHoZBp6GmKFVa1IuiOo9WxfDmbGiRxka4NGbYvDp2ZO0ZNXwZ21KnKCJGpNpLkIgIAmgVkIgAiBJCiBQQsbTaCqpzliI5T2bJKqWcz6dhGEwhnCbSaQNArV3Z0v8Wp6m4OtIqw3BRARVEC7rmrLGstc6COieRPB2uIaZNbAyuUQcXyTmbHnhJkmyWNSPROJe6FeDZtUfHFk9ne3HBt4qAs4uIrOchrOjG+pj44+AHCSFoFW526qI+oyrx4n4pRavClkwllAuPANhut+N0Rig//vFP/uk/+fN/+k/+/PHw5tdf/N3vfv31X//1X3/55Zen0+n169fv3z+mcEz95v7+xTCWECikkAucTofMcHNzkzbbOmdCAFRtU7dg0sgvVqyrTpfj8agTPh6Pj4+Hh4eHu7u73W633W43m03fbWMrhwsAKSWWSXP6RKRwAYW52kCROZfhdM55tP6fgLWdiVaOUaWRgh1nQpRcKtGgWClJm38VhxTcsCJn2IoD+Tsax2PcPzguLwg1CtnMAIpq6OJCYaZCtKI2197t+Yqnp4Y0C41xwdTbh9mf/rdw0TQujGqBoOA0WJiTb3ji8i+ypS0WKE/LHwv+uhah/Kz8sPZG/4CtazFzI47qC5G5KCkr6Wc9lD1s8PTLsY1er/2pqdY3mosDLg0JyYWELbaYJSNiiERAAVBIMEYMoe/725vbzWaz3+9fvXp1e3tLMSDim7ffHI/H4+Ph8fFRy6C1hRQprP4KapGTiHhzu8tZvYAigoAsPI1THg+j1vYVKCJlPA2Asu37j+4/KrUTkJRSDo+Hx8fH43DOOaNWw+pSSt00FQDY7/an4TVzQYypQ5TpfD4jwd19//2bL1+9evny1Qd/9md/fnv/6l/8d//y+9dv3r9/pNhxjeOd2ZXBuao8MAFmaO/3cUFwYU6FF1gEKyovc0Xo+VeAFRVsucv22OKMr7H36vgLpFqfpgUCe5R2IAKR5SEy0uYf9qYZj9uLJT91Hv1ndLqKXtwCd8EdTK/w0LxixOJE4yqoG11I4WKe/kXe+yeuvIqNtua7MhcI/EwW1qv15SfsF2Iz9Ni1foxb++yrwDcpeQEWXlX6WW+WOP3N79EzS7Cto3AJ91oA8KkRRDTWeoGcAfSsMiCqtx8FQVqbmaa8XWq+mY+RcybNa0qIFJmZa6ZuQSJl8lXjFKiKJov6jtYc6Cp6ewRYILbMqdCM/gCwIAADVNOLyCWrU6UTadXztTKejI/Mpevj999/ezy9CwHH6ZgCxRQA+NK2O0VNNfz0008++eSjRAFy6WLCwrvNto+p72v1ywLyyY8+un91T0T9djMxIMput/nss09fvrz/yU9+st/vX7y4+8UvfgHAIWir8bTdbkVkmoYgcHd3d39/3/e91n/WKLv3D0eVj0GEmuqFAr4DzVXArgnXAq8W/y6+5XmZKxXIWavv2MMkwFdCte2D4hI2VptSUmFdK5ogYiDjua5W50WZ104AzMznaTC9IlBsdbO15qH+1atCqK3kDzIIFA0Gnqap67rNptZrVVm/73st7kIa/4kBBErmPJVpvAQuIhAIq5wdW90XRSSrFwoAlpMcW08FaxQhVQfe1DMroK6N0DTGFEOMUaVxUzKxFQUFABHUmi5VC+OsVU/H8VxKmfKQc2ZOXpDTeV4iTlvJMTHligdm1l4aJTMiau7WeRyYOSVN5uwbOkguVRGCGZ2sIYgW7Wzutdh1pmxf1H6sSogPvITGI7wG3jDqEh/o0dVTUcM3ah5vbEodOv0B2udg1VaUMjvU9WeBWq2Nq+drTcYVh7kVwrF93Gw2m23XJcr5/NXvvj6d/tFPf/pHH338wd3/7NXw/v27d+++//77r778+ssvv/z1r3/zmy9+9+33r8fhzAIp9B+9/ODjj+M08vbm9jQ8VL8lQsWvbdIkQ1UIudSlNGaqe8qllIeHB80v3e/3L1++BIAUew2lZGtT1oJ+NW82WEJji93NOY/jWe0FzByTMDMZfrRMUbm08RMAUO96SgkkEwYiEjQgK4cy6LOlaYj1vSRAUOdNISQKUKaWAhMApPJoBrkk7Bp2cqvV4+/P3v2ELLXARWg2LXE6CVyzzXtMMinlgmoOTe0ru7mYDM4FTZEfUOQWT3pMXTy2eH497POiyXqQNT82gPv5+H/rclYamj28mK2/uTiW3hj/jDx0deaz+YBNDxeXyJV1acN6YS55Y
pYupe3tbtv1P/2DP1TTy263u7t7UUr58ssvv/rm61//6m+0i6ieSW1hRATbfoMhahcgANCkWGbOw1GN2SGEAEEIhAkAXr68z2V8PBxI+Pbu5ub+Vd+n7XbLJxwOh4eHd9rXmDkzSECMadP3/WmYTscj7XHT9YghRcpChIIoIGPJY4rw4uXthx+++tkf/Y//5M/+9Kc//emPP/vDb79586//9f/3+++//clnn37xzWuDmHILg6dc9u6SdgwrJ4Bt33pT/IZ6qmp/+lO8GHMhxJgusTj4MEf1BQVf0IFnLhvkmQ9P/dB9WGoLfmIwV5/8Mv21cGfBNbc8zA/gmqpc9DcUH/EiLkF3PT1wpduNWi4WsnienUfOvH8Nl1icJqwT8AqYX75n3l4dfQrsXr+1f4NrokUrp+UCbTy0l1OSCxwW2EXXiN4aPjbmVafZgh56TF6Qo8VbrtxBrmaIqjyo069WFhARTbLAZpHiSwwNAoBADdDiPAWiPgUpPBFvYtzteoD+OE0FgKvDIaAgiVZLF65zEGCpIfbXlBPbL38qr7KtNVYsLkSstQ49cBAAwHJR2oZi7Lu7ly/efvvw0Y8++ujjD4ggj8N+3w9jFCmAQgQowJxLkcdHPp+PIjIBk0DO+fx4AJYUYh8iM29Sh4gFpJQSUuw2m5Di8Xza3b188/Zhu93e3O53+23h/Hh4CCG8+uDlbr9FRFVibm9vEfHh4eH992/v7u52N/vttiei8/l8PB5rm0QiAgQBFia4bhcDh3seCRd4taYJ/rwsMEraZc8uqCgoKpXZKxaX7Z0/76H1BiMMLQZP60tLSql1JDMqJznn8zSo84eIAlX642Q2VJedxrblnFMcVSGs/tVWzt5KTWq5S2g+Ky5V6rVYMJxfij+qB3JNgqruL8CZRNTE61kAQu364LrexBCIIMaYQtVeqNFkAGh9d4tKyaWIZlupRD6O4ziec86lTNOUrX4MKboLEGJACkggauJBYcl5smDIKR+ZuRRRhVC1O1UIERGxtxWp2HZpxtei8bWnvBkIKvQyK2yptRxUhbApCQacmrxQSmEGEci5pkRZ8pE5PBUCa5akNxd4q38aa7MHZP5b+xxd1X1b4CIdCS9h6mI2U1tOe0U7Qa19glZJOZxPiULAJCLff//mbz//JQUZhsM3X/7m7u7uZr+/v/vDP/3TP4HNpjwevv7q+2+++e777x9+/esv/92//6vjOcfQf/3N64d330mobIu0Y2XsU0raN6QAMrOwMtZ6KLSqJ6LWxBqnaXr37t3j4+PhcLi9vX1xf9ztdro7ulmFs6qIegQSXVLAxBlJ1QJSSgkRmDlgK3REWsc+SkoAUMpkWXVQJSXQqkgsF/qjZ1yktC1W7CLltUQUUDO0AFggwKUW3NzkB9Y1W0R8tQ9oo3pihIiEhLKUYIymgBMrwcofXTMPeFxcoBe0pEFDGnvAkKaB5klB2V/yhLK0/mxz85xgMVVwpNmeXxL3p9+yfp391o66AtCnHYvbDqTr5nMD3UL6sfPsDyfijMgu4GPrWrxivVkigjKjIPbkUzIHAHApRHR3d/fRqw8+/uij+/v7bdd/8PLVhx9+KCK/+tWv/tW/+u+/+uqr8/kcY+Qi4EIpQgjMWSOtQ7Seb4WkpszlMiEAYkAQlkm4OcCx7Pa7D17c95tus+nGcXz//v2bN9+XR9TS5CJFc9lS0K5EjAIp4Lbb3d/fAdDhcDgdBgkZUVIIt7fbzz7743/0D//oT//0Tz77yScvXt6O04SIIOcX99t/+POf/Zv/379hZmw5Y4gXRo5mwnXQ1Q/+7lV4+kE87i0w0P8p7jIMMTT2bzHcwJVkv7hMBVq8QsNs1ngCT6iCa2QTqaxzfab80tYwWXOpBQG5ChPPsUREmod2ccC9wA0+kxkqe/M83r9a5hLeU1D1ZMcvx1p1+R/6N4prhLj+ObhsQ48nRhbW8LcF+vWuh/WI5Nj5jJIgYrVQzreyqlqrYqp6dzGl5am5Bjr/pzTuY0Dja62VpCnYXpT3b6kfWADAq51SW6NaXCiAEKiF1mWAISLVEuCMAgEFuUDmGML+Zv/i/q7v+2EYTm+YEAUKgiqfF+6me1YR2KxH4TnjHc61FE8i7Ca7wmMGMbM7aEytU8tRg8YLFMmXCC4trY5I9/f3H3/y4Z/92Z9pjOjufg/IQACnI4QARZtB1AsAjsOjlHI8Hofb/XQ8g4jkAkxffvWlZldOJWMM2+0WEKdpOo5lGAb9arPZvHr1SksmNCEsxRhiJK0ycnu733fb03B+9/YBEbXDXkophZhCi2YUwJrjIEggAAuI+Q8L1MK5HcGj7lWiJ01/89SmCfHrzWOAJ+ntggRRq60/jmMIoUvUREylDEBECEFE1BuGiMxCRKGLalQNIQSKRsZb1cdL6ULVDAk7bahdWkfHUtrPQ6DaLrwzcJWSTXv0R88zF2rBosxsIZEUrtIrMYXBkLneQVHvoCqEXdcl8rpHBVrhwgzN4aZJYszM4zhN0zROwzTlUnRdWqfRiZ1wCfSwOWisrIILEbXMuKafcS31ASISkDCQObhKKVMeRSTSLFKjJfSBFvjRV+eppljHGKkBzWIIA13qahoGliItCPPS70eabqb6/6JDmEmIni/AXBIw+nmJj3UEyhtSY0v7VCV/wXYXjjJxqqM+qeYDzS1ULBDN7WYNuk6IIec8TbjbbhDx8fE4DlPX7YLwcD6eT4cYYx8TYiCM97fbH//4HwOm33zxOxb4/G9//e7heDq+P53H/f2LEEKMKXYppRSDOrdjSkmoNhQx+ADAOA4KdsWZlmpbROR4PJYsj4+P6r/V87Lf1rrHkgsBkIAwSy6JAjMXkBAiBCoUpmmaBGJARkaSGGMXE6JW6lAIVx8phpCoJmEZKkotsBJEBNVa2T6048aIEGNtuYJSGIOAIAlR8DUmKlYQIuAl8U+cZmLykuGBYcxCkvCED+ZkFFuYODjG7IdaTqgiaWuwuCKI64cNr/1jsLrmP7xiMfWDeHkCF8Li3CK45hbrCdif11Y6k1+9VOe3w+5wDeFbqqky13gXr/BwRqc/rOdmuwOrfVyPcwHIpe3E/D6A5Ssb+RCR4XyMFG5vX/z0D/7gj3/2s5f3L3jKx+Px88//5j/8h3/33XffPTw8pJS6LookbYBbSnl8fFQci4QUewAQaC51rgUX9BXKnDzT3aRO2+ymlMZxfHh4+Pbrw/l8FpHNthsHtXdSSj0Fpa1ZhFMMQHy/39/fvRCR169fC+f7uxeM8POf//wf//mf//znf/Tppz96cb/vNxEwPz6+Rcw3N3en01Sm8uNPP+m7eH93c3xzbmCfbQpqgFmzy1aoIoJzwnssWohxnoI/c/lTc/XzepdtzDrJJ5wMhkv271XEXhyuxZPPTNuP5jHKqqSuedL6hzivhPzM6+xbcp5zDxaee2AuP2xFtq7C0G5CA7tVVsAmeF0Fix/T3uj1Oj9PWyC16i8yp9jGjP1Z9vY1WBGuNUgXyhs+QcD9n9gUQotDqSt1NSTIRdvC3CO6ACNc28Tnj4DM
mQ4RFr4gz3ot18Y3ohdEBAiFRRjrEq6cDlLmbAsKwihACBoTsQ3bl7f7j17di8h3xwdoBlpEFKniHwgioLV5REQrk8MrLmNLMIHS4EnzeC2/109tvYjQRYdkQ2VEynSphJRLORwO33zzzccfbH/5qy9Ow/n2dr/dbm5ud/e3N/ubLRHFSJD6lPoUIzBDKRDCHb8AAcgZKOTjMY/T6XAkgJ/+0R9rXOLhdGJmLfJehIepaOzG4+OjIpLV5gmtd4K6eqowPZS3b9+eT2O33d3f3wNACOHdw3txBnURkWaDu0q47LwvUMJTP3+uPQIvsMses42gGiCQLZzYv/0pdNajZHuaUtput8ysZlMQNIUhpaQja2ChGo6w1pHCTRc1eSmlFCgaYVH1Ulq3JzvaMUaW2rFQRd5SUN+rs7IUxFBLj0DOnDOXIsygOhgRaQEnfRYA1K8oIlVcDResDq1JRvWZOo0IEXNmAIgINXmQKEQMSDFGJCIzrSIJah23IFJyztNU1POmlyVJapygJRbON1qL6xSDPABp9pe1+VWFUKVWP1v7YAiAzQjl6B6IiG/AQBgEBGAy9AvNfVp1QowX52eVz0n9k6oQhnBpYm7I0+A5u3SElC6hy375Xp6foahjK/4ndljIuf6aKfzC/mSeVGWE1y5m1pZ6QIiFNEUIgLbbLQkQllL4q29eD+MRpHz66Ufbbtjv931MUDhDFplEMOfy3XevMfS/+eLrkqfbu30WePXqBTNkrMpb7JJWi/Vl8BTCbWmEiPv9znGu5r8tBRGHYRjOVUWUVh7l1f2NphdWFREJCQkwpWDGR5ZcShmGIYYgWJgZkGsJU0QUBrS+1oSIMQXEKFmd0sWQTUQQmUh1QpBWElZE2NLKSJENUAKBCGkye0VRZrbAE1UlozBSCIjAyuUwIKAAE6EZBhQ6ACRSU6G84cc4upFCj3ALJmS4YjhhD6+Rb8GoFiz8gqMrlrb4c06gL/cXP/TP23IWs/JMxb/dk/714HJNaH7qLQbedjxmzhARsVK8dhNr7PXsdf4V1LwWT/EwcIcT3Faiu/zM0SLWkBYwdMg6k+F0wBTidru9ubkJIbx9+/a7b7598933r1+/noZxu9sAQC7T4fiYc9bKTsfjMYag3TxjJBGZ8jBNU9/3JCxcPEZJs6vt+k3XbSpJDSHG+M0335Qpn04nEdntNn3fn06nh/eHV9v78/k8TWeCsu1v9jfb7XabUnj78P50Ou226eamQ8QXL37y6aef/uEf/uH/6B//6cuXL1++fEEow3A6Dw/DeULibht5KIfHd8MgSNuf/+ynt7c3x8dDSpuhUfyGKi0hbQU0JTprhDQEg/m5QCe4LH/2BIYvWMLiHKGTXdb4/Mywi2Pyg9fVOTvMufITEQGVlB05s2mbrdGGWh9emB+ZxXvb808qZgto6DQwLK3XurmlXCy1V2kUzj2xC1oB13bEooCg8Ww/uB/k6vbZTXEE/OqOyDXPBsybLi7g6emzRy1a+wDdG50k+pwR4eqOXL1sDt7vtwD7VYJ29QQtB0cAERACUN+f2nKBWvUs/VO1QRImYRRG4CASCLbb1Ke42/Q3fegIxmHk4ZBrtE872oSMgFWhrHGEwS1ksZynpi1PiHSLH+KcuSBeejdDiyoSBGAJgVSCFxat3vHu/fu/+uu/3v9Xf/JX//EXf/Hv/u1ut7u93d/stz/60SebzebVBy80aDMgbbadeo26rhvHMzMPx1OkcDocQghSeNtv1EJHRCFFZnx4dzwej9M0QQzMvNlstrvbalyGcHNzIyJISYC0qtMwFpEcY2TB/e393Yv06Wc//eCjj4+H02efffbtd29/9+VX6mzHWiedhUWomOtXRBawkt/P4OWl3jXNQRTNyWy2fB8csTz79uvFKwCEiKZpsgYG2idNbfc5Z4SxKf+qORAApKhpmZbKi9M0QUB1Dqgaad5Cizj1R56ZkWIu7M06IlXZsFWnlLTpiI7GrW8eNCFbLxFRyywiqlOFmUOrAtBWagXeRXPJKmQAkQURtDRRDJcKKxTAes9kYOQLRWKWiadSRHP8prF2viitRYQW3C9cVM+0jLwKAaiU3NxZ6mxkFwcntQEVhhAiEaakMbCKFVrYpiI2hBhjHluRPCCASxFO1c/Vcqe/1dVpf+MaMoqXNEKo9aVFwzAdIl0cgwuz4xwtZ1KZXp6h0DxSlCywc67oGtov0Bga5fehpB4fPL+esSpGpmqTUqzLOaNoXVkqXHb7tA83jw/v/u1f/Pu//EvY0OOnn3662+0Q8f7+5asXL/t+G+L00Yefvnv3+Nvf/vb169dIab/Zfvzxx4jh4ZybR7r1jVCCgAFjzc/UmRBRy2C4TJJciqDe0fhPbIaD4/GoFZjUStL3qe97PbbROrtIVpoGAKfhUE2WLeUuaC0xyWpqiYEUGTKA5NoPgghQBLg2CxEBZgC8pJXmnEUAEYoUZkbRfoyISNg4ZF0UAAYQRgFh5tiO32Vr1cdsfTaMtVR0W5X/luZf8iMYMsk1TcPEpjVBVDsWrK5npIqrpBycYOH/XD8Gjn0aybbj4Udbz3Zx335y9S1+zv5aPGyGBPvT/0qkqEzsQdpmfsXeaUTW3uVX7aehFEQPoR8B5icW3O5L8zNIE+LtyVKKLdTEdyIKmsUO+ObNm++++WY8D1IyCvSbbpqm8TyIiPbbVWoYUGsuBxEZhqGUQgG2XSp5Ks2rWYNbIKDQhx+/snju8/n88PDw7s1bY3uaFzGdp1IKMGz7/nh6x8ybbf/q1ctXr15om5dcxki86eiDlzd/8ic///nPf/7Tn/70448/DiEIUi7T4fERIHcRt32iQEjlfD7uNv2YYdP3iNvPfvyjD1+++u3xqxDDnHpW4DOzXGjNFYPI8yi0+ApXEu1il58f31NwQ7l6SJ94+9WzAAD27FNzXk/1qUXNh9V3zXRFT7XWP/S454+wfrCcDf+wjrGYiQcOuuovFT44i9MGl0q9WAJUi2yyI0kumXBRHsBfvGrsaV6vhdomrYiLf3INT5uep1cLIknzaAVPGO23ftUyv+x1XdeJCxCqcxC4pD9VXjDz0T1FHn//yxs10LEAboyQW90CW8vVdyEwgCvlUrVBovZ4gFpYC9UgBdp6mwklAKMwSQ7CPcWX93cv7+8CopTMwyMMY+JcSrPQAVTvN2gRGcUfAQAWIazUIqxUFFmxJ9umtTHew2dhlm0yn7Qj4PlmKQVrjGKuaS0PDw+ff/75t9/+drfbnc/Hm+3u1Qcvb29vf/Sjj4dh2O+3Nzc3KVLXdbe3NyKiMZ/jcOq6rpSy7fqHd+8B4Pj4iIh3N7c559il/X4fQng8Hs/nMzOfhqM2BNOuXOrb0UOkQZIxRiu22fc9l/Ly1au7uxe73c1hmIZh6vrty5cvP/+7X6sOECOBIGIAECgMFzJwAdTiX/9hISgb9Nb8sYHugoOmATacZLhSlOG6T5JcnbbSevMSEVI9zta+JYSgMq64NjNd1yFSSolRagJhCFr0RYkPVovPBSX0K+GgnmpuCYTjyNM0mU4iLSd
K5wASjAKQSxfULdP4UgBQr1cpBSVTqztqECaqI8+kRCggkGIfKFjJTc2nQsQyZaziTUFEZEEWZj6eJjVGqDaomqGVuAQARLUqK86HUiYTXwmr/UvDOqZp0tpyClVdfqRaiTSEQBQ6xJB67TOubSfI1aEBgBg720RuTXoUFAVq20YVV2KMu90uhEAhUOyACIgEUQgh6P5CKQUB9A4IgUijzTEEMdNk3UoRwFkUmLF7cZIkNvnN8wjDw+I8536cMM9LNxIUWs0V+8qsio68gE1DWsCkooPeL8TTNG12+5wZISDRNJXH4ykS0oZ/9+W3Kit+/+3bvzz/tYi8ePHh3f3LLm2//O1Xwzm/uH8RA6f+Zpom7GobpCbYUKN7UaEKcjngpgZjVVAvbJ1a7VnNtjU4xIDTND2+f4Aadx32+/12u+36qNYQIhLhQNT1Xd/33RCZmXMZp7PKt9CcqyGErkuICLWnKJaiel3bQRJARlQ7wqQlO4xZVTZRioD2oCMKEAQ17lRRGtXEAsBQ4R+NE1jFAiICEfXVqu5rNjBCEm8dcUq/UoQFGkFrGmaob2jEcw+VIwdLGc5w1A4et3TVGKN+EPcWnJsl/G+JiDn7A2C47o+Hf6MtVh+wNeoZXgyi59mkHLvPLt5swTDE1WsyyS/G2OoRzzyERBT+/5z9aZMlyZEgiKmqmbn7uyIiIzIys+4qFFBANzA9fcwpy/2wHO4XUigrsuRfJT+uCCkr7NmZnoMz6J5uNI5GF+rIM653uLuZqfKDutkzd3+RwNIFUnjp4W5upqa3qqlaIzwt+ZAATiUcoCgCkVG8nHB+t9QaFaQxtfeF4rhUngYXJZUkJhCNyVu3MhFSzD8gxu12u6ibytp91wmzIbq/vWPm1WKpTVqMMVFYhEPotQcrs3jfc+irylpLeqqEJYrAcrFYr9fWWmuq1WqFhgjw9evX2919VVX7/f5wOJyfn/d91+72WietahoR6fu+a/vLdXV9fb1YLG5ubn79q1/Ujfvoo4+ePXv2Z3/60y+++OLLL79Yr9fOOUCJMRwOD7Y+Q8TaKkMJoQ9kIhnWk4f9w85WhjA8ffr0Zz/96W9/+zuwoDqx9m0jIsCRu07xIqHH1IrTH1VVlYklGZdETmsPKk54FOEfrnxn4uSjsY82D8iPxPEyoy8xSjQlYTyZ/Fb+TSndkR9N+JTMdvJsafDYmcmw5Y9yFXHcj3HCcMp/zidZUmumnRJcxTYNCX6kzCGkBEU4umD0pn4hpMa+mnRkUl+mTG4ThgOFDpqZeF5m6TnSHzklFcaVljn1A5yselL7YeI5kiTCS1aAY40Bx1oFjN1PbdvmYY/8XyivRRHJ+2M7OJhdODZmYMxCy5vp06RKVfmKjCPPmLrgTpj85BUZ3BBHuDECAsYYq6oygN53HKO11iBE5th3i0VtCGtn2+22b/npRXV1cf786rJ2VmLw3pvKici79mH3sK8XZzFKFzyzlo6xhMgIEdS2J+Cg7JUEBCLEoyGdETtvSt6RvNFUKGolQKQogJmf0byMEqoyeHWPsqmua89RAMHY7169fv1GixZQTmp1lVEjzaIKLLLW5s4B52drRFQtGVlEBFPd4KqqtKhYvWiyN71aVIonzKyu98PhkGvZp7NhR2nIEjebzXq1aZpmvT6zrn737vbb777/7rvv4NigxcTIAlGYQCbkjFkOwiliLCFTOs7yHLhIDSWi1ORLXxcATaTM+oAgSq77QkSDVjyumYwAak5rT3m1hNXq0LZPSJj6PQz4EGMU1oKiTslQBRDjMfjv+2NniMGpSjazdzXkEFzXH0le1RL952az0ZNv1lotTKroKIX5mntR5CKZ2V0rwxmQotrqAOfBclaRZ61FkhACIdV1bchlfQlp6GIVQhAQiDk5fDjx6L3vAmoDjOCjhgd1ycofZDh2NURXtPhQ7rhg6GjYDHdS4mVms+oZ0qigViQ35IQw89uqqlhi732U4JzjollFloMxsB7rQkS19rXDBzPX9UKtX0UYvY+IfR8y89S4p06PZJikBj9zIjEiCoxabqp4jce+qVjKC92yDCjdvhCCmlDZEaB1bpkZ0vnYrMoO/WlE1FrL89TvKrboSsuCnIqZ2rEj1TcdwomHvltUjlliiIjm5t1t37Xf9rdVVS2W9Wq1Wjaruq4r17x9c/v69V3bhldvbnovhIvV+swQUuU6bhWGZIwxDge9VKy1nvUg6SimiqlAd9/3er46hKClqhR8Wo0iM4pw2EFh8Gu5I2tt3bimadbr9XK5rGuXYx6rxVIftv2g9uNQ9B7rulJrwtnhJIgxxtnGGAMgRKDpAOoiqLDyHDkF5yNH5b0kYG3lLCGitkee6FQyZBYMPPDomCm5Xim8RxxQWADhlMKU/RA8Tp+QcSoXFAoWnLqUJsv90Ks8zQgzyV3eSXs8eJiybiSFsTdXO0pSKQefqFwydo2UImQCtxI+RDT5Vn43Zd4fxYbSQ2YWWRxiOt45kUyT5Z/858k7+SaNHcnlMsulZeBkdYSZCXNEAmavTGELSe3o+94ZE3rvvXeGAGCxWFhrmYPCm4iMtfWi2d13BqWqqlWzAKz7vvXeu8qKyNlis1qtVqu1Gn677aFt27u7B1UdRKJzvbFYW/dwe6v5NqvVyliUGBDxycXZZrMhfhDwu32/Oau/+vGf/+QnP/nyyy+vrq5W6yURNU11e3tb167v+9VqVVn20VskMCAchZklAjKStO1BV3F2cWVoudvFZ8+eoUZChh7EQ8ie42CrTHCvhNgEtUqamrwyud6DFcUcjtdJHCivGMPkjoxV8PK/k+lN1Mo/ZML5dmlqlvOdfKKk+vK/8y9OnpSxcnx8F45l4rGwFrITpKRKLAxsmSXG5BEmdARFjG6yF/lzeqe0b2FsaFERoCu3tes6KoIJ5VCYRH42ITJjKV11yjzLT09ge3Lf82InGznxOwxTguMpyjy9gdOOA19/4IVTPD8ybRwRzjETJG9BBuYcRRER1FxBBEgFlWHQZUPXM0pVVaZyEj2HaETON3UMfbcNroGrdXP2rPn4xfPzszWB1IZCCN0BQgjehxq5Iej6FhGtJQDLglEkaBQTQRAQMWp5Q40dypQh/F4onaSFky/mkct/HvGWCMd+/UyRIBgFRBSGEpmJwv3dQaGt/mwtCgkAvu2INONJRNR5fTSfch6gZlCJSNMMhRazHM+e60wseRMREU2w1mqpQOOqumpijLvdwUc5HLoYGYeDZilKJiM/0RiLTjOxyf0SzpMnyzELDiAwLY07FJcuyW2ya3kfEVEFWdM0xhhA4dT0T08P5icVwzWoxcwhxBij0JHJgCCnM1HqqNKKl5ldAABH8qHLNT80yqqDZ+NQZfrg4gFAFGspn2hSe8a5bBAqSgzJ/1hASVmIpi1nb4WxqPYpDccjAUmQRCBquf6sBA8UnTq/6+GungeDMIVDjzmf6UcEADKDRe3bDgAyN0QR1FUov+WpuHGV4p428Gg0sVNE9l2bDDEkJDXVnLFxloECAMJQJlIiorVV0yw0qzArsRnUAFTq20
ctHwxLTFsPiMdGREiD7yCzRy1jPMHS/CP1rhxM35LQ8pP5AWY2yeWXF6Uype/7TE2Zw8ShyM2QzzKi5XGuGSICsHpVAMB7HzwjMKGNUW7v7hfoRehw6L/53Str7ZPzi7pe9D6en10hUt/xoY1v5K7vwTULQ87ZmpCC59B55n2iNYwxlhHCUpSrbOq6TsvXq0GoVS20MX0uMwMACzspCjDo7XwbNLO6aZqmqZbL5Wq1quv66vqJRg6rqgqxjz4wByTZ7R/UQUAGjLMSemRBlLqpiIjjkGNKhJpcQwYMHq1xDRs656xr8qbjUJFYmLmqnaaohcEMGaq9DJG0vCX5d+YII0SRaUPMbGiZ8fmW8veAkTOPO5y6Js/PcXHyu3TOlcJ+XkQhuStG9AxjMZAVl/mUyucn2nP51twHX2pCI2Cm50vkg6SQlSQ6AZeMr7xf8/mUw5avJ6Q5AYTJPyUJP6JRJJ1SsjuN+vweAyyTH8VoxCH0fahMjDGGrhdjiKip6pTFxxrdUg+TQUuE3vsYQ90454xzzWq5UCe0iBwO+7u7u/1+H/vonNtu97H3elTdc7dsNvVqtd1uXWWAxRBXtrJNrUvousOz68UHz1989dVPvvrqq48++mixWKgsYQ7MQSKeb1ZVZbsORTxzIApIBpGMMQxRRLPMxdqKGcigRWIREf7ii8/Ozlf3MDpujjioVY/hf76y7XGSXsp9mbwohb4yR5ucqlcibf5ESXST8eWUGpQvPFopAAXS5ufnrz82lF758P1JkoQZip5E4wnllo+VJlMeB4c8LphwMyl0wcyRColVLl8mE57QrxnbeJCsxzmI8jgT4ZSnMVkgFqUXyr+Wc5MioIQ4ssqywp0VrBIZSrhNBoSiwex8p7IqU26WCGdn8JS7wnQH4RE8fw8+5Gpsk03Xdc755+Q6OezkJgmgQYMGY4gxoERHVNWV7O8bB5sVPL168vzq4vxsfXl2ZkB8e3AVgKWDUNfiwUcKHQWwziMYZokCQYQFBQ2SFa1co6cDQAYlWoxmZMyX9hg1lTzksXWVfxobhEdjyRRFKcbfJR7pTwQMBIxoASCymjpq6jMAONOU+JknIyIMEKNAjJBCGQDwsL0vcTVPO7d3zzMZ/oo9Dok8EtTXDkaNwxgjAg3Hk0CIBC2CP73LmT9kaJQYfhLaE3ZXcoYJGyy3Lz0p5agy1pcAjuTPzFVVrdfrxWJhDAmwiMZMVA2wMcYQ2BijHdU0iTTDkJzJbSfUmauRn8xq9MexbzgbPfKk1qBKRkwhIEk6verERISphEnWEvW3TY3mMSmNeUoFlsYMAQ2UGTs8o1YlFKxAkkWXrdlso2pcS8vGtD1ngzDGXJd/2E0eqnQg4BD10qqtpmi+p/PUh0GGc3HZQ1GpQWicHhkzxgzOCEAZzndJSv9WTj5FG/1jhpgxRgRt6j2YQ2cZGUJg5mF3siUsqfzT2FcyFPwUiIjDscbMGxM1jfD8OKtT8Q9InCTPJ5vWYIYIJxbVj7KWmE1WKZTzbOiWdCEStVEoADBSBDYizEHTCbq+931bOXIVqvNiu+sDkwGMAYXl/uHAd4fd9vC6vr26/uDi/CrG9rtvv39pbz/44KMnT6627YOi6749dF2XD83u93tyVvPRyn3P5xj7vleDUAlBJ6+FfHP02BgTOCobSiikvhhvDPbed33/sN1aR9ol1Vq73d9vNpuLi4vlcuksmaqytrGO+r5fLpeu0kA9iRgAsKkGqQAnd5umgEZEq2VFlRcSaQhRtNlMjIEEjEVnrR5MiDECsrByHlRHDGSDMENhwKdxJlimW+ShseYchxR3MaWeTiRQiVLvvzJWZb4gyeCcMNNyZBmrL+Wn57OdT6Nk0BMPWR7kMZqZjFOSzUTgzaX4Y8DB1L8xj3A0ehFEIO9angbNonzvgfbJ5ZR/krElgEWIMiuOGbBptFHJx/ILY54ikixea6tAPQG6qslCIqcfD3BjWK1WzNz3XdNUl0/O67oigru7u5ub7X6/b1xzeXm1WW5ev3797t07ZxpbkzFoLBqDRAgSm9oYazOEqsp98PzFj370o08++eRP/uTzqqq0Ak27P2y390S2rp3KyO12e3Z2ttvtrKWhnhuxgLAUwgw4BqnrRdcdfODtdhu4PbTxgw+vr67O7954GDFTztpLdhOXCF9eel/5eOkjnGx0iQZzhMmDT+5MeHHe30KciIhQcaqkvJ83CKb49qjxNqfK91x8uu3B76dBGPsjJordbLZzfnI8SFlCe24i6gPWuZLz5KnmIHDJBPKKJvExSAYwjKNVGdqx6Bmj+DBhdDJWWCeLhRlHKt+aQOMU2KGEZ36mxIfHEHjyCRwrzZMLx76234skk0lOliljlouogb7pdRItR5SlTAtFlXZAJAGR6JwjjqHviYO15BBiv28ifPR88eHz58+fPV1WTthX2IeuBQ611MaZCqvekEXe1bZxfU0QhYGjCJCAaFE3jgJILIhwQoWc4fOELZTX3PEBs70YQy+Wz0yen3+IQSv781AtAkkEmRkEEAWAEIXAALAAAXAfBHHoalgICFBlS4OiNMhhAgDbDCmsg0qRSoaQHdqjS/qviMZAtNAIucoY5qH0IjlhESQk0nS+vN7H6AVm6Cd/sDX42FU8wLnBQwn890yAU4aC2jxaNNsgIBkRiTQwhOQHV5Gt5hMkPxHHGDW7cZAmcIzSJOtiUNaPXiFwevAsZ+cqSCGxaM3CzWmWiIKgTd6MtccSFWXGhIgwq2uSYDhiN1g4Wa9gZlcZY2z+Smm4yrhjQTZfY3ElJpACAwQQQVOQdA5JlzGKcMwcQp8jHDi8M/YhygA9zbAFAGOiMcaaylZu8Ggj6npJSOjIiGRobVfgm0xxDxGtrQCCWv7e+6ZpFIYJaY/dI468TihlrlKMpYAb+UowaV/ppgCAtvWao1zmw1klGCT+zCOZkT+H/ko0HtIgH5W2x7j3cUBkEWBEEmEJfQTLpm3bphJnDDgXuD/c7x/ubrfb/aZa9l1AxKpqKmuFUZiXy1UU431EY9dn5w/bbrvv37273e7a5XmjlltVNcwwVJrlIfTXd9MDZZq3nHAMMpXFGJ1zzrqMDBrlszikkSczfojAV5VVx4RaYsy83+8BoG13i8Xi4vz87OxsvV6u1+vVamFs1TS1rasy31DPUkHUHG/dGgWaujijoqvI0OEsaQsRmUkAkFFM3lDvvR5qMUSIlMPNtmRkE36X/5kpOSumpTqYdZEMxMmWlwh0Um8or/LYGxQqXb5TzmeOyplISo94flh3KCPihAWXlJPJgIo6K+VK33OVGkn+1sS1X3IZGhcFVvY3lEQbK3kiAnCCtBJApvdlZoXmic099+VsJ9oDFqllkE5nPbYRxYCjOShUJTIA6eGLuq7ZBwJsmmrRNABaPTkiekqtipDt+fmTs7O1s4goIfZ3d7cPD3cAcL7ZXJydgZD3frc7hBDOz57s93trDUvYrDbPXzyLsdvtH87qsxD8YllfX11+/vnnX3311WeffXZ5eVlV1bu33zBz37fGOFs5dTsZY6xlA
IhImvcuydXaQQeROQIROVMhSYwxBC8Q+z4gUR97dSdtzprLq7NfvnqtcItxEKWEdjjjekzYyBs0TRPCQv5NMKdACXjsd0ZmGJMSFMhcbnf5Y8DGYk/fs9cwRsJyniUtlJ8uH5tf5dng4nX5Q2zC8pX3U+sE2qAKbNJIylWMJFZxHT0XiXjnPBBm+4WFa7ycZzYUJzQIM/rNUjbfz5rcHBpSFNGZfDoz9jzt7MWfT6bkruX4J5GznPCEx4JMK5NlgE125Pcy2/k1Z+yTv2YgnET+DJPj2iFbGwQgKFEArDF9eyCOjaWqromjxB4i/+zLzUcffvD08mK9XETf9Ye+MQ4WhsQYY4RMNBgr5yz1h3Xo2u/vOIIEAESwZAOYntnHCGQBCRBQSCQikKraEyR8DN/mSy6h+v4HTj7MRx/xEKgZNCdihgiAoFFBQmAUgBgjod7ECKxGBQBYskgEiFzKRwA9WT2kWwBIik763sOIfIaJUTG9ch+NtZ0PIpFIpTYSGetcjJH4WEpEtZgoQWu4ngTge/AHHjcmM1mluxodnQ/Fkltvn3qx3OIEeSQiPXCVrKzhiKaq9dneQDTOOSYQEWP0bOEQKDNgj8ifDhxmFMr/lOSdOew7rZKfFWUqetuo1qv5olJ4CfOYOa578hwyAHD0GZfKQIIhcMZaMlEABQgQBSQymGNf5cyoy9ig6u75uJru9RCfxKjaucYJNY1IRJDUDRdjNLa2ebE6dx1fCxcJH+tg5V1DjRmmCg4IQ4BxOP3IjKQBdsPMqr4DDIHBCdcFIMRjgYZkyWc5SIhQeuRlsAahBPg8NgDDuZ7scWMiGOyHmbzI6FeiekZOmzLaqDg9Ya0darACQHKeKtxMOiSfRYzOUAqpB4VUQoiICIYRQSAyI1BgAGOOlljXd8Lx+vr6iy8+e/v9t7vd7rDbt23fU0BEa52zFQTufUAw52cXu0O/b988bLfm0IK7WCwWy+VysbKYMp811t0F33ch5+IqCi0Wiwxt7XSi2zEcsjVVxnbtpNK4Y9cNItKs3exrGJAWWa39IUeu625vb/u+77qV913fr6q99b7XbNuhpkb0zAjJPkJkQ0NB1JzCTXSsiAMARAYRa+vQVaL1bCMzBxGklH3NIAykCf0iwgjWpOqrWJ4BYyGkoaOAgAgIILNwFFNNU0cU53IZVig4KRFxoUlAIcAeE0IlIsK4SkRGyvyVCQPNSiSm5AoprqToTNMvJzy3vEZENdNuJzit18R8TbDFcrbl58rv5kyt8iotNyJCBCySteCU3Hr/HX23rLIIj4ufDJ/SKs48PUdTmVnG2slcMc3wEREJMcZYWVdVFbCoAZzPnWtpGaWxzz/6wdu3b9++fhNit9s9IMnz509/+MMfWoKbm5s3b97GIHXdhBC7rgueP3jxjJn3++3Z+ebTTz8U8Hd3t+tN85Mf/+izzz7VIjFd122323c3LxFxtahFhBliUAgg++j7oNA25LYPu6qyCKauqes6rEOUKCACjtGbVKWOmdVTE6IPQZBwuay+/OGnf/V3b/UkfQjHc4PZy5BRKKHHCZx8bI/m98u/ljdL9JvYhOWUSlLKCBZTY/HyT6UaMSFYken0ShdSXux8FZMrf1RG9RKlfOk9PKT874SQT5Lt8bt0GnpDQlQx89LFM+czc0jqOJzONmfHfGlNzWGSyT9fMu70M1lyplkYG1RJN0rVw9M+TlxRWZ/I/yyXwEUKQAmcjF0ThgxjpC0WeFxvyplP+sTv8xievEpmmz/3GEjLyU/emv9TEAYpC8eGgaCQlGhQnCXxfe/D5fnyww8//Omn9Xq5sAYq6QJ3BH5pDZGprQss3sdOQgxsIa6XTXyyeW6g7fzDfr9voefAGIyxZF1kQAEEYBbE4beIaMPV99POZI2TfZnwh5LbzCGTrxi1/5WacsAgLCwiFoEIGASQGCIIikRmcc4hiXaBQ9ESXBEAIh/dB4NHTI7yZS6njDUn97HEljLyy6z2pAgQIgGwAAa1lwSZBTASkSY+iAyprfNVz8Hye/jGKd3g+DoO2Ybj1zlLeygwdjKCiAAMdJ1rhKhLEYrU+rTFKpSHUACkXMTMc/b7fUYJbcqXP+29F8mpbpJkMYYIMRUXMcZo2b/sIBYR7cuHQ1oEEwGzYSY9xkZEAKjqbCk7RFCEOB7lUclgjRkyM0sGC8kZfdSth+ORISvxvZaQSQZhjFljJDJoZICGFrZh5hA9AFSVNaYREYs227fZltb8VY0QlqxbRIiGDFVmTttsEVGLMpohCGOMMWrOaEkhVjM3pU0mw8/kRemJmKqqgBAQhYWVFSkqAmjqZ1ZmBQBwIKjMoOSIXqbMMcGirN38iJNeWRctxdnEkwhJ4pR6PhTWXX5Ambz+UxGsdDFgSi1mZkNGSBAJMKaztcTIIuJDF2VARWvg/Pz8ww8//Pj5NQB0XXd3d7d72O73+/v7h7vbO1c3cbd9/faVGLtcLs8uzu/vtl0Ir169Wi6XZ2dnq816uVyqvTdkCwfOUIoxtm2bD1KmSZrs1xii32gluWKHyrfJ8zXgc65GndLvERFgSMAOIZBw5MDM6rPw3j88POBORDhGXzVN7fSjenYUbDphq/E9jjoxQDRgCABiyPqGiDAiI5EhK5EjeOWyivAxSgw+clQ3PhENjU0yUzi6FooGBqUSwMwWp3WaJSmaWdso38JCW9JvlQ6M+ZU1laxAUCqeWz5W8taMmlJYUzQ+z5MZSsmpR4I/zRBmymh+t3wSCtZfMvHJrMq3Ti42VzMr1xVS29N8FR8aMh7y83PhXYL9f69+VQ44Wc4cGiJSZghO1A7NC4ICUMyMYAgpRun73hlrbSUxGLRtvzfGNK6y1lYLPWciVVX97d/+7Xa7vbp6cna+6bqDD63aV998/VulWEMuhNB1PRFdXJy9e/dmuVw+e3791Vc/+Cd/8tPr68vlqjrbLM/PNyyh79uXr24AoK7rs83KWnv/0BtjLaJWkSKyiNpXDLwPf/8P/9AddjHGn/70j2NEZkD0pDxBBED0QIX6AitbaWGzGIVMZY384EefVtVfSypfRkTqeJEExATJBE9mwhNnzDIplVtzco/mmzj5UXLq8WaNqhGOtng8DoypD8aEkB0uE8wscTiPgFMN6XjRODyV+MY0U+Xku5MpTSCQ3y2puJx/yUbyperFxD9Vwm0CpTkAS9DN2RHNwnFUONcnk4RHBHl+uNzckmZlbEtMOBs8glf5NxYmIpzCt8ng+c58pzKQ8zKHV2boMOfV86Emz2eNZzYfIXM0cUuN5PGhcP5lQpQYalc5FIiRgJ88Wf/g808///STS/PKGJDYVVhZIz1FYm+FAJiQMAbuWx9EGGpnL87Of3hxfuja27vdm9v7dw/7g2cRRELPgDi03QTW+NmJMNMEhx+7cGYTntz3k9s0gaQqncfnkZE0XieIIhAFEGjI+xqqjAMLRC1poZX9Bl6UsyQQQTROOHEcQOj9Y/sCYwzUH70XY6y1BnGQkzGy+E7lLGI2HtP8ZfR6ufASaCfh/H6QTgGLGmGeEN0JU/Cx
T2gNWCLSWESMMYDoGUItcIaIqUwoERGCGj+DZaUGxq7dh2Qyca4QDoBD3eMhdKD+r6qqVsvzCqwmxamRowwMkgYsImorOudCCESQI3UlcHKLwrx9iBhjrOiYFIdFXAULdzMU/sQYPQAwQ85czQmizOyL8KDaVjF6Im35rRn+BmGwqI0xLFE7aOiBLpHoqC7YwvEAxTABOUpPvYhQUq8IZTssPtuQBoxB1KQgkVLiHHFAl6wFgSCF10y6AqeqpEIwoPQoPUSvx/zv+WJmlZ75tFhe5skr2/wpjjJsHKdIYOKNxxNnpTTM/seyMnZ2Iqj5lwunZ6sYAAJ6BNSCN+kMZBTBh+19UzWLxcI5CxJ229v/9nd//4tf/OLZk41e5+fnz58/r6qKQzy03atXb+4fDu3B73a75y8+Wm/Of/v1d99//z0RqvXY9l3f92dnZ9oKAoSqSjLkldzUzi/We7SNtaN15Ahj8SGFTS6iNXKQiHJrHERUP/BwrjKGyCEdoSKjhW1AiGi5XK7Xa4EIMQwJqCikWWZEaAgAQipJyiwwnO7xeoyxFPoSGVHyYVRJtphOBwexjgJyrK2MpYY0PqKWd5qKaNXkk5RKhJWjZVzJRFW4sk5fODYg8xwmP/JV3skYPJln/m+5qJIvlzua9zK/UjJNHNt+pbQoRyg/+n4uT6lcZ35eGZx5pC8ZM+dzFDCr2TBZzuTmBFbz++UPSe63jAA8LtUz/DXFxAFHIpOIBt9VAS5m1o6CMpyNZkMkQoj4s5/9DAC6/WG7fdjv913XxeidczUtLi4uPvvss2bh7u5uDu324eGhb/c6E+ecNZV+c7FYXD99+vnnf3r97Okf/dGPP/n0g7p2VW2cQ4597/fGoKvIuiUAkMB+v+u6rqrPAYARtK9sjPHm3d3Nu3fWVFVt//2//w/Pn169fv36n//zf77bbZ1znj2CITQCHEJAGfLFe9/Wi8paq2ULkNCHbr1eVlUVknfDGMMM2jVEkljI8FZGUPpiJky/vPkYRZTC9eRfy3/K2F6CMV0M79LIuqOihtNkKEgCYz7bjGZY6FglA5mPZ2Z1VqTwbkzmDEUNnnIhcKp66kn2ld8KIWZeVzpHS5CW0zZj74mMY3QT4Jz8a2ZZUtQJKPktFFwiv5s760x0rMkX8z9Lvjrne5PZyrjS2HgLcD6CJL/vBBTzT0jBV8tdKz5K81fmkyyhV85wjnjz5yEFZpOPn7KVm0eb4LCgVrs5cvUQQl1XED1LeH799Mc//PLZ5XkIwVVoSESoJgoIPgYkJOf6rre2soTWWhM9GnRuGReC1RME8+w6XN7e/+7719+/vXk4hND1ZGpCZECAQYNDMCJxfqQwz/zkeidEUULsPdf8Aa11UWS5D1cEr3iKSWcSFETDIjh0XBDF8UJfAgBgEDURVUUY0l4AhLk0EcgaGR4sE7Ew08Lw36SHWVPlRgUxRiK01gLYyN4YrfIRZUaAJ5FkAtX3AK1kKZMXx+xuuFNg2sh99tgm6lvaMwlTURNm7mOwzoQQ+k6bEJK16tUCY4wahBqS0gQ3Inr27JkWze+6Tg3CkhAQTe7To/Ppuk4r5htjtA9kbkyv9fShaGkTYwSIACgSY/Q4VBPNQcKcV0JEqGfhjDn2cMsWIOJIDePUZkwJF4aeN5J/q3EYijYkuZNHVVW5l4Yyde3/rnVoLOlBR9CjejF6ZFSrG3Ggd0hvIiJCed5SRMSY2Pe9nj1TH0Tfe++9Vk13zjIzGPXTpbM2rFnVx1Wno2KUuTqmsj1Ao4O+InpEJVRVhWg0vxTGnZyoKASQEDS5wwbqGan6E/SefE4vhXCM0VibcSNvjYhwjNneiEXNmJPsNJNwgRJYyAUGEEBO9UWdiDRN44xj5hiCiFRV1ffd/f29+MM//Pa3HGNdu+VyuV6fXV1dXV0+/dGPfrTePOna8Prt3b5tD4e+aarPPvvs5u6d975t27bv1EmxWq2stU29hCQXyhZfpTTMcgOOonmkHkjq93CkfRgoveu6zL70WK92L9zd3wmM4lhVVRmLALJare7v7w/trus7EamMJUPIAcAQEdnhCCsIGWP6vse0NSnZDWKMTe2YWeJQPImLAj/qhrBoiAyL6Aliy5EQCCB150QmIiSw1inlo2WW2AWPiFihNRWrUSJ6pGFAoBACxxhJ12YM5RowrEfIB8UFAAEIMRQRP8UFRYjehxIdM6xz5kNmEzK2MLP2pn8qU1hLWRKH0gCqLaaKfCjCEoVFgJBU9VIMiFG/OGyxzkUth2T9JpoDYAZjaoWbIlKeg8JgwvsQMbKfFLrNvBVxqFOktxFBBBCMRpgmVIQaPBzoEABA0/JFjjpQCYpQ9Csrod22fbKoKe2MLnJqYytAAguACAKiEQAWVHtVe/choiFIbFoEKIYdggFrQ+j33qxXZxfXzzbL9cXzj37+859/9823BoVAjDHr1cJay/0DoX396h/7vn/35uVysbhYb5bL5sUHzz788MObm7d//4u/qyr7wx/+2aeffnp9ff35hy+qyio7FojMnkMwAI5sDFoUFLXxA0G1rlbv9tvb29vN+my3O1S2rqrmf/lf/l+b9fl333/zf/o//pu6WvzRH/3Rq9ff/+Y3v/7k4w8RGXgzsEJhAFZFhEGsq/suioAxWFkXhINvP/vkxZcfXv38b/62RmhBuhDBurbv69pxZBJWxzEQCpAIRjGObMpXIREIgZkFgDJvL/ENkhdzwnNFowyK2sPmZmqaOkr0ftk/E5KTT0Q0Ypkz1kAEBUDADCcQBIdT+Qkr3NHBUV6l9gaFpClwqbyMDOEQ1aIQQMOzI3MUsRRdyr84D6sB6uRUHSmyANk9liGW6RQyWLjIU5jPdrhT8DF1gehsaPyiJKWWDaJBgCEiQpaYOXDwbVDCp3zYRiR675J+NllCCB4Gnp4twygizlWjIr1GlcKhb2rehSyMQxLeiCjFNiGRAMQs/1IRvDBECY6sFZIjoJzeQCPZU564dwIjW6f5WgEAjEVA5C5y9BCHJLHcJGrgkzGqgpuhql+PMAhvKfIPdS9KTpv2i3ThR6ubBQQ4pEwaANJTWDDEjhZouxgCoVRWrIvC4oPz/fWigYe7FcAPPlj/6LOL680B/V3fH7DqrWmgblqOnRAsr3qyEWgfd4ZJIEYhaGpB8pG7EK7e3m6uLuqr5e6yuvlw9f3N5S9//f2vf/sG2fYHFCSoa295x3tPPdaw2TeqZQAhEAIwChACiVIDiGBEiCCMJAAWRxRXkMCIJDNiI5ryfkGwZcpT+iFisTkOjiAQEZFQCU+/ovDUo2sjBcvQMWuaMMXujrIFQMSZY0thpW/mIfSEg8pVXCJEPXMPDAhgDUAqm4EAHCSjRhaLXtgUOAvJbBUN6c2UYyZNChNDYMlY1f3UYgEwekQMlQOQAFmiKBKZAyCgYULRYiRI3HcY29qaTV07kti1vvPYAhoQg71gLzYaK+QiGO/9+uJ5d9gaMpvl4nLdYHdf11XbOxncMcPxfv3h+2gMpCxEEAFjrDHG1dZaWizqLNMHmkz9GJhZBEM
IWqWTuI1dD8ZUxlSLoZtcjPFuv1vVTde14n1d2647BAnOGB+jsbZKtqJoGZtclFKGmInW6UZEoEhkAUCDxXqGX/m46qWRNcs4MIcYo7CLqR3icVO0rnfk4IOwkFBlhjzDiASYghUKghShyn0LaWikCWIsV0cLTf9qkbKJNZBPYdiHfl9bFxbc9fHQ+d5HjxiN64Eaa8VYZ4SQxXcx9A1IVe3F4aGL+0PPbKxdGXQQIQAQCQtEBkAgi2gETLC4TPL0yLQNEceIIIYGwafERUQAS2aOMSSNUWIMzBw5KvVpM718LlSTHqVQofUU/eFw0LBwTNa4JiZm/s8xSpKPXDSalnRoQoYzXKLuXf0nR9VgMEZ2to4xhtinJFIfQgAWZgES5ypTGUADQADGBzLoEMAYMBB920vY1ZVE7NzCSIAQ5Ob24fa2/f77WzRfA9D5xdWTq2syLkZplouzp09DCK30VYyu7/f7/Zubd29vb64ur589eyZocsS7PGpmqAFtLaYIIFEFw3q1ybyrFNCJIwAzC+hZiEjRJz9LJI1OJ2nY9WSMi9ELeAFAA47ictN88YNP/6f/2//47ubVt99+8/XXv/3NP/xqv9++ffuWYn04HJjZOGSGtusAaFmvgW2z2HAEY4II7Pet9365OAd5DcBAYlUEMwdPNCToRg7RoFgLRMSENZDNKxHRtvcDYzzKDzBIv7+yXOlNL8fUBkRYHEhLiHI60FF69EvZkx/ILw6CZPx8/vT8Krdt8li23LIOMRmznED54vy+1ss68o7imswrSaDjk2mEbOWecFKWCn0mvHznuJW/z3Nc1i8q4VaeahiDaPS7GJZPPsbMpZ8vr1FTMjQRHHtzoIOI7Lfbv/lvfy2RjUERBoTlYokI9zc3yIcnT57UdWVIrp9ePnv27KOPPmAJZ2dnde0+/+zTH3/15YsXz66urrREL3StftQ6sraK0bctd12Xur4OZ2pjjN9+++0333wDi+a777770Q+/+vl/+ev7++3//D//3+u6/vKHX9SVffv2zZPL88Ph8Md//MdN0whA8N40RxdDBlQ25vM/UUQdjVdXT5hZ+xQZH+OpmNURH2ZlwajI0MuPlW/RLBheoNbUM6c7Uz6ZxWop88qRk2/shIN8cif9czqT8snZZP73XbNxpsOWXx8Qm0eBsjTC6EVEPO7nyT5RqRbf/E/lQsrfORU8Q2CgbhmhSjl+iVFSKMETDjMInuEQ1LGWTMldy1cwhffzXuf5TNC43B1OVQ3LQ4bl81hqSGP+PEGk/Onyu3NX1PCtZKuXz6e4EOTWZSWvw8GSH5FVua6TGwdzc+LUFWIk6yxBxxx8i0SVoYaq7cPdh+vFly+uPrt+8uS8IsMGrTELkChgCBxqpX1wiIYEKus4+uD7wJEFmQxaWtgK165eV2dn6zPgi9Xyol6tpL5Ynv/8l7/D2rVALXsWWNSuAvR9N6z9OEESBGQoliqDRaZenPgHhQTnwMFTJw8nT0rRSaxEIZzJ68nrJ0c7+VhJBeXG5dNlf8g4k2mMphS5fHH0/IzxiggJwPA6AAADoQzZsIh49JETChpERFMhM4EY0V2C4b3gHWHj6pUz69otHDVna2fpcL89dP39vvWBnaXauZah73sklEHUChIwgAEQwKqqFDLD4TTm5PsbQkzGGGsxMwq0R3JT01HfcpXLjhJEw8xN08QY/WGnD4QQBrsr9cjWqwS1FJ76fByLU2JR5kXGHD07yoqJSOBYr0UEQvCDYX3khKPkrHypuxmgNGiPfHI42ZXOXubvWmu13k+e0kBUbsjYzAzEFDJlWELRNFIqAqA+sLG9rThE6XzfdR6AJXLofZRIyAakruv1cvXs2fMY5e7h8Pbd3XbXR90XAd0vtb7IJGxkUbM2r6s0g7P3tnTGhRgTnkBJBAXbVsfuUL3mWOt8TEFlMjAWuaAlZ55sffnfzD309JOkppRaG0ZmGSh5F4bzESSabCVp7jTkyAgicwhd1/V9D8CiZ/MYYAj5IBEBSlXVNzfv3t7cuaqp68X5k4vzJ6ZpmhcvXmy324eHBwCoqoqZu677x3/8x81mc3Z2dnl5uVgsYmQt6GKtNVRrPjCnfFctdpijiBMSABzuq3UioKV6c4ldHOqOJpEdI8ToQ+gBgwBFButEpH7x4sWnX37xKX7+p8Di969fv+4O+zdv3vz67//hm2+++eabbx4edn3nraWu8yG01jkf9sFHAEI0VY3WVsays5UwZhRiESAxYEIIxpBzKS4aGRGryh4NQijYOgCEELIDvqBe4Hi0ifMrmUGXKDsgyszLzkXINd/MCPQYK8+PTe5PnoFCJJxkVTCWeeXCS5aRFiiDS2imk02kbP5dJlUeSXCIZow+N8CkWAIWnaNPfuU913x182si3sbTG+Y2UXwfk68wA2MJTADQw3XlviT/05DzjxiH09ghbJn1hHvtXGVd8P3u4ZYIOMQvP//QVaZp0G02n3z6wQ9/+MMPPnjuvd+sV1dXV1dXT6rKRvYxRtATN0SLxWK1XhDR4bA7HA4qmfq+Vx09Rm9MU9VWRO7v73/+n/7jn//5n3/yySfb+92vfvWbb775Zrlc/uM//uPHH34kwn/xF39Bwp8uPgYYzg0LjGrDQOElygxamSAZcs598sknAIwommDDga216m7HDEYBVXWpqHNNqVq3jn+SNKDA85N//b2YkAk5c/ySfMrtm3yFH4kBwnspFMY20h8+4fxiCQplr6MPpYP35eTnrGAIAxRXQQxYPHZc6aQIE4zrXZWryzAsEQOSjafhQRgDELOtM4NY+ZVSDUpUduKEc1mbKkv3ws10vIgIxmgzgRul3J58UMcaM+aQQ2pQ1sNOblyZBwvjE+ZQ8AdrbYiZybP2OofCYTFEAcdAPrW/ML+Tpvd7mOR08mQEEYEQ2QgQiIVo2JPA0yebTz58/uxi5SAQdkiC6ESayBUaR2itsUSWBCQycfRd67uDiAAaITJc2crWV01z3qwX1sS4lLheVM31k3Wz6GL45evbtw87qCrnnO88xnDm6j6lcHNyXRAMhwsLoo5l3Yj3X+/hG3NsnMhBeGQrJ+g3GW0iquZitKQmGG/3yQEf2808sVLHKL81YXH5LZxJ/OOAiIBMAqpMiwiDoGi6FACAoBmaaRIikQ8ggiwUk32JwAYFOVYENeCS7FllnqwXm0VTO+svzm7u7szbW3g49OKZDQkYEACDMRCKIaqrRjVfOFYdRJEhBy9GYWaOyn+UFRyrenTd8ViNwi9zAzmmoB83sWma8jBVzsnMm8J8LJeaWbQ+T6n+ij6ZI29QZDaq4xgKPbPkciXPy7tQGkJ5DvlPmJSoQYDWDaVOG0TqKEFEdM6hGb47WIyAiBhpeOBotbKIiHMuYxEV7hcUC0DWR7K2FhSgQ9c+POy870Sk771ET8i1NRfnmxcvXlxeUmComl1k7P3t7uBD7IXJ1igievDRaW9nEdaQYYL8hPObcVd3velDme54NCa1QKYxRiuaHPGcj4ImA1bxKkdi1QTi1HkyQz5vIo6ryOD4eEgpKYRkgj
wFBhan8UkQMTJHZs3IAzLMDChEwsx933vvq8oOkkVoaKCAGKPnyHd3L0PEzfnFctUIw5s3b24f7pt6+fEnH2oPz/1+v9vt9vt9F30I4f7+/nA4HA6Hp0+frtebpml0Yod9p1SgCoAeT9V0qpLVZPzEghUjojCJhBgVUEpWGKPPRVkRLXNELUJtQCAKRGtNjL7fP1giMoDOPXt2Dbb65PNP/+xf/sv9u3dv3ry5v9ve3d198813v/nNb77//tV+d9jv97HKdW4ohNB1D+y90p2zhmOytwGGlCge/ESCg9NkquXolXTQE2y91EpOCoySenFm5p18txwho8gE1rGoIAqJfRCN3Mn54Uwt5VYNm5RSqPM4usT8e7xQ9cQd11Iu7STo8l/zNX++/GuMjDB6TI4y9YQbhuj4QOllmVSe0B/0yHGm8p84FuREVgrRO4dJebME42R1mX9NHgshREBGEUJDrhoCKbFqnATvLDqLyFE4Xj05v76+Pj+rjTEvXrz47LPPnj17dnV5uV6vRWLTNNYSAPjuwBybRQUAu93D2XJBBpi56w7b7bbrDnqevu97bb/EzN53VdU8f3GNJP/uv/6XFy9e/MNvfvOXf/mX19fP/+7v/ttXX351dXV1tt44555eX+63t8yxqmofOkltCSSpy5I8eZhy1I5mgIgIfPbpx1VtD10c0niZjTUSsXQ0iAiKgNETekdmXbon6FTkPP+ebBPM0L5gyscXJ8gweX6yxZO9nmPmMFq6N5lP/tyEIk7O4eSUcMhC4YKpieTeu0ekHar2lBObEOPkmzK4fnRWp+FTEgUUYJ8oxPmLMV15+/Q+pRTfPOeBXgo/Wp5t+cwcLPmZCTAV/ZL7I2LSeCbzVNSKY//dZGuyGVnae5mHz3n15IH81wmXhkIdL21aIoJR/8bjrNIIDDw6WE7j0q8ltstMysiQif2oaTG/qKr74INEPVgkHIzvIYaPnp1dbhbE4fBwHw0vGkMIMQSyDbDjaJAckrFo1G5gwi703HaIyBhCRI+9cf3503MykeMe2k4OnWG4cChn1Z/90efR/fbwj+2Bo0TBGIiFgEBISFP0B2eiiqjATAAoejYPBIbThohHx2sJ+cdof062JbQfu8bEdYT8Y4DF9wrTybcmO3XyrxN2NxlzQkojUBDijOmVCylHUwxEGPLxIZ1tZAFAA6hJvAnCCAzC0apZgxBBk3sRrXBlrAO/EN5YebqorlaLxiKyr55crBvbVNbSzZv7h0O/JzRLW3UxcBRDUFuzXC2cc9qmQ8uVmdQWXPVMEYlBcsMk1ewHeo+ZXx1Du1iYBGntqSxf4UECNTm815o0+Yexo+3jVFI/H0fMXAgKwk8W16DiI6jOgJCqPgIAosQYWRiGQiPEDEN/RMiMdMjvSG3Zht0cqoNYo8AxWq8j7WNd18hHf1YWtSGlRpuj5ytKEfAAAJOEPQAAAyNYQSdi0QChjyGp/rr24KNHts658ycX3t8Y5xaL5XK5ruvDoYsxFB0UmY1FIkcDNKJwy+nclxROOhEJIcZ4TIkfQkCcifHYbg1JAEbkgIjaliCfN8sBusleS4oG60kWGTv+Jr9Lzq8fqqoqxuj90YnARcR4QtqUKxQksyr5+9SlCNYgWeLs6LQYe20iEgEACZlDjOIjVlVlGOqmqqoqeI7todv5ruu6Xx2ePn16eXl5dnamhm7fBO/94XDY7Xbb7Xa3211eXq3Xa231uV6vc+Q5x6LL6FfyoSSss3lFkjYFgFl72SsfipElsmbX9/2WmYmAtDNZ7OyB225JKKH3WFFktkLMbFBABHy7PFt9erEBABAE7x8eHna7/cuXL7/++uvvvvvu9au3r1+/PhwOIeB+7wnWIhIDhxCAgAwNWmuEGGOQICiutkQ2xtj3nT06d5GzjZ6INnFVoWMYWibq11EvmW8w4uA3y3x2oujACcZ9bL5XfoWKU7MZe/D3lVeafEVEcEYYiAgwLRaf1HomOLphyjEfE5A4vtJtzqdajpBBBlUoxwYhPiJE5+DKU80bAWNQn5znXHZO/jrfRBzrneV/SyFavHjMMzxKYhIBUf5jjNHqzMYYIjBY9e12uWgQQtfuGovPnl99/PGH10+vfvTVZ03TPHv27Prpc02PISJjXPQ+ylC6hsyQjbBY1ES03++1UaExuFgsRCRycFXuw6YnpA91XX/88cf/+l/+i9/86tf/4l/8i//u//Cvna0//+Tzq6ur+/tt5czhcPjbv/0bAj4/P7N2g4iHw2FhqznclFQyNChl6DHz8+fPnj65/M3X37JxoIX+xeT3CVAQBE5w2Iz2MHZwTHa5fLG8WXrsJpx68u7JPc0XnUpwLbG0JAdJOXglCZygwT/YFJwvcILeiLZMoBs+pweAAOBUBEmlYb4/IWeW000gHqN6ToWs8pRKA1UHySfoTo45vG6m9/VHquyHeXVHUBc6lo6f1b4J3Ob2WAnDyUf1oiLLNHOYUisqf3CREToB1+QTGVAlpvE4y6t4kfVZIorJUMRUwX+Atpm2z+FxxuljiDqHxslLyHjxAGINEsTYHxaGnm6WX3x4fVU78PuHXVtZJFnVyypGqe0C0IKQMKniaACNJbOs/d60EoPnwOIDeEEyHtoWDMXasXRMPRqs0V6KIWvdDz97Ujd//euvX233rlow0X3X1malRQgiDIFBFBBMR8x1T0UA2KgJAseCq+/n/L8HDidMI4CiFm4J55Os5jFxU+I2zLZpjs/lV+aDyyPm7mMOtVwmZ/LiYyDSPFxCBEl1cUCGZnDJIAQBZoQIOOR9DJaDIJOwFbYEKwtLV5037nJZXa2qJ42tjGBg42Bpl7U1FVHlzLuHw7YPHXeGQfqhOOZqsbS2EhEG1uIoVNTBGo4xVkYV8a7rQoh93yvEnKkAhnAiFnVcurYfA0e0r0GIwQff+957HyP7GHwMgWMUjsKBo4/BshEEQWBhLR6R6ZqKYpWcasBAYpIq/ZP5F0EIMXA6zWiMQRRmTlk5LCJ950vsymZGadRBYd6UBqEpmE9VVWacsTVEFFNRDMwOXzPCGVRhn9hXFFZM0NsCADzYPNagMSiR1Dj3vXZBFLJIaJ1zztbORUWWoJ36AAyStjSMITIzyKjHRpYj2vBdGb4eTklgUWrKobmBbTJz/pMU6n0uBT8hYd01jYtm+BhjaJxUmF/M5FOGZ0XEWh0h95mEDNs4rlCYENjqmTdEBA1fojHGRh9EQE8dD4R7nIBo5VhElKElB6xWi7bXsHZYb9arzXrftdvt/s2bN23bbrfbs7OzzWazXq/bQ//w8OCcq6qq67qu677//ntr7Xq9Xq/XF+dD6ooGmTG5gLOJqEHUjDN6Kr6AADMzRIncJ+TkGKPEwYrR/oQAgmSIIHLgYCVyXdfGGGsNs+bcivQBrQH2YAzEIEMGuN1cnW0uNy8+//hP/umPdWLv0vX27du//Zuv9/v9zc3N7e1t27YxSuh93/fL5dJYAmuEh76Z0jNgsMN+4CB6AUA9IilFKsFdz0gnDJiIh4ngH2tUOPkTjg/pTWR2KV1KIUFF2
lW5AeUzeVb54ZNiac7rEU8o0PmfMLvmg5T8Yra0UbkFGpqTQOmzmcizEgKTr2SH/eTTnFKc845MJpynPR/5pGSFYh/fAwoujnEmJ9ZRig/vDl2IERG104mONvB9JINSOdvudxwPz68u/tlf/NN//a/++T/56R99+OGLh/09Ig4Vi8gYwK7z290OAKrK1nVd17Xv++39g3Nms9n07T7BUHn0gDLOuRiDIraIxCgh9ADwx3/8x0R0eXkJgGertTHuP/6nv3rx7IObm7fb7Xa/3z69fPLixfWh3S0WC2sJmPOOlr4JEcmV7lRC6+ouztafff7xr377j2QrIjRF8ArBqHNUNyZrF3kHM/ZOyK38b2bEkw0tI8bjS8qH8yATkswI9hieHMc6RVDzdycjPPbKY1exzBM9lPSs9uQrUmSTzvlAmW+jwnJgJjxyfMw5mxQbBIU8m/w3y+9S4MHMQM2gNmYQtDzOAqUxg83vlowiY6B+TovKTKZa8kwpsn0mqAVpT8ujg5AwxFqrRd7y/UzLUGBsCa4JDEuwlOPgWIkvgCkajsn7i6gVKzMc4skJTFLf8wRSfd8/9GpDQERjLHHkvnUsz6+WX37w7Hq9OHfWxNDGNviubQ0ZNZgJwREaNGrmkzFggZjJWkKBtm17j4yG7JKw9rdtD8YSIUl0QhAJxIaw6MOn9aq+fur2/r999+pl122B2BKiIKA6EYeKSQjCAkSpHA6jQERKne9+j/PlpLzI1+99sYwSnxzqpPiYPJnVXBlbZRPSK4d9vyP4/ZMf3R9XcYQCl6AQl8fHgWXQO0FPtTEMNiKAEcVqEEEZAlDeq9cSJQJHwuhIapEVmuv16vnFelObpYEVhVVV16t6b4UBHTlnnqyWzevb7Tdv797dbQWlj9EIGgDnXAL+MDtVigrFAdR5ai0jYtt2ajwAgNCxfnJmHURUaiN6lGMo0wrSdZ12eBIZVR2f2+rJijvhUCuxJX8l8ZkwcIN02ClGCSEYg8elkQyvi8v7lXgCIpLWUM1XPjdoKktlymixagBQlbbEh5KT6KX7WHJCKThq5MARAscYOUrvI7dtK5EJ0FprCQkEJJLA4XB48+bNek0A6AO0h344WkKEgCEcD34PxgYHZiZ0WUaU+5WFRYaqGicSjm5H5mSrIBtjtFBizvsDBsCIcOwLkKUGAIQQ1Io+QRePEFT+0xF0KVMs44Bq2kSkdpSuJyHMcK5VBGBIiNbkXENkQu8R1enpWbTrhp63RBA0ye2CiIgGCHf7ravq9Xp5drZpmgWSpYMNIVh72XXdq1evbm9vLy8vr66u6mpxcXEhIlpIqW3btu36vn94eHh4eHj5/dvVanV5eXlxcaGtIzRSWtc1M2tpxkwsIgIY8tqTNz+KFi2jmHYtQO4UoKWPYoAuOGcMDdpjXS1EBIUgAgKSUAjRmarvAmJERDJA1gIRRM8hkO1FIhJS455++PTpR8/VFfh/+Z/q/nB49+7dy5cv375+/fL7199+++2rV6++/fZbRAw9tG3b9YeqahbrZn2+soAMyFqcTa+TvnNE1B4spYCfsIP5HREhM20dMVE1MgKlMUfFQsuhSrUge68nth8UwiYPmynkPeJtPqthDjwVCRMgTNZewk2xNgNzoGcDZbY04uhgzJyHlnekUOzylYMDeaXvId18vzSA5wssLy4SUydjluBNf9eq0wULIFFXtn7FOIsCZUK8MQZQ2oP/pz/743/1L//8X/+LP/3jP/pq2djd9s63Dz50CIZD4AgM8eFwePv25v7+/vz8XCL3fb9eL1988HyzOWcOals5Z1Qn7n0X+4goxpgQehFR1U497CEEZrh68mSz2Xz37Uth/u6777755psY5ca9vbl5u1gsXnzw/MnZedM0+0NgDnVd+9BmfOJCMoUQ9KBHCShEXCzqL7/4/P/9v/6lcaZh6j0HZlSehwMTlDi03QGJWMAWZmK1xIoJqkz+OXcZpB8nzvzAmKeXn56QYTlmKZNOzmGCKpNv/YFXnkxGNtH618miU05gU9pSXoJ+5GQqNcBxzDTVtNjH51CuUe+UkdtJFC5zUSpSLkV5/DgZAQq6niAPFMVpYMwQJEUykz1pcGwW5uSW+fznDHw+Hyi2LG+0pP5FmXLzHE5uPY7PfJZBCUx2qaTwpj6sWuCYKbEWbR1tCWqfviHsUALtMXqBgi7mzPaxKwg7YyyxeG85PDtrPn365KPLc+PbpcOmdjUsdjuOvus6rKoqQCC0zlhC0KCRGboJomtqt2zirtt3nWexDTvAm5tDH2EZI1UC2FuSRqCK1Ah2D7fnwXx1/cQYQ69e/26/Nc5pUVVt23xENgANlBOACCAiIQjwvHPRhJrmf52ImzkkJ3/NhDABNRT0dfLr5bBzfpUHyag151cT7J08MEHpxwxIwSFEUs5qELIzqtGLBEGEEVjbhSMKmqj/nwYgEC3hWXEwhI4sERqkylBjqbawaezzJ5tnF+tKAsXWIdYGGkdAwCAdCNbkzNpaAuDK0JuH7e4QkQSBK2MBNLiDmPA5H9NNJ6mGLM2maYyx+RCgpq6VjmP9sV5tyjVK8rynzFBfes3mAC8ZY/lMJjQpijbnSIuq1JR6FYiIUnTOcYXUhQsSk7S2lqQKmlRCWUSstdn2KFen+55TJ/KV8YpSLVk1VCJMhUUuQntELRkArl/s+74Pnln6ENreazB2tVot6gZRLGHljITY+f7l96/3G+Oci0z3223fayvFkedO7Q3mwBJJQI+a5CXnOYTcj66IVpUcW4uycoqvEhGz9hsYjPYBmen4VhZM+c7EL6yfKcFYUl/mKiV153kOkohHDKHEnHTEkZiBJQ7KI5F6ZtJvDiEAs7W2qipjUMAgogHKg5NBa4gjNE29XDZI0rYtGbdYLBaLhfdRU0O7rnv37t1+vz/bXKxWq8VioTmiTdPEyCGE/X6/3W4RqG3bly9f3tzcVFXVNM16vV4ul5CSijV4qDQSY2TWOqk+B65iDBJiVVWcEAYAMNWWY4mUfDSVs1Xl6rohsnfv7hyZ84tNXTtYrAiA2hbQVJsr8J5DJyzsAbwXiAAIDGQtoED0gML+EGM0FgHOq1X9Yv3Ji08/UdbkHx7u7u5+9avfeO/fvHnz29/+9ptvvrl9d/Pw8LB7OBxrmidaBU05SBcMBTAKQT5n9PC4tFCzfsKmJ6ynREGYWYP6I9XLsnOsnXy0/OvJeWKhCKZFCc2OyiAiwFCKHWeyc/K5kw8AMKaaxUN8nATHh4iISHi6nAmxlZPPimNJk+VbmXTL6UHB4PLDc+CkOU+sEuCxkwyKwhtaJzrx5ZjdlEPkEEeKAubK4wFEgMhUVVVXTiKZ9ebf/Jv/4f/6f/4fKwfb+3cPt15ixxwEK4Hg+/jwsP36629+9cvf7Pf78/Mn7f4X19fX6pXv+/Ds2VPnHDPUzjBz27ZIYIyx9tjezVpLlDqr6ilaA0x2v9/f3r1rD733/vWrl1/+4IeI8tnnn15fX9fWhBA0PLjd3td1XfLuAb+Tx46I9AyPMmJdr7P0wfNra6muXAC2nY2d
FxFSF3+5OzKEBx/b0/LmbL9En817VpwVOe4mnKpxWoiQIa8j7dTUNVMizJxC09dzq9ZBgRDRKgU6SRl/mXNK2x9yyVBMTzRGW4aarbUAI5tKr1wMIFsCCq5JpA5gCuFya+aUkscvqybkdyHxlkkoTET0OyVF6O9Uje1I1xMHfP4EJvEJhV9Min56JtX4luIkIRU5q5k8jTG+6HFc7mY5MiTNIIRQV1UeRxWLbAxM0CMvfDKyFOkeJR8eHHxps8ZYx3rkqcRDvZ+8sEekVbgpPCd7+j7ceuQiQmsQQhDfbWr7wZOL6826Yo/RGzHOWLamN9QHH7ogjGQPzhBWlrTIJAQV1662KzxvPb998Nvb7qHtodtbJy0sV0ArlsbF2sZ1bRrnDBqWYFBAYm3og6uNNK76/tXv3rzbEdHQqZ4Km5BEJKeei4gAA4hM0LrApcfWOyH2/OTEszC/L4WXIcN/gsD5v/l+iZDl6/mZ7H0o5zzHNJjt7+SV0gFaPjb0ZTqllmRDsURgBJ2Puo9ZtEtklnfAZjhAxupkOq9DZV1VUe1M5Uzt3KqxC2eXFZ2vmnVtIARDtrKIJCH4qqmiaMF6DhxXBp5ulrWrNPNTECkdb0GyYCD0LSKwMAmTgEnlbFrfG2OsrVxTu7oJIWgdjt4PhWqzKaWaydu3b+eeGgBQNRcRrbWqy+m5skl1Ga3uSETKaspDVlg0jcRxEpMOklWvgeEwZuNWRE9ChjyaGjCQalBRkaGg42sGkJ6oBADx0zSNUq6VPCrbyXl6et+RMcZ0XXdELRnCcTFGBjkcDn3f+8hd17VtzyDOuWdPr+vaAXMgJGhC7Lv97vb+7uGhr+qFc5UPHEEMOUCtS3SM9TEH7QonSTnMCJz5pKSjffpWTsRFO5BhjEZEAAdvmkg+Anf068VU7T9vTQZO7jCZvXjDV8bx4Sl1FDZhovfh9SEzC472Z0mqAINzXPk8gCpWAmg4JZoOOMPRIFRVBdKQ4RAZEUkgHfQDAAIia52A9rQ4M86hMWQJgIxxxpjlchlC2O129/f393e/Q8SnT59eXV2t12sRYZaqquq6Pjs7aw+9Xvv99uEhWmsfHpbL5TIfQXTOVpWNsVJK6fs+hOBDWfFoOE+UTURKedoiUleLpmmcM01Tr5ZNVVPTuLP18t/+5b9vFnVdu+Wi/uijj549e9o0zfn5uakrQEf1AgyC70K7ZxFjqD10VW1jDADsFrVKe6oqCRFiqzF/Y4yra7euny6unn78AQACRL/d3t3dvX756re//e233347uJ+Tj8FnAsuTBjCIeXclciwdvZJcMkq0Jc1n+3hCY/mxDBQp4gyRRzpEgS4jzMvjTHwY5VATGZDRd4KORKS1RvJjhegSPW7EqQg7FC4rKopoHR1Ow4pULT7qPcYMOUUThitJamM6EZQZKxaFBws5NSrgDmMRy0WqWEmupX9UX8wCey6AS2if/Eq5KQkNok7MpCuEENnHoN1yLabADqIJIWqcUDvkWtqEvjMgL1++vL+/r6wAdwa8tdB3fUTrnHt4uPurv/qPtzf3XeefP//g6vJ60TR3d3f397cxxrvbv/3jn/7k+vrqxYsP7+++wRR1BICBazNrzJCZs/KkF3Ow1m5W6/325Xq1+O//+//OuRoR69r1fR97sY4Q8XDYaQdbQy7vHaXubVycjkBES0ZSddAYuo8//vDqycWrm4fl6uJQeRH03lsyCEOTX9S8CWME0dKxGFImNM1PgEJK5YsTvUihkMHM8Mv7mIsQTAgh73uOvJWvI2K2chU/lb9QmfsxRtSSDzCzGmYwvhAxxiP9zqVLYkHHJagCgWgBMPfS1PyomLqv5u9mUp2Mk79W3EmhtjHcJl8vhVk2RcpBMnwyTDiVBBiIxaDWYyinhIjWmswWSoor+RuNWjWwuqLLccyAdREAnLMxRu+H8tkimqaeOY/EGELwxh4N5nJ12SuvQC7t6vzFsjZAycazEOFxca88eP6dmWHGvcieCEQGpTDLiBh7SK1QS5alsivTI6erxDcphILI0RsIY6kxqKosowdiIGNJYk10fba+Wjc1MPR+VdvGEgKH2A9dm0UkhN3h1pIslpUlYogxRgEkwkOIxrrm7MLd9+HN9vaw3XnPsv27W3N5tv7o6smL8+WTylWd9LWYSvb7A5DxJMY1i142Er9Ybc4C/I3n24d7qhwOdiGGECw5ZsGheRIhMqIhTJ1oC7ydM/CT/5xffCoIXAI2X/qtifAtJ5DfzQiQw0FzVJk7sCZTnXCVidiaL6r8uj7EYxVWZyhj70mejLr2WYIMrl49/sFEYAwaJIkBmEnYECHCmfHn58uzs1VTVbVzjTZQ4r4ichBiiI0lQosITFhVTjjUtjKIMcY2eohhYVAW9pMXV+b17duHQ22oqR0RGWv7oEch9PKD2jCcuCNrrVYuIDTKJJ1zVaj6vg8+EgkRIRCow5RD76PqOXomDWCUiBFjZB7VylLBtFqtFos6xF7pV+v1Z7LNPDxvB49rikzwSkQAxVgUJpVTWhCu6zwzW0vq6NR3c9FLAMhnKYnocDhQih/ms9mYTD41I5VFMHPXdUSkFsJ2u91sNkrOzjk1inzsdCHOOf0QCmhAsm3bwHo+E7RG5Xa77/v+4uJC3QJ1XTdNLZFZ3LJuYowgLZFBNGQELXMEIAKyXef7vu/7NoSgJyA0TmVc1lsG9q5sSvWZ8hghAHRd17gq80Nj0YBLYI/JepRyB7MGrvA0xlRVpavL1iAUfpm8oRMOkF0AmQnj0KThWKdaYRiLEkfMrCeijTEA7L1PVayzqjNUQjLGcGRh7ww655pq5Wtqu62rKIQAkYmMcxA8xyhROHUOZOZgTRVCkCjWViJora3rGgAWi0XTNPd328PhcHNzs91ul8vl+fn5ZnOm2aEA4Gyvy/Tep4TSdrvd3tzcKKyWy+VyudQAowKh6w8hLJi57/vD4UCEzarquk4Td/UoKwGqzYmITdMYiyi8WCwuLtZNZUPwMeLr798ai33f/eOvf3d1dWWtPTs7W5+fX1xcXFycr9frxaK2ywtAAQ7GNCCBGXvftvstopABbMPibMMxxBiMQWcIhIEjxAjRAyIgukX9dP3B048+/qM//xPw3opERMmpUnnXeepBH3gozbg8FoaQ4k1OXyyFfXkNdDWTDTAECE9IptIVVEr6+ZMlc5+LhFLByhPIeJ+XkIhqKCoz51x5tLz8NKxMTM7hHzgEQ2R2YVGIQi/VestA3ARKeZmTHzC74rgNxknlQE6d38h/Knd5uigAPfNaPqlg0RpKZXKCfjE7/PTsdd/5vuqBmaPXCL7bNMKMBCJire2CvHn16ne//d3D/e76+sXlk6dPnlwZ47z352dXt7fv7h9u37178+rlm4vzy9ev39ZVsu2Hz47UygLmw9YTIAGena2bpmqaxhinDVeYQ3fYdV23XDbr9dq5OkbvvdfiNCXyAHMsyIFS8juo85hwtWwuL86/efkabee9RwGjRU2AkQFFojr3RSTRhSQFN4vSDPCJJnQSN+ARtSlTd57tBJfmV/l
MyRDmyJbx5NhCQQABDBIRSkwlGIoX4JEKEI9NZrau4TBZ8SeDODKY8WToQGtt549orFe/O9Y/80wyv8qy8OScMUnZMoEnv0VEgJB/l2ufsZHhgTjOJsLkxwU4+vglHceFZBRJ4R2Qou/ryWlPtlh/TwzRuWEMpwA798GVfHsywsldThwjiBZbGF3HeHJmVuX4GaTzPOEM6uym1Zs8H2e8uookHnYVyQdPLz+6PLPBdw/3Z5uFEQmhR2utc8bZdrcLnScyNh6MFdsQw8pUBlBClE6iMSb6eAhMVU2LVYf7B89k8J7N9mZ/+9Dfn28+uzyLq9q3wcihWRAQ9hCReNVUz+Jyhabuul9yVyEBAAsTgIghUAJjBIOAAMxAIlFSaaXHth4e5yr/f1x5QyX5iN//GBdBYB73E55gy8lxThqi8HgGBDwCBxGBGQvKtJNdMJmgcs4bIqpjCRJ5EgAKI0eM0ZJZVG5R1T95sT4/P99sNhI59L1IIEAgNhhBGIWYLVoUQ2AdWGsjMIfoY+hbiL4hQkfIiGfr3aE7dMFYQyAWiUVCCNblpPSBRQhHZq7rBacqkWoQKnXXdU1FPY/ssgnx2GYgWyDMnB1kNLQHHMR9LNqfZtUipzlkJSr7uEsKnVylbMoPMLMeKSSiqmq0CiIi6SdKJY1TWyBKV95ZZpbC4ciI0Rq1RjRmoiDCdNStbVtMyVyZzxsYIFxV1QA6FgBQdd/H2HWdj7FtWy1JEmNsmqqqqto5aw0ACKBBFGsNs8MKAFjQMpg4ZBgB2br23vu+r0MIwCHGGGOIMfbFmcAsCADgcDjofHKQsITGkJl1zA8YMlmMMSKjs1FqSUpRm0N96DnAk6Gqr1Tp/Gqeif639N/ppcaPth/TvDBOJVAHz8UQBIzJRhARYWQagtyMYGBmgCCic3bZVNYuRS7u7t957yUM3goENgYYiBmMOYajjTFA1lprjMsUrbRQVwv1BYQQDofDfr9/+/bder0+OztbLpfr9TrL0L7vtf9hjLHrtK1I773f7XZak8Zau1ov9N0EQ44xhq5/cnmuINVZGSTnnHOubXtryTnX1K5pKmupbdvDYee7AEDONhYtkdnvu75/uL/dknmn6OCcOTtfX19fXV8/Xa3r1WqxWNZ2tbYGABh8D10XY3/75q3C2TmHjjGiIldVVRltLGp1DgRmm3e95Ik4s6mO219gQMkoIekTPI6fqFdDRyvPnJxmzSmqNufdNEvphFPSIk+Jx5HDfJWukYmWAwWjSXOOZFx+hou0lvzWREc8qSoBDu0rsh4jIuq8RaSkPmNJ0ppOPVGeRIbjKBnaeczyTvmhFFE5Mt/5zsKYtk/+wDFlYiFEJwOqNVhCOD2gb6mfddDG1e/ojOu69ttvv7+9vT8/WzKDIRJhY11jlm/f/Oa7l69ttbi6fv7k7Im1Vdf6plkSUb1YXtVVjPGXv/zV+fn5YrGoqyQJhtwemYClXDoiarVr59xyuSSiu7u7+/t7Avbet20rInqKw1oHYEpVMsOTmYUZijN1R01CgIDX69Unn370X/7272LoOQRAC6B+6ax9DtMziJzIqkTm8ke57ycpdLJHJzGz3O5yQJhd2TGEY/WIik5QeXxOpUoyRWCyc8onS+QpdYISneYcoNw1OCLk8fhZGmRUTvDkKO8ZvARROTGepUWcJKLJ7pQwV3LgdKi4FOQAUKaSwrgaTeaZXETSiMwc+FBEyfRzGTJlpK6c84QVT4iFi/jnfF2TtcO4wioUShUU6MrpzM9kqAET0osiQ/BmPL0jseT5lyPnSc4Ng7QLR047AfhAvDKiEcvRWXh+fv7FB8+vlq67eStdGxoyqwoAkKhaNHXk3aGLvkOkGEPb7e7vKUpwda2pCiJiKwiRWVzdLF1TB5DIgBVxNIfgQ9cHf79t/fVmeb125wtzhlizRB8riIvG1Jtq4aro+41gu98GEC8MIkM0UBXAAdAEyPpfkVTicnY9xjreQxoTqny/EDlJ7DCOqL9H3LznK/P7f8gzj/1zjsqlMJ2Q87CW7LQmFCQaHIvafZ6RxQCsnLlYLzfrVWVulw5WFXKAbddH3xEZay0wi7AIAjKRQ2PREJOxAOx93/ccokUia0iI2DCEi/XKM3K1IoAQApOGVgY5qwYhF/SsJ5pijGY46DQkWJakgSluFqImFOSjUIm3mNSGnZmjRBEfo49RRHyMfQx9DLVwFBHmPgQ7xPOPBqeahVxkl+SoIyZLLJsENGSvWGWVWjvKWpeKVR6PZHPKvCgVzuQsO+5jCKMzxhCjepmIjrYlJPs2xrjb7SYssTI2h2GHPKMQvffayG7XdV3XMbNaCJA4SQbxwGfAkBEiqjWfX4CZjaMhy5MGkeqcIwEtNCKRYwxtCLpSrVzCA9qA9x2iGGNy+gkzhxAk+hn2DvIx/xMKGafR1CzX8t5lq5hTahIXB2tLwZT/K4WKkueQ9v2Y/5LF3/CAHPnDwPbFZE9Wpk1EPTArzBICe49EaAy9eP5BjFHTXfq+b9vW95GRfC/G1pQzY8lhIZs4nZiw1jb1UkScc2oQHg4Htfp2u11d14tmtVgsNpuNHkHUwjMajvZeg7q9wkrtvTdvdiGEs7NNyiatyABEPRbbS0r01a6XRLTZbBDRWbLWhtBv7+4P7Y45WONEBCL4PgaI0XOMMVo+HFqFXlXZh4f9N797CcBI8erqcrGs1+tmc7Zar1dnZ+uLiwu7vrw4XwOHsNsdDofDriWi2hlrrQQ2xhBZlqiYpiR1DHdMNuykQBUR4VH7qVL0ZmzIrJaIbIoR52eygpJHOGpFaZS57KFUthH+sGsuuqRQkSfmCiS+MNdRyjHL+1SUZS8fyCmd+XU9NzgZpCAZwlnkMDG+qf2m9Cyn1OVSkyuv0lNbyr7HRPvJQSY3yw0qVcCBWUvgmGTtUUUeNDkiQNSWKghDW6G4qBYHwVcv371+e/v5Zx8JGkDxMVaWXr9+/e2333ed//STz548ueIIPkq1WDjrmNmaCixdXl39/G/+6/evXn76+SeZKalKNERljg4CbTuVZbyKpCgiEfjQ93c3b+/v70VEue1yuVwshtJSKtiGCPlxu9OWFV6Vko687+qq+uLTT2pXidZpZAzBGyQSxQMcPAKklQJPpGbNSaxApPdFfsqdKv+bKWJyf77p+X5+US+TOh3nQaQ4f5j5gBRn20qvU/6r4LRVZp7/fML5T+Mnj36TfGdiNswxnE+lwCEiQJy8pf8tG9OX9+cgzexxMn7+Z8lwjvbM8ezlUYcunywHYWbFRJqVZ8zbMflnCc/yd5yd8terVAXKAKNJ9F4uJ290iSR5qhksJWSyGlfOcAIuHLSZo4sw3QFIXcjKqqEnEWmyxcaMfBMye31CX+C762dXnz67vlguViRVXfvYtbs2rFeuceCMIevq0CwXxlmDhm1ggEPXBsCq866uKmuMMf2hBzJk0VhsnF00dtt6gnBOm94aL3zLcr87vIz9x3D2ITVtH582lWMhiaYG2ziuqyounoLbb+8fDi0CGCQARE2a1YPogxJFQBowFISpMJ
0D6jG4ze/noSaEDDMGcnJb50NlHHhsSo+9/hjfe+wVmWkU+btySsARESGVuKo4bEAAUChh48C52aDBGChES7y09smqud6szs/OLpe42WwWTXU4HCT67tB6AGPIkiEiS8aaylWOiABNZGDAwBIFjDHWWUaCELvoD7v9wtVPzjZQryWG7nBAV9ertZYuzNxFRMQoVOloLGWZWISSMkFp5luFQ+fxXFM0W0pHu4shF9lXpU514rY1muWY9zH/mKiRmZVl7atw6DBA1kJ9bpGd+yUaYww5gSCDWxwQgRmV/0PRaqL8qKbiZ/7DzMgc4ZhEQER6cgMAjLVcdI4thS8mi4uZQ++14sjhcHg4HHKMUcNEtXPr5TLGQDQ0dVDc0lOlMQqDCCMjERktLSdJxTXGODJkABE1hWqFICJq7Wg8KqbqrJjyrWJRdMd3XSGBRyIji+VSGPnUuLz04UrWopIcyTgAhYGAxWHCcg65euqQHjmK2R4lte4pHBESiAjUPhQRLec4NI/E4D1EBpHI4XAI3aEnCojx7OzMGNNU1WKxWC6XzAxCYOzD/SEyRjGaRG3QIgAzxNjnTyfNBABgtVoR0cXFRdu2h0O73+/btr29vX3j3y2Xy4uLi/Pz8+VyadOl+cZcnAdRaPT+ICL7/X6xWFhHsfV1Xa8XjffeGFQyMcZgquWrRMfB933/sL0PXXt2vn5+/fTQ7itjAcBZKyIg5Iyxxppl1fc9c4ieYwh938YYkOLrl69cZavKOmeso8Wi1sSEzUW1OVsNfRcXNvrQtu1h24bQK0rHEACgruvFYlFVjZ0wyvy7NJZKliowjTzoFdNh4pwleJJlZ+1hfmfAklN9z+bTmMx2IqXmdzKfyqbgOFx5jCjKTIHGIiEeCmWlJJhiITG/AoM1OLKusbCOEBFg6G+bzd2SXCcXIuqCJgCBgoxLpozpjLVkB88YbjATnzgTjfqj9AIc+c14C4hIIEo8BlISVpAGNJg1H4BFdCEYQui7IA0Yqm8fHl69fMNijK0FQ+TgI9493Ld91ywWF5dPyBhrTNMsm2bR933T1Kai1y+/axbVatXsD/dtt9+sFohIZDVpSoYTIrnhOyAQIIjojg9KubobQgh17S4uzkIIQ8mpRa3p5pGHI5faHoCZU+XxozJkiaQQG+p/4hiI7PMX182i3rehsrbzjJohCpgDhZDExgxVBib7GFa8ZytPbugESfKd/Fi5osnr+UUelxeffxTGpHdyYu+f6oQGoSDh+Yc0t3D2MBGR8LS4SDmrApOnYMSxVTYpTpMlIhdRKX2dioD8iS+mWupSKMRExPFE9gEU7JGKs7ghBPVBj5dwXFSeG6ezHHPaTxzjBJOEWc7ISbCXo8nsmr+IhaoxL/oyPDOOMSKiZoq+h4XOWRY+bofkpLhhJomd5o+W24qIF425XFZLh4Z9Uy/OL853yNuHm+3DbuPOF8aBIVM3m3P0XR9C7KANgX0QL13vxfW8XNTLmpiZSCCCYbg6W3724VOUl6/e8cL5ELwXaWsbnd0x9NvDtu0+XVTGw7nB2qCIUGWtsy6unvj4ypndLkgUdA6AgUVSKFUAICVsCwAhA58whN4Dn8mTv/f+3EU7IYf5X9/DN/5ARnHyizBjNZPR3v/PyVCl4MsII6ncLeKQKQralAAQJIr3KGFZ10/Xq6vN6ulmvVktF2tjbNUH6XoOHryPENkQdEkJq+u6qRtE1I6IIQiLIeOMrYioj8xtH70nYAQmYGMINYyDYUWk3jBmBhjqSKlroNTfJKnpIYScdpTrcqnrx1U2m1IazYiRRURbig9MDwYWZK3d7/f6cAihbVvmoMmHcUb4XKTM5MlA4l3ZYY3j9OYYPaLJ7A4AramsUdifSETXRNAySKjLLPWf44XDxEAP5qWwmLXWVJXez2cIh7IWyQaOMfaFzUxknSOtjdw0zbJpiHCxaDScOFm4iPig4gNAC0wwamq8thKxamEZQAEZWlkN26R1NXPwVq3xrutyCt6geNCI2GVWHRpg5LRtmuYk8utF44yYkjHKOGMuy4uJEM/mdBZT+ljm/3ysjwoAYMglohMwkk3yEIKEiBDARGbP4QDQE/Hh0BmDi7pWhc0YY01l6+bs7CxE2Leh6yMzOyIiK0U7pWzIafcETR9VQNV1s16v1TNy2Hcistvt2ratqmq1Wq3Xaw0VYlHq1hijX6+ay/1+3/c9IuiOdN2h3WkJ3HR6iwRYhmI1fee9F2ZEcc5cXpz94Aefff7ZZ8+uLvu+v7u9ffPmzdvXb25vb/vety2D1DFGY9BZIkI2pKokiPiuj75vtZMZ8Ov6TdNUjEHPTK4Wy7Ozs8vLi8vLy9VqZUxtrbUOPPi2bXdte/9uF2M81jSXQkHhcTGAkmNm/M4OHkjuE0y1tjJyiEiud1wKIVUISj3mOOAj7r3SIJmg7+TJUuZNJB8imhmVnlxmGvx9x/Pm1K5XqaPIiXzOWWChdF+li5lFRhBLizqudDJ/nB0RxKKY2AQy8+W8/5rPJA9Vsv4c66djRFdUQ9ZByIAIhcgAaIzhCH2IgcE457v+d9+8PHR+1VjhKEQhctNUTVNpH8L9YXtx/rRZLkIIzHG3e2i7fQQvEbvQ9bFHOzgmjiIh7QwfI0IKt6mqDcAx0mazQQJt3Knz1/QAYwxPnAUypLPlpSm4yx0REUSR6K+uLs/Xm5u712AaSEWYFGaSxlEylsLAkFQfdXIGrNy+uUKWrwnSzlE9oxCnMzyPkcNxgeP4UoldUDCHiQVbUkc5oIicLoOY5l+S2ARvJ1MlmlAZnHyrvDl/EuDYSmTyVixK6mVJeXJK2Qabz1NS+bujeMzS9JQzKG+Q/lAjfAhoz7r6Tgg/38/EOFmp/uCiSFU521g0hsFCAys3sWQF5eslAk9mkqdX4m255JP8f75N+iwUJPl7eZo+UKaZIGJZQ2gyWyKqquqT69XCmrjfI8LZxcXZYuOAOfpD3zcxBkBr3dJWslj0h/awOxy6nWcfI1OgaCNHNIyGqakIWVDCwtjl0/PNemmB+/bl7cPWA0DtXF1FwoOPvPdg0cZomX1tcFlbAMsxgrGOjHRGGIENAgMIC0gUMQgaDkyujZw4miLeWAjECdBKNH4MjHN6gcJXNd+g/K3HGNdknJOk/Xuvk2zqscdOUhYR4Xu9WqVBCABIKDTYhKIjqD+XPUbvDG7q6slqcbFarZ1ZgLQeDn0Xur7f79pDx15QUARQmGqsbLWom6ZumDkKA0AXmIGAKjQgALH3IQQOfrNa32zb/Xa/WmyI0Bg8eO+9r1YOAJC1MKnEEPjID1GLTwsCg2jXAYO2sG+H5MAYo/Uma//WUtM0ako5x0fWx8O2qrWjOagyFALtBx5VsDUcx6YyfWFS80t+mMplHzcLETVIyyzCaiBp166BI+UgFQDoacn8aRmCUWzQAgLDKIeccXAMAQAWNg8RudTEVY+EiUjshzqrXddp3qaahfqMOAAW5xyiLBaL2rkYhzOKElkiiwDQ0bF7bGKh1iBOwww6wyxi+q6VwqhTSlH9PAsUSJq2996ZJ
QUQpWL8khp2fGkkURWeVIRGyEtjrMHCMiII7J7tW1W602rvLt7uD16gLkMfKag4wqTdR1RUpIZREEjNNJhFFeOOUnFGHmKTP+AEKIAqN5VCgTzCfQEgMnSVCITHp5SrLKBkv0Ks4F1SFMeokEJQAMgF+8fnO3373ePWwqDDIEHl68fvXVPUWB4KrBY0Snt7ygr7qhrzyK6KEmLxCGLvSH4faAt4duP0ggP4jsh6Eb4qZZxRh9ReQceCFi0sSGUkEMME8JRkQ4hQ6ODgAQurQsGA6HThN1DsPQD13btm3bxng8EkYEGjiqFjAYyYXJoqD59KTPzGzvAASjWPjE8RmdVdX9SKOw7H1K5Za61FJKpgR7zsy/OrMfDgdE1FVjNyVEma5CAU3WJSIgse05hIACzMdNMOfRIQELh8ggQ7j11Xq13tZ1Beg4RBGPiF3HRDQM0ldeRIYQsRMiIr+V6dK/BHbKVjDhkOq61mpV1SjYUxqt0XM7RoohhxD0Zr8QQjv0OhBE1LsciWi1umuatS580xRSqxuJ9+1eY2UtAhUD3nvnPPoqYdg5BzAmD1M3L0YWiRJC7SiCxDieR01XIwYe9AqNlFtY84geDodhGERis6qbRpHPMQ64xYpcXdcPH14T4HvvPq2df/7VFw4BObJ61wASQgXUNM2+79d1c315tdlsDoe7GEIUJ2o0IgBAjIMIMgLgmCROKa5bpozjSkclrHvjfQyWpYe2CyFIZL15UvE5DENVVTJd/ein8Nqu62LfhRAePnzoa1dVlfM1BGV+ANHcXYQSZUrJ+fHHHz+8e1hXFdF4LnEYut1u13VdRVVVVU2z0hW0SZFC3wcA8HWVSCbAVe0BQAgQZyfRiOLFur68vHz48PH19fV2c7ndbuv1artdi8ih3b14+cXd/Zu+13Sy7EvDLtOPST5x6e6EVMcKsF1pyFWJWeHOdPT05ricmbSJFNbSYoNawe7plcOxjo0d7+Kkm+qkNQM7xmzanpo6jtduziziOcOb7cIo0HxzwMKWbLJTdIHJRy2/SjaSra86OXWXcJsp6EQgu5PGJsw4MwTThzQd4LZbkdl2GRguqrYrZh6GiONFOiLAw9BVtaZaHhwyOQaMIANzqCppuzs9ZPzuu+9dX18fDodXr1794R/+leYa1mmgqvSUoLx48WK1Wq1WK5Xnruvqur64uEjZX8HMW8xcOQfFWaaJB3LOtC/RWMO2QvZyJNjceYbThedHyCye7VSK086tPdsJc7bPSIDTckPGV2V3luJWFmzjNsQF4CjaVnzSXl0al8h4EbPpNGH7yM8y37csOdCWTKElEoy/wlFAbC+lpEywOnUCJu+F6JiJAEb7W0afQgDQZKSwwpWJs2UPi+oMvRbnpR4rn0uclPrwLcsiJBb/Fs+2wsRXxwVpaywiEAgB5Pnp9fQU4jG7FU7LMTDXVwaenCG/0QBTsZDD14skIwMjCoAgRb11TaY1ON1TAmEQQNFUM0cOpNEbRETBo++aqG77/VrNAIVs6itNQWqwMTreMcjQ6xEyrptmtVrtdocji06rECKCo4wce5kUnUxd6A68iDBECIFjGDdsj66p+vbg1ILRlSg4rgGkddCvcdWWabB08rYU4QxRZc1My4kII2haX0TSnJlRMAgNKDeH9u4AbQREiRLEwZu7/gA1eofOC1IUiCyI6AiBAYj6Lt7d79/cHwYn7a4bDt2zW77vYicYhAJQFIwMfd8TiMMKp00eQgIhRpJAanwnQjvnpnNSbtT2gpzSRYao96N1Xdf3vV4H573XrUONJm3byMwptz7MjkUsrJGJCINEYWap0GWT2ljB7C6ObGdurkrTffZJZstZApVvsucRTnMZqZrU+q3a9G66qn61WmmGT/TsvXfkhZmjjLcLBIHpVIWItPv7/eF+3NWkg69QoCH0AhGJUe+0AA8AkcF5BEAZc/r7MIx4VgiPiehhPPavW21UedC8qCI4zdqIx2TjmgOPJTBDiu6pqmrwA477exwCqlfWdV1VqeNXq7ep6Vidc044ZWRN7SgkGYbHLWKQ0A8hhLbvku83DAMRhRC6MKgRla4qidKHEIYh6htlQhGp65oIvPchBERhDiJVCGFb174mT7hdrytHlb/46MP3nz378s2bN1eXF5tmRYigudljHHphka7t49BvV+v+Eg6DRAYhX9dI4kPoQ9DNofGAZU01zfL/hzF6Ngbd92uHXkeqscfDfowO1T1J50hhhjhGBvF0MYluKnT7HQC0bbvaNFdXV+tNY2cN78k7H3pFWh85eKqaakVE/dDGGJum1nzOjW+YOUbu+179D71XcLvd6rHe0HfgfV2hdz7GAYkBmBCd181hV1XOOffuu99dr9dXVw8uLi4c6TZm13b9Z5//tQoZObi83L733nfee++9q+vLmUNobaNy+hwHNje8bM30Z7L8UrWsDiKmdKBJtkeGo1xrL5pK6df0E5nbNlX20k/GEj0CaaGyY8xeJi2WjldlXaevRATx6Dyk5X9rxmUYK0dhZ6D0e1bHDpOmkkZkFTHoxDxBmNSrfgKQUweWvILyvf0kUTC1nwXFWdyWY09bYWkIFgPMfH9/W9crV63Wq7qqiCN7T3UNRBJkENAAnAASInch9CL+gw/ff/fdd4no5ubmr376Z5999tmLF68ePHgQYj+EAZAFHDPrzQHr9VpE2rZVNaGqUG//TMi0u4VEarDOaAfFyVjLHgVBc6fXvrTsZKmj3FVi1XKXbSS5OoncE56hbME2laTJ8pIldGJCC20iXAa5hQfmXGQ51moee+QJVftPn+C0YZ5BJWYx4mtXuBI8JcwwrRBnXZxZ0HHk0sI2GHlMCyU8DxZIF8GmXjKBylUcjFGsCa0yvTyid/pVRNxbO4SZSpkBaQITzpTEITKFM9n9Rp42hRJdLP9kUfdny3gzjS5vW6pZSOwAp8st81XF49rYWxfEfIfwfKkcjqtqrPCMG/KsYdKgd1YxouZwQo6jIhIQ4SO0aJKFICIBxNO+0OL7RF+azvIBgF4Di0gpa0hiN00ocmj7ZtWtVhvdQlHrJ0xnXEUEgUUMO8238gAAkEWEIxOBCAOCEZApgx8jQ+LZ0dtdHFqm3KyyOkWCxFRWphYRlbGHFcnFlmfvkWEKdBeEgUHQVQj3zPv9fohhfbEayGFTM3PkGFEAHXpi5jiEhlwUbLv41eu7L17dXlZ+2HdDF5+/ubvrhp4hAkYAQDfEEO7DqqmayhGgqCZHgooiYIqJs+KclO24LROFYZxrukOrp6S894DSdYyIdV1X1UY/bNs9TzkkwcSwwFG4ZqwFAFEYpk1gT5V15Cw5MuWcZiXrIibIZ/e8Ge1hLUHLCfZfWyHGAREjjwGNkyFHSKQHvXSk+0Ol6UarxjdNU1crRySMIupJizA3TVM733WHu/u7rusqT03TvL6/2Ww2b17f6XYcT/fp1XXNzLoPhpMJWlVVCIcpqc94zYarK+echq1OAw8hCACEMIg4771vPFXEzDje5AQDR0B2vvLTPc8AIIh1XSta1JZmAWaUabEbcY9jnt
KNWsjvPHqouEqxuylOKh0otQsBKKzZpw5dqza8BljpsbouDGlvcPwKx2OuPAVFKJWZ2XtPDmOMMQ4auMTMu93w4PLq6dOnH330URz6/e2bDz98/0//5F8PfddU16t1PXT9MAQS8L5yzrVdd3Gx2W63urHmq40QHfp+s90SBCIC6iSOvOeoWtWNjkUH2IVBd1+7oR8XSsLotequ5rAfTyQqinSXtXZBRDRNUbL2OUYNP767uxuGoWp8COERPvC107viCIGIADnG2HVjatNh6EWwqhoRIYxN08QYeQg0bjzg8SpAXTocusbRulnHOITQe+987WJkXxE49BWtVvVms7q8vNxsV03TrJpeGXIYurv2TdcNIoLOXV5eXFxuHj169OjRg6vra6iUbQavnr0enTpuQYooT7A5IZM0QvJAjvmIibruePg1KW5mTnKi9dNPx0OZE/9pp+VKkvITmdOAVuNP3JbfdmCtNKt9rHoCs1mvBkcChpmdc03TJLNGt4bVYYhxyFSw9sgx6Dyepk+toPy0CHBCGsxX4NLZPOuCqiUhs5kexVw+m3g90YuQYLqPy+4GhBCIfIZkAHCOElnF+PZ2Q88adml5D+b3TKbPjxOoUdMKQGISTXemv2Y+AwCIRCJg7rt+R1gjVUOIIYSmqQC73f5N5fHJ06uHD59Wni4vL9/c3P35n//ZV199dXNzo+p4taq77kAETVOpiZZAZXP+IXUqUxbchPY08K7rHFVJBNKMJSIp8MmOOtktE/mO93PAPGBSNYvzo4jJ3KahKSQVpwkgE8xs9rUbKQkAmDxwW9/CbF1frZCxlohkH84pNeZHmXECABHZi4btqKEwwtRHtkA6p92N9r2Nf07PSXZSajV1URzOXIhkZMi0Ops+H+dFOua7sybLIp4TQqx6xMlBsvrn+Dkdb1sRcyGqVU3TWZHj1lkauBgv2hJFjLteEt1KqLW6mBlxlhIg6ZPVaq1wHrMFzCPTsphnMOrOLu5YTZhGioiaSMDqExlDCrMA+BEVqooTcUWAjymsZ16uiKgTMl0RaPot3I4MUfbN+H5O9GwsCyX2hChAgsCCIhGcn8w4RGBE0gPPIAMwiszkFHVvEEbFYYFx0+RrIUlCnTjZMow2NHGR6jc30VGpIHoIjQi8rw6Htm37EJiIvv3t767X29evbiKI9P10xRkwAwE6cjGGru8225VAZAZ1WkWOASAxDsyxbhqFSsMuJOLF9mq/awGgqhpNveB9fbyXGNSlzBGeob3kfy3lwg0UUpAkJanZnOhLJB5rOtJQNBIYuQlJBCIK1fUgctcHcRVU/hBAqAIOgrozzAIsERGAPMbA0Vf1xebNfvizv/n8W++8g4w3r3f3h+G+D+B8EIxIQ4z397vNanU4BJQhht5776gi78ChY9dsNnr8YbwUzkxMMOnDGFhwuiogRJoOaOi9BRoHqLEzGvKnIw2xjzweRB/10pRBJCEkyvGIAZlUgtlkJMbkI7NQrvO+uhwZIez8OMP/UhboVKw9o//6eRJEmXZKVTWpia/d6QUDsB+Vm6Oq8t5RVU3zSN/L+uKSOcTQC/e3N4f1ZgWyev7VXXfAzWajKcq3260e4M+MIkTsXRTqun7cYKiqyldUiydy63XT933XHWKM5B15x8zoSAKn25vnUTac1HWidYyRnAOAaroAHBG9ZxEh9OMW8ejt32vMZNdtYLp1XffEdrudOjZt2+52O/WXNGx1GAbg2E9OlEz5V0cKUtqZdmn+VcYb/VVjPCBi13VDwKqqnEOtUFWVd5vb+91PP/mb//K/+q9+9IPvv//eOwCw3+/37W5zcdEedhKZOdb1KvZD13XVqt7t901TKYRXj95pW+kjiIBzlV9XjaxijMKsO2x9P6hQKMyHvru/v7+/vxeEtm37vmcAzSirlgwIVVVV115dQe99RXpP43A0KtSXUVoLd10nIkCrw+Fwd+fW21VV+dW6Bo5t2+53d72GnjpwjroWEV3fB0RXVX7oox7YTpqKOUpEInKevK/6/tA0DQAz4rpZMQxE8eKiIceXl5vHTx5eP7yom4o5DEMXghKw7/ueyDeb9cNHD9995/2HDx8+fuedcS1vPA6sZh76zIBY1ImZuWOE8/gmpWkCo3nB6OtMUK35YrWGHmaY2wRIZsU9UzRpHct+sqgsLBjWcrIWTFafzZUYidFFBGC2HXRsc34nhK2TKaYMzwY/eaxLhiLnZvsn6dfkz+R4plkjSblnXvdsFMbySHRcWCmcNI59j2Yt4BT+LfJhmm+sE5JAJaJhaIkoxoDYV86Tc4LMESIPu0Os6+p73/3oe9//+MH1xbNnn3/yySf/4l/8C5V2ZZiUVazreovP4/OJJWe2YWx2M4ePRwrTeMV4BTC5c3Z0tgWLZyoCO2XyfDKoxGxvnOJwS4J8mMd/0zU7s8bSzryMi3mJDxeyEGc9JmYrhSiBNFcIy5DDkbHzvRxE1LMDmCV9TG3x5Dkr9yIRIE4+5FzD2JHmQVBWQVlAeX4GptQ26U1qE0054gcAx6uiMBErExaLTzYL5EkxWoJaJKNxwKys4dyHTItczKz6JNMeOF02AAXFF19mGLNrOllNMa4pFDwzx7mGMIyjyBb4RHSjgmlayLd9TSfeFoqlF5xl7HJ0ZyqkostvMu2wChCByLjLjQCAEgkYgeGYPgYAYArEPPZ4ChL7UAr7WxYaM7uwRAQ6Zlrf7XZXVw+8r588udxutw8f393c3Ox2u5ubGxFJCQOVJzUnMwBMGwsuRkknBp2rnHPC463fjjxir9CbWR51Z/I43jmDpaWTUwjJppJFziyrpcpZsyU/JOHVhwg8BsYCIAgDqGojxIiAglFPmI+5WIGgRwEEIRAGSlclOecEIYALwm/6WN23GOPNbtgNfRAgJCDPzEMf+75vqirK0BMjMA+BPHsBdJXzXsIw7XrhdOHTaJonkww9CU5L8DIulrFERNRd9ykKg4830eG4gi+MiCiMQDhdaKwIAUEkoIky03IqH/nTilipDxcRbmeBksowIn7x/YJXLyLxRLS/c45FQFcyiMg5daXabq/aZpDYIaIucAAQYFU5lKFvO+HBO6grQRk8brerTeiGu/4WdOmzDXpPAGiMNBAiRlWMkRkO3nuYrv1QgarrWhBTOKVu58K4Vj5LqjxhZpyM5kvPIhBDOG4GKMr1w2HolGq6M6ZzkPf+sy8/S3uDOHnp6lypT5jcdbWp1nUTpzyuxwkFwXtfeZ+uwWDmwJGZw6CnQxeiuBGRxtvSI6IoPIL04sWr29vbVd18/unPKk9//Mf/6tB1P/3krz/66IMnjx/jag1tG4eBIdZ18+r2ze7Qo1tdXV397LMXz756UW8uHz9+vB86EWVS8N7FGOMQYmQeRl9XB7jvWr1P8tDpITqIIs65zWYz5nfxq9F11+1TgiRryooyLZvqKVwOo/bT06pV7cgjgHT9AYVjHIa+Z2ZyQOT0EgwR0TtFAcChCAqA6P5TVWm+nYS0uKqp8iICUrnNtr64eHh1vXnw8LJpHDkBF0Po9/s3XXcIoRcRq
h5sLq/eu7p69OjR9dXD7Xa73W6hWUHoQRCANfIEcCTKMa4yE2PLf/Zf+96aGlaSF5XsqXkr2RAybi/Mkk2lRfosO2X2YdYjGg87G1eqYDV+trCUzPRk29l1WZ6HQ2TDWVzBtTXt/GTrTBWO62cJPClOGVkkZA1m+MmGbEFaNCay6RPnJjIYlZ0Ua9l+ueB3Ch47Olsh/dk06+kS1T2LeE/kHEu4erBerertZlVV/tOff/JHf/ji009/9uz57WZda7NVVWlsZ4xHLZm6zp4tDy8aChZgEEgmUTa07MOMWBmq7eyY0CXCiz5qZmQnpFmYy35L2pE5NSfFESNLvgmkXKIXR3EKt4tgzHCLggggx0amcUm2wwNzgyMrlt+yxYVs1kxdpJHOnFXI5HH2oR2jpbX9RP/VM2/J8YOjyMyATPUzfXssZ0W7xDZNx3FTs1bQ0uZnGnvac8vIpGthmdtmG7eg2lVwwzlHAbEMNmnmUW+kb/VXs8Mwwz9NwSMWWiKKkdnsS+AUswAnChpFaum1uLME1mE73WbWPiKiAKMgAIIAMoFahOj0F9AZBPQPyIZ6tizqpfMVTsE5/p9IxlMJGGPsA9/tdqvb28vLy6qunzx5cnV11XXd7e2tnrHZ7/f7/b7vOxHx3kceQgiRh9hFREo2aIxRr8MOIRwO3X7fMvP+sI9D1fdDjFHTJChnzeYLnIWhpp8yni/nl1JMsvelqpR5dmIxs6HMZ8ZjI+oOAeB47fWkrVV+kdIh5PF1ag3AqfISYmAkinqZI7q7nsPtTkJsD33gGEEceQAUkX3X3t3dEUDtxAkTR3aOojBg1ThyFUxbgulAV9+H0UCvKj0nRuQYprMDHnH07AjH1JEgoj5Dr0nwIw8qpyEEIKXIwjQBkhMFETP9Wf4LhbZZ1GYytzdM1zPiLn5rf0p8MlfCxyOLqqhTVMt0hzvoMhMg6lKjr11d++128/DB5dC1EvvDvgmhf/7qvq7rruuYeb1ei8T7u1tErJpaFZE6EgnINux9TIFmEYRcNd0KONl+RKR3JAAAsCTwpkZm+mocOPLow8+v0QMAPbur+13OA4uEoec4TothOB7g1JJQlBbWLaV2YzI/iBNHJWeJiFzlATEKD2EKN8VpvtAw/vFYlciozCVGZgl6JaCrq77jQ9sPw7DebK8fPv7qi8+evXjx7Msv/uhf/9HhsNtuVk+ePHn69OmTJ08264tV0zxZ++rucDhIH/s+DNuNr5r6fn8g7wPrNiaDbvrt267r+kOr2WJ1dIe+0w32yHG9XqtnXlXVxcWF5pVoqE4+iBupk9hpRJx1CIExxiHGqDtvevKQObbd3iEwB+FIApoUCjhW9UpEYhwQHUoEEABBks2qBmDhXk8da85YzYRzdXV1dXWx3W5W2xVSFAjeQ9fv+9C1/aEfDohycbl6+PDdq6vLJ09/qHRRxtbgWAyD5shFRBjvlB25ZXZnQGIFnLany+nfTaGMyc6ApQNUts3EuFYRJFbODBTEvMckt2hit2w7b1/OqI8EjB2XzPcQypK+Tc+ZQ5j9u9ipHW/SvdmOXGok2ymFObZt5WSc4Rz5dmjWPdDPk0JICMmCN7LhlJsbSdVmiukUEk79pN8OQ3Su8pWvqnp7eXF9fU1Ebd9WVXV/e/fZp399e3vbHnbM0FT48KphqSeVLzpxmybNECSZsEfnR8xGq8WhhYqIOILlf4OQmTU/9QUJ7Yv0Sm9GToNjkowCP7konbF15oAtAGy7TvyWEVfMwcUSZij4s2whA6NsZyxTNo35KMSeJzyBk6KlYphZtcwymBPr6BCWYywbARNAlQFfMn8GDE+nZ0sgLfYy5za1bBcI7EPaUYGCLmwCPnEKQB2GPgGctZnBT1Oos2XjtJJt0Z56x2JhLgFjQ8vsAK1+g4KlSyp474Nw6mUR51nXJZLPf1J+LnNvYV4DRUDvkwBAAEZBAT2xzRknCuIpX/CMqjwPtmXaM+VIaxEAEhjP/LRt9/r1Tdf16/VaL0eu6woRHjy4Hqay3+93ux0ze++RomaMCAOHEPs+pMMgzOpTDHd3d19+iXXt+77nWN3f38coiByDRI7Mx6RiqNauuQtUrS8p3DOeR1OfH6z9MFW2XFpi+5TsI2qaFgEWBBTWFf0xLwhABATN04OIJMoPQoBHmwuYkABAM4IgYc8S2k5EIjkHAcHztB6y39+/evVKQn+5XQNHgrqqKhQEckJ1RX5VVWhCnNq27fugu3wJM+iI3HRdBEeazvuIyQHbtu0wdF3XhRDiZOKKCOJxRcni4XiAci7FMR5vGlykV0YsG08B1mhZWliEwiGEE+o9NXWK7rb3FESKiHXjQfchnO6ver3RfH93ixCdx2999N7ldnWxWV9cbtZ1/W/+6tMY483NzatXr/RgWNtKN/QAgx7gBIBoEg16alCCxCisqXsjBBUTjjEy0BSJ5hS/bty/dJNlcjzkr0NwTq+QGK21dB9I0s8ymtDjcaEUOIpRMBxvbsw2/VKDmthznDVoWrtFrJwbt87qyjm97I6HoRu3PTlapwARNVXYKLOMzGKyA5BzzvvauSqEMAR58eLV//n/+n/brKqh3w/d7t2nT7/36792eXUxdP3ffPbpT3/2N1VVXV1dPXr06N133726fvz+R0+/++Lu8+d3A/swxLvDDh2FEDRTi254dofRIdSDnSmvjObXWW3W6/V6tVrpCks6HVZBDQAOjqtUukxaVZVEjpxOukaRWkQIHOtLGCeyw+HQtjCEjkAQBUEqh0heJApzjAMAOwTnBGlsHElC6BGYnHjv1+vV5eXl5eXlarW6vlprmDdLGIbu9v7+9u41cxAIl5fbR4+fXD+8urhYX1xcbC/WtFoBb2WUZUBE8lVVe4AqDnsAEInj0fJJOry1EjJTNZvaS9mzJZ17yT7MBDX1kurYmunwWzpBJ/P9CjbrHzKt7C4adrZTMeZ4FhyVoM10R2qWTFZM05rTdQWYzxmElL0U41mlnywaSxPnlFmT2kkVUvs4d2CsTuTJIcmaWpxHUZc9ing5KHYwxvHOIx7Tr1KEgJZjzOpbNNoBtvcHdOw1IZbw/f2b/X7/5u7m7u4uDAMirpvVgwePkKcsajzyzAT1WEZDs8gHgyZU0kKY7UVMWlWTyuR3P2Z4AEPWjCdTU6l9MhlWR9yaFuafzygLhXxZ7NH8JAkYPs+mzCRisCTg9v2i+GesaD2EES2FNpiRO3HaFCZq8SlyPAk2IgRyGddSksDiuVRHi4MSES7uMDiFcO00TpvPSWr0T12QWxL2oxDJCS8dLDXNHl0aRSYmtjXrm6Vm7bpvNhxbbO8yLf9PduGRNDkFi9WiEsmLCLRApmp2p84yarkQpsU5xzE/c7vIqFqyhbYMpAVtBgtXU54pcQqoAwD9SERgnHcnvQe6j4TJ9dFe36aPhPlSkC38i+rdlqRzaNzRcnoO4v7+XkOqdIupqsb87HpbV9p3IiKPrmka8GpEUgxyOLSvX7+5ubk5HA6r1SqEEOMAIG3bPnvWOodEFAZNFj3eFRw52kCAiQ8SrzLR
TI7SoOwOyZlRnyJu6iN7cxb3AKBhVhNLy/iXYDJ+NSI0IiIyAI5nREWEgBnHGGgCAY4CiASCFJEHXXX1BAFBU/MjAUAcht3dXe2QeJChRtmKiKswYqigY4D1Zo1TkgIlzW534HY8ma9WCqHTi+mdczEMirEp+Ue6J6AbTws7iAzGPVhAERfLGIgI09ZJQrslR9J1X0uCQmFC+eeCnM7PcqeHcm1aH8qzzdq1XkrhqHJOr0gRIXIE1w+u+vZw2N3e32+H7na/q717UtPlj374cVVVjx8/rqrq5cvn9/f3r2/ffPbZZ69fv+66w749pJwrGhcdOomRQ4yCKnQwDJGZV6v1MPAwdMHpHQzH0614zAI4y6+hQyYHyV0UEXecH4+p0SdCjCYBTxk9dBNPpiuprG5kcyIm4Q0RnR/PsZN3vqpoSoSjHmAKF0882bV6r2PS9rpZyogIonEoUFVVU6/V5n/5+u5waKu6ef7ihuOhclBX+FvvvQuEEeHxB+84pBjjmzdvXty8+uLFs//2j/9w1WyfvvOt+z3c73Zv7gfn1+iqF69eDzyEEDj0wzDEIWj8s0Nh5qqqNptNXddCWNf1er2uVw1qQhPveTr0WFXVhlYJD0jjVhkREKj6oglRk1qOKnrDEPthGELoh9iLcOTBExKBI/TkNcMqOlg3PsYYeYgxiEQBvYWSH19vm6a6uNhcXV9cXl5uNpuqckSkOYdf3Dy/vb0VkaryDx4+3l5tHz1+sN1ur68v19stEIIG6fZCTY3E3jEgCgeOsW9bgdZ7D6qhEv8jg+4QQlHQWIqJI5OAnZHnUlytEGbWAE2n1LSL6YFofuxY1x5sJolM+LOuF1XPkaGnxAxoDBG7wJypFevYZLKR/YmIUzKD2ajfEjn2eT7DzWqOjn5hgWUrcMc2C4fQjq7sWk93nAKvREL2BuZRamU7Vn2n51NzgIh4twoD3N3udrsdOmEOh3a33/OjR6u6ciIYQ3xzc4cMTdOsVluabtEVHnczOApHnl91dRxjlvUxw08JIRaow+O9l7O0NNN4Z7suqSky5wz1eXKfZjAkPMi0WZdawCVHNEN4ErE0kGjShyRoLSmzIWNxj+Ip+bJUE5PdIY03dacvRxhmR+PGExfz1k6OLjUFZsHCbh1knGnhse1kP8nX2TG2OFfZDxPA6hBOnph2pOM9CqzFQ6KphZmIYh5AsbznBoVgJgYu69vRafqBBEn6NUxnIbIIjsS3Vk/SlKbCkkaO6zKzclSWUwvJFpF5CHqqbP+coE0bjOOKj2VyLBzsRSyVzWZ17BssnK7FwtOUiiR4xIQgoN6gwKC3FbjRepZovh19Qjrtgs51y0IMdmKkM0DOh0yTNeMAx4wjbdsmnZMiplDz7E0n0ypf13XtVxPIQn0/DEOXGInHk4QEwF3HiFDXMQzjaXwQChJ0BFZdZKfzM2ynIWf8bLVTenkKgfah3JEuu5uhV5N2s6CwZoVVcKaVvCgACLrR6UCEpQJUtc4gGmszLjSgsCAACjPGcZomJxIkSgSklBk/kkDbthIHYIkx+jpixT5yHePKZPKYzg169O7+/p6Zx61ChhpE3QbfNDjbXx0DChT/3vvITn1FdRVEcodKEEoO5IlpSz2j2Mvst5Iii8x8ihDnhdGSuNTeSedAATBMEVIgo3szOmAouF4BsvPUrKoKI8rQd7svd29evD4wh3fffffq6qrtu+vr6+99++P333mq+Ta7/qDLK4e2DbEHgM9+dq8uYgzMzEPkw6Fr+4BIwzAMA8CYcmxSnip5rHCOy6xmdf44Ch11hRbzx7kVjmp2ZtAGa686B3PFa2V/1LMw6mplmIFjuvXkiF813eeRZfZK4fQax+Q6dVVVHOFwOAjg3W5fe+cwbrf19YPL737vw//8f/E//+iDd589//Lu/v7u7u7y8vLi+urq4YMIIkO437Vfvnj++ec3z57vDh304ebQhoF1oZYhcog9CnhXu9pdXWx01tMsyuDIe980DTrSVEBpyN779XrthiO6EBFJ5QhAZZ9h+iQxEIYQui7GXvq+H0IXJQBI5CDISOAQUKrxlheHLL1AIMe+orrx63Wz2TZ1TR9/60PnyFea4FSY+27oQwi7nquqWl2uLh9dX11dXV1dXV5cba8uwVcACCGEQb3Z8Vzo0O4E9X7RCqn2nkGiRhcLkD6MBAIEPUNoBSPpfTTGhJU0u0OVrSiAUc3WaDg1RVkJp+OJmqORoe24KaF5ZmSUutu+z1RGAibZ0Fk1NAUKo7Bo6uT663lzwVbLwCh/WhxXgjRrJFNtM9XJOR5OQYWoK5j5CeAMS3ACyWUFO5DEDFJ4g5ZhwFifAOBw3fUdS/QefINEsN2uLy9Rk6oBOO/rVdNUVMWAoQ+Hwd4fqKl7FfLZOrRBQr7tdr6IiA7FyoJtIRvyZOPNMJ+RfhEAi2GLzFJk7E8WgamFbHcX5jF+2VRtCwAoq5/h6oyaZypb+h7RZfolIkC2PqHOeWmRAhFhia8SwGlcbrqvqUSslS+L5GnMQvPTfVCQtRxgItOZahOKjktsczznUqPV7JJcqh9jdNPkncmRK2hdjsViwzmnW+v6J5kNSZxWK5IJoifdS5paeqU3Mr+uQ3JTPneA4cQah0wGpeHtkPhXAw7Tfrgl7mLJZDxRLX2VSRnMRexrC5MHFtALJgCBZLw6HARFNRIJjvlFEJAgLjRSHp89Df/i+69tLB0JkWnNUWlejUkjhkkDj44ET+cL7qcLtRFIRFYXVQiBGRAcgK7BHZPAJYqrjkl3psE0xaSNmlL16WM5/HKkic9LuYZC9rPWTi2vnMLzeC+iCEyhHAQowBGYAHhMK+cQEYUJUMALCyCjICBN2fxAOGj+K4kiqJmwIB5Two5Rprqe2Kwqxwwc2/YQOJLvseqr1VA1g4tBr5XTDUDNh+HqCgDSXXB93/N0nKGuvA1OSeZvyoos81TM5bZ1JikwLWRYpQEFZ5bv069ydtEK5tPKKT4vexn1ockWbiufcgi99zClBZJpfZ+Bb25eXV6svCdygiBIfHPz+qd/9RfPngUhvL7+4urqiog2m9X19bWr/BjbWRERxSgcHEpNRN//7hPdgIoCMXDXdbtD27b9Tz/5GY03vxMCxBBZbyY8TtkpwfJxptP1k1n0mblHFwDsTuOk5SLiMYP9YG6wzKQmPdgdGiQENwZIx3jcXcRphQ8m80NLU60AkqxYEwWJgNBpNrthCG2rNzFIU6985Th2bTcc2vY3fvO3/if/6X8KFcnhToR/+tOfvnr14vXr17e73TAMMvTbi0dV1XehYxAi5yrXiMMYNPbWAQKAQ9L8guumSvmWAEBocnrdOImkCW9MchNHg3/CsHI86jzujpctjWHWEsdzg4fD4XDYh9gLMiJE7jwhOWSQyjkiWq3qVdOE4Wa1bjab1eXV9upqe3G52mzruiJEGULb9+2h63VnmAiR8P2PPry6enB5fbW9uAZoJvsWQzcgVYQbv6o8+qQ5KwCAXphBGFCEI0PUXGcIIMCo+c2mfRGfyRKZa/dgHmukSyaJ6lb
GzuxUZGrCvswasYZLxuU0Xpkq6YRM4tc43WcIJ3RNerbCn/i7ZP0MpATV3NRYMFn0cb4TNStY2MqZkrK90DwIKkGYvkhSp39mhtqxcTTPZlDMDObMW/oEcXYhbKJdtlecPmQTcpyp1znGxq6To5K9T42jsXGZmcghusq5ZuWpin1/GEJHhH0/NE3tfcVBhnYYICJUwFBVDczplU0Vi/jPVOEidY4v00TIsxU77TYbuH5nGc/iweIwW3zJMJmBbUFd/DWa2wIsOewnCedElJKLYO7VHLP8zd/nz1mFIz+ccBGnX4+wpUCABCQWRl56zn5N4mCHbJYnT5r1lmlP/Wp50v4qItN1c/kkanfMbP3IMTmrYpzzLOTefmLlIsXkZIPKpNgmWUnyWw7ZCppMV2VAIezKISmsyNpSFi1WD6RmVW/DnP1wyuWQ/iSaLcTasdgHRBSJSeeQuUz1FAVLgma4XSTTsam3WCSaFxIUPSemKRSEEOLRdBYEYAJEGdOofj2cJWynylyTn4Ncb4SHkS7HIfd9z8yI6cpsDiH0/eC9IyKHGBEBwHvPUXT3g1kAUA+pqUMI000YRMQcEURvEwlB6mpcWWC9HMwdb0AZIU//wLKTYPn5jG63cr2oS0udn8lviVIAEALShSlxiOwABQX1xjdM2l5Ac7YAMZCAoIAgIEREB8KAgZkJEAg1A63TPCJREMU5L0BKQ4lMKHVdN0RD34YwdF0HQ4SBK4aapYpBs1+orFVV5X1FlUfEw+GgyTM4jreuOeeExwvGYowis/RLWifdQGjRaNUgGXtg1Nt29eoEf2brXyXVrH5Y/FUfNOkgLK0lJbWTZpPy81SsA2wbYWYEPOb/FUASEEHEpmnIwdB24mKzaTgOL18+v32zcc7dvf6K6LlvfIyRCC6vr/b7fdPUm82mXtc4Tfcap+ecq3xTVZUghSG2/dD3QUGqPQgSMzMyITpXBR7SoCAX8Nk0MW3gxTnqGDSD0OT5iykAUDV1hkAYV3COkXR6bEF1btd2iROOGC4cbJp2zXC8etTOhqRG5lQn5TXt2rZFqNebJgyt966q3RBj13Wf/uxnH338fh9Cc3n1gx/9Gvgfx7Z99erVy5evXz376u52f3P7WkQePHhwf2DqZXu5GYaBxqOAWJHzNO6Yhb4dU/iozpmcCBDWDExRhIg0cKZt24vqUoc7jZQBgGCyY6eJmzmEEDjE3e4QQui6Q9u2bdsOoWOIiMDS197VTeUrX9f+8vLyyZNHlxcXjx993DTNetOsVhU5DrHt+v39fXdo7wUGIlivVw8ePnj8+PGDB1e0WoO7BPAAIcbeORQGEUeu9s1WhIQRxKM4QCcMIkJ+SlKIoGfaxlRFIADW4pwcQqv4cFqgTdeG6MWLiRGJCDnqGobKHACQc6grjmO6MABAonGOH4YBkUDDJiSJK4ikKEcEwBDUtva6EGK1RjJKkhbIVoITl+OUCycdEYb5PTCgyvpoAglOmXamV1b3gWg2gKM5ot0dlVGmLgFcUkZq3apcqfWTPonThTxqLSXvS2VWMSOSklUkmQeR0SSybvBkS0k6EzwhTUSEo+hiRhJRxY9zbrrrGFFjmXhMikUmSCDh1lpsMF8pMMM/4iqRBubbDimLFxjdbfXdJGA8ultV6ymGEA4duOAQPYHnwE21JSBgQhTxugM+oB+TvqmjYfET45D0Zrq6g5lBjnOP8jNk2xQilOYeEWbQ7f7kxeFkIaWxT6zOIqBnYBIFmUGXXIZhnJidI2aYekaEaBNpJuTzPBIyva+qalEcEpns9ICIemFuMu516UckMlsHcmoNmbCykNhnXTcYY2qOsB1Fz6zpEEeAKcUYokdEYYgMriJCAJossCmLgZj1CBFGhPECjBR5gkdQNPAXHRIJIvIUCHxUaJykdcyv0LYtETlygDjetE1YVSQyzNY4iFKF5P8n/iciwKN6lKOXi8wMmPTGUf84PDp+1kiyNY/MqZYiQJhy/XvvnR5yGIYxVziMp9Zw2Z+fXZWZXtoumIVo3AUaBr0X7nhDo9bUuH1cOssn031imcbQAWZLeKmOZmbnY5L6ZOKPmLEqBcDuaTvnSHk4BK7qioXHq7f1P0nikFuBdq3OYikrll5ZmUkZzKg28gwf1GJGQUHUIEF0JMzMAhD1wmERARmICHVvU7+FMSI99WXxllTiKbDBLJQcX7GDERGqYsZfh3BARCIkb+4pEQAY8+9HljgaP7XzIABDgICoi8hDLyICVOn99kQUp5VJvfMDyYtqQiAQDEGFEUJUjDnyyjmDnT1k3ErQfzwABDYh7qpVFC2ECnEiASISkgKdYcnSOpunLJ+XmLfEHXErUQCExnsWwgStYMoLOaI7AohEhztFg8I/Gh8RASsBRFVnAhBBgpAI1ethGJxDESYE51w3tG3f11dXrPvP2gOH0O0odjte97E/9Ieqatbr7WazWTWeiCpf45q8qw6HQ9sdYowxDGEgD3UIUaaQyBD7MAyRhxCHyJ3zggMjcpTh0O18rTe4EAtDkqcjLsftETeCxCzi8BihkOgCRWhuwn+mNNKHyYSza/QiMgzHrMj236Qf0ptpEqcMGKtuywXAOl4ADex6cTGCCDsMNUZa4WoF261fuxg36wa5gbjuuuu9RGJ0etc8kmtqINr1suvDbdvHV69lNO2m3t1qvE4gReFFEZH9/hBjZNE9ZnRePCJRAI5N04QQ+n68h2BkzgktUVcWEDny0PXe+2mSTVo0X/ibsKFaSOwRRDjGTCp+RIRjCMf5iEfvDsftT+KR8iNR1WA6Cg4GEWE9aTtaoUBE5AkRGSIKjvc1BGahlUePIEQx9FcPH0U+/Ot//W/+8qc//eh7324uHMQBgCAGV1dPP3j/6QcfcP+dF89vbnZ9hJe7HQrhO08ehQgAOvWw8+p/BgDw5IiulA5EEUWQhPy43Kl3ziOoByGEbl03gVvvPE4zNSK4KegMRAD0DkLuuu6w27dtu+tv7u/vEbHruq4/aGZRJIihXV1vnQuPHz/8zb/3vXffe1xVfr1u1h5FZAjdy1evbnf3fRjI176uHjz88Orho6fvvHN9fV3VK9HFRECE65FVSaJoCgxmpR2yUBLRgE4AIMgj0HtvRKbbShgAIg+IoKuWAgIQdd70SUJSQpfMnkj6cdQBzrFxwFI1ZpbpLpr0MqlR61Vqyd4kSU5TWmYQ2DVva0g5c9l6aiQ5P6UCSk5p9qsr7u9KRklqf6Y5loJjwcwfqTWLq9R41pdt0EJblgwtdnQWh4koyZFOjrFFeNn4KepnK0AWmKwpNNY8zLW2hTObAGwj6UM0Oy1ZL9l+2pmSIT91utg7mlLiQf+AOe0yhs+4SzL7bCrZRhYdNU6e4zvrxfIkmB1yy2ZWjjKOErPjlN5rarhFOO23JU1t18nxyHCSOl0kcaKvXfGBuaRY8jGfM4iz9mFuKEyDXUhPMjXOyd6xcgQn5AWMy50qZHsX2YdJqyQCJQitlCVQdaFBK9CU2gpNVlgxQTsW5owts8GW0pcRLiOurZNxOM6SABUWViH+pek2rz
lDfuIHG9is7xdjWLLFkb87BeeqNZXzoFqiZAT6VUF1vkJG6EWVkt7DXN7PdGo/SX9+LbRZnYx5LOvCnOW+tuVT3ZUq6xQM2cOiwizhybpARM3mgiaURi+TAABd/kst6J+6DVjX9XrNzND3/arpNA8hEWkm/VXfaGbFw+EgISYhDiH0QzcMw8CDphiZIgB4SiXlCDFt+ULOhEfCpX9DDCVyfoGS5kerEsuIj68ts9U9o77SdGMro9E+gEio+YEdAnGIbXd48ODj6wvXdvv1xRaaVdd1U6J+QMTJ0yMi2mxXMcZh6KY70Mfsu/u2tYaliOiCQl3XAKQrmSLILIgACK6u0LvtZv14taqqSu99UWg1Myl6pxcS6G3yM/Gc1lBAz4oRqgOBczfbCqBM9xBmHJuUqkeXhDfVSXQZp1E8Ijaz03jKZ6M7N5mQIqKuhjjvBF3btk/fefCjH/16XdfAPGbsGi+G1BKp3rzz4cW3vvX5z//mZVVVDTsAWK1W082Ho0MYI4kIAXpfEempaQEAgcgwxq6PQTqSWACJSJ2aJBEAEmVcIY1B08boXS99d2j7ob25fxlCUB8+cqzqisg7T1ePr0Psmqb68MMPv/Wtb202q344ILpnz18OY3rfoWmad9/58INvffTk6dPt9TVMISSDREFw4Bw4Tu7osdC42CcEswMIiqU45QMWzX6s9Qk94pRhFBjAEZKgeEtsq9Z10XdmpwKoYz/w8ZPZNLAUSKkO27g/O0mdlYdMoVjFakuYxzonpZ+4P2s/41HT0cwLTd0lONO40tkJmOyYjPtts6nHxbM96U2mgDKBtKg+X/QTa/3YpjLYsmgBnOIBLAwwV45ZI9kcZiHJHO8MHphoWraTFSqijnGerMKyDRTpqi0aZzx5wrtIn9hOU4VMG84+ma+0lQTNoGXO27eVLVNNMQyzfufUyW3xRNC0bZU5Bqla9q+VDi3Z8kcGZzkENNahXdzJOPDU2I8YiLl7kAlCBmrZPhqry7KfhTB9JXPHFeaMqv9PkNid7fNSmYmwTGdXFqG1X1kIk5K0R7gtbu3okkRnApKxetrHLpM6WHxmnG9xYvUqTHKaADtFi4SHjCFT+5l+NhVyDsmIm+l55uN5FdtpuSD4b7Oc4pZTUlD+tFjzPBOe6jGT6EyIzlc+Ba3VS28Duf01ccLXilXWqeWu8+PKpAzmuugte8wUqWTKd46uDA9WlaVfM5iz4SfI7Y6NiIQQuq7r+z5JPcz1m1Yg8jEKIoZ1dNNV2lr0AoX9ft913TQul2xHmU4DaQYaFSP1XpxuZ8MCpYgIgDKUIOK4P2xQt8jeFgm2ZNiw7sQ0Q+V5wr+2WOVWUiHVGSFELt8jga+o71uR6Lyv2IUQ2u72wYMHb17cIwICCaGrq6ppqkrvnhViRuccD368gL6LMW6bJiFcRFIORUU4ACA6HWgy25xz2+32nXfeaZrm+fPnyZtSanrv9dI8dfiViGUEWTTXI6FxCMsVtFORCKPoTZEgo6adpkuako2p/5ER3aI9i1ixdZxzms7Ho3PeM4cf/OAHf/AHf/Dv/rv/DnAAFEAGpHG3THe6up1rLq+vr+/v7+r64eXlAw7O+bptW0TdtjcyCHrsYkBE3ahEDSsQadvWOed95ahyzjVNXVWV93XXjjIYwhBC0Gz2IYT73W3oh67Tm1r60e2Psec2TZEOqa5rIqhqLyhE9PTp0w++9RG66rMvv7i5eVXVHoM8ffr0B7/+4w8++nB1/WgcXWAQXR2gCAKABETgJh8vkSZRbXQFEUbDHinppemQ9vSJbkETokCcbv4gVMsW5BgyanfVlNhTONlRBTvnaLx8ZzaLi3EIM/2bSeNM9pbWqu3KdybDSSvhvIjxARZnu0z+LQtm/9KU7zFbyE8F5zcgWajATHJY7MhlBY3xmgDObLtT32aNgJla7EiTyZiQULZvMW9xW2LMTlGLyEyQpIdUIaNpSVkwGiR7aZWaZaEEgMW/RakFUpYsj4wNsvqnHiw/WGjJRHXCpN0QMcaw2IWd6hK3MDO55aXoDMl2sNaAWBQxS9BsR+s8pyGmJKx546WgTS797PSpFHZVhnOZA5nVtIw6vRE4buLZBiS9mSrrt4pnnNSoIMo49ZumUod2OKWMlOydOTYJ7FItaJ20+JoNzfKtZTatn2k26+BZ0uM08WdoXNSKi3CmmT6a+wxtI1lkh1WAWWV94CnOFs0iHRg5Sn9OYORWvuV2q+5kMi+sK5iB8W+/nFEm5wXNohQLDXym/ayX843byqVIyvyidlvf6hP7yaJOPl+y1s5/WPab6L44IoIcnr9tfsi6s7KAiNkquVWei+3YZWVlbPX3uq5L0QGpC2bWdDL63HUd0RgmGkLQ0wFEJMCabKZpGh7G8D9E9N4jrgEAAgG0emAnmbO6JSAAALMzFEnkxQRbJaiymTqbqrLBwtdRx+Jq6vcbyEU2Vdk/s3WuVEREJAgzw5imkmN0TL52H33wXlW7r7764r333r26fPgn/+Yvnz9/Tm5rkZBmjRB4zAXijp55jDHCmAsjhKCLxTp9a0w+WAveaGPdhlI/UOFMeE796h5XNLeApoFTunZrQnhS7zZCh4yzd4oibG5yilPsqDU1LcMvqr5sYkrw6xvvKyKIMQJx23b7/a6qKr9qILRjgm6JcJy4o6trAHr48GHf9217+/jJg6paRWHvvdZHFABGbIjIk3OumpKoMSJquLOgbDYbAGRmjhBjbNv2cDgAwGH/Rv9s2zb2Q4xxCJ1uCXIYlGrTwCMA1FVVVVVTNwhc1/XFxRZR6qa6ut6IxKsHD7o+vHz9qq5XP/nJ737wwXtP3v044QIijLeYkotRiAiJPFQakx6BI0c/W+ec/DiAtGs6Zj5NDuBo6oCSAkEtIqfJGwRIzxPqewLyiR6JldMJt3LCKAk5m9dxxgT6rNEOWSOZcrGN2xsIbV9JRcJ89c5ODNYmyCYM08WC0YzmgJBVHInY2cDBaDrbmrZjt+OzInPL7NREe+pzfV9uflrEWhlzzulx0PRTNhWV+E/DyQZYKvSSlGWDpUawv9p/Fc60KpEomwGmZRHD4+jOYk+LnQxspKIWnqdGzB4sPGmMi6Jh0ZjNjklZw0RQCwlizpw4ugFHucj4PJNEZk5JhnBuDtr6NBVESseGregttmDfZ9SReUAmFPNWKrZCghmNSVHK9QRYLj4Zo1rYFncX7W55OSg7nLcpWWQBzFdkrVRa3FrwJuIeb25MTJiWXXEy1tMbu/OWoOUxZflsdQ8Kx29x7DJN7epilWd4rGhYUO1726wdaaYuSsKJUaRwguXKbe2EeYv/t6fd31IptQEUI7JSXBbLrnCCXt8UpEUxOUWvjCjnASgbOVMy0r/NJ5ZvS2hzSP6WqZ9pvLcsGdrL1lLRvR0yoTE83YqWjP7j5yIAsFqtELHvgyZi6Pv+9evXdV1vNpvNZuO91wuv9WBwH8bMMQCsXgpgA0RIwswh9F3X4WT8xBgFUFW4NcCmDa7ZonbazgKj98DsCH0txkrOtF9NfS0kE1rkEPuVGIsx/
Vl+gjia10bBROSATLe3d/z+1cWmQV4DctvtY4xN0xwiCEAUHkLo+l5UTaHUdQ2QNmMFSQAREB05pa/3VdL5SlmNupDZ/uEI7eFw+Oqrr+q6bts2LQ2kBGNt26rS7rouC5SApVOyljrpDU2JJK0GACOkI/5TAqrkiRg0RsiQOVu+KecCiwF9GfuByDFzUxNK9fnnn//Jn/zxj3783QcPL2G8uIWnq5sEAAEJJFxdXa3X608/fblaP3z65DpEpJUHgBiHEPrx/hQRiTwMMcYYQs/MLIHHfAqxqipmUdHo+tD3/TDEEAJILyGOHqAEANDPJY6QO4/O64q8AwAiqeu6brzzm+12u9msQuibVX04HL7//e/+5Hf+3re//eHl1Xa73bhVAxCgB0AEInAelP1EhMH5KXeDIhPQASLJ0dM7RoemGXPc7puTYLxgFsf3jODgeMORyOgZMoIHQJ9lt1MExRh1eckylv4bQVIUXGKdESDIi50FMxlenL8z+8POQ+oo2u60ZDseqX42YZiHI+QJQqsyrL3IUyqtDE6rBy2Lw3zlMlNPpWKy720vduBnUJrBkETXUscu3mTqNYMtPViJTV/ZLJR2yMMwnNatYGsm/Ng/F4FZHHVWLS1kgtFZCf6kBC1dLG7TMMvsrKkRWDJMk5It34sxTMUsSWScY61qnDRvmuwRI9FMUnAy0zO0pVGkh0TrDMPZn4m7DH0X1miICBHQJAmQ+TyKxeGx7NfykxKkkrKluJ0aSzbYctRl47AkBXPxOddpiYpMHtPcVu6wWYATDOk5hcTDPFLATqUWRUuQH1VW4tKkBCwkmfuEcy1dyr7FFcyFWg0RWKKjvskMxDSnlMSSudWYXF8w0mTBS45rjLPAp1Oc82+nZGRKD18L1SKvps9lbp99I3hKVW9lc7HBxJypml2KXRTqktUX4TmlKM7Av/jeLqbMOzg3rrcvmVwc5f1EKPIigRbVGpp9pKwCFtE0yZFLUNF0kFg0A2wIelDQuagOYRiiRocCgH7rnQMAPWPGIeiZJW3Qe4+aik+chrf1fS9yHL7M4xES9WOMKWR0NpZJLcD8fG+KNbNMBYY/My1U2nVTXzNsg+HVRQ6081emRUsKAugtfQJTxF2yvy+2665rh6FDkmHovvvdHz5+/K0vvrx5+eUrrRAC6sUJAAA4Js9TLKKJ//e1bvMS4pGHp/U70twzYDwo9foOh8Pt7a3aKtZXVCWphz9lSv2VDaqMCSqXRK3utRZj+jVVJgZE1B0lK4M4BovOZmcAYJNUDAz/y2RjZCTThDQyrXR0Xff8+fNV00w1GCSOSZtJACC0EbGq6/rDDz988WIXQuj7joUAAiIOQ7ff7+/v7w+HQ9d1oR+SHQsALEG3amOMyZdGREKP0wkOh6wcpNB676vKeWoiD3p9hfekQjTOdyQqjwyy2aw0jVZVuQ/e/fh3/8Hf/53f+a3VxRpkAHIAMAxtVV9ZBADHoNHaotkgAcEdt6nARBzIFD6Kyc6czfKG1R1OOfkmb5AEWMCJRAAnElFT0CB6m+jSSo4Uy67agd6gYvvDqYA545TqW3nW+nRCpSbmy4ak35aGUYIQ58V+m8k/ImrKTTBzW2lGJFRwkTEiG0v5MmstW9UuMDb7yb45o7mykgTYbi/A3HWxAp8pAjuExTcWmVkj6ZMSS6WCtmO3LSRh02e1pNPSUcaZiy1YvEHhuixS0AKZgZ2RJoM/bZ5jkfBGCh1aer8yLZiljqy8EJHAMfofja1v7WPbmu16kdkyitj6xsg7YjL7MJPisk2YM3/mIoLhq4wE45+TMinFwX5uKuT3SaaxW6Kckkco5MKOcVHuzkviqb0pSwuLQywMWUtre9JDTOCWbSG9zODXh5QuPIPf9pJJE5h1Crs+WMK/KBeZ1l2U+kSpUiFb+MXkN8pE2DbLpugleJlfegqMf5vll4fBKrpfps3yq0y9lD/BnKlgTqPyTdnj+UFleu888Gd+LcEoOe0Xo8LXsnqm5WCuajKSabF+XanuLNgJS0n2E8OLiFqizDxGfzKLSFVVVUUiQhhguoRwt9sNw7DerJqmEZEQAk6A6YaSrkNFiJOhPN5KqrZv8i6soWXst4UrQ4+J5Oca2E49i0oge1OGQU3Vlidcy1S2pOsl4EQpKMisy6MI6h0iIKEAwMXlKsQ2hPaHP/yt/9X/+n8JcPnZ5y/+7P/0f5moKRwDxHE3CtvWYkzPdIkIdseFv1QBEauq0T/Vu0hIiFOx82nGMzIdGky+opWyM0aIpYVVoRaTmbCjO0qudR0ZAec5Diz1U+XM28xGAQBN0yBHImrbFthVNdzc3Oz3+9XGg0TNwgwAAFHzCfv1w7Bvv/jiSz1k+/rm5c3NPaCvfNM0TVW7vu+7/rDf7+/u7g67vV6PAwAIszz5aQMcEZ1HR2M2V+HW+8o5B8DOucp55xFgvC9+WjQ/lqb2urASQvCeNC3c9cMHv/3bv/2jH/1wdXkFEJkB4gAAVbVKDvMEhvdjHn4EOBo8kUMIkZnrutjAELMlqChFc3YJ0hnCMSM6ggcQAQKJepdsOpcrAp6m8OLEDbpTkXY8YApAUlqGGGXKMn+EY+ShHFQ0Zk3WVMncWWupgn6omit9m5RjYqn0FU7eUQ6NYcFULb0pFVCmiU7poPQ+tZOUuK1j+zIYGxcFkxinOllWvVQy8U4aJ4WKJZRqszYEN+2+ntmm4xP5GBZ3ksVc/JpQmiEk6wLnE2GCc71epxCIREEyaesthnFav7G7cGOFpTUSyxv2k4SQkl5279E+8DyWj+fnBmnKQlSiF+YqMqmADFd6Q5ctWqeua5nvOrI5KqAv7cJzuZZhJcUin3k87WDZL/t2sdi5JDXo3LGRxFFWojNs6HaotXssr2ZEAYD5AuixgpjEcTIvYBggvUQzWWZCnQn+ItipZGf2Uu/lt4sYsOApLdDMmmWu9vQhpSxEAGDUjr17Jus9A3IRLal+pg/TG6t1Yc4npbzjPBkYmQuE2EQI23GlUFULLczvWbUNKg6sFPxdK2cAO8VjMEds+hfnodRl/YUuplWXEgh75j+VlCVCIHP8MDHrKD6q8WTBCj8PZ6lbcMmUt+NanEqW3yAo5Mel9BM6TM79ODNSM8izHjNZsJ+ICOHs9F02/KyylcdklTGzZo/Uauqq1XUdYwRmFNH9jbpeqQao63q1Wt3f3zPz3d2dcy7yhRp13vvYB21B02GItIgIDmKM5GBaYeF0D43FA5mCiHo+IydEQcZyaexUsb/SFAFk0UsmyV+qqW+sfpAlMyP7c9G+QkSOzMCCKKPzQajXTkGoPCHCerP6+c//5p/8k3/SdfX/4//5/0qAISIAqpMOAFWl8/VkWnMCLFpIps+9ZoEBq2YZiQhwDNnTmyqGYVDGsGfLtfAU55/sE/sTzk1Qmab+xI129rHbuWIM4DRvinE7YYoUlawYUlqzPwGQEUWf27YF4HVTxxARYb1eI2LbthIv0YHe7AQ4Xj7DzDAcPvnk5//0n/7z/+a/+ZevX3eu2gw9braXq9X6wYMHDx8+uLjY
brfbq6v2/u7qsNtHCJo/qeu6OATlHBUoTQzjAOu61rsKRcRRPN5iL0JOCcR1XSsKIw8yuZREQIDeE0sIPGAUEVmvm3feefI7v/vbD54+BYjdYd+sawAfYkfgGcGRI81lCjFwhCktE0ha8yUiqqsaAFlmR/CWi9BRsY3OnqR/AEf0ATgA3XLXbUMQEa8h49bIBhMRZ20sng4ihykqQ4UqLSmJcSDFJKqSaeUDJk2X7uWTye7PODixY4Ih1YTplKOON1sBStZnYu6M4eyFpImnM0Zf9DnFKCacG15aUnBU6jpBm7ya9NLiPLN+rNsGZuZICjEZQ+mnEEKqgIjpshoR6fveaPBjm3LC0S0nQh3CFOPOGcmsR5Soj3NPSY6K77iHicZAZObD4WAHlVCXyGS/jeaeSYsr0euAi37BqLzE2GUux3I+zubv1I5tFqagGv0kHSqDaXkFjK5UvKWu00CO3js4nDtv2v4wDKoXEsCW57XxmeY6sSBilXsmbpYBxumBpfK+lJrkjVhRHeeG48Vi+SJIBsZUPyZuzFCdyhzImbpgs8yZurMCknbYMhJYXrVA6sXHFkuWl8AsnCXAMjniacHegpQ+BHMUMA0q07cWq6lfKxpVVZXJY9KIUt5wyw8Zz7jiDnpdmZZpNcoiKl1CqGeSrZhYwum9iMqrPN3CSuYQqTXm2IQAWIrraqWMGw3HlTLnXF3XANB1ncZl6bweQhgX8jPHxoRY259KR2WRVxeZ1harW7J2rK6Q+cxSfniqI4sxMNxVSsf5kq0pnOrXwpyNMVMOkGlFzsMKEpcmNrYyYue7TIoXe19wZE1HGTakOGCfSqkPR5TGYklxCS3HNmXWuBXPHPJiuUTmd65k1bTo/QEi0jRNmunattUTawCg0Z6q1pqmiZoBP8a+75vG1XXtyAPAdrvt+163Tfq+3+12q9XKew9R1Jbtuu5wOGjqyxBDjDEyO+e6rqvr+uHjR33fv379CpBEjtLB0z6B914kXzbFaQEIjDiU2EATgJbwlin/NNHYFhaNijSPpHZsU+Ukpb3bGW2migHH2E1CYQImEYkckWC/37VddXnRvLm9ObTyp3/6xauXN2mhfAKS1HxIR2lKvZT06qQWxgOBMNlyHI1SwjHvtIgo6VXDZ5DzdK81T2e+LIvqr4mIJVFgunchfZKZWElgebrnM1mVQRj4eAm2No2INA0hzQs2sU0ygSw8IkLgyI3b1AFlvEiDEb0HFpABJELdxL516zUNw5dfvf7f/m/+iz/6V38aBte2MIS+6+LVEFertutaRPj4448vLy/3+4NzbrNaD9z3fd+2bdM0BKjeoOK/Ipdu4U786WjM2UtOLXnNUIre+/vdLSI2TTUMAxE5B33fV1V16PaRg/e+79s+dN/9/t/7D/7D/97Dp49BAgDUq0YnQELPHIEowvGmFiDSsFvdcEUAkOmaTwkAQDo/wlHrirEcSn6T450TypwgwgCoO4cmpy6pc+jLiSe9sQKc5nXhYwhTIqRO/DJ34ZKpZyXcqonEyqmCnIorM/o0walvyqQOqZ0Tk1/O4scflupPCIVs4ibKV6xFdMKbZXMGo7ky1k/v5/SbIUGMPWERmCHH4jlTQ9Ygs+8tcS0Ap1Z2Mz2y2NqZcaVfbdeWoe3QbP3kP2edZpCUb+yfODdZ0kjnennGeNmbcjhZg7adkvcyopQKGuaoK9tBRADMcIXG1rRdLCI/A35eZ5kZEPOzl/bzTJBL9niLfs/VXHxPgCiaGUtQgAAFNFhlvNRbj2eIiFab1QfQy6H1YeopDTZ3WjJIMn6wEnSePy1RUtKqt8SY/bZss3xO2+wyWYpg1MiIwxNhvbappFQty4m5TmNRAJN/TmbL16osMb6BlsxdT36pzC8rSsmxNBYOxsP9uh6/vOJwCp+nVMfX0uItS4btX1WzvxgY6c9CtAtXBwDm6x1v1cUce0l3Zb2kBzLhM2wiCzLAjox3cg9vwe/KxrsoxW9fMl36Nsrta1tLpdQztpFycmnbNt1Ljmn9BdFVFUy2lq7dCAEirtfrMYtM34cQNGViXderpkGzNuo9hRD6GELou/7AzFXVrNfNer1umqaum8icMp1Y+YXJNMooiIb/EwKtGjmFkDT8M7ynCmN6zj81jWini9WOHVmQjtyiV6wLgKAIgqBmhFmvm9WqBo7D0AH4PvCh7UNg8YuR8A4A9XRSRkptkkH0Xz0Jx3ZuBQISAUDC6TqAaRFtQnUyd8evph37setjyOsRNeNBrxGLeu88IIBwOKUKsgdlm4qOK4kiouG1swUdtVSmoquUIqj/JQKFYO1MnCwQaOrm0O5QYLvZVA7u73d/9Vd//Ud/9EcSuydPH66uLyH2IEBYQR9evnj5f/w//Jd/9If/5s3tnnDd9YLTmdvD4cAcXr+uNpuVc9g06+vr69Z3XTjUdaWrLSigzl5d1xB5hlsWQEFEhF5X8IlIfIzi9TBe1x9C6J1zzNT3bYiDxuLtD7v7+9vNZlPXdR+6H//GD//R/+h/+IMf/xh0Zw954lO9EpABLB4opXuRLGvokZQ5Qxc8trgTQKajMbsMAIzhppgyscNx9pW5E5UZ4pnKRuNDowndkXkcXTY3WM6z0pizlB3/PMo5M7/EOF2WfW0FC4nVTVA4S1m/U80FnwfMRfbZV8xcOswWmWwO+JUaM7Wj7JW6tvKf/s1mLDtkq+/yKRYRzUpeNrpTpZwPMsMuw5ud+O17W9+68SV7LA5/kVezaiVyDDVntrVdVswqZ8Mv35Rd2/YTMGxSyJRNJXkph2wbnwY7c/vtbpLFD5nAjLLHE4TODxvINNPEaQEoI3GSJkjppsxeRKGkjoNNWJKzc78F1VYjhOmuLIVBTRBGEkQNQEVkHBeyRciRPhikCZokOqbxcUqFJXJbyO3Lkqz6MltQSx+6+dk/NBHji0hY3PE4pbVgQnIZEHUqtNUiIVHWMlWBpVnvGSTZjhabcIYMRZZbUnc8pa9YxLbMTx8syukZnFj4T0nHL1+wUObw1v7VW7a/+H5RdZz589SokyaBguipQim8i7pr8XMs5rJMcLJmF4FcHLVV7LZT211ZThGmxPPI0gVlrQ5c/PA85IvQwtxb5umAhj7r3M3MBODrerp2iJKixungj/e+bdu2O6Q1FJJjOtD1es3cDENHQx8CtW0rEK+urh49egCERNR1nfPemkAJPE0qAwUd9doPq+rBCEUpFyW6zhNdipkdCgHPOloUjaSfs2k0nYGECCIAETwgoqu9X9U+xKHtuqq64Njf3+/6IEyz2KK5mJC6phYMAePLyTFOJI7XEdHcs5UpdeTRgs3GJXMTF5YYPkNyiS4s5uts4SajwqiBpwQwknZ6p41B0/7MckPjOIAR+YQ9nQKE42Hf+Yu68vXz5y//5b/8f//pn/zx03cefvj++0/fefztb3/78sEV1CtPd5/89ed9z5XfOFpVVdWsL7tuiDFWlRORN7evBWKM8b33Pmialbsg2Y87fiLAYYzdA6HtdsscYoz6UmCKniWNjAsa9hLiMAxd4BhC37Z7RBT
g3W7X961uhAYeLi4u9u3u0N//+//+v/cH//l/9q1vfxtkSDGZc6K4yU8zTpru3QGP8jWPbUfgKUphYSvrFyymKZ8oZAkjonEFx5J4wuPMLk8NMTPLbGeDpwPQMjlFdlbIHJKvVf22TgpKBADVdFIE/h23NOdzW6a4F1WSXb0rQ0G+Fvs4N4ASGKXUlWO3+o6mg2qpghX1UkGUFez7RK9SdZaAlTo0G0s23kVNZPWO1QhQmAWlpkvQWkjOtH/E/FwfnVeL9mU2ivMLIuVL20iCk5mnELiTtnWJXpgf8TIYmxnKaNYXkoTiNMmVPGB7LPBGNhQw4VaYhWcnRTNy2O7SG9vFKWIdue6EMFn8z2A26sgCnLlAlqyJ8Wz9RSlW5CcIreJaZDYwbJm9X3yJJucNzJ298yG+Gf+cUQspFDNjFdWTaE6YpAZPyX7GhKn9MsorM6rQzPopRwIU1MQpatp2p6FWaCxIi2eLvfGnX9QnzMa4iPZfuJxRO39LJeOHRbbJ6qfKcFpObeWsju0lEcUCU6oaMYZEqpnIbSVUykZPg2SV0lvK41uWt6Gd1cNY2BupTjndlI0ktteXVpQsctLpfRSBKT21c0eHUM8veO814WHVeg067bpuaHs9IuWc5h4cCzOvVqv1pmmaxnv/7MVXmpAGRi9lLEktxxh1BBkjQcEeMBd8W2Hxc4uxkmo24gAMnywu0JcYTp9YLWSrCWmafhZBGS84IL2gzjkUiSHEqua+C4dDR+QBjpsfp7q2I4KjKwjJ95tGSmmKSyPVe4xhOuSi7aTzKXYI+pBvHk79ZguLRzEsiGXpgnNzIhElgogIxKP8Io7JVUqDOTGY/XNRSGHMFeIAYtd1tYfLJ1ff+vDjX/+133j04OLF8y//9R//6RC66+vri4uLd9555/b29vWre+Cq8lXlt843F9ePhDEKE0E/tG3b3t7eaqfvv//h9eV1lCGEoHuVoQ9934MQjWcXATjGGFn06hcgcABhGAZdVQkh6L0sfd+SxxBC5BBCPwyDQHTBEZFfVbt29+Dh9e/93u/943/8nzx55512f8sSNpvNnB2mOXT8k7M3p5jJHI3WDDEz8cyamv4sy3IYIAD4RamTJVNeq1HlWY7nVVLQnXMOph1Cu1Jl1cQpIGDOheN4C41ZTjC4dHh9xqNF1xaeRZDS+/MAlxMbFPolmyMt/Olfu0tm31ucp2ecm3r2q2w46cPM+Es/pSOdJZzlwDPNbvuyAJ8ae8ZCi0XmRkyqb3daLBozAFIjVOyNyHznoexU5q51qUBtwaUdGyjOqBzlhWYYQ6M0y4EgIsvx+hZZMs5gTo6pl9wDPEXHssGyZmrKWgzz95iSPFle1U8WYS4xBgU53r6UGMjyYGW6IvvzFGIXf5K5ZbOoqcoAh0X9I3JMwrdYc3Gki9Sxz1ayeMofkEF4ap4ucWIZtdQMizLO8/xGdkT2GLD9NZ3hsb49IiaHMBsXTt5jioPVFUAkc4C+QF6Jq0X0nuGH/w4VnM+2v/yIMpYAwxtJcy5aLmf0/K+qZBrPqqZFSE6BdCokVQpDeWzkGyL11Dz19h/CFB2dTvKnI9Ds3P39fVVVTdNUFTqHzCx0TIWtFpoGyLVtG0LQUL1pGVHnVgYE7/1ms7m82jLzs2fPPvnkk9vb2/V6HeZBNCWK0p8l+9kK2QKrNZ8WsXFKWk+h65TaP0n3qdgeRUR/ESDQEwZAAEQwRstXVQUohD6EMPSR0BMd73m2YBP642EwMywQAUCdBhKoIqInCcuTfhal5RRjHzJRXcTk2wtmyk8DcyGSJfuB9LqM+ZSXiV7WtV1VtPrEke/71hOu11siGPr47jvv/cP/+B8+fHR58+rly1cvfvazT7788svPP3v21ZcvP//888N+uNg+XG+ummYzBCBXry82zbpp230/1E1T6VnZr776qmnWl9sLXaDUOwanxQUCgMPhICIyHqYNKFBVLnovfAghtO3hcDh0YRCJw9ANoeOO0WGMQwiBHDhXa9BuH/rvfPSd/+n/7B//R//Rf7+u6667r9cOwIn6b7q+AEeHj4zzxgggNF4lf7z5JEN4QmN6r7Jsq8Wx7byc9AMBRg/T2/k4UVFkdkgUp3UC/Rd5nODT/RtjUpmpshW2kgWzQEHLOnB6CS17maTFrgxZ+Eu5ypRFaqdsPL3HJYfT8rdt08Jml70T9spPLBIyMFKICM6jc60jl7WwiPBTc2RG7vTVqeymMLcRF/G22FeJLvt5Ru403vRhpjigwHnWI36dTizBLuGx3Ls45BL/GXKs+l5UiBYh6Sci0ozB5VeJRNl4s8PxFv4MjPJbO0Y95K1vjkGMOFv+tAxpk8GIyVmSLlEs0YXfxMBKjk0GtpvfdG8Jl0lrSkhgmeQM9+p7NvX182xJpdRLixKXNXsk8aQNLKXwtOFS6p+sU9u4GH1oRyrzEHQwLLooBWmkpcDavWLbdSbI6ds4v2G5hDPDg0WImHVlNYJnqQvewrgpEVtyVAZbSd9foPxKGvmmPZ4f6eKf9vPsISsZ8xy7S2bLnJEW+8Xi5tKEeSutJXinhmy7sB2Vn5+kyOl+ZsM0Lxdb+1oElo2f6itTazCdIdSkMqqrNT7Ne9+3LZHGdzhmQMSIjIhVVYUQNGUgy2j+6qmedK4YQOq6bpoNVVRV1TB0d7e7ttu/fv369vZW/U8NRrX6pFTsM7RI/lOm9jMN/DaEtli1mtO2n5rNCFGq7nIKyKEF7UAAiMAjoHprY+/kmLnvuAtDjM7XU3DEsTGCtNmXmfOIfOJIQsrRor8mvWeTJqbd0UxfWeYpNwCSHk5/pgIAUpwZtgg8M9lZiVbjP8PtVBMtaSwdF1tm0aQ7IAjgKEb55JOf/df/9f/9t37rx48fPfjRj3/zRz/8cbvfv3r16vPPP/9n/+yfvXq1W69ktb7YrK9YsB8jb2G1WtWN327XXdfd3Nzc3b/5/PNPgeWDj97Xrruu011BBI66vBjiMHRt2w59B8B6ttC7cMRBDFGma0UQEIWIqlozaw7e+4ury49/7Tu///u//x/+3u8ByN3+9mKzQnBDaL33KMoODOMNEA4AUDC5ciQAQpPLjTgqJ7OSC0ApZFRjcUFGFtN9BPnavcF5wbyO5ynrEUyMqD+kQ/xKRRsqACMPzALWLV3R+DCZI2TrZNsLOF/GXvwk6cfySBuc1hHpW0QkWojHg2I6Md0ta65FUUGz8p26WNRWFgkyN5iySdGqPyxM5PTJooAlZZH1m8aY6fey2vmZL8GTaSiLxgzbltwZfjJFr6U8faQPaccvG10YhlQ54eSUlrR4lrknkOFhERUZQbMP0aTJTjBkDAY5s0EaTQYwM9tsihnMOL8nY5EfLL2y9yLH87SlTC1S0x4sXKR+2ZcURk+J0kVk2nZ0orVucMJPFke6SNZSEjP6ZrRI9S1C4ASHQEG1xWJRbbFxBgmLgypBQsSUaA6M/V2ux+Nk38Bcoksxkfl1r7br8sOSvTPxTO2n+SW9X2ShbNQy6Uye5yY9U7JBlfBbtjz/7duUUm1abPzy5RQ8Z4
TOVvja4Vh9awXQyotlA6uCYM6fi7JjGSNrXyardzacEx5bqZbL7kqxfftyUuQn5Zxx0TdllVNqxDrMFqv39/cq2uNd89O6cMr0LtI7J0REWBwjx5EDq6pCB5qcBhGdc1XlvfdCAgCHw+Hly5ch9n3f63XYIQQ0C7Kn+GGGh/lL++v5l1Dok1MimTUCJ4hbqrLs11JUiUh3WhBRBIUJwZEgQNQjVGBWLoh8ACTMtHF+e1OmXrLn9JWyfTKkz6gmy+dL7S/PrWdUUGJg274uEJd+gWXLY+PzuXVxCPbDMpwqPUeOq9UauO/aw6beelf9xZ//1f/uv/jff+vjDx4+uPr+97//rQ/e/+53v/vBBx88uH70+tWbf/Wv/6mIdN1dDG57cblZXyDiod/VdYNUI0pdexEh2h0Oh5///OeXDzbXVw8vLi6YQaLoLV9D33vv0YGId871IsPQ931/aHfbtV+tVnVdMweRSBEGEsDKexd4YAne1zEOh8Pw+Onjn/zkJ//oD37/448/Zgj90F9utgGG/rBfrWqRCEKTiPB0UBAAKIWEqX4hwXGrUOuOJwlTCQjp5dENHymCPPcJS2J/jZfoJ+ae3TFlSZ4ROMYIDm3Kfki25jzCWx+sLZ44NcUz4OTgwWS+lFdvWTWReCutoKT6di0Zxxj3Ixh4NGGXFc1JBH6dzZpVsD6A9psi2aQw08WkP05vYDLo05asTAWnZBV2PtZSjddZ5oAlxZHoZdGSmgUjwItIyEI3weDZdppatm8sNhLAMD9DZaECMyVY/rGIsniwJEh+kcVeemnnADveLG4ttZ8gP4JUYmc+fDs6IgrhmGQo41578irBjOfWIJa7y97Y0ywWYxmqzwzBPCx4U1rsvU+L0J4v58GAE+43GCzh/OqXMl5RjCeT9Jud2LKO0udJ9FIFK9QlPlO1AnvLw8lQmolVVjLPBwv7IDWVHtK1E3bX1zaSGBKKvEp2XGnBTvGmaMnuCUyqNcHJU7GAZSyEk37O1iItiS1UWt/eNgQzhjxnOGaDKuleovRMC29TMvVyqv1/y8WqPij4P9N12VfZn6XwntL5JcLtcbhsasNih1+bWBxOZmmU3Z0a+Dcq34h8pzCwqMrSdVyndEX2frfbee9jjHVdY1oUixGO18xE59h7791ReMcVHzqqNSe+ql1VVVM6X+77vu3aN2/eMAeczqfpfRXe+yEEC/6c0PnyEOJ43XeGvTQZZWNMw89U3BkkZzq25NtFHigbsWxpi9rWyp4CHidtt25WuhNWVSRSNQ2sVithCtLNRzTOTRr6npFYJI+ImWg9S/plJm5iHqzaTPjnImmZRUuJATuj2TpUYDu1nxBllXOm3MZ/5xn1jxWm1ZNTs21Z1CUm8lVV4Wgbg/f+5cvXn3/62c/++pPtdvvw4cOnT58+evToL//yL/u+X602ken+/r4feHvJl5eXq9WaSMipMqkfPHiw2Vzc3+139/d/+Zd/+e2Pv/vkyRMiAkJkjDHoVSveV01TVd4Twm7HXdeFGCS2RLTZbHR/nsUHCTFGEY5tcOS22zVz4737tV/7td///d//zve+JyIEzlfuvr1brVab9TZyj4hpVzDt6o3PAICgNwIqxkho0oOJaumEIQKAwDEvqIhtrihf5wHm1R9cX06BAceVgExPWYHhqY5SThN/pyWrnFFMLLJaJzYaPrPS9GR0iL3wuIog5jo4mBRQalyms2FWYMDwnxHy45s095RbKACZvps5nGVNiyiaUtOSY47HgM/UFDM7V5n1vHyLbI5wCSFoTAhMqeQRkRzEkFu9CYAM+BIntqYuN5abKojjNR6IYi+jDyFoAGGmhsKQO062lOppUREkUKk4IWkAGxvnKVmF/Twbrx1R5hUo8q3Rn7rL3liMwcSo+eLr1CMV0f8wTfzJPh6DK6aiDC9TePZYwYHyD88Pl9Z1LTJLSTKV3NfVku6itANnZk+NKh2OEIWZWa9uJCI27Aoysj3ZrGhmvNnklADzmdda0hrzOdgWWxOLCU9EvDsix1JZrzYlc0Giio/J1SdivC8rmzLt7RMRh0UHKd9hTutwTOeuG0lNGVCjFEJhv0rsNCqHeZKABD+bgVkE2itJxVwMa7cl03qEbgIk3QXGnbOywCZZqG3Bji7DwJEfXJ3qJ8BwbvrPUZGHpGbkU4Hq+35c8qPluZCKU7Vjs9/QMaACQksyy0W2a/trwkwmpPbX840fgRdBWVCzQMtOCwO4JZcjg8QOwYpGAkNOJz2yNbMuyl8REXjZPbOK0X6FJ9Z6SsgnFjkHVVkW4YS5SGYPCbDsk2whGCZ6Lba/SC8wNvT4yVRLQ74fP354fXWl9ut6ve667vr6OoQgjLrvV1VNVVVVVYlgXdd11Wjkm15bLyIevRCGELruMAyDSNRYOZE4xADAu93uZz/75IsvvuiHlohimOXns0jI6JJ0coa9jK/salRZ375fpHtGha8tp+pnDlX6lZ3G0zKKAEfC6BCc8He+/a3Hj66vL7aVp1Xlt5uVRN7v93/4Jz8NIdzvdvf3LTNUlWOgYQjOOQECIB45cpz9I4Le4w1z+RIRmR/6WuSWRIvMAYM5Q7KJrh/t0vnGTOoX5/ZnaiRNH3aqTVwAxuIFY2WldhbZG+et6NKDTMcZNA1SCCuQ6Ehqz7XjVYUPr9cPrq8uLjaIDsSx+CgOqXF+5V2964aUUPfi4uLy8tJXlNZNEFEQyWEIYbfb3d/fH+7vLi4unj59+vjx08r5+/v7u7u7YRjqxldVhRx1EfXV6xcvX74EkLoSItqsthcXVyKyu73ruk7ttOfPv9xery8u13e7m29/58M/+M/+4O//O/+A1R4b4zmRxuuuFkJkR8sN3FRf0w9aD3B6luMkF3MHr7x+cM7nS4GjuKx7gAQ8zUO/ZrrbPNiZ3gZ1pHJqpXny+I9wW9ZMq852dpSp2jik41pFzmcJKlzyIqycJ0a0K5SZ2kpIAKN6Tk2EMjeAFtWTxWou/JP4LW5epZKJvZisrdm4rK5MP51RoMnyy34yyVfyewgQZ/i3UFnU2ecMD+lfmmLVrAZBYyRl+AFDTf31lKNeloQfS6nUlD1bZY1gzcqY/kwgZTskdqS2lwxLFo1gqGbxMyHhmHjawszMdkHEsn2pfGUpSnBqJyAikEMSYgECRFKFo37i+AUAAFOxd5pKliU49cLzhHiWZCVpShZKL9MKOsx5Kc07GXXSbpjdjwUTXGoZCQwdbacl+WwX2auymi22HYuQGPOd9lSnRALMaWfhsUtOJTstNp6NRUyypUygys9P6ZnsfakfkqGQ8M9noz2tBrDLHxk+KUXafzP/7ldZFlm6rHNmsP//LSWfwDy6HqcZ/1fVo8hJcllCvw1iYY7/RTHJROZ8a6cEP6tQMvmiOP/Ki85HzKMhnjTA/f09MyO4erp/omma1Wq1Xm+dc4TjbK5zfYwxSBBCzbAPkyh59CKIjpjDcdSi4QDV4ujQxE1YDGTYODVDnS+lGgTjdsISk5zCv2WqRYbPypHZRRAIAEVCFLy73XlyHtzV5cX66vLJ4
8cXFxdI8tH3fiNGefbs2V/8xV/8/Oef7Q6H0WabW+qaC5aI+r4TnK19mKk8Hw4umbUlZqBAe4mQkgpp2bREl8x3/GgW0DfbKkwDWdwhxKUcHPYr+6wFERPszBwYQgh9GPo+EAk59M55twb0LBhCqKpKJUL5X+VCuV1NAkcEhFVVrVYrAKgI9/v9s2fPvK+fPHrcNM0wDM65V69frFar7aqp67VzuN1uh2HourZrbxFd5QbNk+K9DyEgSdcd3nnnnX1///Lly+/94Nv/6B/9w9/8zd8EcAABgBBQQHDc+CvVnrHNsjXzWcznMV/oN5Mf297S5uEpIUAAX8qemI3+zKqz1dK8nhbmFw0FaytY7WBDPpJmYeYEf/o2+S2Wn8mcQc/MnfS5faPVkhlRmlCZAZ3KWzqEZe+LzwXrL9vKMoVKZlasTEk7rDaxGMiGn3YA7EtLFIuf4tkkF5mPbv5Mi41kjoQYfy/Blg3Zckg5HMuNsmS/2o4yaG1NMmf5LEXQhONaOC22aQqZS1suFvJUOb1HswBha+J07DbDW4lGmYJ/0r1PGUJKhSuT40rzkGxIDiEgRtVRQgiIERTOBD+Ml/tZJJTEWkYjzNlyDlUGZCY7acgyz/Jqq2XLxlZOM74q2TV7g/MtL8WSp+OCi6m/cPBjJFPhvZcw26Ys8NlPi3osq2/VhYXE8kOG6gxRGR5OZfu0n8NZfsuGmYFXrhxnAligaARGjJFhZR8mKdavoizDU45Uy9kDFt+gLKoXOCEvf8dLqZ8tgX7JEb3l56Vrcb6UUlNSZJHN3hK8srWs5t8SoTNapCHov9779WpbV6t+aEX3WIJUVb3dbi8uLlarVVU1uh84RpYKqh+IOO4akejy33zzTafsKDHGtm3bth3FXEhAMn6wwz8VZgVzbXamlNrsa/Gz+Hym/UV+XtTPiMgREJHAjSNgFnAA/Oz5q/v7/etXby636+dX189f3jx5+HC9Xq+vH682zbvvV4GhXl28ubvd7w5t27dtK4KBYxg4hCASxk69gylGRrvWOUgXWN8GYxbgckTl/GLf239TnEg285Ztpk0jREScHeFJX52Rlwzh5bxsmyICBCQUmnbIGAkAAkdk8eDJUeUcuSYGHIA1Usx73zSNRkUxC05HEkafEEkcrlYr5xxyrKqq67ovv/ySQ3z33XcfPnx4c3OD4Nq2hRgQcb1uLi8vnXOvX7867G9FQt/3wzDUda3OJ0sIYdhsrwO0zeryd3/3Jz/5yW83m1Xsd1j7yd9DAgaEaZPr65fVJivtmGD0LQoXc5rAN02FbMoYd5dN8xmBLdlonp/D2iKwxFJp98+2rCWRLWMm/VUtWhtrlAS71DjW+MiagiXnzT6U08miiZaVUgZMOwsCkPbWrcRm7RvJGW2vTLxlab02E8hsCoGJalAoETInf9K/ivb0IZtrzU4uLBR4g+K+oES77EY7i2pbP5W0l5hs96TiS9SJ5N6gVXZ27yh1lDFYBpJ1yy0HZoM9j5P0YU7NJfgtNix1LLosGIhOJ3sdq8E/AIBzJHJcSSEikX5cNkrHGQQkjs84g0EAxpXOUulbKZ6hGo/wn0KUxaE1KayAn5JBlnyH39I6Qy8zO5ffJ1lKutjNKHJZjyICcAQGjITC6ZK1gIXNnUFlO5Uli3Cx8Qx7pQSd+bZEnWIgWz8601T6tlSGx8ZFbIUl3GZfBVnYG1SuAERRtnRuttLxluU8yX6Zcr7lv71+f/myqGEssTIx/wUaf5siS9MWGA7J5P2UHs5iXuzn3whyK6f2IYvQ+dsoM9k39gMRrVarhw8feu8Ph4PugXjvNXx0tVppbhL9/O7uzntP02UGiON0RkKMAJACtUYbgxlCCG3b7na7w6FL19IyH9WRFfNTUpwh5+35J6uTRg1LlswpjC3+lHVdQmgbQaxIdO5zIAwQhYklItBuP7SH4eb17nP/cv1JfXFxsdlssPbr9Xq7uaia+uL62jfrqr71u8PVg4fMHAbu+77v9e47AYCeYzSJN0UkHaESODJ2NiNk0FpCZHNlGfOVMHlq4oMlMVnMESAiGohj2aDEZxLMbKa2nyzOVgDAEgAESRhBsSAIgiRIAhQYJEShUIGA87WrdUGQiKZIaUEc76DC41E1QAEHCOQ2m01d14fD4dWrm5cvXzZNc3V1td1u33vvvdc3L/vD/s2bN33fPHr8YLvdamrfYRj0VK1HEogCkTlcP7jc7+9r737yD377P/i9f+/i4QX3e1f55I9J8u9wPHYASyVZYZj+0fsnpp8J4OtTxeRcbXzCb6i2fengnelPRFJiFst5bFIIQKEFssr6E01HBHFeWESr2EXiyZ9Z8D9T8hgw3ImF4V7OK9knpexl81NWVMLLaUnbO9HauZWqsiM09nF6w9F4aAAA+T5e2Yj9ySJwca7NFMqi3M4RuNht+nVB0YiJTc96KR3XtzFP7XN2DYmFhOd5YhBnetm+XOQKKY5mWdQtaslMsuyfi8PP/szGKLLQCzoSESAEAZAZHwIIwrg8JVN83eSQ56COo+AiuMFk5S1Vv5ZyLGUd++EpXoV5LHExfG0E1ZqZOJYRjwfjVcrg6N7mZ5NKXWepeeaglJVWLBzCU1Jc6oGsqbIkOPGETyijAZeFcyObg3+2craVnXVtfSosFLttZ3Es6fOkqbJTJYS5/Mq0FFiO2vaVQJ0HLI1NGVFdxOICu45o/4bG/C8cknoKY39HyiL7nRHMX779kaC/xOp11tSiiij7PT+oUrL+bpaUdmG9Xl9dXXVdx8x6xbzKWgjc922Msaqq1cppiKmf8v8lMw/N0TmZ4usEZBiGEIJeXEHjVRMatCVp4diy9OKRhFR+YUaaz1+zheDzds4p2i0GIpXfHu0TdFMafxEhEIzCKEAOOTIIC9DQctt2u1ZW626AEELw3l9eXKvz0Pe9CG63WwAQwqqqvR/DbhExgAwsyXochmG/3/d9T0QsM11aDjZTkovjtUch7KScVUvtnwr6Le2ocgK1jaceMxPFHkkr0V6WAJGEIzCIRBlEcAgcWASJ0OuZzDAwS6wa7x1hHLcZUsILdQg1vFNT8oocjy2s1+sYY9Osnzzx+/vdF198cTgc3nnnncePHyPJ3Y3b7e5ubm4A+fLyUpdaACAOoe/byukoGIQ9+d1+9/HHH/z9n/z2ux+9D8AwnhnkwvdbXLXM40JlnGpwejPeTsFjutFyJxCyFjLsTn7pEp+c0HMiU8iozB2PjAOScKLZtbBUF3O2LbO9yjNv+m92L1aSfN2yHwdq0oHYvcRkDdg3dmA493YsYKW1mo0FChZfRp5pJPtTX4jxFcupK/s29WghKQUJj4vlR5nPdEdmbmbaITVr1+CzUaTWbHAX4myH8AjYiQm1HCMUZMrgT6BmZM3g1DnMhjHYBo+Hi+a4TZcvLRJ0EbByFtTpVuZJt9KMVTYLp/dwsiWS1BQzgxyHttjsDKUF/Bnq7ABFhNxor9OUokqd81NRCrYvu16r1wZYySoFwX5+XHjCGcBZUPTi5GG5wrJ6KfsWaYnPoRBSmN+nB5aO
+gYB4LgteHQLFI0AOCWLxgmNFvJM6OwoFj3ecshgxNC2YwmqmwYyPy9qP89IYJtNWDoVclzqH55un18EKaPFkTGWJp9FmBeV8Dxa6YhSMnlfF8vX6u1fsmSSlcoiKf9uljOyBkbofiV9fS05FucLWJLxxW+P1XDZ/D3FLaWcZvAkMOw8Ulb4VSHqlM7X94fD4e7urq5r3VYCgPv7vZ1TEFEzYFVV5V2VssHHOIhgjNGjZ4lg8m8zswCHEPR0yWZzsd1u9/t913WnZh+cYruy+Su9sVy0ODtkQ0stw5LOTL9meuMUz5SNL+rPM91NV8aBgE4KxJFFUPReCk28F3jYD1hj10U+DIc2MHPfDeq0v359Q0XxSNV6xQJgQsaqqprC6PKY7TOzvx1RGmN6sMM8xahznkkLAQtGFxjinqG7iIwqH3LIs8kOpwzh1srSB1ehJhFhYBEmwSDMgpEBPXlfA1UAXhBjEAauiNI1KopVckDTy+l2lmPA2jAMiK6qvHOOQ9ztdrvd7vnz50+fPt1sNo2vnMPXr1++ubkTkdVq1TTrEDgOHEIYhsE5RBAAPrS7H3zvu//xP/wf/M7v/hYQw3AgEhnupFotsSHNF8IweYkiBBBhZDM5u152Zr475ROeEI0zDqENmbOmbWKXjL14ykOQCCBFhE/Gyonh7E+6q2utFhX1EEIyiFVbZfxXPlg1JFP2vFMflvNcZmWmamdkEk5I2qT+FlSbSotFSKklDTaOaQBnNZc2JKEQ8lTYZMVcHKalrL4hShdG2xFptQX7G+aqYfHZdpFdkCqmLA4hpZRM5Eh6JNs6SO9tv6k1Ww2KDZYEswUmI5D9ybJcGgjOrXyZB4viuV0OSZVZj/BNzuEcP0ubHkKQeGOk0VRHQBhxdt2toKfjIGg010WECY1PSJzsKoGEGShKwrlFYEJvcoZnx8aOvtWCyVUiDeZcVJIj6QqrsmDC4ZnkQ5YWmUBlFaRI7XhkMMoJuji69JLIZscdX+uzjMG9c4t8rHqsrG8IkZBI05jBaE8oMxzvGhYRZiTSnHkiwCwACIiA+mY8kzyDEQDG49wz6QYQRG1BYZv4fxwLA+hyHky8CiAEJhPVKTqWf2ZaOpNEmCLoytZOlWODf5uO4iLR4bT4/B0p2WSX/rV69dTQ3qa8zdhtpJLlAQsknrArZpVPqJfzpZzuS81jkZN9+/Yd/QJFe9TEoXd3u6++eh7j6JLVdb3f76uq2mw2eluaSkail11SV5MR5zc7MnMIAVBijH0XkMR7X9cr/TyF3mWTPpqjJdmMJsbEehu6l3rVUr+MV7K//q3IFB91jYggAiASonMVADAHzcXtAEWkHziE3jnnfDWwCCP6ChC7EEM3aLwiAabU/4g4vH4l5KxpF0KwRvWiPsxKxop20szq6HuaHxS0OKfpdq6MypkdZc0nqxakuD0rjXQR2tQLzovpCMajKkkQyaOrgLygZ/AePVKFSIyEESJJ5R368SR8VVVV7aZI0WS7jvcgIGLXdZtNQ0Rt2zrnLi+vQgi3t7fOue1221TV9fU1otzd3R0OB0T03lfO92bhNYZ+GLrNtvnhj77/27/1G7BeQWyBAJyDoYeZAkkuHOOYTRQAUtZAy/kMgNNPmG0SzuJFT4aPntlC/AbFp+A9q1jF3I+X7dRbumashsZFhCU+SNyQmY9iSoiB0Nvcd4ljMtHI+C9JRVZsZTEBVFkLJdg4uZeLiCvPyGnRa3gyjCUNYzFcTrQWG/YexaNjnDc4K9l4tY4l0GL9DI3H2BKcXbegreLc57EfwpysODcvUp1hGNKCGRQ5J2zLaUqwDqHFPywpMnvhteWBMnAuqcISMzBtQchxNh1B7fs+NWgBtjtO2SgsBtKg0OTfnw0/il4bpfOQqgdUNMoxfg11TQmQTeOn+Nmq7CGOfTkEZEDByJOTPEX3gV5UPFr8RyJmoaFpvJbBLAwiAvP7XcrPM1zZZhO9ZgJrurN4S2sfiV3tupXtTj9Jh2QsbJksLBIxVR/xvAR/pknsZxkTZh2dKiVg9v7VdG2JzJPxZF1bdJVUSz8ltVBqKimC/GGOYSmiqReR8DYlY7DUi5gwJCJSB/U8xmbNfkNA3j5ktOSx/06UjP1KGS9171s2+42QUMrF23+1yCq/DAlkyavJ0JJ1/SssJ1BHzPFwONze3q5Wq/FWNObVaqXOXowiEhHTYhCmq4NSBB0AIKNMd6+rqlRNEmPsug4AWEKMkRmIfF3XXXfI9OTiqJM8pmtvYG7PnBmsPliNJCapnpw4ilby7ddiNfsKl/aNmeMc2nGa7scsr8zM3nuqPCJKCIRU1TUg931P5Ju6jjEednvnPDiYtlhQos5KIlOEl2I+5ahDRO02467MJS7VqeXPbALKRpo0fPZTht7FOULmlnPqUaal+RKTU2u5PkmskkyU1LJzLvKAHAEAQViYwSMSeV/VaxGMLEjgyBFVCCSAMY7ZeiIIoWaXqdN4pzvG0CE55x1S0zS6x9A0DQHt93sA2G4vdrsdIsKqrqrqyZN3EPHN7euu6zbreroeLDIHEAihH0JX++awu/vzP//TJ08fXD+42D68AAB0hAKj/z8OmkcTCgRmVySNbiHypGoUpQiLPuExaS2k9vNYaN1pzKmwdC3TKZ2FAHh9dZFZ55YbYHISjrPd/JSXrZb9uagx08u082M5AxFZAsfZtZh6cjqEkG4Dy1oeLxBj1tOfON1VNUfW+O96vU58bDleL0WxOxt2f7LUiWl7KhsdS89xhAomu20SmIVr7lJI7Vwmo5iSRAhJYjiKPRp7y5IsAaPFEiXVTyHXGaH1EHmMUabAEq1ZVdUQOrsPo3OMDtai1GqWpaHlSw8W7MzBk8l211W0xCoiotHhqX7Wjn1OrS3y+eIWNxj1ahu37adxWZgX+7V8mMqimwQAjoBoPNmrA4ek0Hk00y3t0HlldUWIdYcSP6NZuInkAABJUACVzWKQydXkcdBuWsoCgePFuBZOMmfzLN6c2QbXjvX/SR4ZcgY46pY5n1iqKY1ijHXlLHrTv7rQkBaS0jwqZqfdLiL0fQ9LWisj3PgsR496kvoR1VFCeaUqmpB4i7REytSyVTi2fkJFHALMi0z6yqzUHN9PQTILJJuNyJSkM2HKAXYmc0ZmoKSi400lYdKGFlsAMvDSeMsdiSTmdpjpoRtCtqaZfrXslIo74eGdMijFLc9rMGezcl1J5orujMGaiQPMhSJvNnI2LhGRpW1YEWGAujiaAYUpbAEoQyJnHU14SETJ+CTTbxnVRETMGXgrfSV9sz/tkC38mbqQechoKQVZOzj5RTCnY3YtjYUzEdSqprSAbqshLty7eEowtST1qHVwjDY6arZ1s3r06MmjR4/quh6GoWma9XqtKRb1buEYY+VrnZ3TFW2adFRENs2mC4NODXoboVL80O6HEA6Hw263e3P7+tmzZ3d3d1VVZYFqp8B++3LkBBGR2V3H9teMf1LROa68TNheg1SKUirnkW/rmz+PhtzYsgXMeY8zVQAizAGmhSrNlGnaj+C8tTogmV6To57
JxaLc2W8TQ8rcrkvYQBPqmZg8VUv6E4y9l7LTp0kT5nFV6cGu15ciY/WirWCztafKIsIQEVGAUYAICOXBgwfvvvvu1faKyANVCBWCR3TkKkRXV1jX9WazWa/XVVU55ypzbpaIUECHqBAOMWjvwzDEYTx85JwLYRCRyuF2u95sNszhzZs3t3c3DtER3N7exqFbreowtCLxnScPP/zo3SHsnZfr64vrx5cPHl48efL40ZPHD5+8XzUNIEnomNl5P5pR4EFijGpYeSLSPcMwsQahJyQAYuULRCJPQMo9Sv4ZckeslfsZszdsdqGmS+0FBfRsv82RDnoPof04UQtPn71JFmpG/lMPNHcg7fvE0LZCMnKSACtdnXNqhCXZkMkwgiVlnY0LJuvw1FkyuzJqB1gqpvRtajZ9MioyOf652BcW+2y2gohAEZw5vp+vBhVfHTtNWM1ENHUtpimLfOWnbPEGp5jvDB4RIXIZQmCujGz7aGbcpObSukDCp1WCFr0W4Vbx8VRkurA+w0ypqtJPpwyjrLsMw4vcXuKnlKOSEyA7Z2gmD5qunBopAmg18tSCjCtQ44ecUI6au3m8i5uJEAC7ISKiHsUmAQRBdKhaeAzmRwEUwnRHcgatFrtkMxs1J6QhgK5G63+THhh/gunfKEvmb4bhRaLYf+3UYkkAhsr6Z5oDYM6o5ee2BViiHSISzs6ypkYSG58amlVKqVrWUSnm6cOMGxeVSam9z7zMRvq1K+5ZsXfJlpBnegmW5EhLZiDCpIKSvWWxjYhDnJ1pLNGVj3TZz/r6ncCSjud6eWu7GWUmDOMELScXcU+1e4rN3p4HzrdW/mQJ/Y0+P6WN3wawrCbOJzLt4Bu1tqi03xKG7MNUfoHPFwvimLQezOkyxDEiTjWYGsGQBiJI6IiormvrEOp1bTKZTESkZ6KSF0HoiEbF6Kgi9BNuf2XDKfFjjUNcsmekMJOsPWZZcVF7TxibrelkiigrNJrvVpUxAI2RMiiIqvHHxhlQiI77PKiTtWdgFIYRfc4Ax7J0cp6Zs4vpM/vnDMxg2PgUDkuLyNonVh7ZhC/BHNU43xxKEGJxv7eBJ8+VYPFvJ5rxve5osfoqTidVEB8FRcgJ6SFh8q7ylff1qnFVVaVLOIkICNPqTIxRHUKZ7MwoHGMMIQxD4BABQENJiVzfd3EY97fr2m82GyLq2l3fHXR0IQze+7//O7/zn/yP/9F6Xb189ezLrz59/vzLl8+ef/nVp6tVs91uV+urR48evffBB++8+2S93QIKsAAhcA9UOe9gTB+DIMIxer8BiMwMEgUAEQiJxmV1jiPTjjY5AeUXzS9EkOaxo2i8welBwHiDthyXk0tNXVqfZU075WdNo3H8ynnadjTTFDj7MDENIorksxEYi0omu5mXAtus4GW92xYyG4VO7JLDZFamP1P7izOdHfsZCU/jFeGk+DKFCCdMhQR5UX8ZjDTYYg6bLSyl1mKMGq5ne2Fm7/KL3e2Kke3r/IpyiYTkKMJcH+mv2Y6EHbhVjhkDLGJ7sULJnJb5M6LbUUNBbvs+oXTOMEdUcxxDUxL7pYBAlqUAYJZxMRJFhGVaUNHbeAxGRDfrPDkAIBAERiIUIGA9JR+QIapFxQCOQUTEmVFYZJY7P1M1nlF5yZwQMyFlbzLkZ8JCRJp0SsY349eISOj1bJzousl0jpfcce5JVBORFERgtw0tZa2kc5w57aJnS0aL7TguMqd3Tol8qpCtlGXMfKwGMzWbSqblMuYvhHqGfPtQSg0UZLUlI02qaVfQv7aRsqlUWALMQeLxfCtPdQlwOt+Mx2+/tru3hOpXWCyfL8wKU6Vv1GYmMosVZu9P8OHXtl92BwV9S/Ev62QNZob7W5IjMZuV5cXZhMw9aW/TziLn/AJMsviJyC+QpJYBjneFI2JaSkNEBJc0FaEDQkceBDmKEKTNRURUbzDF0RzXy3gWRJC0HxE58Ml7dM4hOGEUmBmBb8NCiyVTF+MQpmSBUOilkrL2wc6YOhAqzqBqsWbkKZ08g5Ns7yLjDQCjcsPp+q3xCQAikNhlCNJjQwAgSAAao35cXSOcnfKwA+TJdyrNzjMAp1/L0Z1CKRf5NexzijAqlblFvp2t0Oz5JyVghSvrTudfC78xGASmW5RFhIVikL4fNhtCcESeyCM5vXiwaRpfUVVVvqqKCRpFBFj02oK0YRA5DsPQ98MwDMCigVQA0Pe9966qVjGGV69e6d30FxcXm5W7ueH7+1sBjoE5Rk3sdHnx+ONvffiDH3zM3L+6efnFF599+dXnt7dvnt+++PLzL/78T//s4mJzcbV99OjRhx998N7770dmVxMg8BBZZdDV5B3EASaHbzLtozAiOQBwKAAkiDgtqS/RftEnhOnM8NEbPO4QZieJTfHlXJUmMFqKDFZ/OpsnmI8OTDlzpJelkFuWVTZ1jgSOLJWW9pNmhKWFChvIhEVoeKppp2f7ucyvrwDDuOVLq4aksP/SJpv9dRGSsn3bbFZ/es0I7sw0lmlJC9ji/F02YtauZnhgZsDcv0rQZqtKKn4JGBvQlYWcIaKNTyv5LZsALIlLlZS1UPI2FEyY4bnEA8yJgsaTSZox/bkIeQaPhWpRy6cZLrnQydq2c9vYkETd2kMBvacSFR4BwiM5AHS3DmuqQBhAkBGFSVhD0kX1JqEgAjnWVbUoyTixQ4bizG2CHJekpuTzxVEvMuQicqzQ4bQzb79N3TmXr9poSVnOYM7MLEdHWgBYJDLPcsqgLvEJoAb7L4QrW7SUg5L5fnhZZ6Z8TuDNKsbFrs8I+6KGyUTv1Ip71lcO7ZI/sCgIabWxFIrFTq0BYXmg1OS2l1MNfqNSku88qN+ocQt8+fJM/VN9Wd2ibP21bb5NXyW0mVxbTZhpS/s5wgKTQMGfixr11DBtI7ZOyeRlKXH4TSlYfvU2qD5VB4/Hh1I1RL2mRQgJRSQGDkP0LtqpIS0w8XR6CqeDOTDNswDg8RgUhiYAatLqzrkxPaOldaas4BfFkl0ys7FCmXIolSHM1VoC+JTWhYLHLIecJ9BMPZKAkKSF6UnpA7DorMCom2DjsiyxZHMigjXltYUEDJmTFzAXjRInZyyH1JTWsbjVX1NkXDbYUnmmf7N5bXo5Rv0wS4zjLfB6fGocHyQgteuZvS2TrZ428WDOAMIoEoFFhAEpxnhoh92+3WwjEns6HkLRbKJuKuQcgYaJYkIIkp6xDySAgEgUOCYZoQkkPZvjvSOiMPBut9MjtX3fP7har9fr7Xa7u3sTIw9D+4d/+P/99Oc//fCjd997/+l3vvPR+x+8d329/fGPfvTj3/ghM//lX/zs7u7uzZvXt7c3X3762WeffvpXf/kXFxcX73/4weXF1ePHj68ePPHNGoAgRgASPTCJDkSmuR6RCEBAGQwJgRTMGAX9zJGeqLiYaWZ0C603OD0sswHqPYQlo5ya3bMK540ey2eWt9JP6XxdIboLmj2TBOt7LHHtbGoRsz1VsuAMI/NljBIzmZRmIqqCVyqbsruyQSlsu4XpDc3zW8/xClgSg/
QnnPAJp0+W28lgTgcwUgXlqK/N7mgbSZBYxW27KyekYRgsKXF+LuUUcjK8LdJFzDpZRtxTLUthsELBOelbGwKXiH6sLKIcDfMpU0T0DGHOkJTiQpP+hUknAhEaYBiRqO8AAIUBGYUJZFQcjioicF6IWLCLwiy6MmqBTyRQhZ655acQXiJNltw/+3I2SZhqpaNSyqwl1qnK9tSE1h8XEQ2NzCelGhT7UzmQUmVlKtH+ZPkhV2Uw+9MORIz9bTvN2O+UTsuqWdE4o6+ykjW1OF9g4RWkCmUvdmW6VAUlwCp35ULSKbk+5WCfen8KjW+Pn1M69ldbLDOkB4W1FIcz8LzluLKmsq9OMeHbAHAew1JYJqcAPo/z8xT8ZeiVgfeLlRKrCKNcxMB933ddl3yJtK2XpmNmTmfqNDr0KEQIyWMkIv3KFiLS9oicXbXJ/v2mKLJbkXaMtuDS4lGqnCbKxM926ikjJkqetD+dDIkXEuZJVYwYy0HiUS0LAMt4VYDu75Ect3NPtH+EJ4E01Z9BWCKn/DNho2hqxkJnNAAVORTgLIlTtcRs2eEv+6GI2GuDsoFnlc2fhKjzsmOOHGQYYgjBuaquEAkIEIU5xIBdVW+cc7qn7VAPQApOx3qng/EjopxzEEBPF3rvNdBIU/sQ0eFw2Id7gUiE3vu2bff7fei3ekw3Dt1hP3iitm0/++zuiy9/vlpXF5v1xfXm3Xeffuc7H3/nux8/fvz4N37n70MI7f2bFy+fvXz5/Pb29s3dm/s3t3/88iUiXV5evvveRw8fPN5uL6+urq+ur7FZm8EzxF4YGcTVKwDhyIisOR2YWYRpfsrv+Oni229ejhfTZ8aE9eDtezahQZbGp5jJntlbNEFSwcnBU2dX+YZmVwvMogKkNKaXmNh2mmBLrlH2awZbOctmoy45W0QAji1rHdvseX06/ZqP5ZRwnlIE9qWN1UwaxGYxte/dMdx94XCdVcojJqcsUqk7S7gMY2Icp8QS6RgDGLJmqMvwnGE+/ZlNCSW67FhOoTQbcvZvFhqX6p/a24QlXrJdZzH0ZOCyzDmuEM/ZCRE5jklHRjdlykY7fqvT8BRjKcxOM4gQeEKiMSsWgKAjdN5VdQBsQ2RmxohOeB6iLHODrBQZNFMaItpbB2AyUk+Rxpa055wrlkL1yeTX2UYSouzZVysFVjxnZzgN5xjZyXc/NDI29ZupFzBIKLCEIqB3OqQ/YQoImUhn5HqWRfPYjp7dnY9LtPP0eurRAqAvFcLjiHVEAKChUCqFsFQyQsm42j3G86dxpZqK2lKQrYxn/GA1c1kyAkWehanDnJdKTvvFiu3xFGC2lFrrTMvlDHV++DDXDFYebe8A02ZG8eF5eMpqVi0vAg8nqFYSXU5khX37skjfrOtMJM+3swjzL1xycnzD1qaBsFo7AoCouD1Gi+jt597XiEgkALomSCJo13z/f9T96bMsuY4fCP4A0D0izna33F7mW0pqqdXqaZtv8/9/7xkb2bSmzCR1q/Sq3lJvycy7nSXCnQTmA90ZcNI97rmZryQb2rVzPdy5gCAAgiQIYMll02LMgJlf8oIwX6nymJyOXObYXb5f+JzdEJ+KEWDpo1/CtXjzOX09xRgNbqSo2WAttV3Y2N1IeUVXhGFuy4rspUo3M5sD2Wf4FrMVABh7UWo6DWWBpMxf3qlM2/GWzQsS/Cdbxk8urXh9/nJqiQfLu/Fwo1PtwrepVX7y33Jg4EWZmYU8HzGBAxODOYSw6/Yh9F3YZfdIPF2jpTK5TJXTeQJNKUl2nQeQTpHpiSh7pZ7Oew0pWooWUzSzcRwtRmLr+xzTPqhqDsjJjLztYukkIgQzjHEYP8Tx/uHDD3/9/h/+4R9ubq4Oh8O/+uW//va7b37zm9988fWXv/z1bwC9//Du7du3T6fj999///33P/zjf/vtf9Pf9t3+1Zs3X7z56vrmcDgcXrx4cX17i/6AEAgQM0BAic2SKZExB5EgQurMQZfDthWOIhs0nYMc5iuMlVpVUmDnXr+aVDwBtXN2GeYy0tVU7WfoLVoklwpj2Hw9FfP+/byK8MfrEwytaR8asVK1WGW+wCRlKdVW6DtbybXJgc9yfqoyewSurk4rRppQulQgKtnn618VH6vTpMcDLeOIVPDnQmWrb36/EGFl/e9tIXyGCi0lqXO32AK/2muPmfLXn0xukYFvpcpQiemtVNBYsvkwBq00x1KStu2eH2B5EvKVUI775JilgGqNFztaGnuT2xEws+uemCkIhcCBQAxoMlYA1PXE4ZTSMIxIA5kF8LA8HKOlClhR/tSDxc917HnSbTmlkE07cNXCu2RoJ7zp5xwDyp8E+lbqoV8SQMtNcxFr3tTaRvUTE18s7i5W+oovmF/mjee2v618qGD2tbVvtn4WPt3cQV/WWfq+uvFPRNjYu3S0vACm6KB+XLB2Yp8f2IWcvcCz5VO6uNBaeb9V4zPSqrBqMn1enbScJvz79uVPTlvIXOP3lbQ1g7QQLkTZcm1ZMV3bRAukzcc1bbvPwc/WfPH8VIENrHtueE6aRhnlT+4v0hSt4Oxm099/zt3NtwT9p2xcx8xI8FM2c7D58IQ1+SIT6S7h+cn48ZZoLQ1USFttopRVF10ZS2lZKqxK+ZG9DDxzMCsqBE+HEDo5CgRgS68eQlMoV1VTIIdxPt8ZXK4GJ3gIfkbOibmKXb7AT/vST16ruCovy6msNSeB1cxVifSW2QuebSa11bEreWip8vk8Ww5K1Ch7v2OAiELoD4fr29vbw+6q7/tuWqkF4VC2LcpdITPjLLpSoibSRt5JKee9RGTO1dzpdMpsAtLsbKnvQ9/3Gp9OpycAjJQtS1McYzz2HSlZCJAQUkof3r3/8OFD13X/8J/+4fbu7quvv/jmm6++++4X3/36u2+/+8Wvfv1rMP/P/9aenk5v377/01+//+H7t+9+fPvnP/7FkK5urt+8efPFF1+8evnm5sXd1dUNd52lIxGBWIjB5eZIufv3XAacRmFeE5aX67mJ6vPH1atBPmVnHgXXq5qQq39BEJ4y/JaDLysitiHBL0yBtKbeeYxgKTjanJWbaU/obeVouPRcG5+t+SsGqzJPDLB2WN92tkWU7xrWUJ3zF0XKliajVfGq0Qxa29OChzPyqRYTq6NTamidT5SlI625IV5H8mwTT24N2XZqtXg7rKufWpv7knO1fj+P+karu6ltwQpImXxynt0PTj7PkpKYNCSa90oy3szMZqv6otAXuZkJYM8UGCFwEApEhmTZCQuTBElEp6iaxjQOBiIOoMW2ky2XUi1fFNjQpE0x9DmpMnGvsLEkYFPVPEHnr55UPEdguQPqSQJrNAxHDKoLc4OKWQBU6Krkib/h0xLhBdRtnUh/LpIrvK2+aVMl2WxpUVLqAUC0gsBKuvo6i2VBuRKT34zjiLWx4MbioGqiBnt7GbPV09V6Vincd+S/c/Lk95yxe2aFbUHPcW1tq5XbctLxTVwG7zIdengutLtV/ALAPy15qn5O5nV4/PMEnAFTKNq8E2VmWeQXtzHe0Jo5h
0BrGRKPH/+unradbx6ZiIkylzcFCMUR+auF20nlY7X6dyFZDU7EGoqpczM5HOGCaiEEIxzMyuicTNrORJ9sZCRuP/XJ80lNYbGO2E1mYborq23PZS8hvX6a4XnULOudyXYykXqwggEJUE0L5GGWVyJiYWYy0atSxteb2yHJmMoExQc1Noudq8sJHTEqnKGYASSXZlLFnhBAS4kTtDNAEgEi9HimVFCgshwJTcWFUYQSgOkZnf68AxLD5sgYYhihCDY/TnKM8HOgYcZXyO/O44HIZodpw0jzMkuJMmS8nU3C55Ih/JiSHMTswlYFO17NsZaqVSWwCEQbr6VRr6LSGujXx7O/9WoV6fVDpoK99Tfl2Fe+WbxkQnu1u9i0Z2bAUKNWYiNpILze07L1ptqZOkCIIQqDnYZDcFloCZZiGE56en4/FY8p2klHPOJf8sUO4Q0XqtiIHiekXAeoO1GX4nuzuElCdl2c/MsnpyVjO6Egwzl2y8IoHA2VxVhUjYR9L59ev7AT98ev93v3n/4/vjuzEcBzlGHscxhECO3MTRJyIrd5+YQYoSfBdmlaWYwYsjfcGPunkmdTNiVTtf0nlOs5a8F1R8jMrcmJkCgZZowHAyg6rzOl18zeBcXfYrmYGJvOz12s3JyhZv2BAnEeGhubzzcM+qw62WfdzONyn836LsMl0VDmgo7QY5G2Oabhefu720HNoRc/fhdo5atbE1aHbn9AHwXdmVV13Z3bbcgnR9zqt16HC6gkpEDDJ4O4rSQ9FxDCKGEDtfDcFledAIrhrdeh3X4n+Rc27PCVew+2ME7MmQLU5qeYyct5RfSd47GP43PrJ70MVbun4srn8lYMAib2qzVby3huO988MOtu7JhjLX39y77bi7wtzBTg4NREMIYwzx2hiXPU53J1o1KSiwVA0FILsJR82zu0cJ0zS9fPn6OmUDg+IpJbtMg/AglByDmXmYLQvcyRieLSXVnC3nZXXnrOSyelTt76K22n8jge6J/7vlFzDIVul4s7CsEGITSQhvEFn3rK92Tlvh2b1qrcH2/2osteihzRpgp/i1ctuL3wYU9esJ7U2D3pRdCN3YgAwiZyIigZegMjFM01ncnseBEZD10zD83W8+/eNv3j8fDwMBZq7QZd+ECHC1ZLNlZ2YCQgiBeDyOKaWUpiknM4NTNnPN6s7mZiUZXImjwGbQDHeEEInIdEFRUrs4pmwX0+ySgAw4J4DLjZlu6gtydhHaEsYVMxsV3M1pi7Q6m7RXtvK2/GgPcnHDTepLYIsKgFMJl9HIq/p/bacdyHY3wddtkUCu64awwxzwkuR0CTZTNllpuXvBzPX+X/mGQeTm7tmUiVBiyriTO7MLwZbU32wMCK0uziXOcQhMRGTmqmowuA+Wl+64GNkAiJyHGNgIrgxEQhSOgUXkcBEmYgGRS+TjMYyjxBAOgcaIp4GeohwCxoBjDCHIPFMMwhIg0XmYdZzSnLPmNKsTe4CURN7sBLOr+VW5uhVw2HB4u+tfataDRNzKl4750fDkPVLDfd2zu2PUUCawkRHYk3T3BM2uPqukT41pWxmsxVjT8g1M9beqltzrBSFlBRhCKOs6Vb1cLlbdYt0APD8/f/r06fnpaRxHVVXNMUYyn6bpZ/0KdzI3GEBuxMxYsnmuA9/MRYeZXeFeF/bFJigbk/VJbcdsyaJjOstycgERIRhbRpqCnn/z/vjvfvfpNx+eniJGoUPkGERo8XljZinmJxaxW5FDjbxQk1JTREx1AY8JhlzSdTJfsr5cpvOUU0U4CTED9UZwiZ4EYjKzlBI7QViY4+LtusQbbh1klkFBSh5ao+t1l46KHhtG31u+2QVuBGi/H7xbvyu+WQLdVcDfgvObbwtC671T936HooN/19pAg9hd1t4Fpq3cSbl24wYPh7+r2Lbw487E7Y53F/j21RUti1ABbmXjOI7WPKTVrcA1tVKLiHgJn3HdVaUKGGDlxK+sCoyIyvl7cWFYUn5uEbJ90s3jr6H/B+XtVPqgZivG8QYCeHu5Rydbldp+8rjBB82+pWw+fLQJUvR59Rar+v0BP7YnSO3/rZvPTbFy4oISVNoIbg5AQF4CNJgL4TDEIUa4iURmcjUFVJXcqreRCJfzJV8NPjNLSXOaTRNpmuZpni85Y6ZIcTy9XM75NB7C8yEcDJMiqJmDyDIBlqxRAGXb0aleGrd1q6WzYq+8tv7/N1+x97KitUm2mPd1LdQ20n6OhuZbw2a3bKXxPSnXVtg1gboWyg/DDngPim/sVaz8ZWtqgVqnJcKO6e5BSI6S0xslmQkzipw0E0aA+3xize+H8MfP7//p97/54b0wA25gMHORycSiZcs+mbItMUoENHAY+ZLmeU7F6ptzMsvMDCmUzELCgUVc3cwsqboSnJlYPRORhMgU8lkzyDgqyEkIDuKy7qjHZBUzfn+OOwNgqXhnUbCgbjPd3XnSDSU0J3i1I99srNQutnS7O33t/3U2K5BVKLUTXSgklCNLAMXaNijICeTl1NKMvKzKQTAChIO5FmW5WKVm5k7uEgQogsmgVm7f5bg6dDkgXFJOuDszMSuzm7qZmhvcQTjCmQjizMJBmAIzMy2xrgKEyQNMyBlOSAMjhBAHiTEMIz898fEpjBFDpMg+CoJgYApMDJTbq4YcWY/BPhwlfRymNCadUjLKfl78CuHLSnyN7VgupJnhVsR300xE6ssV2Hbi6/pwz/TZEUx+u51zS6DfcLXatNa65LXCeom817TT2FY7ZSvrr3BuRXBbp/y54jDjlljL7xgjiidkzqXZ4ntZf7j7MAzjOB6P4yEOT09PzGyqOedpmuZ5iixxPIQh5JzJfDkWLOBdd8IarLKUEEJ7SLvxr2jHpZrrhIYQYoxFO7p7WcG6e0qpeI26O4hyzk7OITLDU/Z5Qn75/cfDP/zw/nefnp4HCrBBaBAWuOqyoRCYl8SsIOZFMdA6Qe4lsLhT2aqzhbeLR+7qIKQURJTzlM5zSubGQhwKI1O5PMkgkAibqa/cnLOREg+BAsyIidRZczl0dTVzA5XzlTXGz4Iov15QviGd7zd/t59Qu/d1W1r525Ii9sTlrhnxoM2/VdkaLrjlkcdmx+7n3VetzNlq9F0R5L6vkNr698BrFU9b7g2k6333eas72x+t69ftABcflOZ/AAhCRqzqZIR1+4aIbF5vpbsToXjUm5lgcR+saKCy53oVxQSBu5sj51mGgdroMjX7CPVof1w6TfHN+o/Lls5/ZTu/uJF7FPtgjB15vBEbtdm/LcM20Dbybf2zck3LIHc+R/28g3MXYLNsJWhzoUYIM5GKiCCwQmEgpsDCbjonEWawIi9UvWiHoTZYnaLd3Y1yTkRkaX59+Qq1KCE5fj7NFI/nbHPOB3cRuWRn8qAmgxAxWbZ10zPEKDfbgksEt3W3cifdzm5Z6/xamr8nz1ulgD3RUZ/Xg7LtnHaNPAaj7WhrJW5xslVbXVP3xnhPkAIo/nqtFG23yba0151EVQxsYS6/B4i7uzmRl1QCABRm+fJ8iNHNXtJB8E+//fC///
F3f/jhfcQLlch0YSDmtPhNJi3p7NxgVu7NmlE2JuSk2czU0uVyuVxOFOR4PLKTOTFQfLQdTKrJUl5adLjmnAlyOByIx3/5+rOSuAQHA4SS98DVndsTFVpXa6oKvqGTB3y6qy7rmsg2c7Qs5m5Na18Zalf/dmdI7Xy1gF27vqW0B4TXknop7ZZWKHlL1E1VSwQVEWZiywpeeINBS7onU0KAUwnaxgCxG8gJh8MhhEBc7lClcrDjul5oKZFgnNw9m5shRhEjELG5GBFRCMLMH2LBKhMH5pKchIgAVyYP8EgeYEzO7kz+7kOIMR4OYTzEwyDjgPFAY+QxBkJmL/lVKTCDCEQsHtzNcwj2JDAMl/mgRjnrzxfSi8/uashwLY7RNzlzqLLQVlKUH7IusGv98qO9Y3Brqt6VGrsSpNMrHflSs2OBZcHWt9DRzQPh8gvKtsFW7BIRkbRoaWWouzPzMAzl5K08zDkPw1CWXkOMMQozB+J5nnNKX79+fX19zdPk7h8+fBiGYQixshMxyGF0RQiuN45u7M4HCqwbC6/3kXLOOWdZi5kNw1Cu/6WULpfLNE2qyiLlrh0R5XlKl5cn1h8+PP3T33/+3fvhh2cZyYVDHMYQxIk8OwNh9UeVcuzmPAPkMDcv2R/WY2cP7O7ZTFWTqWet9zQMzswwTilN82xO5mQMI3Z3XncHRYIQG5TWTIOmMDZ3J0jppuyTYjlzobKQvMqwsiHkAN4aZfSbVNfOy42i3Zup3aZ+mY24C9iv5JHHxoo3m3ZdhdbI2DbSPWkx9tj6rHW2NlPb4y72WsAeVP4m5nfb3x1R97sOc31ysx5e4Gn+5DUxzCBBRJSut3xDCGGIWD3PSyN6o2WBknHFqETDKhdGmMt8GRGVLU6iq9/7YwxX/Gwtj3t4eEvpPqnt+/cfyHTE9ispvx3dL2DJe/W3+PylMO6Xpv2OJpf5LQutWvkxAJVfdnV6ZwAU2lmIzRkuAiEi9mUnLzATzFRzmpIlEVmSxKtl18XXwJkY7uXssFyxY7CLCBObCDvmPMM8KavBgsxGr8mM8zDk56yBLAZ+HwKTkzqM3Z2FBsdQRFaDK1q2zsmbPHjU7Dj7dZV1I5F+vemxazK9nR46a2S3tQcssLXoHtd/DHZnjWxB2hp+O+O5w7/1+TeNwI4g2/8Hogx3gFCOhx1k7JnFyaZg8/sn/NOPn//Pf//Hf/rdx+eI0aWc4zjznO2SpjkbuYqIUspafLiW9GdZKOUppVRWEJd0cffD4WAKc49MSwAbEiYqR9SmpBlCQZgYJuIxjBfj0/k8zTm7G1G5NGieyZR5IOBqIq6l7Ok3dHt3KqlkTb+dyjopIoLbA6GrNU43n+xO3T0Cvppat3YCrUuSe4LFlsRyvSl1T7mHZWPJMpZTO2bhcqeCQcUSLGqwdMDqVCMim8MswACK7hGIHEiExpEBd1XVC+Vyr0nd3D2ZpuTq4KxEKA5ygSXGMIYxhHC0jGWXlcvpQ1mWUVkBugXXQTAKgnBkef9hiFGOx/F4iEE8sAZJYyyKvqBQ4ORg4gAiAZOrOQVVFz0O/uEo0zxcJjWjOasqa13S800C62ph7NpnpajnrVcnVYeltdTD+jaMdUsHu/s63WS307xl7Af2HG4FQfvVY1m2JdM2rRBul8G79esO61aGMvM4jsfj8enpqR4Yvry81PPVy+Xy8jLP8+xZ//rXL0EKYjmI3LS2xq0hh5eboLduXUUtEVEJ4v4W5VEFR2ENM5umqeysPD09DcNAROWEcI2Ccw3iKmFgZtU0zecA/e3nd//+d59+/2H4POi7iKGkdYqDMqesh3GsVxNLHlV3QzmmMC3ArgCX3ZZyGX1ZIuZ1TZjKThzCrPk8TUnNiNW9xIVzd0PJ2wlnByAi5Uy0XLUkkrJ0Lws/BhELhNy0GinMRkRQd3Jyqpe4vkk27cO32AP3aBXfsgiJaNvBN7X11kR7A4w7vXSKfH3YMwvfRjLY9ltpr4NwdyHR3kfvWKz9qtVhLcPusu0uYC14tbVuvF1Tu5jvpnVrJ90zbnaNsKqtOwlD5ksyXABrgBkiGkIsRJvS4kpaZ2hpsITOci2XdYkIy61jK/fbAYVfM2Huenzsjv1BhY4Cv7f8Mortyq8BAPe3MDrYHpBHN7+74HUk/WsAvtdF+2ftrrUmd7/dcuie6uy5pr6i6prkzM4MVlUFjI3FyQ1qOs+W5ku6iIhZzpqg5q6DxrU7IiJfnDwJ8KIXkBPcn54OgH19vSQP49P7//Xlcsk2ZRjrOOsl2yicy1WCwGBiDkYWWYK55CW+xXVoa8jq4qLyQCC02PCy5/KLyi4htbKufXVPwuBWpBDdROzrWngAzLb9XeLfwllfbZcQlbq6P7efd011wDwW6a0iaOFs/29LLLkAijMzctkAFrZIwDQdRvynP/7h//p3f//vPn/8yC564WAI7qDZdDJNRAjs4KQ2Z53STEZwUiU4U8gJl/Pl9XQ6qWqM4enpCUxJ8xhGEZEwCAtzOVTIZjCDO4nEobiGqc45fX2dX86Xc56zwaXkyHCCl5CCNeoaVrNzwS0BGznMt+H6V018na+bpsrLO/TmezRwTztvhU/5v0YfrZCX/0vQDdxwFrn71gAqIyj7WV0vzBwqQMwsoeynOszLEc3iMm5LokAmKkFmlgNDZHYIQcg9ZXcnD8MYjsMwjrGY6WqXrJrzcg85ZZvnubgCFikVKEQJZfuWiEgCUBLxOEBGTgR2ChLFTNhGDqNgCDwIMfMwaox8ONjhYJEMlgK7kAoPzIEg7qJGbmIgOEc2ArGbuBn7IDgOeBrkeeTXyYfJZ4igWMsER6AFy5UC6pbzln8AkN/Ed2qDcLbqpPVS2JViLVFuH+JW7mytopbIHpNg18s3FXBLVZVCOqna0tnNtTNA9RqFr4Wn0FvO+Xw+z/Ps6yFY+Xy5sWBLCC92vHt3hLvqEnuldCoixN4Wcy8xsW/Hte/Wsp2LTjgSkZnWcU3TVCAPIbx7967CXBCynhzmIGzwnPMhht99/vxPf/j0dx+HT5KeaDqIDwIWGBRGzDzEw8rGleScl4OOFWMwdxBJURRXBmZ293J0mdVKCOaU0nmazBwkWTU3LrstLfEanMNLItErokxK7CiHCAHI4mImQkJ1j7wQM5UVeEcAVZ/tEtjOiq2ptp2jjtTrj85FZ/vjnp6+1/suZ+3VvNvI/U/63ZNWGnS9b8Hu8Fl1zy4ecEvVHWzdvHRtdtB2H94baS8P7xhGbS+d/usa3xV0D+BpNetSx9yWfJnqrmZZNZVsbICRozys34YgIlJilcW4eCuoLqBOl/T6+roavmvvvu7RPlxvY0PYu/f78f2lw0CDoG/X3xZalyvdKN7ezr1536VGbCi8Pvwmw/otwr+3bNq/uyxvO+oY542a9AGR1yIiEAIvgf/IaM27y2YZDCGG2zzPl8tlPowDJXc3y6puOZnlYQo55xACRIjImapimuc5Z7N5ZsuR+Xg4JMV00emS/vrzl9N5ShCb/TyllM0OwUAppcIJHgKTGQvNubWGzcyJ6y7KZ
kG13b/b39r7xaVjqK3ioGYlv0V+O6FbY6kBe7/sKhq/vUqDDftvW6A7+eK7rulb2x90X3W2AOwCdo8+WzaUnARwgsEVJbGJsyAyfXz3/B9+++m//rt/+H/98OGD6yFPA/wCImgCZjUFI0S4q+rLy8/T+WJzIgiMp0uak5v5bK/FhgGZhCAxiERVBwlLFBFaEpLTlPQyz+6s2Z1MRVPSOWfzy88v5/N0mbM5sRMTg9yIlgVhh/br2G8neUskVzsQN7TUCsnqftkibXVY2zlJottJaYmhbbydi+5HKbwXH+jawaZ0kBR7lYhCIDY3LC5qRESqWVUPh0NJJLhaGIvnJOolSCt5fUEMAgKTuC2OwEEjjQEM9iGMqqoSspvBVXUKnFJ6OjybmauVbVcGkbmpZqFlFGXFVW6uEqLwwHSMfIg8CgIRI8Pc+EJMxBykhGdWIQqRhlAytQa3mLLnzOooPnQgCQgKIQILHcfheLAYchATkQjKzmQCz1jzRmCVfVhvfVgTXr/6YRPRYTws50IihbJLCIKSXqJOXmu30d5WYusFfvv2Zl63jL0rQVo5sv3ku8oNY6ylNlhLqxjK/9u9N2oW2CmlkjqiQlhOyVpEVSZhv0ELrSGbl2SMUHcnK+0QEYUQ5nUHZXcsHUi7pYxrmuZ6DFh8z+Z5LmvCeZ6r71mdRDN295STqv7w+cM//bs//t2nQ0xf3z0fjoYBs8DN1bIjcIwj1R1oXxJMiTCXqIblZM8zmkkUkfIrBM95ocycs5U0ECjrQ4WDhT2bFWekskPBTnYVIuZu5IbCxjAzdSvr67IBLIDRcmFxcWotWPErijpMPjCPHpcHFt6uJbGrRCtpbt9+Lzy/vrTM3iqALdIqeJ36b79FM5AHinzbe9d+i40Onnu2y70nnRy4h4F7pZNg9yine1s+oo3N18qossFRtuHKLd/ysLz1ZrOvOKsPw3A4HMZx5Bjev38+Ho/DMABQ9cvl8pd//YmZX19PWKh0NQJsx5rsJOQ9Od8O9heL5V104Ttb881yupLKL4CqgeJNOxTYoOhe+o3uROXXwLZbGjx0qNhRoPdmrSOD9kfredQWZvblPsx6ucvhJUibkRcLRjGnyzzFeZ4xrr2UWwNpnmNIKYUQolzNSjNLKU3TFMaje8yXeU7zOAzv37//6/TT//iX//nzq11mh5gZ5qzZlwONpDmaDEGIArFnEJGuOm6R0ubGDSpuRdbOxseWW3992VJsKzOpWX929OxN2aWob1L+VrDjlsh9b7XZCuSONmizWt4i6h7q2rXKdggtZlr50w2EmnRiLWAA2BQc1nWnazlWIh8C/9Mf/+H//I//+I/vj+N8oTQfmN4NYYIRMzmVe2YGzCmdTqeff/7ZchanQxRzn1L++evrNM1zenn//t3x+YnIY4zVO6wY0mbmSywZv1wu5/PZ9CmlBE8wPp/P2dyI53mep5yzKgTs7iVpirlZycLW3Zz0ZkG4i9h21tx3Qp1XLNUFYT14tNtb4p3W3jbU0kDtvRJtjWJYH9azvm/K7fZthwFeI1YEpbmkAMzqWRdHhZJXp4xNqASddxSfGVcQwdgIjqDkSmBypBwDHchAiaeJ6Hw8jIcY5Dh4zmRpJHVLlvJzUCFBPmmiXC4XahH9FEI4WyIiYnKfC4cxiBnP4RgCRxYhEg4xsMjBoRIQQhh5FBrHIQicyYnIIaDAxEZGyIRpYGLm1/ysKcfh8Pz+aZpf8+trdP80hvMhpYteXs86q1MUkQvCrBlcVhYOLPlXHVA3Kcc4DlIQkRCHEAKzeyh2uQgTgRkiMYQwDNZNZJ3OexNWz5qKib+wqObnD+/KAXFZJxCVbTxZ7vg6r8TG7q6aCq0wE0BmiwtJob3ypJIOEXnj6Vxp18wqc9oauLb8bvMr1sr1f1oXzI2L9tVC8mWh6IX5vVkHLpoMSvm6U6LmbgRQdg+mIQSzFENgkFs6RA5k5pScICLjqHNWA5NkJeLB3cstWXflxRzcN7KX27PMhV8LPGY2ax4iAV7OKgtg5a7g4XCAOSwTeRAR4BDZJPyF2NJpOP/8h4/yf/3x8x/ff33ir0/vBzPjwwHhXdZk6RJNB5kGnhP/IIHd3VTdNbJIcHI9jGTG2dQz1JFL8AD4WXMRkDZbnrLP8Bxdg4Nd5HVOX6d5pmHCnHSiITzPpFrcLBwEQyZRDuHIUdyRc87JHH6IwiOCjDKmbAoTAuCAD+QhMOVskSywpvkE8zAmZ8uXAy+qohWrD9cB+495TURZqaWs+bEu8+pKbylF7NL1bbGlqpRspX/dzaFmxbUVqdio0q0ZQcTANWpUQ0g3ecbqUN0V8JX8b46UOwysn/cuiBUbu/U53HguLF9RDcZZ1+7LUqFtp4OhRU4HQ4clv90Rb/HA7ZFL+wktT7Q+cANAa5qmelkXzbFna5qsvazt+5rXBxA4lInKHqWamxuF4JFJnYqLEbMDPE2JGUOIPIzH4/H9h+d3796VFWCReMdhLPeB53m+nKcvLy/pPB3HkdyTFrHsKKtKgpc7GC0W/Yry9vprJeB7C4PdGal461B6Mx0VH4Ue+pgo+xsirWWAW2LbwnALD/VHnLW7VWcuQJo7nEtG0zVjW7sR3qqhLSd2f7ZfrQpuyYvbsmrlymr49oyMjv4r+uq8NeMjlE26Difd2c6NHl93pVbJVFpWWa2qYuP6ciLoNjzBXeCSATLA3DVHh2aJEFO72OyGMPx38z99ff0veD4MIZIGBgejPKeUTi8vAokcL+cLwZ6PT/PLl8vLy+FwuKiRm7OQvLvY8NOE/3ma/0dKf4J+Hc2gI+eDkLsfgwzmkV8FbDSaRHLXdKE8He3rE53GgCQ2s2QHyMU06pz5uAq6BWUbnFyFYYvFjiw7udTivPp7V6lb9twrMVTjpFrnaKRZR0K45bKtAd3t3X+z2lY+t0Nr2+n6rWR/Q9iO5R8A8xJmFnZNt9N+uzvG0sswDK3L1ZVfyi0PWvz+FpvI4G5Likhh5nJ/B2bG9s7AmWkOyJSJ9Z3YB+h//f2P//Xzh39PeH86D4wwHJz5JYhOM4GneTqfXg0UxwFzOn99IY4KOc/28ylrouni8zxYDmMcYYFsOD6PxycZBpKQBQa2KSW1kRBNZZ78PFFOR/rpT0/D8GL4v1/PZz7m8fhl0n/N/hVnDybqrhlSfBrZKECvuwOtpVqvdK2ktoSW9BLkb73otGCelrh93fJvibR7S8ksYGEzYy5GePFMqcQQOtFXqbdSV9tFSy1oZGB7Qkjr8QkRlYOBpeXifWmmZsWmrRl6s5dQgjl09Fd/FHlNzQkY1h07J8C5bA8VwJhEPZlBFQpP4oOWa108TZNbJrIYWcIACaYKg5pDiCBk5ELuSiW7N5ZFzjiODOScA9PhcDgMIQSJgZlZCHFYkqqzsIgMQ4wSCpKYy8qojEXLJbdqZBzHOGHmesdxKTgcDodDHkfjlFyhqk4kEgtBdIVbTenu7uaWcwZz8iUlfZFZ5XeMUW7vudX/x3HsJEspxW2yzmXNJvTh0/vhOF4uFxE5Ho+FoE+nE4Bs
llLSvMxaWRAy38jTSl7tDtBtR7GFxNv4JWWkt0d/dcHZyZoSDKYs8+rylZtD81seuGruUpbdIKjQ1au5CKvShWq+HsyCYozjOA7DkFIqQT6LZVBuxAKcux0vv9m26eSyE1pQK+SGEmOLCiKA9TjO7Hw+Px2OcRhUk7uX03Z3Fzcmevfh3e9/+/Th/XMMLOwEC5HJYSmbOYEh5LBsIFmvmzLcWUDl9v56CrGuY1fnnCVGl1pbHJaSzuwp58uc05zVYSA3t8LmshhPqlqYhZnJHEskgxVRizRxpmrOGzOIWTIxFrdYKUGBIQbA8pZfHpStAute+catsf2k0nNNid5Wbuu0hNdW2wVm9+09mDvOwq2+7yq3ddpqv760HbVdV0bb/arjgntM0SLzF4K31387DYvau7NJ/00s3QC829HSyjUBdLEjP3z48MMPP/z444/DMIyHGGMs6q+Uf/mXfzmdTqfTaZrSPM9TSmYgYVNfZR3RsiR2gNm0xFtEIzArBeLNCHw7Vdyb38ct3Hu7Be8x/bwRvFpsjX3fNuh3tmNanuoKbTZ6vguY7XRse7/XL/Z47XGn/VebL67ct0YRxxqmqAT0c3c4SmIfOFR1nrOZnWIAjENZli/bKEm93L0vXc0ym647vbq4LzGF2XC5XF5eXr5+/erDcYVlvXRh5DBjU1XkbEzkbqr10gRuNmV61+dfNkEdQnArxB601gquTna1/3dKoWv815RfKcZtzQ9+j+a7561y3CrK7sPafjsjy28vm1YEGIiWP4lKEHIi4hJbudRyz4RkiUzCEAOYYR+H8Q/P7//+tz/+8P7jYRzFMsGckeFuGsehHBsMcZxyOp+ml9N5miaHVMBUXS0BzpHjIMMYh8MwjmOMxIFYvBB3Np/TbJY0S04+XTRnQ0oGzyj5C3ia0teX17/8/NKn64CY53IKgL29zu7JloS2oomai2C7H2Ld9MUdFeYrU3bC0G+jjNZZpnWhuNtUS9JbK6gdDjULzkob5VVov78qzXU90z0xs8Al6sTSXf1oGA5MCrJkmSYWmxlk6sNIIBsCBZYhMMQsK9RnNSd2YSKhkoeQDIQxjAAC89NxZOY0zRLo/dOTmY5DCIFp2dIzARNDhEvKOpBBHYzizkZEZOoGNMtaInLP8OymquaWiSjGUSmEDBloGIZhoHNSUzeYG5NjV1pcJ5jg7bGYYrsgrDtY7fyVH/M8704brzcVyxlxXZU9Pz+PT4ditRwOh+onmVLK81wOeIASY10qqWytvVoqAXRPdkaK60gr86hqm5ep6vtioJeFmYiUILS+ZmioS5dKXy1TVZYwV24S3Fdg3NdAR+XWnOpyLFzSrKvWgC7lkwLyjW7oY8dRO1JqAhC3PMbMkaMZVJXdza4IKbsYIjJNZ1MNh0PZBTgOObr84dO7f/jDpw9P4RD8GCCMECSrp2wAlchXiuzmbMbMQuzMUADmarqGur0CbKVf0pzNTFPWZJpTzjmbJsWcNQnmeb7M0yWZAhAyJ3ciATMTuKRIAUCQMvIyFSBwnWsuSe/ZTU0zuYNZRMwmd2eHUAnjZTVI6W659+qeMm6FIG6lbddUqda5lt1Tq7jlgm2P22od23Z12mre7OTR2xZR9XPb5ETecuLuh60ac1+ce+99tS1e90TvjPcx/L+mlB4D0W1gz537vR2Ej9skImpy87SFmVPSerxQOPd4PP74449//OMfp2k6nV/+/Oc/v76+vr6+TtOUUtK57i6RLuKLPEOz6bLLK7he+aDn48FWv4Mii8qGUVEEvtnReIyct5DQ7of1x/YkuavWlW/aE28sfhvP4EpIvvS+5aPdvjoDaMuh90i0Y9UOk7Sx/7Yk930DvlPuzPiONKsoIZjB4aj+lkRUwrQTCUsJ7mhzzmY8zfM4RiNks2DmIJ2TffkSQuQYRQSqrplhcKibGcFInNT0cslfX16+vL6ckmpQA9lizKwKGkZuZpnMFgedss0KEgGv8bFKcLYifDoc/nrR8c2JaE3zbja3cqNXoxuH0rbZNwLwNym2xpXYhbwj8grwtkJ9SLfXgHHLI1feUb8+q6GBVhOfwDB3hhsVcyeJZzdhGjR5np6Y/uG3n//LP/7+Hz8///B0GNhd1QI7czb1xYdCs5oRsvrr+XK6zOYw83nKl8s8XfI8Z1fEOAyDPB2P4yEej2M8RgoONl3Dqufs8zTnjKSUZpumlGY95KxqE0lmmS19nfOXr6/n8xkcASYyEAAGbtbMHe/jljA6a7ObqRal7VFZKd0TAOVEqpjxa+NcPGOIFieme7SHDfnZneVle06zLR21VJe9rr67h61Uag36ij6/rnm8hGVb4r4QFUMwSIARPJtyNp1BIhnOY4whxBiEGe5aHBaZKIZgxduKAgDKpJYAxChCg4gchpEFQSAgFn86jsMQhijuXm6IljzmKWu5OF3WvMIsTCEES3N19SQw1mMQzTMhw7Vk8YYwU2RE82QOI+Y4BHKZZ8+eNUema1TVNcnUjX5aIysuqpep/bMuC8vCbzvH91QvEZXVYFluVQ/e8/kcxkhEZvby8jJNE6372dM8z/Os2QFWVebAzO6KDYm3AqWVQYW0usqdTKmlhdxvTeH2VPl4PH78+PH9+/cppZ9//rlIvXKwhmbjqutoIUW6WUXfDKFxe3Cg3PwREb2xAuHLWnoH8y0DVy5Y2qYry5U6y6kdkbATwdz5VnDHGM3sPOdpmjTn4rYrIh8O4RDD73/z6dP7Y6R5FHoexTRHZjaQsJdtXYc6u0McJTsi2bL1inJ3b50LGBstuS5V3ZXcUE8I1U1hBk/ISXHOeUqqBpcAHsnBXFy3aoI1YZJbbwSmErZGFUt4pMDl4ohndgQikAujZEoMbKwOc6d+SfOWwv3O8s1M3TDanVfd8y3BdMryLTZKO/u7rXVgbGXrtrylQjcoIrr3Ucsvu1q/ZfNvQrXl5c4E+WZp22mBIdD2bLA+WXC418IunruySxVUhHLzvC1lbw6rphMRVX99ff1v/+2/ldOS0/ml3Gde9k0XD5HiflP0oBuc1hABzIHXe+YAYozWbBFWMJjZvgel3YS2NIzb+dr9/O2cuLUJtt9+k4rutbzLpFcxu7lbtYWq0wgtBirB3KPVe23iltjuSZLdQpuVZPtwFwkd5Pc6Yi7etSXfj/naMACg+FGDiMiFGOQMAE6v03Q8HlMU9yK6OamllIbBzpc5SlBLOaUSgkHMJUSDqmpOfrnkeZ4diILJzFhAZO6qbupk5KBiZa3Oq2ZmJatcjIE5w51WP8bimXIbz/tNy7lvvm21wFZe3avZnt5s+2p9lVtoH7tw/9uVqoJ7U6Shn/q7rVM+3w6k/n78qoRkp3LzgVBdnwmA85LWMi95pdx9isoOuNs5xZR+8/75P3789L///vejTSPMU8qaSAYnT+7JfDqd3D2ZznM+X+bzZU7JDJxmfb1MLy8v83kCMA7Hw1Genw+H49MwDONhCJHcVS1nN8+WUlZDSpazpdmmuRx/mIBn9YvPryRfVb9Oecoa4pByGZgQGYoJ7zt+3eVJt4dbkY+
9fSXaWzrukvFS0xlkfiujiISui4mdeWzhaWHuNo53CaB92Hr0e7Mebj3gWpkWuqbru7pd0Skhg91s8q2+qGlOBCsXvxhiRI4IjqoeowgJu2rObsoOsAzDoNmXqDYEFpQ4pHA9jLFcx4ss43EkN9UUJMRAQyCUkK9wJmXyIBQDRyZzJ4eUzBlu+faInJ0ExKAYyJUclB1uZMpqntRIgtqcsqk5s7BEymWf2QCQA7enRvUobAk1W02cW8au9m4rqjqi2c5i7aIGmS0GCjNfLpd4Gaq1USqU5WKpQIHcr25RWzrr/q9vWxKsf9Yn1gQWw23x1Ym/2x0Zx/FwODw/P5cMgV++fGkP7ngNGGNmReHVxptFAl0vPS3Q3jAnMwcRJ35+Pn78+HEYhtNfv1TPVXfzsrlkoNuFx5a2O8yU0IIVaXVB6JZ8pf+SpqV4TcYY53lWS2XL4zxPAA6Hw9PBPzw9fXgaRxF2j0FGkSlNoeyFiBgFd0pq5ELCITAzyRLEqSwWKay5SQxeUsMTERmZKYxdy0Vcc3cjczZlh/A052mesxlYWEbnSE6E5Dnn7DKQiEAkCHMZoCmrsCz7HotxAiMCC0xLLJ/idUGHw0FnHR2jYYBnNXMqiOyIpFPVby9bQXzPomr15a8vnSag+9bery+VDn9lC+2f9XenaXY/p6bstrlVM98s7XTseoq2HRVbxNe0ZrvY3u1687AfONHVlaXWr3KmeC7EGFNK//Iv//L169etNWBmUaKZzasLukjkIIH5MB7bE0Kse1vTNJWsRXVJedWne1G17k381hRAQ5Nb4uyb/RbTPaCZbeVONaDhOzygq73n7XU7a+6LtlcYKlQtEm6tnH2W9PuLrg6fHRofjH3bxbbyN/m3wrxCfu23VT2Fjqiktl6DFJRbeJVuy7KriFol+nqeD4cLy+HIFEOk6GLmlqfkL6fLYRhcswemcRhCAItQEObZs+rs7mGIw/EQk2syJyGwG5m6l3RvkBAcAhAtx4aazAzkx2EMnMkIWBLkkpo5ys3RXyDt7+G5xVJH2C0XVG+j7pPtFOB23jtQu5bfOLO4JbNdOnxceGOfVDPmgeJ7e9kq0EUurX+ggb/kH17yrGLZPi6rxJnz4IQ5R9U/PD395x9//A+fPn4muCVJUJrNPcHc6ZI1W4n5QGm2ry+nl9dLNmTznOw0za+vr+fzmYjeHZ8+fHh+//7d4XCQeAghSBQQuSMlywozOl3UFDnnS0pp1pyzO5HQnHw2mwwnS18u6cvlMoM5DApf7kBCyg5eGWjrMdHw3Y0SbH+3ViLurI/a1rYunUTkvnP2uy7VbrregoeGqB7P7+6Mt8fO3pTtwqTUD92Ks4W74oLWY0MArnnNLV4kO9Vr0w6GgxDUNSvNGUQaPAlxECJxNxCW46MQAsHZWUSci23tDtXMYwjM7FBiHIYBRGnKarOrWwgMgqsDrlkVXK5fFX9RuLtbmmciK7F3gHrdEeUmv81MhUCoXDVV5zm7IswZ55STknEo53MDRDAtW0aOcqd8CYdQsdzMHxH5Hq+2E9CVbiO5lnLncJ7neZ4LlZS9unIrL+ccY3x+fn737l3O+fX1NeesxfOYuFz/Xb/q98lwK8VaMgVAdD0KbmnFr155hSScCO5Wj8LrgrCTZSUkA4Byta+4Vrb91p28XSPmnuDrTrDLqiznfDqdlgyBdaQ3ezA33LuVBWgrrfihZlWMEgZpwQmIKAxxjEMJkWqeI7Mw55xKXNlIrk+DqqpJiIMImRnM2dSdhdmZsyI7SkuB0pKa0sjduFBdZUC31TdgYU+dVXUJ0DrrnNRS1mygMFzO0+s0ZxCF4BQcwUmApO7lPE8k0hKBCSEEyyYCXrIJFnnK7rnMNbGLELOEwBAaKUwZjByIAyk54MrONZnPW6TYgwp1gto6dH9tZrexvDobd6vgt8TW0UBbuW5etJDUBtvf3be79keHmSqjWzgfjLRrvxvp9+Yhb3cKsWG3reX0zQl9Y9m2I8sFln1D/1c2jhVXZQWoqvOcDodyQqjn87n6/C8n7Uuxw2Fcwq+FwFzCoEvZSfdy8cBU19Q47u4pK26shPWqghcq+ubMbkfR2RzY0O1u5e/Fm98aOl37v6x0pkVRkEVGlFAZrgaQuzGus79qHcDhey7Q3bjuVfg1kN8r25bf3teVtW+f1N9EIK5BcYDFz628XeI9L2iBL2mIJGRINlgQZ7iJE4OiejqdLqp+GIKBQYwQwSGlJJByoEfC4zjGOKm/GhxMIMrZsptb2a4koFxpMYCrByA7hmEQeWUtbnnFfRTUyLHtwH996SgTDSXYbZiZFZ83d6UeN4VGqlNj/b5FhW3p4ZeVq1V5uxP9QDe1wLdCoJUV289r/YYWy5cMkF9TXjWIIiMi8sSOg9DffXr/f/z4h//jH/7w+6fRvvw0DoRg5nmGsYomvUzLtSyDny7Tl9PlcplBUdUvc/7zn/41Z5eA9++fPn/++P7D83GIIiQxEBFKtHN3d2RFzjZd8pS0BPdSNyIKIbDI+WzJcXFckp6m+TKniYSsjGJFKYjIiURqiuo9lOLWLKxj76q1H1bk1A9LzM/2yUJFJZNd69dWsPqQWjry8+ZqTAtDB39b/5690Y6ufmVmYdt6KdeIHbfjL3+WQB1lOL7cNWIBAIapO5I6ppyycZqYWYR4pCAhMDFKYAsC3KHFEhAhYgEojkcADiWnwEQwJgqjEHsMFEUkEPuymFZVtaRKDLiX5PVmRt7khFgPvpftYdPsrsRCLiA3IBvNWf/y5fTldJlTNhqIiqHugRx54msovJtS0LqkffUrDqs06SZm+y3uuMx1k1dP0rykQT+OIjKO47t37wCUFaOqLtvYZsVZtEjCrUDcNewqWbfU09JTvYSJW5aoW7y4XQq6ezG8ajKJEELxq2yZrTa+CyRw18AtUZ6KCVc243POZfG5xEPixXG8EELbDBHxeuGtJDltuRfAdRHZ+DVdV7lrGCh3OCGEEMchTbMTeEmLjRhjICai8zR/eX3910Gehw+fn54cec7pMB7hRqpuRkJsJEYxhiEGcWNmuDmzmxUH9Epo5fpiKTnnrBkmsDVfqLvBlaEOEzrN6WWaZxNIMKWUnXi5MkBEAC9ZJSgU1bMihMlRrhcuuxVkS5qJIUQJIQRzNjJiZzjDhVngrJ5hCNgt99TkPVuhJcgtN20babmvrdnd3cUt4XUt1Bmvb1sBuDsEb5ZwW/C2vTS7DVcuq+uETpfgob7o1H+dvhYP99DegrTFxrZx7Amxrv727Tc/QTPRyxC8l0UdPLjFD/YQfm/G3R1YYikRgVZX/LL2c9cqbEMI4zgy8zSlEIZxHEWkeIOYWU465cWt1Ay6cp+7jyW22a01tvy+h4g7pVMo9962A3+A7Xuf+2YnomKsGwLdxr67t8V5/eoOtLSauWjm6x7k93p5TFrbOlsabhlkyywP5FKHqMfAdLRaH27lRkGz0GJsdEzpqy4TYY7MS7YvxEFcghI5C5iyz1k9EDv4MiWQHA6HMAxgMUc291khyDnnObEcjsfD4XBZAV
gczMxAJMxBWNynYlPfis2StQwN5Msyd0/ivWmyutJ6DOGWIHdLK6ZKaVmv1qlQdQ3Wb7f9fhfYf6vSKj5q7Cvc4fQq/DvIHyBh+aplL2elxc+neBDjGlBAmcoD/TAMf3z+/L99/vG//PZ3v39+Ovic8qSBYDRZmt1NT2mm6ZJhdNKv8zxfLmmeclZSTVPKKek8ewh49+75hx8+ff7h/fF4YIG7DhGmrprVkZPNc7pc5nmeL+d8SfM0Teq5HD2oppznZIfkeEnz18t0mucMclBSY4kAAHYvJs8SKcfRJ2SvSK4/0NDbPZmwvcu3/mAsMaRvtBIzXxeoTcwYwtXC3CWDbo+j0kY7m1thUt+2XipbMmgZs1QL7R0q3PJMHXCBoGydBvbVVje38hwALGUXESZ2IrCDDESQrDQnH5INIUgMMYprmufZxGwJrJQZgdgFBNCH5+fL5ZKzEpMw1GYhjoNM53MWRHWHuJRNDNWcUJwUyIQhEuIg5CgX7oiIKXiRpc5wJkiMEVwuq5ICc9bXi3496b/8+cuXl3lWyBglxjllBgFs6yQb1stdDrQCpVgw150HL8hzv67E3Ptj2OtffrNga2Z6kf5EbEaqWnKBhhAOa8ASrM6i5eSQX15yzm40DCGEYIvjibbzWDsq51ct9bd16u/WC7REOq1v65lJJdmaG6MIppINqZSySGvDQ9czTF5D37YibIGE4HqDlhZRqsq0BMasly2LbS0ixGwG1f7DZVzXPLCdAbf0FcNYKX8ZL5mTCubiUGFmhmVXMoQwnS8hBAJpmgEcxmEYhkD8cpm+nPMgpx8+PKuMZ4OZfnr/bnr5knOCGQcQSYREskPxD2UGnIisnDV4CRqk7q5uuRw+pjRrVtUYDyKC7M4UjTUnFrDionTJepphDApSxCsHMjcncoYXP2RH4GUeyxSUfWY1JygzK0p+QjCzEBXCM0dUDpSFOAoG90E8u/0C7fng5LxSpt+6zu8q6a083SrOzrzYftIJRzRicBfI1typv98CYW3cmz2U3S7uFW9K2wXfglErfNNDoZP8bcv+tjVh/XyHWzcfdWBXHPqdcO1dm/emuwMYm0kv4aDdvfiEz/NcWPh0OsUYh2EYx+u9eiJ6eve+7D2VGDOmbmbJdA07vs5+XYeDqElmU/VxjPHR4v5haQWUbYIP3UPIN9vE7WqwQ+CWfX4B2G2DtYty3Fre7sY2eGPxPTPoe5vajn0L/N+kPJ61Vj8Lkfui92pFhQuWsAKDhFD2MZ3d5uSejJSCCxMHCWOMomrJs7qzRJLohGRqs40kQjw7VJUDHY/j++fn4ziwXsyXQwwQnAAJFALAblSyZ4Cp7BvW7dRlQkG07iniVvD+AslWSl3bVPJo5Vg3ZR0BvJGi6NbOxmp/d4zwuJHdAf6CUe8C077F7Y5MBydt0q5sv91Fi7dbIYsPI+BqZAwmcoKRO8gFYObo+O1h/I+///E//fj7H4YDzWf1FIdwzq8QOnmek5qHPJlewJCTTS9fX+c5Mwd4eDmdT6eLqj49jYfD8PHTu0+fP7x//xwiSpjTgTkhZU2aPCed5zRdzufLnLPN86yqIhyHQIxpmi6XC8nzpHqa8tfLPLkhjsRhOeq5KesVXHYiVNSaWTkOXfTjcgBBWOx6X8+ubvBJ60lg6xa3QS+vp0Xui3sIHBkNUZkZ3xqZ2wldpmmtUF3rt6q/n9mNDVAN2tpCKz/L29CBUiNGppTKAmDdTyUqdjb6NSVAzJwJvESaAcyJAGcwFOwm05xFeBhCVnfDOBxDCEMM7m45u6YYhYKA+HI5uXsIzOSAm6uaIukwBmawwDxPr7OIDMMQQhjGBWAmYljOmUHMATBTaJ7d4ShBQdiI3YRFkkl2V+A8X/7Xn778j7++zM4yPB8CEoIaogSPMmueZ2ciZyprwsWJxReXTqxJ/K6GV5ObsmK53pHoCAuAqbZ7AHV1ZGYxRqx5J7GayMw8juP5fC65knPOx+NxmqYSd1REfBUKzNcMJ5UIqofYOI62RihdZnYlgwozNuK43SGuT1rbvZBQQU5LwViXHFsadXfgmgemViAiYPFcrdqlEvEyRuFpmshRsneUVS4zp5TcjFkClRwSYmvvZfEc1sQkZsslwBseK5hcMdO6UYkIQVQ1mzLL8XiMMeacX88nVxPGOMZDHErCejLXwCz8Mvt///PLGId/+t2n4+Hwr19fovn753cMT5dU3JcFiGSZqLi8hrAEEwKsLHerK6w7JVMiOh6PnmDCcXyWNP/8+jWDlDC5/fmnn18vszEykKfZfOQg2Wq+emZmd3Ja0r0QscTBzZPm5bibaE45zjpIeDoehyHOl4u6RY5weFYij0JiJEaMXNavYNnKqXsqtpVTrVSphFHv0NY6leqo2TzbKuCWgMuH1TTvPuyIqrbQboW0NNwOyjc7yu3nLZ1XMG73iW6Waq10bsQ933LKzRg79LrfLMpbaNty0876c4v/7dyh4dA6qA5jLUqZubj3wHswdqBS870Tzlb6tWB0ZNY1WOTnPKXa1DAMpbUQQiWGRRqEUG8CF1FQ/BpUdXp5MbOSZKL0W3Zp1jtLaziZNUSNp8x8BaaIx5bSvjkjb3y7S/MtZlp+wWZ+W0g67vONdVJXoe3Day/YL52yKJgvuYjK9YeCZKxR77ZCAHtM0eFwO8BdOLccWuV5104t1SCrkwhA6EYtbrvrO7XrhmkdoK2eCymlQjarZWmmS9RxQ0mnK2VHVSQE4qWewxZnoASml/Ocpkugp+f4/PTufT7JdH6VOB4kCvnpciGid8cI5wwlcwAl6LdSmIqLBzm5aZ4lDJFlSV+llsjCyMzBnNXcvUhOTnkOIR6PT3Y6Z89xiCsmr5PYzlR1uW9ns6vWTVPHMnU6Wrndclm3rVxbaBeQW6Lq6KTMSKuPqhi/R+ZVXnVWUDfeDi0dMN2g2sotF7d8XRcGtWbRla0nVzs0Iir2HjXxEVQ1w0QEIFWFZxEhwNwPx0HnSXMWcjJjIEQ5xPE//v43f//b3/2H3/z4w+EYVUMQhieelcM5TxdTNWhSu5ieHTm98kwygnGZc07pp5++TBOen8PT0+GH33z+8cfP794fQiRmW4IWpBQjmcl0ejmfZ1UnohjCNL2yW4wSIjswTZNZPhyGkw9fX7/8fJmUGCLZPWsmiRzE3cm87NVZWeI1VnSLPVmjgrVMXerkZh470YrVNmhn+YbNDe4oGv9yufAam7BIlIUv9GrJtOQRQqhTXK3TNltBJ5C35LSGHbkh15Y76udlpV0ONsKuH2DbbkvQvoTaVyIyghc0r2lVS2QVq2sHh2fMcA6qANEUo5iD4cweaTEcywjNM1IR0LG4PwgxyIJzCDJEcdcoVFIXVCELwCwzc+AgUpILFkQoUbnvTg4uMWWcBC7JhGXMzq/n+S8/5T//dfpypuwHdSR3NTfA2YtLRgRrWfi5Gxwtm9lNhHcKXFwmyk3RTrtQc57WzU09dussHr76JV5lRF0GF8KqcQtoswqt7bTh+Kkp3gQa6rYiWmKoNFBN87aa3+4xtG87DLTme
PcJEQF3t2mJboFZ7in4+XJ+enqKMXjOUcIwDDnnl5eXQtyL7iqOzUzMoeTiJPYldiuhoq6q5KssJmrj37QjUlVhDiEMcHcys9PpVFmXec0YWa5/kDsPYXzCGH+6pP/2f//pcpr+6Q+f/vDuHdukBJsv5aKsSMzq6Xymp8OaQ1bdCsEvPqLlbl/O2ZkOh0PKWdXJl0uNBlJiNZuSfjlPl+yzwQgggUu590iuxFKc83NWg8E1SUjZMBIROZMZCG4lepS7GpxJwhCELStMk2bN5r4EuiEHo5zGOzv1G3Ob0lLaFeH3qzWUsNBqNU/bnZQt8bRzt222VQCd3Ove3iHLG/3RNrvl/ds/b9Jj1GJWUtpSbRL3vKX3RnfttOmtBXJrkC0/rB/jKlf3o5m1w9ziqu1i+eT+ILqAKm8a7ZsLr6UK3oWd6YaWyjqQmT98+FCYa57nnLPqslmmdP2WiIiJr04+UluuKWjI3PwGD7vivUfFnRvU29JWe0Dz3yxbif3L2ukFfdN+K+1bKi3u/UWmbWFoiYqZdc81y2/vnLd/fnO838RwCw/t3f7YbaSttitwti1Qc0PMlsQkFmWxUEcRjiHIUOjTzNjJ3C1nqJmZq7n7KTtpcsGcVN1AMQwRNuY5ufuUFZiiSB7jOIbAo06XYrXJcnXTI2EM8jREd4ZQQAhmxOxMLqzqRgQRdWhWMwZYJOZpLiZEiRJfB7uL3hYz3Xzdm7W2nVZTbDlld4K2k9j+Wb/qesEauwG3a8uOuh4rrMcEuf12d1AdVFs6rKh+oHp2e+8wTyIoi96KTzLAptNFmEZBAIQwRP708d3Hjx//82/ffX5+/3EQ0ZRzdjMSU/jXNGf2yXB6vUwvSU+GCWI8v/M0z6fTeZpmTRnAp0/jbz5//t3vfnx+Pr57fxhGIVKHlqs/TCWUHuIQYtZ8mlLK85zMTEQoiARSK4tYU/hLyifNs2MmZGKDOxEI2RTm9aaQkcGWCJAdbgtiu7RVtUgTQXpp6nbT4d50N8QjRJ3dK0vgqEaSdwqi7JS181Wq1cVhfVIJoyXXquBqhdp7JzzLOrD8iDGG9vSjtNI6Ed1jb3d3ukJmpkR8HVIxPkHJfTLyZNlUnSSweRgjD7jqyMAAyHyJNmmUWQBwSfpNbr54BV4HieDMxdPP5/l0vXcrwkQOYfZ5ymAnMEBOTI6scFjyER4uM//15fIv/zr/6af0Zabk45zdXNWt5O52UAAJi0lQ1dky3IlvTtjLAviK/WLP0X6QnhIMpiPEgqo6tkoftA64PQkpE1ZOAsuWagk5UxbJRbvEGAmyhFciMbPmBvuNrVZ3Slo5WyHcmi8dFXavWmrrBril2q3a6IRd+xy33qQldjARffz48XK5/PWvr5Hx6bcfyzEdMy8XCK+n+deMIwBYsGy4Uj3bDO2m2mI+3u7btSJgpTSOVJIyoPhwjuMI8xC4tO8wUnJ3PhxIwiXZeU4TY4zz+/d6jPIEgZiQHDgMwyGEwNnc6WIWQyCilIqDBLnhMk/zPIuImilIhIdxpKyn0+kwxvM8TTlNKWejDEnul4TXSS8Z2QBhR8lu7FBjZvagxaaAuUNVk6kRwOQkYMua3JA9u/uTi5b7giQOVsumOWdzKSlMjdiJEYSYWR6bvRs6xJ3w3221rRRqVWD9v13wtMTTPcTGuLz3qjbSqeq203uN7FoS1IiObalSe1fkdjW3f1572ZxYtvjBrRAArneT7rXciab2+RYD7Sf3mPqbo9sC8wvawQYJ1DjodqNw93L8nlIuB4NXSci7A+EazruVmUQUWcohfLuF342o7Zr2bEH6lm13j7bfXrby3Nf92V/WYFd2ISyYrLeg2yPEtg41Jeed+/sd6rBHkw8A63j5gepp1dbyY7Nn9IDOadmVht+6EtDthqyvSxGRMBTPNSkH1wOJYNWAaqrZTLUElC6KPauTmzgumqak74YhhkFGqDqpu5c4buUqR+QgsCAsgRhsZA7md+P4w7unv075ktRUy1ZmdsvqGjDNxq5wS9nTnF1zweB5SlkXPetMS3IrumIVt3TVDvzB7OxWaKXiI1TvKYuOtHZtmHZCq7Ts7rDca7NtAbeUuf1qy+O7An9Xiz0eflfavlq7qyx3W6XmZOpLVgnQElZf3IYxIGdSFcfTiB8/Pf/x73//+x9//KPYEMYBrnlOKS9pksm+XjIizcm+fL28/HTCzBExuJxO+XQ6pXnOObvi+Xn8+z/8/vPnj58+fxiCDEMgdiIu17rM5HSaUkrTlM5TOp/nl9fz5TJndQohhnENOZMZUR2a05fL5ZTyxUxFMkHBRmCCqVGJULKk8HIAbi7c4/abemq7l9rpjlaGtwRWH5Ywgc2fjPXaUdVKdepLWT1QiG5NCLvdIKt6trzqJOquPCwPy0l4V8fMgjeXWCoz1GRN7aiWnurJXrP3WQ7l2gEzczFCkzIr1N3dTmNmhkh0sIEdJaNgIIIbMfka4QNEcHfV5CVmkUsgymaSc5ABYDgTMa0YUdUMcvfIxedH5mRwBrGTEMicoG6mMx3nZD99vfzpL+mnF7ycw+RDZs6ejJTFLKesyk7CQYSTibuLUsZVJzj2jFS6ZpmsE9maIK2uvZLgbUqQbjuZ1hW8ry43RcYXnM/znFKapklVy0IxhMAUVjezssixHs71h99uSxRoS1TSWlqBtez4uxNVgkMNXLwl0C0htuTUKdROUq9s4CW6WiVRWXH18vIyDMPz83EQGcdxnud5vhQjbNnUIQJo9QRfcrvjCjmV+KjFU6mdqaIKCqr91jphZgbXBCQl7UQ5ZwghkEOEQhBmritINs05k0ONHf7Xi/9//vx6fjn//acjfxg/H0eGqwR1UAhDHC+XizUIrPkn1YyYKcghDsRsBHGEIeZZpzSd5jwpzEnGp8g+ajz96SUlqAEEK0FMCcSrCgc5la0Wd0I2nXNidWZycMljaUbqmjTMSeeUA7E6zGCAwl21eicKiBlBWDee+1sa2KXDSgktFXU10YgjX3cKthV2lfcOt96SZX3Y6oatnuhkKH3Lvtl2fa9+S//bXrYfvvHE6XFf3/xqq/N2cdLNYBViZmZMBHzXIqObxG9ieLcUFdaqzwJVmudq7ZUfZXFyOp3RzO8VgGWXo85Ogce6E9QbzzTsCMAqSdrhEC03zruHbxzj22tuy73Z/AXt3GuiBe5qP6hVVDIIJamCu1AT3dS93IWl+2OkO1cf79Xfyord+p3Y6dQT0U6Qs119d/2Tbh6uYFyX36VmCWI0DAF5NoOtii+vobmXMZi5GgB2EBGDlImNFJZmvcxpPsQgTCIhBJi7a4mxnnO+pBlADMLMTMIGzmCi5yi/eX73P356/XqZNXvZoVe3SXPyUGhf0zwl1zlBL46c3KYpqaqhhMGu5KQdElrNjkYa74r37qtdUdw+vzVOeuu2VGtdkd9CKtWVqW2tA2a3bDVXW2p3W05vh7PVgPfg3FJdp0HuKa86RmouTBIc7AImWCAwWXSHqzjej/j9Dx/+8Q+/+7sfP3/88HT4+ldKph5KMK2cbJpytnROmYTP
8/RySklZQsw8ZqOvX386ny8h8OFwCEy//eHzjz/+5uk4BnEJANQVTlZckdyRjV/P6eX0Ok3zZUrTnDIgQyQWRFGCmTuJM6tjmtJLShezRKTkSnAiKzKJrDBIydzCRF5WIXt5kB4I0ro5WP3IquLoMN/ivCWhonyIvOqddiKqnVlPaOr5VndKURoqUfrrGUZtqgJWu+766obZMkXlFABXaNrjxZYh20ZbPiGisvgGzNgJWGIWlm4IxSXIiLILNAN+mZIIicgYcgiR3OHldqZi+dBYihclgUhczLHa3CHnbAqFqzrDRBigMEResrWRuyfTZM6eicg5AMIiBHHiQnFfFV+/pj/95fSXL3qaY+bBDNmhxCzOlIv0NDWJPHAQYivYL0Eli0fwJhZ2QRejp6yKyZpuofuwntBVbOP2zkYZfvXYNLMStLOuM4vIKzEPcs7CtHZrZiZyIyg7MXFz+HZLWC3d0O25cffJluC8iUHX0g9tfN/bIWMrgsmEbn3i13QL5bZPTtOsy226lKavX78SheIH5oA7+ZI3cyF9s3UihKsrTkvSBQMEzPNcEVLZT1jKFaJ1CFxUegle70v07ZKidxEil9PXw3ikYSSSc8r/8tPX0+n0rxEh/N3hcHh6OkqgTLCcTW0mK00BJiEafJ6TuYU4PodgBJHIIrPm8/kyzZO7v7z8fJmSEcXhicIY+DBdEs42J8sGdYaqASTMDGHTLE4OAkiEF3LW7KfzFAgDszABAW7qUOeUbUo+Jz8MxGEgh1suZykrkTjgzOAt9d8hj06NbaXNrjLbPrS9ABv3iPMeSLXHlgHv0XYHw1ZCtg+3NbfPd4FvAbtXpx1792PbxbapOl7GvmTY1n8ANprMHK2acV9Yz4C3LAvJb5cRv6K0wnYztKu+LNzauHyvosZ5kXiyhCC4weE6OrotAHJK3jizVMHiq1XxmMzeMq7d39+coK5sF0h/q7PBb/bbIodW5/xWX6AZTvf8F5e3oLrtomXYb3bd6bib7qifa3cnoMTcjjEurivMwzDEKFkVUFebcy5qd14XhOVGzHLB20FEBriMEHe3Oad5nlOKzoMwD8NADnf1rDnnOadRxcSUiNwGAsxhzqSj8Mfj8cPh8NPpomZCJCwGOMGESMWyXpJNydQSqVmeTppS4++rbkywuzkpH6FrF7d0G8+2/XDLd9tvdwXg9kdtp61Z+aI90d1qpXtgf1O831MElWzuDepBs10X2BMvnSCqUyCF/LxYSMoGIRuIfEqfDvybD+/+8Pn9H3748MP756fA4fIayJJq1lktJMU052lKU0pZZzVLKWVjiiFzfJ3z63mav56gPgzD89P7d8/j73784dPHZ5Adh2EYgwjlnOc5l3j0pjjP+fUyfznN8zxndXVAgscwDEcDzdnUDCQAzppfk89qJW6CEoHJyp6d2XomQIRlCwZOWwG3i+3H6L0nutcJlbLPvumCSnTASnj3GvfbvQNvjkPsNkT/diydtCx+tn5rz5dvy7JzdaNbPqESVKYOr4JCzSZNx71XJ0Yr6RbN3cmcY4DA3Ut60nJUQETqgQBygeolO50nuDG5MAeWWJa8nlnAAph7ic9JxAwPwk5UAoAIk5KzuJNmaEmXCTHVciQUQiASV1O1rM4cinVPELDAKLtp9r98TX/96fVff55ezzJjdIkOqGVmds4o0RSZ3VTcycHMEcHdTQFVww411LmxchZ5a4s0tlFLH2gR3s66r5dHK/eaWfGHLH8Wr8gyx+U0TETO53M5TSoMYGbA9bbVLt23xN1cZdQW4E6AbgmxtLrtqKOoWlon1fpkl75LyTmXez5YzqKXuz3Px/H19XWI8uPvfvfx48fiPTsMwyVZOa8zdzMELjlKOBSXXbqqmVLa3utDXXeAWiOpgKxmQNkiYAN7zT9W9CMT05I4orgQH0g9n7MZwjEOB2aePP98vvzzX77CNef823fHT++OLOOss81+FE05sVuxsFWdCCJCIeSczT1rPk+Xl5eXaZ6ZmQTDcQjjEcPzxcOXk359ufyvf/2SlcylnAm6mcCEiMhUASZnd4eRg6CG2TMuOgojhMACuDIcTE5Jfc6akrqTSCiR9tWQtd2OKuZdJhiw74vfGa8tC1QivEcDW0LqKm+/bUXZfdl9d+3kGwvv3uet7N7W6ST7bmtbSFqE7Iqae4hyv4Y73vDp/uKha6gVU/fa2YoyWndSOsiZeTdhD9BfICS/AeUBYt9YykVrvb1D6FdTcvmxCj2/cc3wklwNzJxuLz9foWrolpZ93OzuMUZrZqx1D/FGgW6nezv2x+W77Jg3NvWYB7+7zdsxthqh5crHHEr8iL8eEHlXtsvdLdq/OaJ2UK3KeNA7NWsbum480TAMwzCU1CbljLqkHPaUFpcr3GBs3bMoncLL/+5GURhuNCOfpymlwYcIoiFEV5tn5JzdXFNYlB2h5DIK7OTKoKMMn47h09PTn76+TvnCCO6aUjrn+ajxCEmKpMjmrkxOySylTBSo+F1hSVQAlDXhlSk6Ed0i+THS7s1Iq6k7gdYpkbYmbim8g61rqu5NVMnW9fhGJdVJ/k6YPya2t7BhJ3txQ11XeHCL4R3RbQ5zJ4OD1NhxGOU5xh9/+/n3n97//Q8fPh7j6DlaHlIWQhg4qSXNl5SmhMtk8yV5dk2ec1I3jqJDfNH859PXv768/JgwjsPx8PT8/Pz58/tPnz8cjoPrzGLCxsQAVFVnnZKa2b9+vZzO+ZJcjSmwDAOBjZjGUdVUU4I4OCc/TX6ekdxKLB1ncnfwcjZlRGRwWu7kk8OWfC43Fh32aLUtdV+AmhsH7jdBENuyzjIT9Scou4eT3Zxi1RGdvKqTWO86tlIF6/lePb2okJdo2Bvwdk7Cy/+hvdjdSa6qStuBmfkSoLWcGuF6Llkwu+SGQyYiJlbiQGIwgOacCQbPYsaO90/HEALc4STCQWCWAdiSS76AsSyHAJgqkQgXN2goQ2iJTuYBWO+KFW3NzA4BYCDPPuU0Tzml9NPP+vKql8RGB6ID2QB2RjIQHFknd4osCMwgT4kDE9HAZKlMlV572ZvO1vcXGw7cKr9av22nEys10Ovi9ZSSrffyy3wvsXauc3dtp7pMbLuuE9cewdUj8k5+dSeHLY36WuoF3G9K+U68tiTU4dTcKh+amTuVIHWn16+fPn36z//bf/rxhx/++f/+7//8z/9slg+Hw5TnsiDUxdGLSRhYDpCLN3kpZWeaKLbjKr9LBBper262qPPGpHN4jesTVpKo52blltG7gc+zTnohinx4jnH0NKWc/9fPL56m/4e7/1pyZEkSBUElZu4gEZHssCJdTWR2dvZp//8TVlZkP2BuyzS71VV1aGZEAHA3M1XdB3U3GNyByMhT1XfujMmRPAjA3aiacjI8P32833774d1+u5sGygMACKhJMXH3ADRCFctFh5xyzlmKeVQq4b7bZ7WCcRR9Phx/ehx/+vnxl18+iZERMDEQihQid9TXuXyoqZqXzTAwAUtFQRFMI/EUi01ISJb0NKbH52MgjAikRSXnnEXOOwloWA2
5r1YSX57zhUQB14hZ/X5B4VpAWhO/zzKC0AAhXN6F1zy/WMItunL1lbYtbEfrla7neX2GlyO0aGH9sLky89qsrn6J1ySZSizq1tXviUi0oS+vMBJeZbw+yx5dnWptdtkqLsRGpep00MxcGmx8cpbTq5PEZiCYGXRmBjjzDXXmVSCky4zTVwH+Netdn+wXtav2wF+xz7eaIxa4LTJVrA5NltH1M/5q7aR9pp3tZzdhQW5q/3gD/hcdngkoXumqfrO+ZTSbJRZz9v13yu5R6KpqJigzHwlTgjo9w5t6j76zXuPBY/4ErOYUyF2kwDFEIgpIhcxqKGFJoe8VdOaCAcy6wLs+vNnvt/HT4zCAlmJ6GofD8bgJFIzEi29ZMCgIiMDEkTkwAWIxc4HQiM4ZBW+xAV8Eq2tkvmCT4BKKcKVBewGe1+yQP18d9tbQtRhlvdKr6/0sdF2lI3gp2a5B69ZOLsBswbpAo5GZHlAxM1JAAEbcxfDh4eGr+7t/+u037/ebd5vI+SjHE6XEASLToZRjSsfRkoRUeDhJPgkWQNE0iliR3o5JfszHn4bDAfR33fb9+3cf3t2/edi/fbvfbHpEJUYwyVnNLKWcBhnGPI4p53xKoBgpGnfYbTf9ZkNEWeVwGoqZACvRmORwGJ6fh+NYMgUF85pshqg4UUBf4ow2FGbVEiLBlyO6qi5cbP6Lp8C1fvlVdx64VKEuCHpLthY8SUtZFtCiM+O6UGq0o9ulTNhKeYgYqk8qXILRYk6XOHQOhgaZ84mrZ/s8z9gT2xOZkddt8HImpWhSGRAIdBPDJkQfLsbYRcoZKAQAQwIiMDM1VZVSNOcMigiRIniMHAIj0abf+GQmv14jmHaKAZxTkZJ1SOl0HFMqxyEkReKesRfrVNkQuSMRKWUoWRA0MiMBipZSMHp1e+x02itAZKSspd3l2qpv54LY1A+t7GFmpMbMDqeIkwOxzek5zAwNVM3NPR0Hdxl17G9m7nOic2vPakH8FrC7UDDoWRG+BCNsmHW81HciopvHWrX3Ggm2f65dlcwMQNu6F+efALz2hpM0M+vi5v7+frfbffW//i/v37/fbfv//m//9q//8i+bPvb99i9/+X5798Czy6iq4uyCNgXRzgJh5VO5uZPMTExm5m7orSjou2rTPkz5XXUuCeAOP2aGpjQpcU1AUVFl7Lnn0I9Az8/PRQ6BcUeQlY6pQB6Oh+fT6fTu4c3d3d3d3YOHgwJASklVY4yxYzMbx7GIpJRO48DMd3d33EUR0ePjKY/Pw/jxOf/4nD+dZDiJAakZIZEHYYMRA6KhKTMbgoGpioLS7OipqgqYS1FSRKPAhJ7mHErOT09PKKVniAiBSaSoXhX9bga2Xb0LcHl3rgLPLbitgL5Wp62HhktyuPjQ/uuRpYsh1hNb/LRYVHsRWnr8Ah2aMOoNNdO6VYy9mD/Rdc3l1S/Xi7o1sTqTVphpd3IudXNRRffW5JdD/M3EkHNrNaAwU1wz0zkotyWo7TMI5xTkZsZTyl9tHX4qnNiZu5r0euM4ahMhVgtalNkD8OqeXJLXlzZtcXfgxql9trVg2X545ZG9fojFKXiSV2hYed+3Rbr/2iodrB0uVl137OXdWFyTq7io/eYW9K6RxvrdyxeuzgSdjjuSn1WTSDQHtePEVKmqmAFAiFSRKyISAwMiYjEiBFRUUZctSynBVIkRMYQQJJiImaSUBoR+G4KqGKigaAYzJGGOd7t9HwMjAqIKeLrdIaetsXFEiEDqNwA5BLTAgYIiipMB55HQrgPwLeR2q7UP4zUBrN1MB6RzZsEGsF8YtFpasBG9cJV2pZ38C/j5lbdmvTmLb9b9tJNZTGzxbsU5i+mtr2E7EBF5PGoADERv7vbffvXhd19/iGRsajlhlmjGhFSyJPlx/DRmzRYUN0lCzpBS4Wwgmg7DqHkc8RHLT3l8ZrANb2H3/v1XX314s9uG+4dNx6qSADUEdqvGOI7jKMOQjsdxGIanHFyhhgwRqNtuQhej2C+Ph1RMkQ0wFX0+jk+nsRSVXj37uto5QZq2Ovy5EDdcKp0/S/i8VWe9dj8XYHb1RUSEOSX+1fNtL8X6HCcOc/bHXDgwe8N5Re0Ry1zZ9YVraGZd19W0XtZoVUKZEAdFYjXP/VUYyIuJl1IIESm4JxgRhTBdoQI6x0yZGQGAlOlGcQjOi5MR4ifIKAgGVEogCh2FJJQsQEE1u+tDgCI6lGSMFpRcQEolq6qhFpXjqCGE3W43iB0PQ6SoYkXymze7HSAgEVJRHUUNCGOP/fY52Vi4lFAspEzPh3I4jimVZ/hNItWoBmSGTNLngio8lDBKMIbYY7DMOZsUM+Z3OWe1UgyLjAAU2EwTWak15RGRKEwuy0wioqLM3HFEREfTYRZ4uAbtAACisM3ur4CAOivvHNGqKSEhEzApwljykEtKoqp9v/N4M0Q0IzMCxUDR3GAVgkhhpgLgVY7MTFWqPGaTTxc6PMzgcsGdIZyZzplyT3F3TtUA0DtvlQ21fGV7i2onMfaXeM05JDIDMyBCZnRxFwBCCIgFEbu+e/PNh+++++79+/ellKenJwD4zz/9xy+//PL09JRLHkF6sPuvv85mwihaRExNEU2LM4EKMGV/AQQx9v6LipscKz41My3FpbJ6V6ebJhrCZt6Kybm33kmveOG+QACQSxmyDHhHRr3FwLC1VCRTMYpx1PhTCU9hFwT+4yeJj0/v39Lbt93/Y8dcIDLHsN11wQieh+fD4fk0HAAgdN3DV19x3xexT8fD6TT+rL9/Hg6fnp6fDuMwluc0PqXTUURADTAnURUEk+xbTR5TiUiBCN0SKGagSP1oOhQDkEjMipgR0ZAUiZ8NH08lcuhDjDEy7bJmJBK2ZHksuUgxECIImoozx8A2VelxH2yzSU0zxV3iJGBkaJiwdsOvIlxHjACGCHTOGFaz6d5kGur5rgnkBX7HJdqdEbQiTqqwtl+EszatxeAtFW8xcv2pHb1daaU3/mfRs4XfZl2bmak224Lg1M5DumnFpiCiqhIgrPazpRztzl9o95qFrC2o87mccUW7hC6EWVVnAOcszVWXCQDigoF7eMCSNMKKQWx/arb6fCvrrz60I6hSSoyROapmc78isFQcys2Rt9J5m903AUdwWyGgEzUABCSMIU62nSSqeVbLRjGJYcoFPdl/VFVgHBNWvxtF8thEQ6TrbOICNuqXNSakbjU1BYquQtTioBcbeAYif0uvODYhoNfLWtwdRCRcllPCRsM4TQ8ADIhm4odmYIBes9dUxdQ8jMma2k5Tn+pU5yylq0xlrurcJtWUP3+DM7+Yc2vK1hVD5t3a1JPbBGeAWOqe6lm0iiRoUYoeGIGQwIoZEpgxInK1SKsqqjCiMwgSu5JVBYgCAqEZqSAoq5okROm6Lkxepmpmb+/EpCCUd+/3v/3wgbUc0gHDpuuiQhIWRStgKgFzp9TfnUoMZJqTGQZQMqEiWDYdvdvdffx0PA4ZicZCH0fqYdNtKIBGPQR5NhvMgDf3tv3u+4R0OuTxxxCIAVMeIIAQUj
m7Xrd4tYXJCofYlCle7tuS8cDKtLQ/tciqVeA6Hm4FxVvTgAZbVgiBS5xT4a1VYdSB4MaFvfpN62N19bFW4b52A6nf1JLLLbFoKVe7TFwJHvWB3N+B5nJ4etPhP/z2u9999ebdht/snllSMJMCIGZF4JQtFRMIEkRElAYrB7PHbEfCtAkfnw8pWM7CWUBLR/BPm+3bh/u/f7i7v7N9n3dBccwQqSNFAxIbT+Onp9PT82koWASOAw4n7k6YrIQthRjHMj49f9xv+2AMT596iYbd0cLxVH7M8Im3tgmaT4SEiBHRSgWtcAYhJABPpw4AoFIh8+wqBZfJh6aTnQgWKEyOgTAhSK1GSCIiPmdQMzNCnoZFqjDjrnxE5MURAMDhCIDMzlnxsUn+74alBUXDxh7TSomVzLXw4E96IUecayxV8PBQLK994GP5EFNFO4bzRrT74nUqW5heF3Jdk+HFNRCYqCsiAqCqicg46inAhi0oYxBiIzRDU8sRwQxVtaggWgghBMhjUSlkFAL3MZqAiEoehQMoZANRNGCFABpAmDmSYBE7jnI8leOpjElViSIF0gKkYCpFJOcygCixxm0gMSEREBBUI7XQoRZJAMJkTJBzRiJEJ8xn9+LF3SailrNZuB1Ci+z4wk1OZw9SbeJHsWl+kH3fu0lKL5utWgXKOpP2gBYIAvGmsn6e5JKs1um1kLNA3As4uYSQ6RVP+05EXReIaLvddl233W6//fD+3bt3Dw8Pqvr4+Pjf/+OPP/7446dPn7xwp6tDQgh9t9n0m67rcBYwqqTqY8QYzaaUrUSkPKEAV+EvjsbVtz7hWjV4AnLI0CBrm7Oo+wNzEWE+b7V4fqP2OgCJpEwiIrkQgc1pgR6fD7gZHu729w/7DYfnwwksg6kZht3DbrcLsT+Nw88/Pz8Pg4ddf//8/ZjTcSjDmIexjEPOOU+RUOh82GQzv8orWePYPh9WdaZFM+NgiMhIZX6mivpEJGAyaxOYOZhhFgQ0ndLyVzBYj96ii1vfvL69QIkXBBJW6v92XFtZIGtvtUF76w1a8/gteenqDNdtgUzMDBvIeWUP1mx7s//X26LPF55ccCSv6ae971fnb5dSyuIZvOTJFo+tuZ/60wLPtMeNt1vtzRqJa9Nv60DtA+M4TiJgjC16d0WSy4rVasrMm83G5gQ2rWr2lgTzAjz/T9KmJV+LdG3PdLHzX1puckGhFl/+l7YFyMEKSl0Z4SL6wvJwfstVIEAA5NoqD2rPSZBsciMGcwsbIRpZiKAKUNTMALGLzEyMKiSmKmVUxD5u3jzsd7v9dmPH4zGAffvVu2/evMnH5+FxEloCd9ijmcGQEMgQDEEAAhAwmQjMPr2AGgOplZxHRCamIuU4DKch3YXIBES03e12b+5DiAW7g3WchkAQGBUMTAISgarcPN1WcLImpg4vhZmrHxaPLZ6vD1feqb3Ci67a59fIFi49UNpzb5mrdla31nu1raexxnvYMP3tiwtMXhUQtsoVf3XcuufLCZQUQN7c7X/79v53b99/s91syfpiZEwiIkWK2ZA1iWTTYgPzqDAWGwAORR5P6eOQhiynXLTkQLDddPv97r7v3j/s3z483LPtutAzBUae1ami9nx4fj4Nnx6Px1MaC6ViY9KciwgJWKDQxQhsIHZ8OuUhJVFCSyIfh/Q8FACIHIoCXnqW3VrmYsPXB7fwsTx/v8pEsziINfW5NejVVutergnrAsDWf9YJaBOvAc0tAwCzsyLvKsRio3dAxBDwjMiswdaO42hWwtdlX9Vwt9d7MWQIoSiICqILtVN2kCyWEgwDdMZxg0pB0cBjUhkBoIgz5RbMQgiBENRrpUIMBExaiqqcsgEyIiJHZAYMBlw0jomOgx1OdhxhGGwYSYUNY0lZRARMQbOUrKckJy3SBeLAFIKJFVWzyBBANcB4yicpxVAZDAgB0MAIWRzzgFN1dFUl0lzyDs4KpHZbFNtAtikge31jF6izdiIiLnUgoptwbY5Lma2FC4vKss5k7Xxxi5aAAtw+2aLLNUZe3ElrkrW0YyGiGVx+M93D/X7vYN33ses6T8O93W7jpvv49PjHP//p06dPwzBMi4oBiELgWAGdWREEzMy8pmcXN25BJaIYvT4huoUwhKAyxW+ELuIlT1PVLe20/S7YbDdwRtCPw8M/0Gt+zAJnfauMOjkFoaJOZawEzFIiwBJKmHz8SinldDplPD7c3T88DZtIZMqkXQwh0N3d3XjCcjg9Ph0Ow5BUhiE9Hw6PQypFU5YhpSFpypKKGoCZq7bZDYN+Dossju0J6pwZFYAUjSpP7HGGSGhmqAIlIZJNEYxezEXAjBCBmJkVRMzOigyaYfsC0mwlCcAlkv1SWrt+fu7N7PJGvEAt4Bqiv05HL6/q+n4trtit5dyiTy8v7bPt5TUuOm+ROTQiU3v3P9vhgkzWadjqrCujZg1f2HZydRVXKdkL37Q2tAWlXCyzXSzMjH69wjUEYNE8HUjNxWUXcc5nW7EjnBaZWBO24RN5YT/XrS6qPvb6s170c33cG8+voWL68/LoF9f5V4BuO6Jd02j86j6/aK9uOfe2kNyeQovWLu7LlLePQc88GXvmLUUDBTUzKJqJyACP+RhCiNwZgimQIQCpaS5DiNZ3YbPZbLfbN3f3+/39ZrP78ac/SS7b3eb+/n673WJJA/Hx8Lzr+kCMzEzBIOWSbSQzG7c9s0VgIFDxLHrAzF1HBAZohICBJeUhjY+nwzdvv2bGyNJz2PfdZrM5JD0cJTBtYuiZcsmkbGiEqHoTgFqeoZ6srUSyBdatYNaey9X+28LiON/iq9O4BQNXuZr2rZafWT+zaAu4vTro4t2rKPQWSqxp9trHcMVAXv2m7XZrhc2+e7j/p6++/m5/dwcWc8JTYrNSxP/TbCJYClqxx21/SMdPh/E4lpPYKZckVlSZuY9hv4nv9ts3++3Dtnuz397tt+8odx330WLAyIigWU1ED2N5fB4eD8OYIBUZk+YiihQ7QrI+hg1HA5Ocj+P4eHgeFQz1KQ0/H8ZDUuO+I8SiJdB6qxc3vQWh1xDi9hm6VBQunlmTzvVJwSUg3TrTlnwsIH9NyNoJOK1pFQSLdbWguzAI1fnQnC9j6mVCf2jg+TfOLoXk09FV7pN2+PYM2s+qisQAnoiF3fVG1NPCUsk6pjyQ9bETg4AkasgkCKZaTA3AVCUXUNvEjn2CqqWkEDqKBGCPA4cudl3P3ClSEUzFSsFTwmHE4wlTIhFSg2JoQmK5mBiqsQFm4wKdEotnFAUAhhCB2bAgClmQP20wZypZhEw4sjKOSW2WDCfH5XOhqim9ZAtGdMld1U1cH/B1NqXZahdLPPzAS014jIpro6/T7AaU1zhoiQfR/1nmikRET3xdcVD1joNLfGQzwKzXWK9Ps64LRYPPUFXHcUwpnU6np8eP/r37DJvZ09PTp0+Ht2/vKivmPbiE5rKxmfUd1hovRMHTtDb20snoVzU059NBxClpLWJjIZz2H6kuvNoDq3m27cQ8b42hGqoqoRIRc/TQezE1Uy0IT
BwQkIvomJOgPeYDPx67yLsu7PoudiUQ8mMS05TKWLIgAfIw5MOxMGlK6TiM45hTNjEwJCLKUqaqmJMQqDiJ4ktKbAjTTwgzkPg6zczEJUsAEFNQJgIwAjRRBDIAzypgDh4EzIRiNZy6gkQLd2aGqG5gX1yBNcX9FW19a9YPLICnfm6fbqnI8pqs5lyvW6uZfnlu61GguaQTDllxFV+0Py1sv+bJlg7hlwsb7W7U/bTbgoFdyoqIaHoFAK6eF66o73ncGQtdXeOtmfv/4BIJpzRWB4H5J8LJ48BUtUhpT3zSABJ2cVLb+bjjOOLsyaMmBjoTjpfCX9frvXqgf/2V+WxbgmWFltkPeQHGrdjfEiC+kvv9/Na6q8XV+yshs/1mcfcX2wstOK2W366rpcuwOgvDqAYIyDapxFy7GKbceEIIasUEUYEI9xsMgQmhFNW5ZyZ9eLvfbcL+bnN3t3v39u3d/gEAUir/9u8H1bLd9vvNNnaB93vL6WMaT+O4jR0jGU3xiiKWRY47JvSiuUhICF5OCVFsu4m7bRwEBBQIC+BhTCnLttvELqoOqWRIWIqplE3XbSP2jMckKkpkDFQMcFX4e33KV28urHBUi4ug0bksIO3W/rfAc+VcrpmSFjB2a/IvILTXt8Uq1jNc/AqXUFotDdC4peCswGpXfXUhdWM3mCznDd7fEYbkVQWznk4xxrHkUy7FUJCKci6QRf9kcjzo0yEdhySmiBwjb/t+v4m7QG92m3e7zV0fdoH3PW8ZH3oiUoSCogKkqqloKnLKckx6TJaLpWynlEQpRurvOkbadn0EymMWMUmWEgxIR9NPeXyUlIxYhYGCQquuq6QHLo/ygtu5xhtYI24t8EBrUIEVbKzxM8J1fNXu/2IO7TRaVLkA4DqKP7PQjKyB3/9tudOWTi3KUdRXwtzLtCOM5DauqVwbAEwszsXAeBl9u15S9YUds/vKT1uiCm57jqEvAmnUAaWjHAnJAiGLFhMFFQJ3WGU0RTUCJmAOwczGVBwTmjYAAQAASURBVNQQQzSzE2w62Bh0IJyLjVmkBIMoEIdShlFSYbNOjHJWEaVOApKyIYEGDIQYGJEDoIlZIQAAgyIYDIIRy9PDw67bPHw8Dj9/eswGBWn0O2kI4P+pAU3h8qpA2O5GC3PTZ0IzIC+p15xH25avXAotHq7m5kGbE65AgwQXyHTdKkJc4kS9DLE4D3rB+MLq5rTTbp9sUZiZQROXCACtj+ps4RQX6lzuur+/r97VRVRVu3771debcRwNgAjNJutWESOxzWZT86/iLJW5bZA5VnF6znl01u7XVVMtNN/8WffKYMq2ugiS9ItXa2pNE/bsuOYSIBgAMpoRqAJ6BU9VTcECImqRnLNtNyrFksYEx0J9ER40EIhIFlNVQE6ShjEXFea4jzZkGZOOAooISGZo0OARs7OYc80j2Mw8taJZfYAAUD3hHRIYFAM1VSKbEoeQAJKhIqihmAGBAtQI2HlbaKo/A9WfYaWB+xvVGYMVnp0/fhnlXlyZ+uctzTQiVs9kmAFbP5fn5mo/dikdTePahQvTAuO/3Nbk6uVnrn5fEcviw+Kxl/u3SzZugdmu4pOXp7deV5VMzKaQ7FszWZOt2o+u8KEHGDuxczSLiCGEYRgqlcXZg8CVRBcdzjnAXr9ji9muv19vl305h3prY/F1FssFcWnBYzH/X3GvF8vBy6p0X3qzFtNeDAQ3RMHFTOzyscXtgBV+qGeE1iEogCkiGXiYKgFGYpGMSEhmwkIOVzxky2UkAgTuOXRdt9/vtpvw8Ga/23ddAFXVMpyOUpIcDoc8Dnd3d28f3njiohDCdrt99sBdMCZEJooBiogaiAxJAmsItiGvbq8IYpDTMO778P7h7sen4zFlRDCEYcw/fvzUd+HNboemYzpqyoDc931XqAsUQNhkihkHILydVex2W1zDdp9bcFozFfUVa0wr7bvWKJLWPNUCHhY46oUL9cq79gLkr3vQVSLKRfTBYgnaZLNfsVhXRO4FZTm/KCMaoCZT0TGXU9ac8yBS6FD0mGEASQCj6SmXlOVfSlJV0ACRQQqhxhj3fXy72zxs44f99t222zD0CNvIfQDGoqIqJEQFMBUdcknFnof86Xk8nFJWLKJZFIiAQ+hjx2EbexMdxzLmMQskCEe1n9PwKeXEiEiWBcQiRW1Cq9ao9SpiXF/e9uElnX3F2b2mXaXFiDhp1S59Vl3+Wsyzwv/M0056EJzrYC/6b7el3Zn1drXLD+QijWcLITS0Oc/HlZW0Q+INUbV9UVUdeRMFRILJRkFghMhmlkWHBD3TGCEECoRkSUxMtXOLooCJZ7citRC5z2JDGZMBmWUpT+UhQgwawLAIAnQcegrb46fhOOjTWFQACU2ZuDNSguTxor69CoCBiajjmIZBkpKRaJFREDnG7v2evv727ZsPX/38dPznf9MfH59ECqHNNbkJJqBBg2U4fsOsXJHdrSnUtL6oVTNxJi3NnkMjk0OT+m9hEUZEfZFO3wJuvGT7EFn1XLu5lRgraLbzvAondo1Rrgus9VJqqKv/NI6567oY3RDnubkRkbpuM2UTNfMCjKWoWd5sO5fffJ7e7aSUDRERc86lFLA5xFYuaHll+BbbXk8EaWJQ7FIOh9nXtC7c/yxZAcBA1K3iRACqLq+qqZZsk1O0q2qPisybEFEInqQ8Po+RvWjV5nl4HrPEjo26QWDMRoSSTyWrmDuRsBlKlibVrdRQ6PlYz4gPsbndhKhg5ne0AgkAopqhZz8XMzQGUAISIzMglCn/AoKZzKqlNtcloueuOwuEc7soQ4CNnPDrmMirn9v+4TYxOD+w+ublbqtZFVZ3/Oq7uNL5XR1oTZnW/VyZzJe3q9zYrV/rh1sHtGY+8MZWV7TQ8jTwinW9jLjWa2kp4poK1jYl+TgXSa10ejL0ua7H9aRVXeXuA+3zNmuL6kD+p/svwAoCPwvoi/UuMPniy/8B7SrtaBfVnv7iw+vnabZUGdRB7RqT9ytWcXUs+ByALTxf6ulfZcgQ0dRjBhU9pBuACAICWAbNasZgYIYiRMQGdxsIAbeb+81ma4qSJAToI20Cvr3b9ptweHo+HA7Hp3w8Dp8+PhJv3r99eHh4MJBxzIqYcnb+JhUBQzVDCsTF471PSQNLZEWGAEZogFmKyZj2m/jVw13KJU+1lCCl/PH5eH9///b+LsSNqYkn7iCCoqrFtDhLq2IWEDBeRQ4LTqbFbAtuof1Q32o7rHzz4pkXjqxtC6Ba/LRgt9rJ36IvtwZdbMILc7u6G3Uaa+56DauVuVLVRUIEuHFl6jcZIAYQxtGkxIh9l9RKxGeRY4GnXI6iJ5BTkWNKYy7PHNCd4LQwaMd4tw3v9/23b+4e+vBu393F0IGyaUQLUMbxVEoRAwHORkOWY5Ki9Hg4HIcyjMXlGmTuun6z6ThYF7jrunJKWS0lPJkeCzyrPWU9KQKFQCRF0EoAgssNhBt45pVkdH3K7dEvYPU6COmVabzAdbS0qb5YXdIqVODMYbahWC3YtxbjtvOadBQua4Av
kjnVCZwthP6WgxbOZcXOE5rv4VU25QVw93SCiJPVwswQmBhFLbt+zvRIpe+tV0ZmtBHUGAgAQUlEUBmRmfcKlKwbRZ5SxkABuzHD08icMAS2SUojzoSkPz8OKWtWNkMUExMAREIoKmBFNaMlUEFmjkzBADQjmjEBoAmMXezevNn/v377u2673dz1fcTT8U2RpIexCAlQQdSJwWXnbk2vXGkzw2uIrAWsxaksNraeIjU1rBZvLTps2ZEF9lwd0AXBngvgnp9vVRfNEmg2RF94h7Yw2nbSAvca7SKiC3Uu47lA6DOJfZdzHo8HcIAm9Gxvh+MhxhhjZGYgDCHW9O7tbF1iRORxHBFlsUBVRTwr5+okWz9Sm+XY6VI16WSqBIhzvUdrcswAQCllLNNnVBRSAERDNUTvExhEixiSAjNiyJ73DokMctaUUgyhB90FOgkehhIKdX2wsDHBsSiTGhhQAABRELViKmBA6MU8/Xa7KzxUDW51E51ak1JSrXp1IaJMzwLapCgyVTTDyW2dDMGjxRWNAKQUmJVe03U/KzvXqv1pCq+h5a9vlyRz+maNnT/bXib8DQjh4tK1Wo+rxOAFwrB4BpeqmVcIQjfevdrWl/HqZK4yGevZricDN6h1a1a9mMztPls0Untr4vEunBdapdUC/7T/tqiViIyWdUenikdNgkEAKKVsNps6infupLcWJq3905zere7ba6AdG+L7wrb/6nZzDn+b7gFuqOdfmE9lLVpa00JO2/OXTmZBdj/bKpK/9YpdMmSLu3OeOSKgp09VAEMoBEQYEIxRmKDrui4wEfV9v9tsf//390Rh0++Iuu//8sO//9u/PX86Seoiy5u7Pmx4t+m6QCrwC/xyeHziPt7v73bb3hNJu2B59/Dm8ZefoeTpihFyDGJFVU+nRF6EqAtGEKfasbIhRg7wJhgShqdfng9DUQU4Zf358bmL/GHfbQMECCaWwY4pH8chlUzEQFGlFEWFKQfhLVz3MjAvAGBx9O3FrCi3vrgI063/Xg1dWZzgYp5rTLhGhq/Bq+24V0G6nWc7aJ3bC+C3IGfWcPywuiyL/fTtcq5mROAuPJb85+cn3VNQKCWL2nE8HbM8pXQSGc2KwChF1PrYgwmbhUBbxre77tt3918/7D7sN9sAW8aOpQNAVZU8DDLkQxHLagUoFTwmGbKp0fMgIiYAqhAINl3YbmMfmRVQJaV0SsPjKT2n/KT2aUyfzEYjMQBFBGAmAMxSakJRWKGO9VlcPQ5vCwGp7uoL8PMycqsH2iKEBfzDlG50KVLVA6ruJ9iYweFSmljAbTsuANQBKzit70ULe2E2tXtubjOvve5lry/u58XyWvq66PHqnVeDWvPAlao5i5IpiKIRxE2nfQdADMNoCkAUgM0YgUPchLjl/r4UGYWexvQpxUDbDe5OEI6yISPSqe5FzimlYy4aQkSIGBmNc5YxDVIMke6DGZgqKoJhQI6Buy5ETSPoiZH2HSuloHJ3D9991/9v//ibnz9+PA4fe4xfPWyfnrdDKjmngoSAxQvfrwEOAFdEpcJi3Vi7jPprb2zdbZrDPb25sGFm7hiJs5Z6jTi8f5eyql6hCjAVvS7O0RCA0NBT+/t/ntd3XotHgdqURi+E4F6UXjwSPEl39VGeIcUMbcqzs+DGph3wGL8Y42az6fs+zw2S+l0NITBFRE9vYrstVmdOvyGBQwiBSKozs8zNTMdxFJmuGc55zFXV1N3AyO3G1RBaz80DDp1TNbNajx1m3T8AVHcd/3Jyt5729lxGBhG7MHmTTpG6WlJKRUQLoCgZUB9FcimJvBaNi69E8nw0oL7fjqkMj0+G6B6wSlwLPEiR6tXmEIB4NkMjIiLVPHCGesl8o5r4wx5YQlPp5GwI5MoUALUpD6efHHnVBcRaz7EFaTN0xHIVPiuUvvDnr2gL/FPTLre/LgjtBfFogoHX3a7JvM4JZtur1Aa0YEPX7TaLXJFqfWB6a87M9kU7007msy/WI2u3YkFm2lVf7WHxQNthi9wu7/6ScTSbtn8955f3DSbGu5Ki84quEqx2kt72+z3AlJVK51ygZuaaL5g9F4jOHuNt51U3VNH1Yj9bnHDLp+bqehf4uX5zlab8V7cWSm3FaNZnFjAPM2uFs+/AF4z4t9QUXfbsC7nx660b1GruF3AFDTTWV5gB3PUCFFDIhFCJLJABym6zeftw9/DwsN/tHh4eHh4eCE9ecyIngyJlTAzwcHd3+PT4I9rx0DHz/f39/d1dGcafkIbxlPIwDAOjRUQxVcBuswFkAUVTwoABCYxMJdnpODJgzyEgxS4AMqKR6r7vIlCIDESikHMWGQ3oNJbvf/5UxpTfP3z1br/fbNRsFDlmPSbJYsZEIaoAGJuFW06jC4U4rASVBcLEa5zuAkO257gOqbp1Iy6w/VpYch7mXBvM08gtBTaYE7TeSpvbQsVi6Ku3Zn2R7YZAu+4Em1J1VbBZruuSBulcjCchsOKPx9NplKcsnXE6DYow5pJyPqWcS1F07yUKBBsdAsGu57vt9u2ue7OL7+4277ZdoBJVzExFhQOglZJSSoeiIlbECsCQ8TSUUcAAUjEARGCCDGBMXUfAWKhQKvlk+efn4cfD85PYSe3TWI6gFiMDp1yQiAMDw5gGxNhu13rPF23xa6XdazexFgjXILdA7wBX0PL6+fWflYOtEI6XZavq+a5FuHojapjSmkaEcFGmyImaXtbtbN8KXYgAYOCGRUVAaiq3tJOo86M5M4dbY2qmtXbNZ5ZIgQN7db7qaRM4BAqmRRGR0SicsvIpZYO3G2TEiD1hB8pdt4vdNgv9cpCnofzydBwVdm/f9ncfnnL+5ZCOj2kqJKCaczYzZo7d5jgMPJUIRylqaNwTM6WhFFVBAu6IWIulAoIZS77fPJAcDo8/kB1/+93bv/+7b9++u7P0pw2Lqg6q+2Af7vrD4TSO4yGNfb8LyEPKqZSppjlYyZm8mt4lO9LC2bRXiLACLGtktjbBnTMcp9MJALquq4xFTTaVUvLyd9AgX+dC6tAOfNW1YE3M/HudA+Rq1UGXdswMpkTZZw/mmqwFEV2VDrP6v4IsNDaTCiIzBE8703VdjFFExnEMIehc+KGI1DR9vnZny37++ed6Eyr75fuAs6THzO4gGmOPs5/nlCQGp8pFCgIA1aznZkZszIA2c3vTdjV1t2gOGVLVrus8WMgZx5rjh+fih/OtKeo/BUYDoNBvAoCaaikl5yynJ+88cmCGEBkApBQE8HhaJqAQKDAAighSyDk7ui40eeT6EISIhsAGMFnPbS5c3h6FGw9FhCggG8DkX+oLZIv16YmBQnC4KWpYlA3FFGXirZijqpiJyFk0VVUwMryCK69qqtrNXyD6loy2iLW9Qe0Hd01vNW0trm+p8vQnLTtcdNv+ueAn6itrf4x6zWuQN15aseo+LGaFcKYWa3ZhTZC0iUWpz6wnWaHaAXjxil+T9gjqBNZLa1e3IFQwx0DWB+qOVQGpHZeIcBYIFxu+YIwWtAYRacYkZoZz1cFKhuqNrsNVx06/sF7
XO0mRc5Vwb1NyqZbquTtDu+Tqvn6rsDXNxVoX+7NYZn1lzQguAeOGCvxWW1+Qq+2zXbWnpqqeg249scX1gTl3KzWhEAvN960Xb82zfX5xH29Ne7HteO3p2uEC86zntsA/C6XARDusMBMFLDnlNFKEvuvv9/Hrd++++/bru/2WiO53+xhjztlMTJSQ0zCOozx9eiTA3/3u7/Z3m3//11/G00lL6rqAio+/PP7yy6ecijA9fvx0t+nv93c5p9PhYKI90/Px0IUIsQMoOec8jKUUVeiwU4XjMIIaqhWWgNhvIhpYKaZpR+G7d29CiP/54y+/fHrcbPcI+ngcc/7hMJweHu4Q8ZTkn//9v58KZOBc1KxQ7IEImUBPcC23ecVO7XFAg68WEmOLx9bM+pmdOCsuz/Va4RLLtd+3x1p5G5iZWJ1DWlp4XnxT4afitwXSq4Dd2nNw9h66BZ9r+K+9wbXL3jox+ZeuC64VlWF15fGSfEw7Q/2YDA1FYfz4TICllCwF0XLOaLCJ3AGw6L4L93d3b0H7GPuONwF3PT1sursN9VRM86ZnAEhJRLTruhzw6TgOJQhgEhmTFsViYSxyGgcnPYGQQ2AyRFFLoqSJTrk8F3kSfSL4JZVDltEIQ09K5LFFIEWlAGjHQS4wTz2FWxb+lgjWDbHZH63d7amHJqymRTXrw8KGJ3fq2UYWVDhckKG2LESdUuX8a0R65SetVn9YCfxwiZH8G+eHF1DX0pdaMs3/DS2UTK+tNgVxKtwLACYXdTPtBkt0/kzg/KI2eTuKCCKBqZmJcirGrKEYCkXamBlCh7SlEBU3jwN8ejoOyqdUDhkLhedH1ePPqeiQig0pBGVm8PxtJqyWpYzjyMyey0tNVRUUSGkDHSATB6CgSqiGBgFxE7Y2Ppf0fBfl6w9vfv/7N+/fYwiHmAtERKSgBADDXff8HMaRyGAop6zIQJvAhqQzLqhHXt2ZptOCKyGqtwhwi9oqGwEN7+hMCTV+FFcJWCveTyTqdhZEs6kMBvjJLaYEbHATxcNlptAWIdYbYmYqgmdmi6pAWG+CGTpe2+12iJjy+W7XD3VQnUvA1a0mmjO+NERaRPq+r6lNEBHmTITE1L7eWlB1ThJTv2FmuLy99XK2qJ9mJzFEVMI56+wMBog4M8QA4DZHVfW6ZWDJMQiTMSCgZ5lzrh0MMXIEIs+bOkouArkoEyiiiNjsrcqAcKGsJf/TbBnr4iGCM/khRDQyUDObzGtn2GjR1qwjzSpoSkTuONp23nBdbDc0qQtU1eK1FtnhJdPZPn+LxK6fXHS4nsmte/FCu0qwX5hSS+ZbhqmlE23Tayrz2sma2i1I3Xr0duZrUtQed21XsPp6njc09L9iV68+f7WfCi1wmX0HL8WPSsuIyGy6s44ZalK+03FARHcZrZGBAEDE/tgZjZuZmWvlvP9WhnxhLWssjYhXxOv/S7UFR7I4pgUv8sqr+l/U2hP8azqB1WWprcUG869mJgaCVroef/vth3/8w2/e3e92274L3AVmZtByOp4QcbfbgfTM8efv//jP//yvj5+efvvb33/71dff//AXMPrqw9dfff2hlPT8+PT4+HR4PJSUE9pwfP7+e/hz+U9Q7QKb6Hg6gWoAK6CglqUIGHexY9ZH9VLGajAUKKX0TAoAkACgi7HvYkAsRQ99zF34cTyBGkg5gn46HuOPv2SRISnEMCoUBMUIxIBsRmbGNzx+22/aBxb0pT7fimHt95VSrw/lart2InAVDNrbvT70ds5Xx11/roNeffdv22rPehkitPh1MSVERJ0kqCJoAMgoRAoEYMDUEd513T2HHeBdiPu++24fYqBIiCCBtMcUs5hI30dGyKJFJZuNaFksEQ+4yTmnDMdRxpRKAVM0JEJi5hiYA0RGJk2SLWWW/TGXp5w+qj0KPIMOiMbMQKxkZgQgRIUhoRgi36CA8KIGbf3KememX6sp+EUFcfv9As8splHhfA2EV+/FetAFJwMrGGvBzw0Zt7iCNTAHnNhBAECB69fMWoBrtq8VACprvh67chWt1lnEyBAIsypkQlTkQixjCWAI1It0KVku+TDKx8dTQh4Es1JGGZ6fTimJIcfQqzGXEIiZGQEATUXNFIFBgREBSMygmJkaiHaKpurzEBRggogKeaByipi/fr/7w+/efPfNPnQ5lyEGZA5EilmhI7nr87s7AIuPw89Pp5QUmDFSUg+2wioJXIGJa3hhcXWhcQRv1WD11OdjnmxWle9ZHNmC22snQ405d3FGflAODIu3VLWFjsr9tPa0dtzaMzN7XcFp2iKNNIhzYr9z7E3N6u5Do1dsUjUDIuSASAZgsZsqbRB7uRAD1Ml45dovOEOjJ3WoizI7O7PG2ULSTgNnpU69Wi7TEpEXkYRGVVlL27tBcnFepalLVm8ytrFnCK6x8B5QgZlCYGYycVMSMWEpRbSoKgJzDKpFcpachVGtgBEZqxaYyl0ZAqDh2QESYZLwV4qbFgDOf+LEYdfvEC9SlPqOFVPPMuASPxKpAhiBGdhcr8LQzwVWoL4A0TWSbfHmGiNfXUXb+a1fG2i/ULUs1ti2m7Tclj+t0W7bqn6hTqDV6Vzrf5rk1c2BawikJWl1gS0iqpd0jWHO+7BiJhaj3GoLvAeXo99c5ufaAqssmqqew2OvselnfGhnPU77wHa7henyXCwhpaFafuYXXVtU5shAU7+YU5avaRvqLMzU7KyIRXQYUABTNeTu6opes9X/Fe3XHVB7s1pYutrbYmkvn+yvm0kdd41eWjzzwmKvzueMwG/QVrhcOCJyYClJJKHK/f32737/3R/+7jekedOH8XQYBwUAEQOAvtuO4zAehz//+ft/+T/+QxX+4R/+4fe//8Px+PznP//53du7+/3d/f5ORLrQM3Vg+Pj4zFS6LhIhhdDHuN9uEHE4nu73+8gBQMfT6dOnT4ecS8kopaNuSMmQKXSjZDbdxLBVYJQuxo44xtBxlDsYx3sEs2MqpqWUVHJSSUmTahJAxGymEIwQ5+LSqsq03NgFmnoBMNbPY8M6wyWY3TqsBZJZQwJcoujFTBaKrdr5wrBzfvdFsL216pfp1GvaLTBu7S4V2xNRa0FtJxMkmJkXE1YxIAVGxACaAuA+xg+7zTf99oFpD7QN/HYDzMjMbhoIhAgmajlLEi2mQ4FkKikllVLk+STjmMcx5yQplVLUw4KIKDKFjkMADqBaUsmnYaQcnkv+peRPZs9mRyUlJmASBHWtdtXlg+DNnXzNNa/ncou2AsCtJOWtBbvtpMUML4xbm/+1ZgBukd32ZNvnWxGgnc+CzNWBqrG6fuN91qQyYF4XbCaKNUaifQEaIMNVqAysIN5mH33n8vGs6lY1AwQCMoCkqrlw6E5JPx0kxr5wTFkfn46nZIXCqHTKUgzHIqdchpSSGmHgLhoaM0eLnUWIREQuEcbIRMRMACoAZKRaAMCYVKxoAhREZNKOsGcZn398c4fffXjz22+3796FTcimA0AiIkY0s5RKMHjoGd7t+xg28dAxMTw/ZSlSAnIBAJy2X64pxt
oTvUqxoPFptrm2e900P9r6QJXHrtoWzgd/WWlkbWtuD2sNahVwpRjilczI1U910Q/OwY0uDdZBadas+58i2Z+fpDuimue9lDKOY+h6mFUPVSozM3cr9Veq+wROabw9dg4azw33nj378YIRXHq2tB98OTR7kLYsOzG30Zs8Vymk2SWs7c3M1Mp89e1cfWF2yfOehQR5mg/lmV8H9Lo9ZsXMRNQLjUwYHybPXtj0vl4mVGab1H6GoIAugt3MSrL+rE1+nelMHX5WzvSIqAieU8taE6snVG3qqy7Aox1ujYjXD19FlLbiO1/oqj6whvkrPX95Vo2ry7w1n5Zg1017gSdY78Pilp1Pyr+5zP51dWLtIbYYph2XLh2lPrvPtw66Haj++tn1rkFlsfx1z+16cfYaqLJcPf3Wac19uR07MQV3GXXH79qbO65XBVYFv4tg3QbfVtFxPUOc1YXOn83VcW7txP8N24wGl2Lbr2hX4WEBny3krNE7TPzJZ5jF5fyvuarC7fuOAB49bwSBcL/dbLuYhlTSwAib3QYRS1ai8HQ4/eu//uvHH34+nU53+/s//N0/vX//4fn5+S9/+QsRPbx5Q4FP4wCgu7v9/u5+s9v+9NMvP338EQ0C8f2b+/12O7nhffVVSbkLARG3233st/v9U0pJVfWxHFQGLZazCjDiPZIRbvvICALKVhBw14V397uAAN1pGPNhTIioUkZFQOCIQxEFNARCNFAwgMtqL1ev/Jri0Cq8fMGNwArjXd3ndqAWa9ltpr+dxuvba2jWre/bfVjjsVeOsnjs6ouV3tmscFzsCVQBQGgKLTEQUyNAJWZjpLuu+3q//fZu93XfvSG8A+sARA+ETLNavAAoWFbTlJOJIBlhNjqlcRhzlvJ0kHEcpRjTVEiui/1uuzdQZgYGQcsiWWEQGIxTysdSHks5IIzEAu4jRSpGTpx9uUagtji6djdu0vdLIli3qxXwLnaSV45yDZ5fN7cN1FOuH9b9X53YYvTFmd4idi1hWry++L4+3IY81FfMLMyI8rxNZBeK0naABQlcr2rxsJmJeFKTaROtCZokREMzQEAQtVGESvl4DBwhqo1FPx3KKGaRs+Ivz89GXLKeUgbDvu+JII/HxImE1CJYB9gxexQUIPYAULIYqKqamLs0EFHBpKaEwgxshWGkku43p2/e3v/hN/tv3m1CSFoyoHYh5jHFvotAXSwAEgHDvutjCMTbbR9CsJ+fnlIx5IhcbFp+Fa0vjgqxPTwA8EzULau6ZjKs0ei3P1UGpTo+tWdx9Ziugs7iyPTSzX2CPGCDi2kgIlHwgDtrWus377ZBlxidf2JmugyA1rmEka+0hvC5K3xKifEswSIiAdKc9NJEVcSIAQlcJiHDUE0iMM+TzM4XYN7h6acyS6SL5pNsRT7/PkSqsmuVxtvUgnWrzUxEYh8vbq+eaxiaGTYyGyIyI2M/57MwVU1+ZcRPBzy8u4iaZUSkwAqKaIEwBrIMIoYqHtiwvJtGAFCgAACtVkzMoJ7B1TewmpEVKhJuni+2RCi+A2KgTY6NRVuzawvMtZzzqiFewUJ4W1BZEMKrfN5r2q237IZG+ZUUffHWlz7wWXbn6qxeJpxtu4XtX3hy+eVt2+bfpNE52v5MHemS6anqiQU1bdvT05OqFtO2hA8iumtonXbN4l1jBR1Z0ZxsphL+xb/YRCFWIvjrQPF/qrZeQguQLV2b9v8LV3wLVF7Yuiuo71fdygWueGEmCwrekuw05BAwcAApaJDHNJxOUNKYBpN8RBzH8XQcibox559++nnbx3fv3n34+pu7/VsRORyPh+OzmfSb2G+6EChnSWkIodvtdiEE45JzTnl4fkYC2O12zB0AIJOYBcTQxQd+2O02Hvua+9G6x+fD+JyzCCCiEimxkZgBcoaEAa2juN9F4m2/iR+fDz9qKcVOAknKoKbEwIEm1SqgilOXultXr/zVc1mf0cutvVaLI2g7uXVwC3KwGN0Zg6vTXnRST/wFcF6DaCv93sLbfw1uvArtC3TXIkYAKKCEhMhBARVFlEVJdb/t3u+33759eL+ND4w70E41qBYISiTIaioKWjSLDkUMcciaNVsgAxhSOo5DKeU4BEmZiPrtXR8jGnRd13ddLoUCK+NY8jHJqaTTkFORQTGpjcieHCIYARAaqFmZ414c6qKQZ7RYb+CX4tVbp2A2cee3ul1/eetYW1pz+X2T4L15a5Ec6CrYw+pY18tpYbV9Hm7ckTAHA4HheVSe7YS1CwBsAWs9/OKG1y/PpNjj+MEC0JTwAgBdalJUgqKWi/6czYaMRxCkk1ABzEM+DunpcOCuRzWFst1s7vcdqB31JJoMiEqXsSApWgAKiE6ewa0Vc9wjA8AoSSQDSkABKwAj2ols/O23D9++wYdNZlMqyoTEPSEkTAAQO95D32UporEAA5cthbAvRU8pp8cTGBRQs8n0hJP267z72iiHENEtpwigsyW27t5axwyNBNgqvHGOkUspLUCkHk1rxV3AXO2kfVdncQ4quBt5hsmz2yTAVLeSKASq/pBtt/6wV4aoXBQRLQTOVu+ec64xOczc9z0iesADniMSp7E8S4uIIpp/MBNVtWKz3Q9q+Ye6Op+eqsKsShTL7U2tc+OAAKZWTAT1bAxcLLayd4t7601VO640gABAYYpL9G1x9lGbpIWAlEQ8ZAkA1djMAFEFmDjEKbjRBWwiNhRGz7ZqAKpWwMBUjQjntDIAAEaeLkWLAoAC8KXlMIROTGxOglxX1HoVtkhOVXDB1IqaqRgpQpWSbCV7WKPEXaCkNYZaADNcQ7W3vpyPe55888zC/FU7sV/FsOJK5nmBqLcEo3XzeA0fsB6obk5L6eGSPVpc8MXDeMNIeGs+t76/RQgX0/4stb66wIvV3Ri6FQgBrOpxFqpZt845EnA04ddwHEdEBCZ3Vl90rrPjN8yqLph95CqIWpM0op1nXYLOyUvdz/zlffi/WXsBwts7/vq2AIkFi/OaDqdnbly7q4DagvGiK71MolZnxRRQJefSB77f3Q+H4z//7//t9PxLH1itRA5gNOaCEO7fvvv7v//Hd282qsDUieRSVDSN40kkd11gRiIgAjMpJQFi3HTffvvt6XT65ceffv7x++F4/O1vf7vZbLLIbrMTyVI8CTZF7tynBvsddOEpff/LpwNgZAxJh2yaxKQPyIYBCTiGuAuBGN5tY4cCKhijDGk4DGNWQHLrjAEgKBrA9C9AE2zSWuMXzEbLoV7d0vbXF3BL+2TbpzXs1vrUbKVSvDWfF2Bg8fzi19bV6+okF9+8Zo2Lga52VUnbgtO76kcGAJkLY2AERCR3AjNlsaiwodBzJKKsedAsUqDkHDs08IoBoiBGY9ZUMBUdxzykUVQV1cyKipm5dr+LcdOFTfQs8QEm3bEVxedsn4b8OORjLmPK2gUlz841lZJzfldRAakAOJMWfHGCRrYmDa8hyuvda9jAKw+0mwy3JbSrdGqNTOqfRNfjCVvF4q2x1phzTXf8KbgG4VdfCRVqscqEUx5FvXzHnCJyI9G2D
ElDDGGEoprAlS6JwI3gg7RmLmEDQ/JzPLrL7JELiTqkHNpxshpJBCCJaZUJlm6jvaE8tkpstQkzqaifJ4PB6PR0RUB6Fpmna7vf6UpwIz2dM4jbvdLhAFxGnMw3AUzhQgl1FDH2HV8EU0kHiw9T8zJp75WtSEacwwB14iABCQGIN4tZBFE1rrQp1nEM/zrmclkllD64sgASFgYZg3JectKYAIC2soYC+Sja95PqjMgeu9bb0Kq/vFEELhBbv0LLgRmeKiKsPqqJWd44eX3EUcA53pFwGAy0oXxbnZNQDnKltsHZbMs8EGLnMK2a/WIK2ce8ExejMXeL7fwLNGl5fT+lNzKGs1pd6msPq+x0b4Ne00Y9ksBI5sfJF5qwuVWSkFBmECEYRCQeMkM3DhIhQSATNnyQGFCEVKzmOAXlAEBYmYYCoMRBK61AkDDoVZLyUAIkYQit2EiMCSc0GWiGpklLf3d3/58d1ffvwxRUApN/suRjk+/ppioBxEJGICgGHIpUDX3Z1ynuLtKTzmjm9vU757eIophND3QylFIHb7HWEsTEUQqROgm7vX+PGFJ2AoE58KIQSgcV5g5hY1T42d02vOPYtOLJp7XEII82muXv24EMDT6MFPqx2ir6kRSwZgBFLTLIsIEiANPFGALgFyYZk+f/74/PnXPtFuRyFgQGFmYAkhhbAXPI2P043EUXgaEYtIiCVAYe7deZRtVK5QUZDl0pgZILvz5bMbBUAhd9PQk26zh7NfR0RAJkEURACcc8Fi13XDcIwJcj7e77sf3972YQySjzAJy/3dAyIeDqebm7uY4N/+7d9+ef9bZrjZ5fef//H+8z+6Lu52u5zH+/1Nzhkh3N3dPTy8vrm5CSGk1L99+9Nvv/3y8ePH42ECIZCw23cpdYJqaM0EEClgjCQBAFII4+kUQsDY6fVFIup36R+fyodPR4ZOqOv73WEYmctu15WcRQpI0RgKICQSRYTovN/wCJGl7mRbT+NOl5izIbOZU2tT/85JpwBgeQ4YXB5dWF4JMSCbM2jjmfaK1W9Oz+HywdNaAbCBe6E2v67uuBo9USOvKQyB4tIbE2F2Ai91n4xLqcFY1zIsCBLd3UgDg5CAF1Y4KwZ/MzoNmus2dbDWBs9dQEQMSPOZSp21Aivjzbn1qhGQuyuBTulYtI/IFWzdRIHaGBB1vKXuT85n63myLsx9mlyePD8vWuwKlRk8QghqjTC/M1NfJ86Ac7ZS3fLoV6+yzq8ICwuh9gtVgwYNGwVid/kgEM4nMqJWnDOZGTYWRgLncRbr3XgbC1Ri9lNs8x5qy6FG0NBRTzlTmFELgYScyygsl1PTuqM5ms1wy/pWLn09rxkEBAw18KPWKcvTGvXBdKxBxZtaBwMou0FUQw3P+etmitfrdkREEuql/6gmN8Vp4GIefdapmcsQMcZqFyLQcNggsbgMBEbT3nffWBtp/sD6VVM9Kq/c7XbBpR843ysrOIfoYC7MwiUzi8jLy0vOuUxTziPnmuRApHCbelU/qIXKqPnMYemMbTtLMA9PP1nzsqlhURp+GioeDYGV9Uc/fKKA1T5cmfJiX+uJ1cxKRuKlFHX+tJoxhAKL3clMuDHCnBcVjeP4kxvrwj5ATWONNZu8Z09GJABqV2CLJWgb9Hl/eb4Sg6SJ7/XmZxVOWuau6wGK+piB87ylGmUUnUV9mqaZ01WeqNZOO3Ap56wb5yWIuBAz7bqTc57AUtjGi04omsXe6HnxukplOotArpyo6cu30AIjoIqrb39dcz0EWhqucSWrrrdw6a1tfnWh8c0Twc2+NsGr3Owi5Gt91Qj+Ooo2YfBNNR+arwazffie7oy9bD7frOOXv//awnyhcw/h9xCAyj8pwMwEbMZDlnPc0TlPWwjmJhMpUiREYuY8lWEYCgwpRKIYADHRvt/d7m92XUwh7nY7ZhYJgSKCMpNwPL7sdje73Q4RNX+9svrn5+eXl5dx5Lv77scff3jz6nVIgZkLgzCWUnhixEKUBAMLZuaUUgiYj6Je2MIMYfvEYY3nZn7Xxc/UetKbX7+J7fVbyppKKVAYALqu24WYApRyAkDAOYioCDJPMgsFzjkXFGYskiGps8k1C/9m8cNxG4kNI8aVoV1aBbg0plWa1AxPGjjnLPuMb+ecVV1i5sfH50+fPh0OcHsLNzc3+5teQ92mFGKkcRwRkbl8+fLl69en3W53f39/d3fz8vJyd3engjvn/OXLl/3Y7/f7uO9E9zy2g5IsIi/joDJlGAaVek9PT+9///Dh6fbr168iknMWOYkIIvmDMw1pi4gCjLBgvNepAuuBFGyR3xrhfu3b7Fxh19bOWeytWt5s81L5Q3zVg71eLH574ykEl0VWCLG/V2THFeAbqSQyh5jz3NIU4AZUj7crsmmFpQ0aaOpcoplLpenaFuxaljXkcV0+bojCJZL9thDcKca6x29uHlyd7wJm88mV0tD8ZvtnMvOF52yndsQMAGq/gaWyoHvCeAHvG8fDVowK/VZ+E/ozX8bzIQLW/TEvGYfNrljgRESA84JBnC0hQKQp4b39V+ZT//k0RRVCO+/RHXwHYqtVOxARJL0jyyFo/A/U3ihiCCHFvYZpMQuPnYQ1A0d3wA8AKaXdbqf6aqk5G6WeVdhDYmKYrwVOOecyZhdDskxTKZPeORQRBKDQ2/Q32F6vFhHxwTZtAdiJb3BRhgwwU2ysspq/bIxUHSMRkXnhYy3VUdNm86z5l5JzxoqE4pL+zToxkYio1qQAFJ6QoiU+Mdj0LEfgHARV+9JJN6K39g1C/dX8WkVEcA4nE9zVPn1LFcIzuTIrgrWFRpEzf1rfo4ggsTZuTwzVerdQjYqmjk7THMmQmY3wtAURyVOeas6JmeDlLIbhMpdxgqc9c5Kq7631Ln/cq7Tk+c78q1PbPNsiwCYgREOfazGwCTmdncmv7eE8v2o6+mbx9fn7pEvTb4O3pgJ+Sxv0b52RXQ8vr8jF67B9s7tN7v2dL8pqH7b5fC1HrAX/9Vx/bYmt1Zq+rkNuPzHzfM6OKCIpRhFJKamrfIwUAk1S+hRyznk8wJwWL+5ixNTHfpdSCiFwLqWUFOKui33qUMrh+fn08hIj9SnEqAZKDpIfH5+7OcJojKkHgOfn548fPwzD1O/w7du3P/zww+3t7VQmIiIYIQSeU0JRSoKEIAAsd3d3Nzc3n59f9I5DLrlRkPzwPZ4v4dz2Cpfwv373e2gDceNmIxGFoJFEoe/7169f33YYSQBGtRCqQkgUkZLADvG03+/p+YSAIWJhRqSQAkxnV+pvLqIGKsf0mnuArsKF9pq+zvgBCIBqn0X9TggAOWc1r8QYd7td33WJEgGx6GkvlFJi7MYhf/z48dOn5xDg3bs3P/3lh91uFyPNp9BE+TSosHh+Onz58uXLly/H43G/31togJzHl5djLqMGMXp49zqltN/vd/0uUmTmPGpuYdkBMYrmcxqG4df37//H//j6NH0FSjFSEWER29IgnHOoOPx8Yyvc4BOX6Ys8abG729Yg9jrpXurdbzBMJBkYxkX/KG
/0jBEce2++NiD5atY1rFYoi6jzzpoaG/NOg5kGHnCs257P+40l/rwQ/87hX6pZn187BIQlf143e4nPnB9eEG0Nv9p8svniuv1GhJ1Rt9KZPdhNv7Ry5Z2rOUPuWlT5DdI8NbLtYeQfWkceNv/XKMGUfzErejX/wDJOgR+7fxgpRQ+EiIaUBW7V8YtbJVjRrm+QNdEgL3wwWF3S3OsAAFTt0Ob1MfsJASJmYSrVhwuAhMA12GCNYb4MOo6jaoCIGGsIT6vPzKVIzlmghJCIZr2lDiGISN91WB3YzKAMlcGtNyV930sVDLaZQ2dRRKfDMPN4GgVBFcJhHKc85DpDzCwWN0k38QAx9nWa/JmElGyen+ofqHgCxA1GbNqsPTQNjWrEdRupftW7Otq11xy8klNKIWJTkHDWe0UD8c05BnExZUYtwWmhWDXVXIR5ThuGVWuyWKAWy7N2pMf8wdph5/6B7lzAzLaeEgznahGEun500isJBZohbc9E1LDp1+fcsky+F/3JlEnDP9VbT3aiPI5jzjnG2Pd913Wl4tAUQqhhxBeLqK4ILycAgHl+mOp6Nw2W6hXQhm01EsgXG6Z2DPWIYSZGW5VrvsyyDhvYCCFPWrZkbHU3bzdspxFCm2AbfvyKsDUCf1AhhCX+Lwm8i69/d4XvfKsBwMBoLJBWueFgazRutr/GpH1uGl/TjyHqygRd6rfp/TrSHIvR1ABz/RBCiIgTomimisxlFJ4QOJAQcNel29ub29vbvu/3t30KEYCGYTi9HKYpS+E8nhDhdDgOpxOidJFEJKVwf3t3f5M0sXhKKXW7/X7PzF+/fv3y5fN+3//0lx/evHnTdd00jcycUmLQ8zVg5Lpdmcd4t7/Z7/cBXhAxIo11K+lR0fC3ZvibNS9Vg8vU+615WZM6CnPQA7tA+/3+1atXdz0hT7kcTSGUwqoQsnRv3vavPz2+//zEgEgpT/Otb9raR37PapKlHthwku9fjw2XQBERwhpmEVUZJgwkzAgFAKWLSTdhCMzqEFgYMSCEx8fPHz9+FIHbW3zz9tXDw0MI2HWRiArnEEKsmliMcb/fD8MwTcXO+HMeRaTrI2XIeXp6Gk7l1HXdzc3N/c39zW5v7irjmE/TKKP0fT9N0y+/vn//21cQuL29zSxQILOIkKjckQwiCICkyq6YXwjANj/xtLGJq018bsqUZifQ4P/KvDQwyFL9W7fpB+Ib9IvuUjXf6Sb84IdvvqPuFdHEU8tjUyu8clWF5eLaxM83x7he0SuGX2C5rDwm/SLyTxpU+A2D78I3tYm6zbIpntbLdu6Uts+JNumnbknaTcKaUSy6WG4StDgPuIUQ9LzQvyuu2LiIiMsGZjxF+a8iojs5JNItPsFsSpnfdMFF55R0IemL3gcVEdUTYekWJ0UT02NVCUSEC+iouTrmKuRuNOfDHjBFBRW8FqGwpMiyeUqH857SwOAa4ZRwsbXVOCqCGAFCNJxWLr28DjuvwBoGc77QCJKSGdBnLWIcKeccAnEBZbg4+yJyzhIwqc+eodLg94Rr6PEoNvdRWIb3tQkupQzDoKm9570+F64nEPUvIc6+8oFIvXyb2fWobtawzbpZL6UqMDZ3Nh1EFCmZPyE752wwHdUZ/XSMNijFtr5bSpmtfEXsyhyz5uJDXBonFVCNl4AAhEiIDAAsBYrULCDeMomISGalZLMBeoOkuLQQ3rRo2lcpxcKvLVnnHNDVVq9CGyNBjTdrF/S1grN2njEmIlM1Wlo75mvarDKb1jJnpZdQ7yUS0cSsCiGU82kWLq8CeQJT07TTUR3XWFKvvdusXP+60oaPGHzuzr3etiln8PwiXdxFdJA3vRsy0Z144eXrCrDcGfiF0AzZfvIDWeOkaXnz+Xlf+B1vrVHUQG7VYHnXZT3ABnLr6xIkl8BYj/16O7S684mOe/su1hTVtOO7s790YX69IPiekeJ8800ZC4U56UwcxpGZMxfmknOZpiGmgMKB+H6/u7+/v7m52XV913WaIQYCEwYQGYc49LGMpZRSJt7tdo/p8VMeh2GQjIWnEO5u7x9ud3R7e6tnkYfj8fn5eRzHYRiI8Meffvr73/+KeA4vTAF4TrpDRPPdB8iFy6wS9DGBBixRdpS/oTCv0dXMyBUKv4Tw76zcglGYCQlFo4ymGnoq5wwQAaRwkZKZGQlYaLe7vb3dIwln0awfIpCn0uMfDh7TwLNgVmuvswsuXvbWRlP1DZL5tBsgpJTUNBcDh5AixjIWhiIRSxEuQITH8fjrr79+/vzc9/C3v/3t7du3fZ9yHplZgKdpmCagMl9BDyHe3O7u7+9VHNhpIKDeI+JhGIZheDo9TtP0+++/f4aPN7vb169fv3n15vb2VgRLKXniPPHT8+nDh8/HI7x9u7t596/vf/94+PS5MEBAAShcRITQQqQKoq5NBjhvbGC5VA2fsFyba3uUuP2MPbE2wx8MDoS4rQCY3G+8Wq5w5usV/nRZLzFPRX7vBG692GWwhnNuQij1bra1dt6KrGpeHyOutJq1XMbFpncBdtOXpwpfxz+HCzx8BqPODGzJpktDaIBcv4VVUW84AF3QzzeLuAJLMoblTK1xbuYH63o99uazb62hn7UKZl3YyX7d+gpU/y9tx+/5fX39erYMiIhmaV+So/5tkwuzc8DblDT2RBe8urjhUl0BqiatLdRUUgbE+VKtPuH6tyqpC9w17dhuUkRyzmPJiLOHXowxhKSYyjmWUgC4cClFiAQxaJa/I2fz3dJcf1AzRjRUa7tn80j0GoKfNpvUUko+aeoKYebCzJKlTqeIoAhiUH+nEEIMgSiZ8c2bqmzUZvnRYu6F6NR+9IlKnKREDUpWi21JAeb0hry6OG591dXBpWaqqDzrzCCICLjYpNhKhkpCttLOHs917OacSTVuKlL04kdh1mwcntC1F7vjp1CpuZKZGc7XJ1z9cxgYfaJ2QiIws+KyPqSU/HQbYBTmxIPahb9d6b26jTDGcVL8dF2H1Xanc9ossQDnwxRwLMZPtz3UCPtENI0nWt6ilBoFt+FEsjrmXKzQc3eLUOC6mTg3JaC5CheKhMx+VrLenzkYvOzc5JJwuRifWVfTJ0b/sKRnBew/UxoO1oD9TZitvl+Y1wfrUbfmww1U3/P1e4pv2YjE2AhcHrJxZt+pZ+VXemzm9ApWxV1RRhQVH6C3uSQz8yxZhLvYdbv0cP/jfr97dXe/23eh3phAxMyMKCyQqIQ+hv1Onb5S6Ajl+enrMAyMgBAI466/2d9QjDHn3E1TTP3Hjx+fn5+Z+V/+5W8/vnuz3+9PpwMRxhhzGceRY5pz7cUYpe6GiEJAhAAppZRgEgChSIshN4Qhyy2IZ01rLHnMN0sb66nfpfrrWQG1wIp/hucr1hiDBj7hzGUKFEMIAQRJBAExAAYNZazOk5DLLClCyC6sq5G3/7oujVfIGi2N1L5UfHf+lTk3BgAptUoRDMBSpsycCQRj2Hd913WROGHiPuWcSx6GYfz8+fP797+fTnB/v/vhx7d9P4taAQ51RxEjgnS1O1RfFSLq+
x4RBeZDVQC4vb0tPL3mV4fD4fHxcTgMuYyPj4/TMMaYXr9+m2JPWE7jdHg5jkOhALd3r/76t5+fDi/lQykMMfV6FhsCCmcEQBBEknpPR4AtSN7molvLBZMjzVtrRRFWrGON/83SSAotnp+vodqQX6sG7fOVCpekiZe8IgK4cMLyzVZ/J2t6bp+LIFZH5OU5b0PDlwY+U/vyQM1modGT7SfzmLu+IuxXWspHG53HPyxn3+0Nvn0gpcB4/aKBfw3YFXHQzI6v5mFrNsO+cYNh3Wkjv/QDLTnVJr+yMRJR1bCkaWcN6vzEyIdnaQGANdQwsp6oChAiARJSPueuO5/LiIh6oWuAQ3DBmWKpycSFEar1pjqALTA1F8IqZRcb34uDWTJlG2HxThcrtBbPKSz/ICIKU8WvHqT5KPrNrNeHM/Z1Vz2e5hBDKIIMRBSRkAQZMKZEIQeN4RGg3hyDSvFSt8jBhYcx+uB6yc2bm6AqpaZoGTXoh5yrX6SIAFCYR6viAUWYcc6vNReNK2n2wxnIqn1JKVycESmXOSSJF/aeyg1j3oPRa1P6iiqEUg8YKvKL3mkxAhCZB0g1LY9S0Jk/0jkIrXEiRIwxegVJybTrOigsdI74YmsspcRyXpbnuxBb1tpmOLoMGqZzJnAiVQj17pAhBwBEyrwRqPXtuMX8bD2RI2JMvSnA5PIiqvXYRmTHwOisiIg1Sn7O4lRNQiwrxQ+WzNEU9VkXdVcNmzMkw2FzfqHL3FtyxI3Ou5T7Bd4wBBHRy4S4ZNwqt7i6AKDjDzZ3Bhh+axsnW/JsLTz8xPnGmym79Nbmc/iWn5Is5dD1cgnmpo5v6nuaXRd03PIK9/4mhGuAG8ay5jNGbFca2QTYin9y5RU5Xx5mjeNfSgldKqOKgDk0766jVw/3//K3nwk0RhwjCmpwDhZCIQiBAIMUkEASI4UQDy+ncTxpxI6bm5uc85CnL0+PEbvdbqdZcG5vbx8fH0Wk79OrV6/6vhe9iJgiBZ1HQQglF0RMdpNQIHYxQDhlCSH0KU1jRpIO41QuIv+bODTsbS6lS1SK37GH2ywxRlA9J5J53UekIkUYgNr643ja7brdbvcyvYgUkRJDR4XsXhtezn23Hojn6rCiPbi60q27zc8AJFJAUKCKDxFByDlHQtIYnRjKxANMGGmY8jiOLy8vz8+Pnz9/LiO8eUU//fA2URgOxxAREaYpS4wxEZdSIMSIKfYqQEthJhbBcTzVg8U5OTAghxBev9rv9/u7m5thGKYhn06np6en4/EEQA/3r0NIhCGlPnU7GXOKe73dGkIALiJFvWyIoEhBgJlPA6o7l6iTxwoPNhd+dfvD4gbbcIE+19Tlu9icl03+4/+yuwzfnDpdaaoRT146NCS0ftEqnJn/klOda9bzrjVBGswtw6z112Bo5cVpJsxX+sFR/nfKlEtPGqgAWjBWFRa/Gj2g05RgawEa6tZyX1aHX/AtUrHpWGyQHMx+vppTJN+On46GzMDtJawCBcQtnmn7Gas87/n52weg/gktV5ysrDue8LQ7A9VojHn2gLMFYliKhQE0Xq1ag2B2eJOcmYXdOg909iT0s2JdNiPRz7zEo2GnrIIHrGlX6vEJIoIFw0VUH0vzP2m0BX1RWyulMJ/32Yg4TSPUK09dOlveiSiliBhMaQaAEDDnM5y6ZVcwLCaK/apfNfScV5i1juatojkbb7bBxhilzNt9JAoRNZLqHDWUuRSwRPamZRkdlFKmaZqmSe8uzp6xq7A3DTXj2aA8Y4xqYBXO4uFH55DpF6SoKluKhuF0czfjs+s6zyGN5kK9hyn57JFIVQGGUjU6FiLqYmRixsVNWf0QYxyn4h0tYBYG4dLyQKd66TkIaZrNWhxTlhCCCs5SUy+KSM4cgoUP9sGaxWPbHkINYG8AWBelxj5WWrKbgX3qQzo7taqBVSEnQKmkLjKfDxEuwknbdE/TbGlsjgZV8bZBmXuwmVUbwvCssx7NIOL2XTtZnkT6H5o2FU4SKH8kKhfiRviKNfNteOjmBxtXe6q1Cc13Fxu4x9t6LN9sZA3/pQqXfvL04D80jXu2cKXZza7trWZa7ddG5HiqWPd1HUu4LGLnCBdgnkNbFaJ63UIbiSHmTAX0ZESkTIR40yXSEMJSkobnTpCzqF6XuhBCmjCPnIWzFGSWl6evh8NhmoYY4/39/TRNLy8vX79+pRx+/PFHAGBm1QRKkTdv3qipP+cRuKAEnjKUvOtvJp7mMFpdx8zTMCLFlIoQ5lyEMxJwFiIGCqWUGBa61CY+DeHX8XllLv6TBQU0s2IBwRQDIrAgQYyR9TYdiuodiLN/4jRNMXYpRUSccp6mifokUgD+mEthQ//2+SKFX0CSd2Vc4JbPcTvUJR4RRDSVIgUEkXw8Hj9/+gLTcdfR5+fjNE2Ho8aYhf0e//rXv/71rz93XRynU4wBEU+nMZcRKeWcb3a9iJyGA8ycafY28gpDjJECAAQVHCx5lt1diTHmiacpPz8/I4Td/m6/v727p5Q+TJNgTEpsIaKMKgVUGFV1ZW1fqg+sd89STNyIO1L0Ty7RWMOO1r9eot5LlqiG8/gtwbqpP8GTmx79V9sG2AZ9s2bDkBtm2NDb+derirGfAjMqwIrt+75WuN1e+5cs7evRXXpu/H+zfSvNRDRoMfibgfvX1zMCS33vvHYAABZXD7Tw0pusgU2WRX+ybVhDeJ6h+F9DOKf1WrQm2+lAPFb9AP0Zh0HeHOUvfqW613K3pUwHwZotHCr1RvNJM32jlMI12Ze/owWV+g0gUxX01NO7SppGwVygAmTIZWYCBDcAw3UIoQgEfWiYJRyOJySiRCnG2CWo0fw1FRyebXQFAIDmuB2azQZrPgMIlGsQS2GYU73FqEeYwzBYiDmsZqUYO66XuzSY+DRN4/j/4+3f2iXHdQVBDAApKSLWNS+Vtav27p4eu/1kP/j//xJ7uj3Tp885dcnKrMx1iwhJJOAHiBBEKVZm7eMxv/xWRigoEgRBECBAYKhc7LA4eVo+k7UopsBwCa+ir0eIGKZQkwKAJFhylzdNg0QaJUyx1/e9rQGFypLa9f1EYVgSaZTI6lP9XMqmAADiAUlEQVT2duU4BrbixKLF6EQPwxAw+iXkaVfhb5pGQ54oFytmwIULsZewp6GJFHIS06udfgvDMCCLYomINC8fohyPxwwTbcQY9YCzaZrj8QgYjOmbhU09GD2WoOwimpLec21mbttGAdPr+KXa0LaLjIvlyKBJw+yO6xdhKIFVbckRUdu2uZjsAZAoxNgw8zgOTTP5BTHzOKZhGBFRby6llIY0WL9QhACUKSlfSikAAqlaCB5IW6SVgiqzGp+xeMAaQ7EheLpV+BfadWEu66N6Yy7TNciyJVtrnLKl/xJ/hFQe+lMMWaqmsHXWWx1Fk3NJxaX7sWJD3IlaKHG3KqEhhGD+27DcO3npam+LCJwAZA9p5Q+5+dkXvyc5bnZxQ11vwIaWauvSBv0Gb3Cud1O/LW3utZfg989tAeLlxLgVJPOvAnb7t6pgXVBJBiMiYXHK
MO8v4zg2xWuRAAKWfDkhKOPK4xAIDrt9QxQDImcZ+27fXu12w3A+v5ybNlwf2qeXE2dEAALs2sgM574/n17++OOPX3/9tL/a/eMf/xiG4Xh8jjEOw5C65uHhARGPx+PT08vLyykEOB6Pp/MLS+q6hgL2/UnP8Y7H4/PpGCNFoi8p5XGcDuAhJobH4/Dxy1MaeH9oRqaU+dDtOI8V6vxsbs5LtfN6BrgmaVlKtOKOctZTXAAAmxdEFETOzCC7rk3jSXJWmaPvj1+fHhJgiDj2L+fzSxOpbVsByhLfvPlp7IcY4/k8hra9vr5+Gc7iFpfv1wCrBovukI6duzuuLFrzu7hYzvY8e9cjjyIBBEIUQkLRo2cWQSJk5mHoQ9ecz+dffnkej89NkOOYbm5umti9edPe3l7f39/v9l0I1Pd9bDT62hhjBJyksb7vJ14aWirFL1U9kdTcEjHGBOPV1RUyHo/HxHJ1dRVDe7W/ejmef//jYwwPN7dv7+/f3r55+/nL/3h6PB7uj8ZamZmCO2kV0YALGp2QKCDSOmO9zYWfFxMYqvW+yb7WXLQ6RvR8b02im/Po58sDuf665s9+jzOy8bu/h9w3KO6uPvg8mVhztuk5zOdX1Soz/i9ObUBEcXkFfX2rvGCeiNV8VevaE1LhA5PUXSF5+2AXAKBekta+r2Q49AKG5yR++AuEc72uVXz116D8rOWxVvix3FeqMAwAuMxjaSfmKg94OcGasn49dYE7+7B2TOux/csfhuIqs6s4MaOiB/tgVO3ZmlU2KU4leXXis4j0E+Zd416Ps+FYBA3VEWb5zxCndGsImig+g4hwlimvg4NS3D3FzfMbLJMBJbjoRMoCoE6vjnHnEsde3KYlajFDBLLjCtDwI8q9HN6JiFQhLLS4TGxKUUSwRJSRoi5Px3ElJg+VfAApzVFSbKmY4OtVc78SqtUCS9aARX8AgABBjVRSGAN4TUB9C3OBIaWU6g3bK6K+9+lrmFlYGWMgIkv3Z1Si1aq7asatZg7lOB0imkVu/RZOOsYUF0E14cNhNy0n5y4YAmE53mrbdtd2eg/nfD4xM8VGtTXlL+fz+Xg8xhiZ5ymw9em1CI9nrwqSK+qaSYDAoq65KIACeUygBxw8ZWgHEDXRGf9CV7ymbbwgpTSMvUanUK31fD7rr6bBUslkKF6TYTErt2ESEUlmAlOS87zQ0dgcEAjdhgF6wr08fBIX3RRWnP1SMeYOSx1MA1DpgQ4vniNOMRi+q3hiW9AVL8jPgFmDXa3E7+xUYBFK59KQv6d4IGdWduF1zwP9Enu9cd+a58Brrv46ABW/BcdRv3Ow5O4bW7+bEIrbAqym/fR60CDfshZ3smDAYwW2TquWxDnnLDmLQEBAEmFOKXEemZnH1OMpjT3ncRxzSmMkysMoJLFrm9CdTv3XP798/PT56fHlw4c39+/ejuPw+fPnEMKPP/5wdXXF4zMAPD4+fn147PuBENQK9OXLFxFBVJ8iAWAkIaJ+zDmPKBJjaGNTbhLmzAzAEVEEJOmlcchpWGPD2N082NWqqXaf9cPNmvAqAczbEII3M+iNdx4TN6GhEEJ4fn7+3//70+nhU38+DQw5QxPg5ja8ub9tGmSBc3/+9PnjaaCc834fR5mOBWMIOG6vF39ecGngrwxkc/iv19eHLMKadBK8IQLUqUQmhwsSxjGl1PPN2/sPHz7c3l4jSdOEtmtCwBhpmARZO1IMRSdsAGDatt3xYqWgogTADAA5ScbpjC8gIYamadq2u73Df//337/8+fXlOADEXbe/vr59OZ6fn5/P57PeTCyUgNM9cJgshNNVNkTDQrWWN/EsSyn/0sPXmbbHf9WdrDzRvqds9vKXNoXXmbZRXUWKl1rzCHmd1X+T926ua9hawp7BGmyFuuZ8wq+sEbd9bAOz3jX8X5s4q7DJfAA2uP/rk3UJdZd2f09Cs2Sy9HPcnNN1F9Wv6IJuVANcT7T7unGqBStMuomoJ9HK5hbsYzqYTMXFZdSDrRUiEIJmigcBAI0AoufeKmgrt8Igoi6kS6nXo9UArebD/zUherALeDxzIADnQqbmiHKHlfSSGHAWlgyIYm35mSNCIqI4mWvUJDUfORNR01llZs45AQgzEImFv0FEVbVVBbcoIObqSS5NX0UKm1xMlidAVhARcK4peoRULAM5Z1DjZF4obJ5uPPFNXlIl0IsafCBkIwWbtXlbEREX3YSZgWvPLtUYqzMkU65sQIYHdOc93mhsrEG3t5QS5ywiTdO0IapeFEIAFrNAxhhj06jZ2wdWUVBZFo6764WB5TYsOlsxOoXQFpoUiysW7jkMg521YPE1FcgaJrjSBrHY3PzJk/rzeLzlKZkhAoAGKLIwOXajcuYL7vYGIgIXgp/kZgDN9QcLRmNvrLT0+WylWqFG0vbZUxdcKHMFWVA7lqi/AEDOY3weFM6vA1yM8geOf/mHvHQBwiWH/eZz+JYgUo1ujQF0+6uH0/dVsezqwys9+s/fBNL6guV4PfCe1fg5FbcRbsJ5CZ+XRlEdKGwC70Gt+OTrjV9qEJazWQa4tI2rMgjzqVZ5DiKALJI5D2MIgUAIhFMWkRiDXgwTJo30y4xfHr/88cfnL38+HM9913Xv3r3FGH/77bdzf/zHP/7x4cf3RIQ5KvsK4QURMEDXdVdXV7GhYThnTogQAhI1IWLTNO+vb8exV4UwBr2PFkLTnId07LndH/HTw+PLwBkIcUwplIDPl+bF7yYeD5srenMWNqfj0tSs6UdEAGZ/mWHITw+PZxgonQ9d2x+HnOGHd1f/9f/2X969eRMbSlmej/2//Mvvnz9/eX4+EUUe8phHCVEIL8V4smXo6U0uq3lrqp4+/0Wxez12nrISgiYQjk1zf3+/C/JV8svD17Ztd7vd4XBAEgDOOff9gCj7Q4cIIrOZAnT7EAEhLqZc1S2HIRERIFJEPfQnIhEUyIghJ6EgIQSkBgACNU0EwjiOghQ+f3r45fff9rvrbnfIOX/58uXl5UX1bcagVzxExA2Ryt9ZpPkmnj3V+SevMFu/O29O1rplf3D5yivV8+9h+P+RYqM2RAlcpBlYYe/SFnPpV3Breb2oL+2nfi8Ad5wt1fa9pcb4OUUMr8zsmtusCeM7y+YGtLFZLIGs9r71W0Zv4m/DaZKYFbTflH8qeBBRJPuxSzHDmIy62pE3FPV1R25oUEFoU7lWCMHZe7yGspxQNEISkejhA0HUhNLFq0qFcCIxtqVhr/xOY19937PYAWChZBFRioEx2Did2XCBBax7EREWBrVeRiSZXSm8SqCVh2EwoV83J1UIG5pdPXPJF281VVHRX/VhjG0VD0Y7suggHpuwRfq4PCOB5QLLkFVGRnUZdXYh0bM7ZjB9EqDtolkw0Clgbbcz/TxnlCmECQrO2pEUZyouQWg8bJO64nyajQurSbo0PieUBwAN+2o4CWFKElgZUQ3JwzDofTkjFXUERU3UDtiPvd5zaEMMMQycM08hbQAgxqh64ziOIhtLV9xJjyHHiN6vGX1xivq12sA867R2Muc5CJP
Tsf1Xs/vpwNuuVaum2WPNYfhwOBwOhxCC4kRRlKeUFhmm05mpUJjzhU4rS2ZoK3oTWTOshRpQ1beH3jj2Cje0pnxlz1xm7JX4N3N3Csk3xLBtbRAcX6sGuAn2utp6UPVACP26ttdlyfrXgFXPeXkHY93I+t018JcqG+RrwCqGWT03CC/h4Tthu1THkLmu/J3kJMtDijVgm9xVP8GrhwsTL0JBDEiCMAiDblAo0B9PApwQAFggN10LIsMwdPvbDPD0cvry5evvv//+8pIPV91PP/3UdV2g8OnPz8fjy/39/Q8/vG0aOp/Ph7bpun0ITYzt58+fX16eIND19fXd/c0wnAGlbWNAYEkAEGOIu4NIjjRFAD6fj23bXV/dCmKf6HB1HDI8nz9JygFCxr9w98lW4noJ2Ar1FOsZ4JrCX19HiAhivyLoURdgYk4Dj2O82sebmzfv7m/l42PK57///T/9L//5f20ink4ngdQ03du3b//tt8/Pz2dpIqsXA+LQ97vQrHt8ZQF+Eyf1eP+iRRopIjAgMEgAySAIJIhZOGLAQCGE0HTX+7b5Qe7vbjLIOPYPDw+AvNu13a7d7/cx0pj6kjGLRKb8YczQNR0ApJQ0MpzwPFNEhEAibDxbBLuIOWdhAJwS2Aqj7inX14e222UJv/zysR/yzfXdbnf47c9Pp3PPrDc5sRzYkTADiADaX5WMpCSH/h48r7mxX7xrCrz0FiyZ23fSvGdBa1Bf4b3rRjbIezUE/2sF52sA63We8k28JoMAqJ5I4INdr4Ex3lj9tAbPYPMHxGt2ut4pql8dVut9UIqYVE1fJR680uOiwnKa1tVe50vTEKCmrunv6iS0gnn94ZU9sWqkPAFY+S9Ue6LhExEBtvmtf+gJGwOByPRPRzSFeZ5cJkV1TET11VJJW0R8VHmDwRgLFMl5VgiFp5CyCmipVJubxWnYm5iqUIkyn1oog1GXPN2TJ2LS1iZclmkukQkZJ0FVYxJquBHDu+ZF1EJECn4lbs7wMwM1ACbtBY0hTsULNOfM5dKXyuhdJ1C8dWl5oROXoVks2mdFMegc020+ZgSywxWiVgG7h6nqDYuyAARou8A8uSObriIiTROKNC8h2MFDxmVMES4xNlXhtztjphMSBFwa2fRFIyw/OiKCEmwtlDyBVC7By6SbLVQsc2WMMQJPrrkKxtgPGsXh0O1ijOM4nk4vp3GQ6Qrk1LIqVADAgtaRdYHuHpHNDiyPUvwsgEtc6V6ZzJtmwZsUPMhE9cLWEZ1OJ31uwUKnQ19Jds/TL+yu69SVVAo/1SkYhsEUQqUO0FjnIc7d8QYXgwVzxDVn1xKW3t2ehGDJcD3LWxd/wFFWE9oX3y9pIFr3BPG73Ec9N7SOaLl/iOPIVVlvQpvbADhmVb27yeg8J12/a5+thQrUdZv2fBOGS/hf0vBG+5ea3Wyq+vA9lTefr6mxQsgrTU1fL2iS39xuWARET3nV7yAiri9AlbuaU+hdQAG9P7zbHRAyouQ8ZpiuoA8pC+PXrw8fP358eDhnhtvb/Y8//vTu3bvj8fjx06enp4e7u5u//e1D04aUh7aLICQiu93u/fv3ItL3Jzsh6rouNqFpAgqPqWfOiNj3JwCgtokBQ8A2Nm0MbRfHxDHSbt92bZPSOIzQdiEiWZSPCuF+9j3tOcb4vZrkJv7tc9WgiHiVSjkwTtd4QoyxCbLf76+v2440BFru+3R86V9eTl1LOecQYtfFrhtRM+6GQMwEAWMcM1cHK9VqWgP5PTT8V0e9eIiISAiMgKLH5yWdVmgiMebMX79+zee4J7i/e/vSPxFRP5yG4fxyxLaNbRu7XRNj1L1l2ugRi7O9Xv2gpmliaGOMzBAjlAqcEyNJjFF3Ogoxu9izzCyMRCHnzIBE4e7u7uHhpR9yjBFCOJ/P45iYNXF10EuQgM4Vc/baXrhBVWhZW/a+ifk1+12zaFhKHVXNS+bBzab+aYJfA/xKj5tc7hW3l0p48Ix63emaeaI7ePXNWjtUQVIaNw81e3e9g2zC+Z3l9Y0Yl7nybAOteAsiaoq7NZOpMfwtYlvDI14wenW3/Z6xV7rAd8Jziats8ucKeK3mcwFov2YMgyWdYNFKrAuz0HiLhW8cAGLW/VNAUPTGNAAQYRIGJGqmvlkjIAuHJbiGCw/WGgXIE5tZZCuSYi4QkXn8iIgyu6+XWPkiQhtj1m+GGtNDVIVDNNVIE/1J4AgwxVlBgKCiP4U2NglQNdUy3mw338BpFOC4pNGWEbqnYE926PQQzxQ0jsVUoTw0bot6UxmnCuTooKLsyuxmrJNodrb0K1MHtTWJEyVVhi/vrqnthyn4zZybUYu25npc4BMnAlM1dXFqpToSIgbAlJLeFWRCgEWUGragCLgRmCvG+aFfJIY0PwuICC5zugNmAtiPK+ec8uijzrpGFp9tyTVNc3p6sWAtueTS2O/3eqvweDyqn2oJQpgcDGVN40STyJJdtgw/qAqk6ZjMYQZsk1/dvUFEu7SNONvH4FtFVroQbongALDZmmb04lejjPoBVizPg/EKkJt8f13mRmZDx3yqutlF9dDTjyyjxq0r4AXV9PuLvVixXNuoqk3UM/3NX2FFFb7ZbwK8Fhw356v6WkH1+ng3506KD0IWQVIIL96KQUT1CAXd6whCCBEJgJgZmAHlPAzH4zMjAPAwDH0fhmEYxnxze3V7e/vmzbv9fo+ITy8vnz59Sin//Pe/vX13L5J10wkQx3EElhjj3d3d8/Pjy8vL0/PD/tB1XUPU+Kg5iHh9dZVzjoS7Xds0V6AOOCFkZhCOkdo2EkwSewBMWxhYfzVWf2nG15jcpJb1fK0+l5legcHMKNB13bv7N+/v95GHhuTulp5fwvF4/Pjx0/3tVbdrApEI/Prrr8Mw7HYNx+Y0DJlzbNoYI6RUNVutowr+12npEoV/s77R8xTWGXG+OolIiAyYUhJO+yYKwsvL6fl8fH56jA0gQNvGJsQxDV+eHs/nIyAzZ7tNEGOjkdLseLTrul13aNvJcoiIKaVyH0STWiUql0qYBUCIIgIGaoD0jAMZUJiurq72h8Px9PU8DnHyUA2IhbNN46RJ5hI9iCcQAsQpx8ZlzNgqWzPDaoJeWeaeRMHNY7W7vc4o1rP/PVzrlaYq4eFStc0n3zPYS2DDFlW/3u8rr6+BqdBLK6H6FRgQ0QgSlhN3CSpD46Xte3N0691nXW1zLduHzf0IAagExvOVPalgKd/c73y1zV/9EOz5mnFVkKwb9E+yXyP2TwQL5MyM5VxAyqUkKZKzleDyhXpclRwAUv+Qcyaa7lCJU0O9i4XXi7R1cvaoaU9iYLczIaLGktGwGUYomgx1mntZIIilKISAdkLkQFrE85wbLJoh4mTKU0SMeUTEEBBAiWACnqeYooaQKX6U5sZVy5VeKtO+yN25slnc5GUGqgJgcE6IRRdexbVpdjZENYBP6GVOmsEeERGDb9neze6eWxY2i5NX2OxFrNKYwOwpSiX4ZBXlSTWcohAu1rzIrDoWOp5aKw8XgXehKCEhhDY2RDSe++
PxeD6f1XG0aRrG6ToKujuQXHw9rZ012KYYe84Fy8MLm6blUfTkpKraGpk7cR40utJiRSAiYtu2umd3XafxBm0eLYMFT/F4J4uoDsrC0qpC2GLIzphp+IGiVHvgeeWXWL4uojtiORapKnuOUDHibzLudZ1pItydWKtAS0b2PQW3XNf81zUvXsPznX29Uox0K/4u3xoOLe9kbkK47uuVr+tiFOKhMsDWG5Lh0y/PSoG8NLRqBW0Cs4YfV0qFn75qAb7SvgfsVWzoB6qfA1rCFCICSJbZFTFojP4vn39HlBAppeH5+ZwBYgsA0Mabq6vrH374sev23X7ftu0wDE+Pj58/fx6GfHd/uLu706S2IQTmRKixrzIzt217c3PT9/3T01PXdW/f3sd4RZBZpAmNLvG2bQElIrRNiJFAIKU0DOem2fVjj4j7/X5/6E5DTyAjZ3BXHmyAcCG8flX8luQ5gG9n3fK6kWWlubKf3BhjSiNx3u/3b9++fXu/p3TeNYQdHU9P4/nl4evj6eURSc79+HIaP31+HMexaXYjRIBBypHi+mDl0tJ7nTzWv2KRmf5SQcQsQHqmUCL/CQYEzCmDcAiBMKZ8Zs5XtAsBUhpEctOE+/v7u7ub0+kl8/j4+DCO4zAkxEkh7No9Ij49PzBzE7vd7tQ0nVKI5rJSjVENkiBTQPg8AiATUUqJijMO85iTNN0uyyQR9n0P9NI0qet2WYZRfV1n9mypIV+L2uJx6OfaPqwJzCq8wk82Obm5zKy7/mbxvf8lDvz9xQbl2/8mk4RV8NvXoXoFWpVMYIl/gIsEbd2tBIOF2gZus4CtWZYLKrqvsO692nRgFYTMfg0ux9gaFeupXAOwWW3CgJsyz0ZkeXawxsa6ZMtbVp9P1WnuTb6qhMbpreVFG8PVJf5WKbrWkbjQntaCiADOSplJ5irPVwPU16OIAKhn4oYPgF524hJUN4SAeVvBZRe2tZon0i4ALAej4QjCa6jXEDYiIsKZGTGIwKSx8NRLpMkSaJYc3fZNFUZcmFltSpZGJxzHTDTnN7c6PM7hRrzHgpTLdewcKW0DE6ddFzAWvMMQOI6j+s6pxdKEGe2RNZGdowYqG6ThkMsdPyoJ39XQpPM19L1JfujSstPs0rnIVB5w0lv0LVX8cJnE3KPacKuFy703C29jE6EgMc+rDouypHcIh2E4Ho/98WQR1aa0FjRPro2diAQWe0YBYOrIYOOSVcUqLNXyOZ2AI8LZNGRLfcIzNeLUTgMshKBob9vW7IEhhPure62sY1Tka9qSEILmsNaHXLx2vQHBSCWEwE6vNtKyIBPL7WFDtiurLFfjtRMjX3mJjY0yN1LtHxcEr/XzbxZ0EoaBtGZTuNI61kVWWsclhks4O9CCO7tZ14RyMvd6v56EvgnnuovXi99ajEXYYvQwbA4Ets5iq53ye8BYQy5LmWzdHS7Fx7/ai29wPm0kPUfbFmq1r6ZpUhqn5EREAHA+n8fhhJxDlKv2cHV11ey6GOn69ma32wkcmtiJiB3x5Cx93z88PF1d7f7Tf/pPd3c35/MZgK+urljy8fHUdV1ookYCUyH+eB7++OOPrmtubg8U1AIPAMiSTqfTbt+FGJn5fB44jznnlKUBPJ/PIwdAjSTegx4ww3weVH2wlbKgzyWGKwqspvv1sjlNUg6nYaZzAJU50kBE19fXNzc3RDwMAzI2ze3b3bux378cvwiPYz+eTqe+zymlGCNgGDOEEBoKjJhHoTb43r34IhXnubCu/etrvP3V4pGwXNfQdG1DCACPj48wnv7+4d3/8//xfw+R//jj93/7t39LafgQ37179+bN2xsReXi4fn5+fnh4SCk3TdM2Oz0TfP/+/cvLy9Cn0+n0/HzMOe/3+9vbe705XzYdJJoCz+QsRMTAOQsWDsAs/XlshvE8ZOEp28owDKdTv7++jolxZBAQQZjlb1ZWMR3OiwCjoFzC8xqf1TJ/pcKlaZKlskHLrMuvEyo6efrSfK0rb/70l+pXnWrNV4LKGH+GsmZxpad9T1m3YwDBFtp9ugVZKCG1Ucf2kWr4pcJr+F+P2o/OY6mSpe2venj53eo7l+orW8mMJdejb7ySf+DVGdeSlxmwHT6nxk1+g4JwXpkHTX7wrFuL6REeReBsjFLurJlgX+34ZbrZX+PSkRKRmlsqVCBiDKElEo2ICCUZqsh8zoqIdtOs2n5kEW0Slaer9qlQppSEaAqESLNSwcxAxCKQWSVjAEDknHOQyHpsQYggjMhZkuTusMs5p5z1DmHEiEKSIfFkOCq6B4hIGiYHnrZtYyQA0OztY04RJYQwpD7x2LYtAIx9RsTdbjcmFtGkf7HMTdL9LlCDiJzhfBrmBYPz2A0VnCbzXdM0eoGMmQEIEANGImHmQEHvyJ3Pg1CaTG2QCUSzy3FKsVwyEBEg1LR1OWdBDE0IxV4kLABIsRGYTKyJBTSbLWJiUbkEAERYr6kLMwpoHkhOGYrfrGRGxGGcg/FAWbpGUlgMhk49m7M+iEjxDM2n08lVyIigwfpkpL7vmxCZuR/7pm27pun78fn5eZK3EPPMCyIHAoDMgsLIYsCI7nyaFkrjw6pLKoEgMwAhJWYLm4tELCVMLgEiMmRh1ihxwpN2bRFlEREIecq6wwg45nTux66ZDmM0DYbOviquqgoqlvb7/bRMaL7Om0uxZWlgW++jJAiaYRBYWDKHECLGhpqeMzAGCUhIFEUkUADOiJMNBAB0ICLZbPtl2U5RTDXtlV5ByZkBBABDCCkNbntynEIzVUG930PR/eJ0QMPMjAAMGYAQJYRyXI2TsK4uvojTFV8lV2SYguhqsyxKkBN/L1mPJq8BmViPh1C/esHFCiyvnfgDhY2zOh2a/i8zF54uPCvvFkFEAw9Kmpb15pGX+Yvm9mGOdemxjbBUdIuhNUstN4hIFgaAQAEIFY0srE3QlMjO3WNBAMT6iX4VyeNs+TdM2gZDzuNDVn412wh0x6K8cN2f6crjxz9UsNzmKuv2/SvMnJGntSKC0/VARMQ0cAqIasURQhACDEjjeIwgp5SIImFz7tOuaw5X7d1Nd3XVXO93IaKIIGuUReIsmHtEPByaGOPpdPzy+y//+stHZPjPf//h3Zsb4NQ1IWc4Ho9d1x32gXngDAhht9s9Pobn53OgbhyycEBos7BADAGJcEypwSH3+WUsWn1mTqOIRMDx+Ng0h7uuuYrhswADMkZcomuieQQslKDzqyyBEVFdoD3+ARAxVwJoWTt0OciKzbg/AqckMvm5ZAmAiFlE/XkCRMwSsRXB49ALJUFp4BkA246wuVX/iMPVPSG8v38+n/pR6OtLP5wfx4Gp2ccQKhd9K0QIk9cD2F8AmHyJVnTiR+GpN3G9TrUYb/f0DADIgggYkQmnEyQIAJEStM1ufH6U6/TD3+93Ld0c+i/n/9fb6789vfz+8PQHABxPzc24v+6uCeOb+/dN3N1c36eUHh+fv359LMHVwpv7H5qmeXp6enx8bNs25/w//sf/DgA3NzeHw2G/319dXSnXIWqaAOfz8fHx8fn5W
b11mHkYUhNbZshZcpK+HzuZIsaLQACEzJwY2hYFE+cQAggTBsZpYVIgwTExdGHmdeBYgY92awzNL1jPpe2n9VcvN89TuDKjyVLZcPvahnHCt2OOOb4vESRa+L7pi8JkTtkVFRlt4DIYgW3chgc9El+AsRK7K+RU7fvGqSgy1fB3bTezZUDzuzLPFP8KIuZlnsYKXf7hpQoFdfUpp0G7+brfR3h1r8pesV7ytG+oRyCgXkLhegnr34A4FlON+ONyxKZp7GDdwMAQ1KVNs4Yq6lSUzTkTIJcdfwpQMe2cvijCoWujiAinOSImIjBa7gMzj4kTNnQ39KhoYiMl3yDN97DYG5Zsydi6s9e1NaLZh65aMplRGAgACRhBCBmBQSAGwUUjCTJIjuM4mqkHXJ4A0yDXKxPc+nTgTrNY6cH+Ap6fUVuiNlR7nXES/hCRUHU/V3TfzFn3MFXqjD2pvcqcUaXcqtQ6DaGkWoXTsWgImTJzi9C06nRaLSdmFpj3EjFLC5JahHTgRdYH34IU02uMsQT1nKLfQrFumXZReWB6PuWnpu97b/NcMzubAn/wZlNjs+P1PaNRNX9hCQajupAUG6n15b8qDBZpxuY6NE2wq1mIDKAusGNJ+cAAYtHPRALO78KKLVYDtFsZ+qvfbDxDsUkXEaIFg/Pk7cnDDHqqQnuS1l6urq7AUn0svFJFXJbIsAzWiiWdBpdwRLqBQTl0wGJ+7Pteg5FWcyeyIAmRjVM6X0wLRWdaeeV4kpyztye8vEoUW6gRUJCLF7Rn+lIghC0Rv5pZ/6GquVlsCAbt63i4VKoePfybFS71gqtttbyyPRavrPr22VnvrdmwTP75Pfi5VGx+AeYp+54Gxe3rFXKsrJ9X24evue7U1zEhzAMgIpqXQC/Cg9Oxp8pap6gxelIJJIUaiYiaptnv9zc3V22r2x8gIklgJluYtk7VlV0E3r8/MPPDwwOXGG6haQCgfzkS0W53iHGSOImIUx7H9PT0dDze3tweQmhzOg9DYuYQIkDmLJw160AGAAJNFdXoYGKMRJCYAwXZVNhWz6RwrvAt+qw3lAsz7xjOSlD27xSTec55F5socD6ff/vtt7H/Gui830XKDYNwliRMFLuuOXS7EOPd3d3NLWDodo/HP4/DcXwGon27T+dTBed/hNrXLXj6t69LBltFv5ANdANMfh9Eb9++/fvff4rwMvRfXl5env/81+PxdHt7dX19/e7du8PhkHPux1HVP71JHkIzDEPf98fjse/7cRzfvHlzf39/e3srIsMwHA6Hl5cXZv78+bOIHA6HruuU8FJKehHdJMXD4fDDDz+cz2cEIoqIYejT09PL8/PLMPQ8NLoKmibkgJlLYu7pev88Uh30ep1+J3pxi8nDFglt8lLD/CtdGL9Cp8nD1lbiJSVE1IUvyzwWiAhCfIG4PFrAKcZVF9s1t0a0idhqK7cPa6RVytUmotbY+J6ynh3r3RbCN6fGj6sa2iXCsNfXGxCWPM++AEzeruuRbu5imy3rEy9GbiNlq1Tkiri47Fb9ZDiswGDn88XOkbCSr6q9r6LwSrjd7N0LHtOu5EJ4WM04jqPK91XTTqJdssULlOfXhrWumreH2y8n0zrsdSIyi+LcCJJoELkJnsWJnZFm8WycXrRzbsMyIgYKdlIiRYdhnnSk+WQCEqK3INc3rKYhuOiJ1ppFgzQ0igi4EZntHhG7rhWaXA3LDYFZ/TBWZTphZWf3tJVdHEtwhG4HHP5YC51A7w2A1vvSQWVKI6H17SKlSkuaT0+WyqGO0VpGr34X4UyKwuZPEJQk/Fw4gl4M3LMVzw5847CUsysSlaL1+e3E+rLnJpGrJpxw1AEqiozO1SPUwz+NKARDix/I5nrR+gDA2Qu408HKOlopuLF4JHg6qYo/NUB3txaXjF6WHHazeNbjeHH5s0L1/CbPrpZVFx4h68ZfKexcJvwHvxiti+pJ1c43+9oc+He+skasTdkmYDYdniH754u3/roO7KcbLuD/0pNXRmespqpWEerrUFVPPBM2iiKqHZmknOLDAtsiusvG+Y4KMwNoiNFd13WIY86ZRWJoocTsNl8AhvlW225Hz89HZr4X3u/3k5f+OJ5Opwbp69evu9355vpOnQARJeUREV5eXh4eHnb7pusaxcPkvyqSs6Scc86EGAJGCkOfmqYBDCHA1dVV1z33Lwlj3NRGzJ5cccXMHGLcqL+F6lcWxbqIk0RRpQIkQL3KISDCY8KmReTz8fjx4zHg+cMPN2/e3AZuj6fz+Xweh/Hr16/DMLy7v/vpx7+FSIFCaLr9gQ/djvlxOB+DQNw6J/onyuYwRURTFsNqUa8JdcItgIvJWSK3CYS2IQFE2e/3t1fXCBBDz/kcm6vdbod6F3S/150xBtDL5NpvjFFk//T08vz8PAzDy8vL8/Pz3d2dMWc9PlaSPh6PT09HRXnbNoh4fX19fX2t+5GeJ6aUrq+vU9IACNK04XDYEcE47v48pSmMWYi8oe6+plq8XkcLr1z61y1UPGezzpoj2buyuvElrhhZrnt0skTNRWfe8i2F0EPoe5xZHwAsDxrMi51oMiuvR+1NI4vBcn0Tu8JMJSd8D54vFWtwzbcdokiWCuq6F/+58jFZ09vrZZqXFZyyVAh9Zf1QycPgRHRHBtvCwPeAh05JXjwsfYHDAxYnMqdiTFPm9ZHNmao2PnbGc08Jfr9boG5JGB6ffikZTqIzoy9S93ogrFcovsgelTgpOYjOTmqgVHeczAyoCeu8KXmqjJqEQhgmayGDZGZNzEJE7Mx1sKW42gKT4kQ7I6iYvIxiRESAERsDsuCUASxj7EL2MrL21zmqmbDj/KJTRZt77V1NRm3T5RLto+s6syuqo0hFH2baVgDMIufPGDbISGbi8HCSc4kx7BERAKmZyxQ/fUVN8NYjM+vlt3HsPf1giebi28SipOWc+2G2cfk7flxcC7xZz3MQdKqjHexVNdesCpe7RbUAAGDKebU6dDTC9lMZQoA468z+J7W0281J85qG4oatIJnXqEkDfkaIyBjKNFgWAMiQZ+Xcx3lioVUQF9PA4XKpeNB6mVvxBzqbhLTdvgAYGllYE70IWKYZ9QmHFV+zFuxwpFpZlxQ2dJvB+sPr0PqyZqmv9PXNdjYW5oUGjcKrt/zJKNg5BS6eryfx+8sm6wC3EF5v1nftidmPyLdQtfZNclqzAv8VAOxOsmtBDPkGnp1QCLNVtIMbmiJY5JwTkggjRpq4EZp7vABA13XX19cjZ3Xne//+/Y8//qgXLgQx5zwcTx8/fnx+PiKE/X7f933OOWe4vb1ChOfnx+ZPOhxaQgmBANRjHREZUTPCSIxNEwhYdrudSDNIur6+3ncPD89PhAt5cl44Zb78PggXiO0S5qcnr66UappERAjRYodb4E0ACijCKQ+nUw5Ab+7aH3/88e8/f4CBhpRjjEPi//bf/tv/9r/9Nx7T3d3d2/s3DALIbdvev7m9ez59eTmCZIHtaL3/weJ5jqx8SmGLJb7aLxNFyCk21DUBkIVl13ax6Rq50o2S
mY/HY6DG9tYqQMNu197eXn/58vTw8Pj01B+PZyIahgFLNLumaYhi03RNA2rZ7rrub3/72ziOz8/PDw+fRESzZ/V93zTNOI4pMci0rSNiiM3t7W1+eDgfB5Gg1wswTMmlRURgoWsBzHuEZzWvsAXDlazEcaNPq7Zux+al4vP23N/p8Dx2yQE24Fm2M5sTfCPfk5Ty0qjn7UYA3DLUlglRttigx4OnSdug13153Mqr+tU3ubevaR8qlg5Lyq92h/Xz7+kFsY68sllsJeLW5gJO065egdXBBBZvwbV4ANX0bR0ovA7hZjEZlbacou11XhohbEIvnemvPYlsZ1xQ8nJ0ImJMeubzuHETO5phyqvOa25ovbYxmBJiphgiDbu1IFNxp9pefxXnhw129QtxMlRKGQOIqPPwtHptwhaJ/tZd6LuVwGQjMhF/cVSAjBDq3RQBcWFRsX6tTV8mbMBCIyoaToDiPavMXQ1ugQJAkGJx8sMx+qi6tl1EZgsnc7mrY/NohBhdVDqbHSm6vfmZIKJCJbLhF8olnbpubFLsgTlno1ujZuua51QTk5dszrk/n7VZ3dJUB+bi5e/RW8riYXXAs/DdZ+bie03F09VoWNuvVouba6w+K0FWqwO2jG/auy2KyuamoUe9omhTXDE4v+5sRKoQJknWb6GNskRwsUhf4VDWsimo2aWaWKsiFYvf5I+vME3ve09TLF9YuJepdw5tA+z51Hd1t3WC+8qv3yzVK5t7Bqzuwr1SCjz1vTibes8/KzA2sQGVzrO67vD9UPl14blEhQRcbpObAF8YdX3S+Urjvpr95I3Yi+FPgsU3hj5zaUF00a2NbwBAoAYRBKYjW2UhwzCwgDALAgXqdu21XDFK2+rdj5Rzappmt+tC2+Sc/+eXr6fTiSjGhlSgH4bURHz37g1LYk5fv359foaujSEQIt5cHwQI0axOmJMAZx6TYJNyOp1G1nXKEkAYtpCwHCk4hvk6WtbF5mWT8HBDCpzi0JcnQQQRoGmjSEbJIcQYCRFPL8c/fvv90N0DYRr569Pzw8MTAOyvrw6HA8UQEBmJSHa73WHXPp/7DMCpVtX+uYIrCa8aWkWQ1b7vFhqoKEJCGTKUS5f9cArC+7bd7bqmaSTHAA1BGvoEgAhBrai2C5jPJyLqphBjfPPmzZ9/PiJC24a7u7u2bfu+158Oh4OIqDfpzc2NBrIex3Hox3//5d+enp4Ph30I+PT01DTh7du3IhLCVdN0qkbK5CEVn9PhPAxPx0GJ3kQXnMIizCuaRQBrbfw/PhGvlIqqfV/f06PVXDMW+8wrdlHT9oV+KsPOJvDzBiEbhzFUdbx6faMdnNJxwwoDa4+Sf269XyqejWwy5EowWNeHV/cFD7bv8XV46ocFzbK6bb65N4kT2isALoo3F4C6VF/KHctKlqtiCrjP9c64OdgK+bCa7kvYmxkXLcaec0YUIrJE5ZMeIU6vAJeZwAelWLLF+frm+lzNIDD5kl0gRK3gWa11jcW9M2XOGkkCZ+QRUS7RkDQskjcQeTAmmBe+7zO6hTQQ3aSQLMz0mBECuSsExivtGKLuFGej1twOizpv2Cs6AntRFcIJpUICZJE8DduqRClC/EMR6fvez3eln7h9ayqa+twvWiNQg1xHoekQysBBM2uZUmdpGDxIANA0nacTm3fVMUpMHbC5FufjEUq8o5yz6p/VNozlMqERlQ3EswBrWX+l4uXlEVL9dWuDiilYrxQGokmfjzE2jaYG5nJ5b/L4rdanN8r5xaIYwOWlaigXBW1EUo5IRISqaJ9F3zOFnMqJCUyRaf9adDKjDcOqXbyUJY/zGF6AtKyw8VaZIILpMuE8mxr0CeemKqa2ZDUbnPoS46PlXTj7S85hwdPeJeRcOpnzPMHAeGUPu8igi4eDh4GWFzXJ3fLVNC3W5vRuoQp7a1rIAPAXZYJNVOByX39lRB42D6T96sd1qRHfIy4f+r9ratRK+t9mm2AcrxwSCQCwMv9oOalk8m+HgMCCIGZho3l1gGDxnGeAt2/fPj09jeP4+Ph4c3PTdd1wOg3D8N//+39/eUkfPtxcXV0hBOZEBLe31+/evQPkvj+9HJ9TOvd9Zs7DMByfXxARiDAUlw0BFDmdTl23SxnPCY4jpH5AF+9lzW3WmEHNW3uBHtbz/sqiADetF6eyqEaIRfNJY9eGd+/f3OxI0um3337/P/4/jx/e/ePq+npk+eOPP15Ox5/+/veffvxwfX2T0tC2bUr55eX55eWp708pDxRaEaq6e2XRvV4qhlPxcHSH1P7X19jFBBsBQCSUNOYE/XDqT8euyQSU8iQh2I5Py7v3VC7Ym0NQ3/dEcHV1dX9/f319bWe1ANC2rclLT09Pf/zxx8vLy9PjkQjevnvz888/Hw67YRhCwKurq68Pf4YQ2rZtYgfTmW8IIez45pfffgN4AGSQyeskpYzTre9p7FPEjVAf3xha+LKnxiZv3NxiKl7huWLFh6t9Z3Mel9v69rFaeXjxLt+lYse41eh8L4vRsYjb+zb3VtjalWYmv+Scr2AD3W5eUfX3DA2gnuX1h6p8J/dY04DN3ebGAavhWyGnt8yNwHwU5dHlMWlUcQmfuFIiDEK5rBB+s3hS8cPB5d7qw1542HyEmEvtbHa6fjihq1yGN+zZYrd3iShySTwAThucLt871Fi73sRxSQirMLIezLomFJ0n5ymoDOrt/vLb7HTH8+t+JRQpikUEwxzeh3mKn0FEEAiyKnhs8DvnzPlWGzgCBVigbB4LzkibEcWimpW3mIkAlzxCsFhvgEXIsNwDUu46q6ak3Xm1WYHx6hy6qIA22ErKxKXu6meWihutQ9okj+r1GP1bMUHDFS/z4+lZSAjBnEvBrXMsN0s95SDibreb5tcdpejvnmY8018vfnJJ1f3M4vJSsm/QXoRZLpzPBXhZEFEdvTz/Nazy0qKrAPj93pOENWITasAZMqk4qhmeuSwOm9BK4MMVJ62Kp1XDYXWkAks2UbXvO/KrXn8KAHq1mglFBEUQgVEIcDalsQAt3H5gsdxmbIBjFJvM7pWyiYrXkQOXg7t4wJZc9bugWveLyy3KmrJlu1oI2w2iF2T/ojZYjWsT1IoevtmaX5hVm7iUe74Hqs2WF88vzFdhgJBVjCjHTwQgwsLTzUN1DJnWFwKRCKPCrocX5jk/5qTAa3Cp0+kkIqfzue/7l5cXAHg6vvR93/fDmzfXP/zwQ9M0aeQQQtOE/X4fG4oxdrvYdiGlPUgex7Fp+jwmZgERFAyBAJE5ccrDMDALYIPUEmHbtl2TmS2IzsKh14e5Z+8r9R2ztrlGbBWsd8NqT5nyPykA5rwDwpyA0253+PD+3e1V+/zw6fnrH4+PSdLvP8f2/t37w+EaEK+vrwPB6fSyaxsAeHp+/vPPP19eXpCgDSSBcvqP6oFVqdrx62j9U7VfICKIOiuZukIkAMhNEwQjQHp6/PPX3/qrFm6uWpCxCZ3iUl17it/KIlgDIsaGTqfTl69fzucBESyuu2a1RcSnpyc1OH/58uXr169fv349n5k
I7u9u7u7urq4P6il6fX2IMaY83N7eTo6gkBCJAoQAMWKTGxFmSUgdCE/B3J3gu952K0S9XjzlwIp/rvcLz/2qHWE9X7hyqveN2F/fiBcY3NK4RFQXnSw8q/dv2SkNLzHG5ZaTb8EaqQYFbgveZKFr5Hhh4xJs/sn3TN8rjcA8a4vhrH6tz4y8mLGJijUMVX0RjYe9cTpsXYiT+sT5kVkLm81WL243u4mpi4Q9j8DEQi3eBgCFx2rFigWJK7hVPPLXaMel1u2oS6reYRnwXLe56C0ealdRzuUb9eNX9VyHO4n76ghLqOwFEEEQ9fIFAoNgIA2PzlNUfAaA+USUiKZdDfJyVeh/iubZOXB1sVJhUyWKJ7c9r6gk0+KQqOReEACxpPYTUmi6N+gJmigA1LvgBI+Xy8vpBRJ6V8zyfNLxmqYxo1m1bFT9MwLSqGWqUQSXQ8Z0Hk8cBgYVr2VDlykhDtkIAJphWWOrmvaooWVV/eNiG7S8hebp6jtVsO0Kr4mzlXZkEKZhVLsZLOfRxijO/RIAVGCrVmO1MAwzWFxqpVjnFO3FG3a2yHlG4BebokIveOigdFIUM0SU3V5exd0xpdEvnP2uM0x6nNgdwmqp27mAPcGiJ0xzVzBasSRf5LKiYocg4BgQLx3u18X6Erel4epGHyKWK7EYAJhsAxNYevxPV1vBohJ+Y1d7/TmsGPo3618qm69UjXvCvtSFx9j3dOqOABacvbqzrZPFMOcHM7oVmRJjfO9QAcC5HuFSivpLjQAsdiZ74ufUI01cwnGsREB0Loj+7zQwt/BBPUEWh5KKISg8kNVXWVUuE61Kzh5G1ysDI2dZhKdn5oaiso4xTw7zinANRRNPzel0OqtaeD4R0X/5L//rmzdvrq6u+r7PPIYQkCTl4Xw+t21su+bq6gpgD6InRFk0zrogBk0GgykNnMc0jEQUm72E7umYMvx5HvLjyxlDs8a88QrPYRCRRS5FGd2cYimKgZeDpDzRHXlKaSJSvuqeNd20V6MZooQGWaRp8er6cH3VStrLeHP9/HLYX+v9iKvrG91cRsghhD6Nw3H47bfffvn9U8aAEgFgHM5EV5vw/weLkWXmRWQ4kyho6ZI3L8mZ/5bTAwAUOJ+P+ybsusicv3z+9IRpvL+KhIfDXgOGE0XNCWazDAApJZakCEkpffny5XyGwwF2u91ut2vbVpf/MAxt2768vPz7v//7b799Zobr6+Z/+V9+fPfuXaCYUhrGPueRmVJKgFPCWmXyGqIWEZnzOHLipN4uejdTIIuoBEiACGDsHdRbUZYFvoOp+gr16r7MqA356IS6dR1ZShTV3/VXe2h8EgAAyG6yWGWR+QrG64PyYyGTIvwNeX3CiwsRFQKrPdpTWvUcV9qg+BQLq0i5r8P8zeI5Niznaz2bm9X8GP0mZTU3MfB6sSs/XvwImuSmgFF1CmVj9SSx2iy2SXoG9QJB+O4WD0u/lUnGNAJxsW0REd013eqw3sNpXaxjT/hSzR0AYCDOWYdhb8m0/yKygKglqQSPzGpZUgMRwKhyMADO2X7n3VFcsgsF2vZ1LqnVjJnmEqJN3zKIvRkH3AKopoeZBYBxyuviToam62Ft08QYz+ezKd9S7IQ5z1bB7HIQ55y7tnX2Tw4B2/IES9YKSGTXGkWQSxYULLetlM40ymh5saSVG1MuGTUsspBulqrgeWkDhNquTSlp1GkuVl1m1m1Dz6e9UBhcXh0DCVziDXT5TBAxj2kcx7Zt9dKCodduA9qMDMOgENr8skVUDyHGOLksLuMAbSyhMjrjwkYhzNw0jQYm1Vvv2qOU2/CVViwiRPMtQVvY7PILeV1UuzMtxZQoKnfkPGPyCLS/kzVgHG0xm2QwacVNRFe0ct/34vx/pNyQ1JseikBVUG1R5Cn2d1OOLdhzECinCU2IRBQxWjTXlFSXm5ONwpK7bTJoG3tlDBTnUOEHJXOL0/PsQiKJM2xaXxPtBVIpSUSQpzioBCgiLKIWT39ZmbduVXg+4OHhKZWWO4grAG9yZ4B6f13TgLfQGnUZP0HH2X3BJefd7MXDMzO3sp17SPTcxz54V+rqYGUarPsMPvePHvhvFd+jfzdP5FTjMLiAUr5cCmZTsQKbGrvHv56gTcAI6zvbfuD+pyWhznYDfa7Khrq9q6ynCxCbZhwmC/x0wxkCM3ddl9MpZ8GSjJt5uqZgR3j+TrL672lsYWa+urp6G0hEOupCCA8PD7vd7urq6nT+l/OZr/52td93sQkAMAy9SG6bcpYEQruWQsgC5jwf2066xAyx6cYkt7fXQ8JfPn6KAU85wxLbiAhY7572k8dShbfNeamWFSyF9eoAyDWIU1xu1QqFIeecORCIjIf97c3up8+B+tPxdBxOp1Pf94B0Pp8fX55Pz89Df2rb+Pzy+PGPx6aLf/v735/7dPz3X1NiWQbNmvnGhUjruMqUAFvL04qyYtuX7Ux5vX+VwuoGgYhEkUREskjetc3L4/Fv79/99OM7GE8kfX/uv7w8Df3jfr/f7TvNM7Hb7TQx8tPTg8p2KQ+6zI/H4zAMIkBEehysXjMKj2bv+PXXz+MIP/9891//63/98OFD0zTPz8+IOI7jv/7rv/zyy7/97acP//jHP8Zx0O5UtMg5t22rLOXh6QsAE5EGpkpj4nFs25bziDhpgNPokLgkKNpkdxVR0TKYWbUfVVzCGIi3ovhqFT8x2gthca3Rk73Nmj9W84fpvpeqXzAyXobZqAYr5bRaubTmmpbiAwwiIJOpcGpfAMqRyrrTJd3OmMmr2CfrzSW42HJ+UkyQ8GsEnaBejY5oscx9hWrSi71h2/uxAtjjH1b8R5ZX0gwzfo0viAfqJ1VHFRPzcpTvSMUwfyau/VoUX5MhbciI9UyRpkBdpiXzSDBdw3iIiCUkXwilaB4WK4XIv+u3SAteaABU8gktrUG5WERAFqLyOI4RJ/8as2QQUTSvNv+DXqY3SrJxigjKYv+eNbFQlBwnY3mDTMFvmVdeSPl+/qaD2yUfrlhPKEXvyFlHPGlgC6sxzmVzdxEDTL9TEJlOOpETI0J0nr66+GOMMDXFrCF8SGC6A7C4Fqijt3yMuICGQttAufbm5d28TOhnSqAqil79M6KfgHfIIaI+zznQrR1VOFUDhGLpyiWI6HqBGW0YhbAzhRWld1F0vLbADHtjToKAgQQhcbZlOaQx55w5cyEwY3J+nVgxS+B81rKabhsCX7CAiTu8MILRgDr21ZaxgWF/jdr9NMFyL6QwTaVKe578/OsGnt0tAbfJJUmmOePESvToAgEW/FEu3Ouzz1g2J3tYVV6jyH/2fMee2zEQot7BWgAQoE5LSDBrgZ6X+blbnzh4zL8Op6///QXdIYjvy4NnQIJj3AaAJ7k1tOh0uapf3BJwp9M0IHaHwb5x3+8M86XJ1BA+WET2b0VhuVQMFWswYAvn1fKviNOPy6+pNTGvMTkNa3mP3RqsFGmDxD7HGEPZBPRgkXPfNeFwOIjg6eUMgofDAYreaOeSummpKhiaaOGXR84ppRim/HKn0+l4PB
6PRyJgTjKJIAtHQURU9ikADAhAQgxgd/ino4kQMERsiE6cgZoKbwDFfLeaCB3/Jjeo6NOTkCf1bzIHLAoSAKgjpUhGgMypa6BtAwEzp66JV/vrw+7q+PLw5cuXJJAy/9svv1GADx8+tG0zjkMM7Zs317HdEVF/OkPm3a49DTXk68/r0X1/IUECBCQEBBYRBhHg+TrrFkELCgigHUMgwDj2bQdv729+/PEHTGdJ5+PjUxD+eDz9/vsXEbi97W5urx8eHsZxFMk5j13XNU2jkQhiDF3XEuHdbRdj3O/3RJDSoAk2RVgkh4BXVyGE8OHD+/v727aNOaf3798Ow/Dp06fj8fl46p+fn4/HFz2tUJ0QprWg8XhRfVB1P6cGGmjGxH1/arbTk9Ac7aiM3vDs9zLDfHWeC46bVYuxYu+vF8+K/fNvksSajNf7oy/kLMP2t+J4WE7xmJmg5lQEi1NOJaQ5zdKFnmtUlIx2DBtYwpXvvf31ghm42fHKISyuaFbXoxZ4q2CTcvHs+0uF/815f50MENFcRqtxIWpI178Gk6LOk0HFPGE56dXWv3533lku01WFgRmMcrq07siaNROaPckuHCA4V0QoUqhRiNmfBGZrFiKS2OHdFAYHEAVgVkKk3LlSjdMsgVCopxLRpIjak54aog3SMO6XMS5lUHP1qSiPiMxrhWUht6AJs66YnYSni9p6uDVr2F4O8MCgM+uppZe9xSxkAADkpmnUEmhjsX7Xw4RyMGCzUjpFdC6F/iKpWX5gqTB7DcQ68spPNRZTvagUdCc0NoM6xXpcoQqhHXehy+uwxpUnNf91TfeeACquBABqY1SozGJGJdaopzFTYisCUAC84Ohp0rDnTWcVhDW9Lb/aLKjurS2ooduvw8pUxSu3dWYexzHCrHCCC38qy7M9fYtL7myDYfKxLsUNzUiu9tH3bVZMDcDytl3cF9cPq/arLnwdRBWSRCtZnQAgiCQIRTmkopuIu7kK7gRhc7I2R7oG5i8VXDJ3XhqsjH4u0aHROdb8DavKBrRvzQZVWeahKC0sr0lCnilpjb8oD//lUuGngsezCz+69exUlDljb+kUtAagQmyFlurh1DchlewvLDSvI5k6ijG+ffs2jSeURIQ5S4wRgYgIBDmLMFAIau8mQmpi3w/agAioT3oA4IaHY6/84eHh4ddff31+Ht+82d3d3QFKznpGBoikN9vDdK1RRNPtIjYSCJhABFkkSxbAgCiRcNc1z89HchaM+cPWevdPXiEejzr/1iZiNwuL6EFQqUMASUQwQObx+fnp0ITu9na/37958y4L/PLr7+cxxaYNIfz000//l//rf4GcT+cXFO7HIQuODJ/wKWfOeQBYuMhWY/8mbN9T0BkovslMkERjDollqBcBkBippbzbxRiAgIi65ga6phnS0/H4+zjCfr9/++ZdymPf9yEgAHRd07R6kgvqTgIAhI0d/KveodyxacLNzZUek4aAT08PLy9PepHh5eXlzy+fnp6e7u+v3ry5R8S+73e7nea4F4Gu6wijXnb9+Pj09PSUkkiTmCNiDAH1ZBKw3s0NP54h234HF6jLvwgX1imsGeOFFjyDhRXfeKW1V+i/LJpVuUxXXt4Ap19FClX9igdqsauGl07i1v16Ut9ktrASYGALn5dm51JBp216GawsjW+8e6lfD7P/+/0ASyn2hEVgGSmjqr/5sNodYEXhvouKfipq9LtbVfN1wp5ZDSxasLH49o0GsATgMHetCQ/FB9VDeInJIyIJMOK0Ctx4c851MEZr1Etm/qdqQ/Ko2RyzhVexLvRvE2uXpInRyLy9QTlfQXDBA4q0rdK5OTF6CIHQLn1NpnvtvQhe5DIrFDDIpHYpui4AdF3HMt8eCSUXubZvLRuDMHOtv6YFMBkAdeYso4MwjuPZdFobMqwUQmtcx2tTbrPuXSlM8wQAFDAlUC8PYDGvmcXJJlo3ITcuFe4RAPUEAVEQoURnUGxHHak/khcR83UGd2og7vQClqzTrwQPUuIMy4W3psO1dQWdB0tltasIlTMQUaBAqBJN4AwiHEKMoYHJZ6bYYCExD0YDRvlU8hD6qdGfqmRTZrmtnKihGCJgxZ6YmWRSI3PO5SYwiwgKxLCQj62g20r9uvPM1k9EtYhm2HA+XKheWXdanheeCIvdzzGjufJaK/YTWrFX449rUCtOsq7wSvH1ZcnxwWHy9U4rqCrI3ecNzxzrApebsSokhqV5vayST16C0He9yajXL17CcFW/Qtpmj75atcw9Eiow0B1v2a++pv9QGqzlznJuIkSEArTM9sHT1XdUPwwiOh6PTQRhHsex6/Zv375NY35+Pj49PSjTDs109yHG2O53mm08l+MaUj2PKITw9evXl5eXz58/D8P49u3+559/vn9zp0FibGh6lR0RmyYCgBAxEmIQEZQsktOYdClRIAQmhH3XEC7waRi4GPpgORffXCO4FShu/e7i1/UTAURo2ih5HM4vX75+zueX/vn5en8gofv7+8+fP39+eAnxfLi5vrm7jTGOwm/evEGB89BnQQhNhrYf5I/PfyZ3TLOGoaIu+OsHIv4gz4R+KlcMNlFUDi5mLqfXpds2InA/nHDsG6QAtO/2bTs2TQRIu93h+vqaiAS465qUUgio/iMAU8Qjgcxp8nBThg/OY/n6+lrVPDUJqhcSy5hSapr44cMP7969OxwO5vijV104Qxr7lE5PT09PT0+fnvPx9EwEQjTmzMJIsWmachd0Y7zf87XandeVPS2tafiVrj0TWPONqn1f85vtb5acaxf69Sg8O9o8mAap7yJe2i6tLO0H2wrJK8ivtgNY8UyvMMDCYPhd+F8PvILhUrmEf1nqVK/XX1PC9HfrpGCzl4p4qk6rndfDxrJh8/Bge5lEpBwSLXtfg2d9lQCaC59PKqY8D94aIZURYr24EDE7i/EM86RfsJfQNCNTtFtMWptcFjUDkReXIAGc9O827w2Z24Dw2wwulZlKRGBhUSPGMkaT+VWLqY7MWPLL++m0rsso5q+AJKx54Vqfaq+8G2x2VY63F9fUllLS0ztcuo2ZC+tsnCXSaK4eXTApjawU4fFDLjgKLEPSV5NtFOBfZ+fBCACS2QKEmnXOzE26qZhWzCVHSKXDW6fmsOocYhdKlwFmIJm91DAgxUveo2KT7telquOnRn+yy5OezNaMzOZLeLaq+xfDVnqJ6l1Y8QV0J3ZaIeVkc2R2S7vFaiSh+FFxE9xpCyhKl8dICk4F0nqA+C12CW7dWSOv119353ErIrq/mIRqBwxBkMsQSEAd8HBlkbMGL1HCpXn8JuV8Z7Hhe1ZGS3+ninF9J5ATqpcEaZ+Nfqqf/ER/zxhfmeh1Cx5v1Ur8Zkf+FaOi9ZNNivomdb3SF2zJJVWbNlg9j9eFNPHkoGZ/YBYGUQOU7oBd27TtTqQjor7vv355+PTpz5eXF9KMqTHoJWdVCDVNXGyaQI1mmE/CzNyfx99+/0XT1v/004cPHz5cXR3SOIaAIoRYoulwBFRDELAAMwuhYAaErCy1xN1BwpGTSG6aI
LIhgCKi4Py8msE1B/snkP9KEWFAEtEAM8r69GoDZYYQsI1hHPs//vjjqdlFpLgPNzc3z+f+3GcNoxIC/vD+PWdou7gPlBio6d6+xcPHT+d//wN32/1W9OyGuU26l0ja3+q3mq8ohACaTZUAbLwCCOdzut43u13bNZFQMDOXsKJt2w5DOh6Pp1N/e3tNIYooS+Gc59lkSTlnMfN1ANQUQ8gCmSWHEPZt13ZRD3abpmnblmVUkUM1mX44icjhcHh+ftntdjG0T+eXT5/+eHp8FsEY4939bWKR4zBAgMQiU5zqv8o9K3zadKzvv20i/ztZWTUvsiVhb+7Rfnde7w6Xe7+oHplTj4kxU2HBkl588QIvjmn+6aV3CfKKnVpRV6aKQ77S7+uMdPP5XxrF5jRZuYSWTWHGf16P7hWBwe9Efoey3bYSP2xuAeCvXq1Yb0NrgMFtZMzZdCtYCvabqGMXZ0SBV6nSkFCJLh4/aPKMuuCWmlGjQyMAwCLMpnFGrxDC0u2wApRK/JLMySMCF0ozwNIZTERyqo0AMB12kp6kqnHdeNV8tw3rIhvF+4JPIwIACmZ2m9UAQxyRxNhSSYqg5Xw+A87ep9mlUliTwgKCMmotdofQamo8FAgb6xadQmVGJwVANTEPvH44nU7VcGR5FARFo1OlbhgGKu6L6CxpBp7X+e1do12ab6PO4yV3WdYsYDM5rmjdhllZxjxNB5fByfr14EnRqUzdshXCpawx7HAy11RKLlHgkh97LnkjaOnUB+5sz8Njv3rLreFZROx2ByyNq2oGtDuN5mpiD2lqGYmIADVcYTVAWxe4EhDZuVLDFjOtUGQj9UO2diqeMHNAKcEJFQadSkC7gmA6ocACXcY0KmK+NH3fHELVwqXiV42HQS4IJX6Nb3a9Cap1ACsd2H8FO8ACgNWmIq5pdL++PsA1PB5vuOrlUqmANLDXM1UxomppV92tx14hxP9qeIBZV5eqHSl5I0QENNEEYs45xunMiFOGwn/6vn9zdxjHUYny8fHx08fPz08vAHRzczMlaMUpNxQz933/L//yL9fX1ze3t2oqTCmdhn4Yhpevz6djv9/v37179/bt/dXVFYAwp7Ztcb4WjRhAY0FmyJklAzKWlDKIJBJCiER6PV1YAHnXtZHqBTINdqmKG27xwpRtcgz9WvlTwOXVND1HqCJD6XQMw9gS3N3d/fz3v8GYTw8vkgEYP378Lca2acIw5pzz77///uXLl+PLy9319c3NzW63Y5H+2J9O5zwmFgirlbimWE8e37nefan45OstyCRR6P6LCKpPgQi0TYhNAGQiJAgYhASur27vbvtxHE+n/ng83t3dBIrD2MdIRbUAooAkOU8B0gEAEZizuoGIcAjUNFOK4KbpDod9iabGiFLiqMswDCISYysi+/3+fBoeHv78/Onr8Xhu293bN+/v7u5435379PXplESIIlKUKeTma8hZf60ozSrQ0tEJVpT5nRO0JtR1X+v2cSkQV8C/3k55PHvK2IYOAG3beoaPJoNxFhFC5OUyYWaN8WVNvdrvIu/iKyiyn/xdzaoLj4EFPK4F666yvRt78fBUKN2E6tL+4r9uEtJ6yqqWEefI6jbF07sifGGuL0291TQJtvrVf7Cuv3OLx0IGm1Q6b9+OGGQlN4KbBQ9JRX6wjDPiD2KsGhHhHOndrc1VF/ZuNEsULBfAAikAhjtLEF8peOtRVRMAq7JmEGVVT5jyUM1w46I4xQ/KZioCUzTOCsXgSCrn0b5iMQo1TRtKyoHpRJnI/HylqFgpJWUQ66FlF/+zdJSZF/OaXX48KAj0RkUA6Loul4R+wYVRrXBiR1brRJYGhr8iaGm11icNhbAW8frIBSzVKNhcnHX1FVNdgstv7kG1KbZwT7YUrSa7+2OwXDO0HMvmwvCj9o2zc0f0q2sNm8cql/wlXPx7/TDNxIPOK1XKcX41cFhGa/RYxaJMek49pb1mtrBXuj9xiVAqIqr8TomwZOan67W9ufSYt1c3Xtp9l3Nk0YzszMi3I+5U1OuEMrW/gX/DfPWhmp1L7OX/J8W3aXrvekf8JivzdS6NEZc4r8ivgkeKqlNRbwXGvApm/bEuFTxr8L6zeNjWBABbU1lhzL8lG2LlYsZfwbatd2YW8d3NiwLdK1q4RF3jNIWzyjm/PJ/+fTwP/RNKUvPg6eXcNt3d3Zu3b9/rCVEuLOV4PD6+PJ9PA8hxTGymgyGnnPMuNB8+fLi+Oez3+xgp58lMREQhNDaVRCQ5jOOYo2ZjggwoFPRQKwT1EsWUBgoIQLsmXl8f9nt6OF8UcNfosvW7SWbVZPm1aZR5aWH6ongWEZyz88GY5HCAm5ur+/v7kKVvD5KBR757f2CG29P5dE4vff/16+PxePzXf/3XL/t913XX19ehbRPTacwieHvYvSy7XW9wK8D+GmH7PcWGfNk8CCJZs1WhYEErBsCwg65rAHgcR2BpMIQQ2tDuEtzf36c8PD09ffnypW3bu7ub6UQeUel2HEeY7spAbGZz5fQkRkTU6Ny8dHDNOd/cduM4hoAhRJGMGETw6fGpbbuPHz/9+usf4wBv397/7cef37x5t9vtH9IJgMcxMzFQhCIhiDvQXw15Ichu0ox9WCuEFbY3G7/UKWwxFg9MxUsr+QGdFO5huEjMIgDiG7Fiwvd634SyBDzRTM8vo6Iq6zqX9kqPBD92OyuHy6v49X79u/6o95X9Gpbc+HV2sZ7Bar7gshzip3tBda/zpcuKWUUbm51uDtB3aqP45hRX43JbNnpU4OrkHZbz6NeXGY1MoTCowELRgGF4uqkjImpyE00iMOWXmbqIqkh40RaWiew8lxQRhNkgA3MHfCnytR/kkuDCmrMrYqZIuO6ejJQIH4goUKsNHuwJ78JhznO1gMSWTc5ZZPYwhBLWTzd4u63XNjuWZII7lvuHzDwHGVtyEHQSf4kxg13XeTQWagh2VcAISyG0jH+mzumsm8OnllxSNeidPU8QU5uy0EBMX1KF1hqR2Xd5wRRCCBYxVQOUWSRrP17PtdmisLpiNdfBMzwON7ltNSLfabUH+I3cPpdxbdsJObPHjKbWEBF1pYayGivPZENptUysccc+5nihhlKDh0tKRgUyhJBKrFcDSUT0xpGRHxGJIDMLS/BXZAvSKq6HSxEcVuxmzQ39r346rPG8Cn8PRSeBOeBN3ZRn3yjAIBRm12Jr0DtR+Fmr2qxGUXW6yYteL1gUXXLOw7x0S1537V9f17zU0bwlLCH3SFaifmV2YLVkLhWv38K3Jv2b3Rnj9b17krPGq6nEpd6Cy50eHM1XQ/Pk4bFXGgFYzbhWIr1JCFM87sHdBdC9IaV0PI4gQTgDJ2V0dzf3+92h6/ZEwc4cLRzU6XR6fh7O54Gen4io67qbm5t3797tdrtdaLquyzxO20fbjOOYxxERNZ7TtN7RzqdJ4dRtmYhiDG2k4XzOOQ/DEFsIoeu67hrCfr8P4+LOv59EXMkccuEgwHhCNWsevZ6hyYVUpSJSPZMigoQAXdeEEMaxF4amaZomjiFDS89P
L23bJpbT15OmRCLCvu8fHh4+f/7cdIcM2I981OwguPCwqFZHNS64fCByqXh68+O9VF8mnUGBiSAZAEEgBIoxdrEJgQgkIqWUMuTd7spSPjw+PgIAorx5e6/DCRQszKw676SRTSTQol/V+ufXgiYqPA9fBbjtOs7w/PwcAnTdvm3b//k//+fjw5Ez3N9f/+3Hn9+9+4EonM/nBImZYySKzQDUDwkJmqZJFxRCWUrV1T5ScD4T3vqDx/NfKp5KV1Ow+FxR7Lr3VSMXFUKRhThuH7JLn+OXBq0cNxaQ2P9bau0rENrAK8vepdcNA7x07LJS+fI46Whb364+i4tX+ZfK62BXZQ129dO6gn/+zdfBTYSHzfZ9WM1U9dB6qfjn9KvrDpZMFYp0UW/0Jc+NOPGSmX0MDg+A2Spgmd/Cg+cH6AQYhjL1yM612AEDAJEgBNIkhFlEApGY3kKChHr7QkSEABFjmCxj7OxmIjCOqlgGKIGKFSXj5CU/a3ElN/0cHAVVsJ4OzAYCwtgIILLonciGiIFYMzzAiIgInIH1SMzGw+UyZIxRshAEEeGkuCYB4l7aXSciwACaSxdRBHJOGmGc08hpZOZIiG0jkBFQ0uQpqtqRZi4ahsFS5zVNs9/vmfl0OomI1pESzBMRQyCRXAY7BWURkZRGlEYyI2IoUqjFHdEAVgVvzClrHnlVyczER4BIgQAJyfw/J2JCaBo9wNZrhGR55LFstMKBMAgICHAGDK3fdSjE2LQaY11EhlFyBpaQWXIWZqI4K5kpJRamGGLbcPHAVBTpVbpQ0oT4BQMAIWhoNVRTKlG5R8coLs0Ju2uZXPLtgDOTAgBIcbXFaCbQnJhZrD6iS5dS1ow2i4o3kTSOIQQQ6c9nI9FAlHMA4BCi2Uux2FH7/qSasG3nRESr1LS5FL+wVX/OOeeRJQNBQESCEIBYmCWjqOc3cM5clLGAJFMabs+ONPiWXY5daBQYSESyMMCcVFAmE3Z96iki000XxTYiaQRo5kYVOXa2bgAEzBIUFEQyK7/KuwLQADFKDsjMWRgFMpT79+VIVY2wFoSp4vIVx/fs28rMbS9sD7g8hzP8iIhFDLI2jd7WAoeUMw7rsdqMa+6POCYiIhAmQEQhIIAp2+f0ijJJwGkcy/MFEZW4F5Dwciuq4DRQlf79LriuD0uNzm9m+oFL6t3pK0/wUol8CwBlJekRwOzKsqlaezzbdmBiuiddX+afMAAQUkuocamFhEGAAgaMgTTcfhJNG40Q2nY8n4ehZxlj18ad7Pbx/u767f1tIJY0jv0gMF3QIqKUkkAmCm2MDCSCmeg4jNjA8wliIz/+ePWPn/92dXVQNhtgAOE2hkiYU39OvQ5rHHvdPtq21dtijDm0lDADhiyYBIARidKI+Zw455awC03EgADPp5fh+eVuH9Ofg3c60GXOOcNKmhGYbiDrdyjTJiJYlqTOxOJFwgrDEx/YytuGqFcHkSUDMwATIhFExEgxDXnfHU6nPgK/PD+Ox/MwDONw7s9jZtzvb7AfbpvDjz/9/enx+Mtvv+6v707n42l8ur7Z396Grh9SP/TMGRAwhNgkgZxzAG6bwPlMIAQoCFmAISRAFuxcEAhbyJ6kF/yqkJynST9eXCIHABgjYiAUhISSQBJxAh7oDH97866hgEnapkGUluL5fEoRhjRwHHY33cMR/vj89er69ue/33EeUxpCACLIqRfApgkAjHE3jOeceb/fxYZSSiIjoFCTGZOIUIwAlDOPktM4ttjELqY+9f3YNYdATX8czqf89NiPA19fX/38039+//49EKWUKQTIfQiEwKnvFUjGOPQC4YCIAAycJGeQMWBqkARJWRKiwORAJyASwxSePYveqJz8QWzbqaZgsQc5nBcLv/jKFc7XrKBi77g8X8bia+OXiW8KQGx/9EuGZfRveSKhkpfLzAa6PSUQ0S3MERsAyAVPnE2+KsuoCpuDXaPCI8Ee6j5l5pPlqMG36bew6ifle3ZI7RHrDQbWDi7P7MqZta6jWCLlisgUup9AXWqzcABEgqAxI0mIMQkIC89jQmRELgoHzCYvYQAIzGr60t1Ar9wDxCYIr47vERMIBgJzdSQEQAbBMDs8MrMFg1UlQqWeiRK0TtZLotPXCTYEgmwz4uVVD4mfO87gw0aYzKPnhn6ZVMzK01JwYQ4LqidiECQANb4JAKAIocpYWY+d9BfhcTricg1B1SjLIg4KMzOIRbmEZTYMvRPlcVGtNxuVVvNuh54u5yXh0MHMglgmiRBni0rO22wCV4xecT0M4JFYcLq4Nqk1iQgJOaPJ6zMeOHVdg0V5m0KEycxl1svbr2E7AjQRzbBELodhdkVjhPpc1VTu1BkCrQVbnMYlEVGdDI0iS84oweVOn3hUwcUw3DRN0zR933NJBO9XfmyigqcTqvdtiGgYBnTZ5JTvp5QoNn7SPU/xxWa04oweVD9Ga6EyEtoUr7aEmSZn1XoZ5EaH73EYQmBOiGQzXuHZXHPBtF/IZoP1y8ErsbPimrOT7RfFcEjL/bXCiRVzXa4oJCxdejzvsOe+vhR/YHA8zhQkPyIPia3BCtsiYpb/7WGKq7lsqqJzWK5xPxZfbZN+jPDWD18psjyxfr2+R45BLiLzQUyxrlRgwHJdvA5S9YqH08ConsASt54vwRKH/kVZyQ3fBGbCwHe+8x1ljaj6V5n+ginPi4MCvVU4CUwZEgDwmDRJoMiUsig2ASDEGNUQPnH+EEWQsyTO6kSQM7x9u79/c3t3d6PeFsqnm6Y9n88WeDnnMcbYdZ1Go9HUc0jzBtTFph/HlBJDjC0p25dxRGCg+ayTyn1vvUpgHGnN3Awb//SU+YfV7nxhUnCSiFQaY2HkLLln+fjx4x98JmbhFEVP9KTddYfD9fXVm59+3u1310OCh4fHGElP0+7fvvnw47uuC5+/fvz68Cv0oJhkSVOsZXDLvKwgBCSIggKXo4PCamm8gh9b7NWSp0gkKCKZAQUCEiJSwBhF90xEEWRt3rYVnbumCQEQkE/nl0iBmYNmP4kBAUMgAMycbTsYR93rMwD0/VkDbudclFUAgtAPp3EcRVAEhaE/n/788vDly8Pj4/j+3fXPP//nt2/eN02j5/J6Zk1E+30HIw8ZQFCjPktOOG1/KEQIIRIg4pgFpvsJc8QgKMZtmfQefx9sQZnwLT556Vd/YHdpBtdT7DutOJ6f90vcsuqr6reCzVaH55yXmPYleKrt6XVI1iNd96WNVL9KEU23Wl4oqL6RavMqT7b3Jl6uO89PalnFGMXczdSX+U+ucVLJydaLLjPfIyxFqArtPu2H7Quw1LjQi5GXWUTFSQq6ajTCX1kCuArAuWp/w2l5U8jRzwyIZUOcMUOoWg6W1wlQ86gvDhIUN4oYZlZbHrk0CZA5zxl1xTQTb4+uSgWoDdifuPsx2K8yxWcvCMUNKpRVPBKDxI5w7Cf9Ok7eO3PGP/3d4Clyebm8l6cDp3LskVMqt1CYVRMmUkg4RlJCEjN7OtEfXTE49SzHE5AZ1tQOqbqWKV3aTtM0nkTE5ZC
oqMT6FVnoh/ZVJEuJMwIAbdvGGJum8QqzhjWzxBUwR3UHxJkV+kweVEzbHhXe0dQz0GrR+fUgy4d+yOL0MbdyZscGDwA5rrHgHW4uKqZgvp1GycY7TOs2P94QZj3fXHBDCOLOfvzUGB2Sy8rY933b7Ool6daRShzaNfBrypUdSdiammbTMRdvsfEcfD0L6/Xu6RmqbePChiqu2XW/OtPVwHFJGRWj9GO/ZMFYK7q45KdrOCs8VACsu75UfBczfiiKCFgWhCI76vilZMazLVOzjLzeEQHkuc5FkcImfXprGa3qEuZXCKlFB9mSdVwj2wLH6yOqam5Ox7IX8NU8dTGzuPjGAKIHOsJzNClkQZnibocQiCCEOQgwGGOZaSkAjFdXV+/evdvvO4LpeuEwDOfnp9PppEloxnHs+56I9vu9MlXlruiCk6WxJ6IQd4JhSGkYhoC0bwICX+266fi5QUSMXbvf77sunU4n2610pEET0iyXzyUafqWI1OHyv9mIyv8AgBqjGwCRETASXl93bRdljA3Kfnd13e0BYH/omtgdru8Im0BdP8K//r//+6+//hJiw0O/v95/+OHdjz/+0LTIMrTxVxwQQRCITW4TZnVMcKDqkokr4q/IoxraN7FRDX9qTWO9MqIAkfLksNtR0zREBJhEhCWrwBIChrADgMdHIgEGzjmfz+dd27DkwFHKAsFJtdZTRdA9AxH1ck3b7kKxUAGYyCgAUbGSxvT8/PTly9cvXx+Px3R9FX/88acff/yRMB7Pp2FIun33eWzb9v7+vjkOz8d+ZCDhDGqVmWRrQgEIDOr9KwBsPgh2TZQ5Q1G+JzlfZt5gWwYsd9v1pFyakYrVb85p9aGar0u9g2MdG5O7BQyYw5SLUOBmbfv88fXBVjVfGeZmmXfVJSTVPliJLutGqsdVO6/gcN2L7SMepMqi+ErRJSylvn/Fb9l+ipUrq1NY9XwNIcAconK799VsbjYCDkuXuqvgtBFV4wIAvVLkn2vjdrUK3Fyve7QKm0FlNoAvbeq86B+trq4hsbJimbggIgQgBRS/BtYDM9XLg7gW0Tx5VdYYh2IRQRZ2oisiIoXAMBse7QhW70d6BGkxk51BUmySdVghfTvnHJbxNotqKsyWOQCYJ4OhFD9MVRvswrf5Slm/Bl5wGREMS6YQGlY1w6GqghbeRkpcE/XexK15ge10EQhAIsWlc1IIp8swUwtz4ElUVdBQUaKZTTcDUwmlbQjUlEdmJsVi2KxI3Gac8+J0zbNyWC656VdarCt25xGrSURwCrZRiCe/itiqv+iMhN61FcqSY2aiqb4p6n6ubVIUgJQShY1+AUAvz5j+rLv1MAxN7OyUSUTYGdsNEvusJ+5wuay4DwqLejmIiAY6nHAnsx4yQaufl1JX1axfxROqkXFKOL/Y3UV4CrkmokIGgADIlP1xqQ2u5xcco6hG5D+sf73EhTbrw5I2PDOsKlfkui7rXhz/zCCCBFRCJgUQYE3PKCAqWYmU26e+QYA5Wo/vi5YhDWQpB1fDlJWUXPHzNVrsV4HXxJSqMhRh0Zdvvu4BrhjCen7FOfP4m0Er8psKERIgAqCU+9WIFLSpJCBBoz5O+jnnnDkLBtYA1E1Dbdt2Xad8T0QaCoB4Pp++fv36/PRwfD7GCPt913b7ttvvD12Mcbfbvby8iMgwDH3f62JXBtKfx66NVze3sd09vbx8/foADNe7KJze3d/tdx0gRkBGatv2cHVF9KRrTVmHeRZtYhX/ik4oS4nBv/7KlOUsAJlwSodapgmGge+ur96/f98FiZgPu/2ujZxybJucZLdrc4LE/OXLl98//hojJU5NE26vD3d3t20TQFLXNoc9wAtHClkVJGCAwAhQzhrKwMtF6gsLcxMJ4tjsGoHrBaKfScxTyVCFiLjb7SaFUIAlaVITgTwMQ9M0OedzfzyeRkIgghiJQXLO4yhIxJwpALCgMOJ04yDlIaesCidq1OvMIsiQoQgDzHJ1dSAiZjmfXz5+/PzHpwcAuLraf/jhx6urq/P5fD6fT+cBgPR8mduw7/ZNs4vxDIBD4pRR8kkoCBKIcLnoLYIiLAW9uh1wsRCqZFhQxgABlDWt2HhZd1QtyYquDJV+Otb8017fZN1+Tu3JLEtcZmsmVGw2W9WpzvcrwDbfrYZc8duq8lp2ulRtPQSvyPn6FcLXDLbqwiaiMtWux+t/ovmiipSvAGBwTqcY7FyN9MnmiNbArDmSkaH/dd4RKl19aX/zI3pl4tYgrb9uUtoahk1Ur4nW3l2ThHXkz3PXABsYNF3Byb5HROQMpUkRESBCJiQyw5ewoHrFai1mRkIzRxjQ3iPRhC1/DG/DMBArG0s1Nk8EMl1YAgYBJEQ9dCwEsXjbFAMXS2YJ0szEVwkzKkjsq2IDzX8POefZCFneyERo0RpBMwjxmHkEAJBoFiQj4lAC1YQSpNFA8icB4i6YqfJW4dnXNPOXb9BrYmUgiDgdYDsr4kxSZkQinFQptaNaL9qgqoJ+/RudEM0ulKrEcglnav0aD5UtGkDcphwi0vR1FacwSKpl4w8mfBfo+Dg4E59hFRzftDa5RPv0LWDRk7nEg1HvMk3j4SFPeWBJ3lLqmbWp0IptdQYmInAkilwUQm3Ba4az4Wib11ROnlaqlagP/ekJrC6yr+mQl/4hvtAyQaJm0mLFsNNkECe5w2p6juanu+J0a8YHlwVfj/OK1YDjrVW/VQuXGO6lh1rWB2T6l3DaGIM6Hk8XoVFEcDqvY0UbAgMsqHSGXDYU9fkO4gWs2ogMD7Z2qjPvS/iscLIG7JvlFYx9Z4VLr8jSPXUamtRPIoUxc/G6DLEEzSIiYBRmIRDJzEjTZIWBe2ARyk2MiNTETh00+r4/Ho9dDCzp8+dPf37+dDrB1R7u7q7fvHmjJ3cxxqZpiWi6o55hLEVx3rxtRSS2XWy70DQppcevLw8P6foKKGBoIgUUAMGAgaiJxkvBLU/PA/+JUk3fX5pfNH0MAGDapoE5NvD23Zu3d7dtCzyc2tgQAoMMeq9+HMaBEZrMiQiurvcvLy/drtnvu64JyBlJ2kCHXZPzSAiARBAywHQZHoEgoLAACwMDM4JMzKaGWV41lq7xVo29+jBxQiAgvSMtGQSZu66jJlJESDopCVCD2+EwnI9Pj+eXIxHc3Ozv7m+vb66GYRAeJ1cPZkBgBpEcqMVZqBbmSQElQIwtAJCQMGSgJJwkPT6+6A7y+PD89eFpHOHmpnv75t1+f9X3/fPzcUxJRHd5CiEkESLqAvKuhcxJII05gpzHxCJZchZhQAYSBIB5P5pCRpe5LnzVBE0954VLFhjY4ueypYD59m1pv9Lm65T/CnvfbHazNXEiOLu7ElXliuS+H8hNeL5nXCs5qqZza+R1Fl3tjFDMIZdQ5PvyyLHW3JAXlkYRAWTgbey9AqTppfZieX1+y7+OiCLbOPRSkD0ptvf5yfSXN2jmm1ityMMPv3pFDUj+oX72QRkX2HMI8Tt1JeA5IWce8gIJEABZEJCnOnrHvvRKqBpWhbiImKvAmKsjHI
+jJSkAFIvNWiCzh15gBYCsE1AakGlWQ2ZWCyHUbLpWr6d2yh0qD1IIoZx+TYpK4TviF4AJ8TxyzppSj60OFFtfiCgiLInzdLkOAEIJiFJNs6pYBrZVsPyEhltTHjzp6xBUe5SlbqPVuESt9MdX/sBGinBMRIjB0EUlWuakDBOxuytoor+d6PjuRMRuxJqII0t3Wal00dWS0DH6DWD+K1NaDlhx1fXq1b7UFbOiByy3Gdk5Kvs217zMkKA07Iy9i3MgVS1xGaQkRAxxdo22XtwqnXJ45Jz7vrcIchra1GijWMOZmVU5hAvh/tZlPfV+lXkc+ocVveGS6fvl/3rvE3JKLZJJJ9TGgwZrWqqO3ywV8B5gcApYBYZZwisWpM/XjPLSWPzX7wfbV/Z/CSSEEBuCvMgEJSKafrW0z/7O4QIkwIV2XQotRyQrWWT9VZYcPpT8Met+L5VLXSCiv1OEbrP4nmbXrcFyqa6+SvWKb2Smn+mAZZJtcboLTZKCIOhZCyAzIBEFipRGEQQhZkg8DsOgwb3O5/OXL1/OL89j6k8vLxTw/fv93U377t276+trPeQhIgDJOe/3ewDgDG3bMmsiaySiJrQiwgLH/jw+5TSMiNC08Le/vX/35r5pGgxBIPQZzkN6ej76aJPVIl0P/HuQbHUmCuS/5msaQgNCAAQsIAycBRJj+un92w8f3rddjJgHAJasPvUQcNfuRXBMHALtdm23a//4+Fkv7A/98enhK+FVt4sIHBDaCGPOLCwBCEivN0QiFJ4viwIIiKg+QrheocbV12O/NFJff9EUC2JADKA8VVgkZQAIJRxDAALgyRUcUPj58fH3j78+PPSIEAKeTi8fP/7GzGM/IEpsiDkjSQgoInkKdaGkLkQUKRDFiNOuzYlzkpR4GIZhGPrManbOwoTN3W24vr7uuv35fM5ZZNoIprTATdOkMQPkEEIkPHQtIuYdtESfv3wZBYRBQ7sykCCxSHD77KJM7h4BeCMh3noL3kQsbHHINc49QVbEv+5u/foaPF/NwFtTwrqmf6VaeptvrYFcF22nOmNd8KtXh7am83U767cu7WKby2GJom2F2R92r3jyRptFgdSmC5CAlklrDVJFVG7I25DbpjM/mQ7SN/QrKfnG7OsmchA32Mvip+VL35x9P+9+4OqEaHAqbBUf80O4ZPSCyRtrXmKTihgIhQBZcDqJ1tbimFOA4BvSH4im8HekClKp4M8jDSB0gmM1YG8hgdW6qtiujZwwwKQ6T24IK5Qt0nHg1mUhWNEQOtVUfPAY3D5x0Voic+CgyVAG2aZHhQMACIHE5aNDZ75nV4wObHZNZ6BScApds5Bx12ivTJGGcCkX2Kjcday0RHUg8c1KEQpjyRRvVxatWSymTt9ajMGsghV75eKkao3knANubDC4VDxmqlguWs/ayN3uY+fahzibO+zUyq/kCv9heRvT2oES4McAI6KmaTQLsLVMxVlpvnUJmUp2eyMw7Uv1eR+Qxpx4qaAd3cGEuBtQEyqUOGVBD5tFRK8HWBgnAkCdNI+xakXYGi8cYHEy54nBgkJ5jBmqAebj+vUMAqgfMzJOFsWqBcHtE0py/h7VlMEW8/EPPfFULfum/EnnupH1MC9NwSaDRo0JLFOcnoZC1oBqIDQ5xUEAZJT5KqzbUnFL/vAcFQptbKLim8hZf5CVsFWv0FXx7B0ALu3W31mstc2BGxhlyU8UD6YDrPCvDDwiSbkDrO+GEABEigcAy5QMRmQKegwAfd+/HM9fv359eHgQgRhJUh5AkOD+/v7m9urm5gbkpds1FKCBhqa0PS0ink5nACjxqKJx+zRkpDAO/efPX3759dehlzd38d2b+w8ffrg+7FJKiSGLjJkfnp5//f1jSvMNbXHuJ9WE/lWcb1beRHtViEh4volERAgUEe/ub/a7tu/PHPI4DgiBhBExJYkxjiM3TUOEDHm/7wCBArZtHMf+y+ffCd8GvG4C3t/evnuTH56en88JAChAztMmwupXwgjlOi5NB1CzfiJL6QKWq3VN237I66VhX+2JiGTQQMEwpHFMPCYOKGVTYAR4fHx8ePh6PPaAEAKMY//nn39+/fo1xpiGPkRs21ZtcoiSc84Mqrx1bbmNDyHGOJ5HRJQs45AhAzOklIZhvHn3fkzCQoFiu2s1alFKnFISdxPM9kQWyDlzzhGRIAeklkI4tDldDWM+Duk0JmIYBdNK9tdA12XsUxR7QAKu7y8Zg13vrdW8eMRWLVQPKyFhcwY395pNwAw2dOfmawDsw3rr8WRQNVu188o2bTBcYuyXFmAFwytdWIV1U7IyZL0qUeiM1C6X9qvHgKFL2bJhGkCdipfbRMkiW20ufgrmKNwrGqjo1EtF1aavGyou0z+sEWJjEY22supXSpCeig4Nz4ZYT7QVnWhhnuVwWBKVf+vSPuirVaKL9s68SJeiyqBTEQNgFirKnUhUww4RKWQ2BlRHw6JaxGIz4ZJOQEogE93Y/AL2f+0GGjjqn0mhFGtkbgeRpzingICxibkoVjDxOBTJtlvXNOGKf2jALPkC4EpQtv/9cJiZJUGaqVMpnkpEyqYJal0DwBCm+AE5c9tOrkmFO08Gq6bpDJOqUSgqdF58qACbe0OCKdtq6sRlfBQtbdvmnFNaxFYB51KoxVZIKj6r1oUpqF6/Ypf1QZyqo/qnKpN6TwYsPI/Th5dI1oWxFS4cwOJOLsi6WCC9S6f+qukKTN31q8V3bWjEciezWmnkYr14dZdKyDgRUb9lHe+kEJLkPDWOS11L2wGAOZx9AazrOhFRU2HoJnpG9m9P1qCK9XgEVsUuGXvTq36wibDZ3AS4sIXFRBtD8Bb4qmtjmgt8CghNcQekQEIy31L2jBhYgMA/Wc+R/1wx03WdNV/yl6/WQ1gXz3krfG7Wp6ViDzM3ZwAIiJEmYZrdFkggLEACCAxEAFMuBxF3dcmQvDQSYjlD8ewCVjRjDyfYULVztFQWgu6ioCHHPnyHluHX4z+nCH7npPiaUiQPeygynaP4+nm0tJ+z77eIEEYMApEIJGeQJHpWPY5JRIAiAvXj+fn5+enp6fkZDgfY7XZv3txfXe0P+26/3yEJjym2065HRCSkdkKN24yIGgmd2YkXQH3f//nn148fP56Ocn2NP/74409/e69xxM/jkDJkjOdhfHg+f/zzyHKwjRXLFXRZ5QWd2/+WpOiLiJRQk7Uqfmk6EjMyqsGIiAJxg4Ew3F1f7XbteO6JMDYUAiqyqd0POZ2H/u5qNwzDn39+GoZz08Bu17y5vUl56Ifz8fh8e9Md9ruffvzx83BOKZ36Z0EhIpjStEq09BiECEgYGZGAoDhgb5JKhY1XaGziui4QgD4PRMLgThpQ1+rQp2EYUgcYgMGO8ICIrq6umqaRKQTu7I+TUmq7uNvtpkUlOaUUm+Z0OuWcm0gAMJzHnHNDAVsMGIRx3yBRjLFBgZylD3RYevWrPTCXA2icgr3pSW4WIZVyGJFzJs4YpSX88PbuOIztaYjn4TQwccaMWbjEXNDdc05wozKUcpvCXPT8u
[... remainder of base64-encoded PNG data (inline image output) omitted ...]\n", + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from PIL import Image\n", + "Image.open('./mhp_extension/demo/demo.jpg')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"[... base64-encoded PNG data (inline image output) omitted ...]\n", + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Image.open('./mhp_extension/demo/demo_instance_human_mask.png')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABLAAAAOECAMAAACGszjIAAADAFBMVEUAAACAAAAAgACAgAAAAICAAIAAgICAgIBAAADAAABAgADAgABAAIDAAIBAgIDAgIAAQACAQAAAwACAwAAAQICAQIAAwICAwIBAQADAQABAwADAwABAQIDAQIBAwIDAwIAAAECAAEAAgECAgEAAAMCAAMAAgMCAgMBAAEDAAEBAgEDAgEBAAMDAAMBAgMDAgMAAQECAQEAAwECAwEAAQMCAQMAAwMCAwMBAQEDAQEBAwEDAwEBAQMDAQMBAwMDAwMAgAACgAAAggACggAAgAICgAIAggICggIBgAADgAABggADggABgAIDgAIBggIDggIAgQACgQAAgwACgwAAgQICgQIAgwICgwIBgQADgQABgwADgwABgQIDgQIBgwIDgwIAgAECgAEAggECggEAgAMCgAMAggMCggMBgAEDgAEBggEDggEBgAMDgAMBggMDggMAgQECgQEAgwECgwEAgQMCgQMAgwMCgwMBgQEDgQEBgwEDgwEBgQMDgQMBgwMDgwMAAIACAIAAAoACAoAAAIICAIIAAoICAoIBAIADAIABAoADAoABAIIDAIIBAoIDAoIAAYACAYAAA4ACA4AAAYICAYIAA4ICA4IBAYADAYABA4ADA4ABAYIDAYIBA4IDA4IAAIECAIEAAoECAoEAAIMCAIMAAoMCAoMBAIEDAIEBAoEDAoEBAIMDAIMBAoMDAoMAAYECAYEAA4ECA4EAAYMCAYMAA4MCA4MBAYEDAYEBA4EDA4EBAYMDAYMBA4MDA4MAgIACgIAAgoACgoAAgIICgIIAgoICgoIBgIADgIABgoADgoABgIIDgIIBgoIDgoIAgYACgYAAg4ACg4AAgYICgYIAg4ICg4IBgYADgYABg4ADg4ABgYIDgYIBg4IDg4IAgIECgIEAgoECgoEAgIMCgIMAgoMCgoMBgIEDgIEBgoEDgoEBgIMDgIMBgoMDgoMAgYECgYEAg4ECg4EAgYMCgYMAg4MCg4MBgYEDgYEBg4EDg4EBgYMDgYMBg4MDg4MCa7rFGAAA+R0lEQVR4nO2d225cua5FjQMEhgN07///29NVdrlu6yKJpCYljfG0sztei5wkRxzn9vEBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwzf/dUBcCAHDE/+2jLg0A4IEDW6EuAEhEqa1wFgCIqdMVzgIAGQ26QloAoKBZVzgLALpisxXKAoBuOOgKZQFAF5x8hbEAIBo3XWEsAAjG01cYCwAC8dUVxgKAONx9hbG6QNCwIgG+4o4CIXRYmQhfcTwREDxAjK84HG/IHiDOV1yNK4QP8BHpK47GD9IHuOItKU7GH/IH+MbTT1xMCAwAqph5+mYlcS/BMIIqlm5+e1vUNXlivQbOJRhmUMza3V9YYAXOt/2fC013MldSEtpzX2sKq/d/YYkITjf9nwdKr2PGoDS0Jb7cFIigKAN1iR4cd/jPFmXbMVdMGmqTXnUOhLBM+4fdbeqqQV7qJgeldAnLUXcUw+IprNX8QWsFuirUlrrJMalbxDLUPUWwcg7L9b7fVoWuzpWlbnNEWnaxAHVb7iwcxIKd7/VUqaszY6nbHJDGZTxH3Zgz6yaxYuN7HdX76lhZ6j7Ho30bz1H35sm6SSzZ93Y7Tbo6Vpa60dGwbOM56u7cWDiKJfve7qXZVwfKUnc6FsZlPEfdoBPrZkHbdyy+wlgu2NfxDHWHLqwbxppdb/dt89WusdStjoTHOp6ibtLOwlms2XWIr/gUy4zLNp6jbtPKwlms2fVm32Zf8SmWGad1PEXdp42Fs1i07a3GHXyFsYz4reMZ6k5NrJvFom2H+QphmfBcxzPUvRpYN4tF2/5AWDnx3ccT1M02s24Wi7Z94b14H2FhLAPe+3iCut1GFo5i1b4/Nlp38hXCasd9Hc9QN9zEwlGs2vdH4CdYCKudgH08Qd1xC+smsWzjH4GfYO0ZS93wAITs4wnqnusp62v/R86Bk6ie7iyNfwiENUguSmL28QR107WU9HS8iMMm0TDdORq/8Fq0n68QViNR+3iCuu1Kzhs63cRBg2gb7wydX3itGWGpidvHY9R913Hez8YqFrpL3dsxhhEP3vmV15IdhcUXsVqI3Mdj1J1XcdrNxiYe/0A6ShLGKQ/c+ZWXgj19hbAaCF3HE9S9V3DezL6wCpSlbu8A44zHbfyHl4IRlpjYfTxB3Xw5p61sbOLpZo6QhHnGozb+w2u9rsLa3gt1y6mJ3sdj1N2Xc9bJ1iIirALU/Z3xWm8HYaXPREj0Op6h7r+Usz42F/F8M/MnYR/xoI3feKnW11cIq5bwdTxB3X8pJ21sL2LBaqZPwmPIQzZ+46VaZ2Hxc8I64tfxDHUCZZw0sbOHEwjLY8Rjdn7jpVhvYW3uhbrnvHRYxxPUCZRx3MPeGlYJK2cSHiMes/MbL7W6C2trMdQ9p6XHNp6hzqCIww52t/B0MdMH4TPiETv/5blUf18hrArCV7HgTkeYzmmTCMuEus0jnisNENbGYqh7zkrcBhaNZZzp1PY6i7Aqp96Mus8jniv11xXCKidi92rmMsx4Ghre+E8nyamb3KB+/G2o+zziuVIvSR3vhbrnrDiuXNtgRhlPQ+Mb/+0sQ3WXbzTsQSPqTg94LtRkpuLDUPecFJ9dM8xllPm0dL/xH8+SVLf5Rv0ytKLu9ICnOtuEVH0Y6p6TYtsxj8EMMp+WFLb+40mi6jbfaFmLNtSdHvBUZ8PStxyGuuecGPbLazCDzKcliK3/eJKqus03GrfjNZuS76xudZ/tnnwZJw0l5kW0D2aQATVFsfv733dRt/lK64JsBXP2Aepe9ykeuIVRwlDitYqGwQwyobYoqn5rR8oUGjdkJ5exer9TPnEDo4ShxHEXWwczyI
Tastj//aR7qPt8pWlDWn5fS8bmf6kYeTsvCap7TonrLrZNJv+2XvEM4yhedZ+vuK/HSM3/Yp955Vpcvq1uOiG12+g1rLG29YpnHkcJq/t8xX8bBmr+F/8TOA7m+m110wk52ka3BT0ezQjr+uHrq6GMFbAMw/R+J+IIDoL5+ba663wcHY7fgh5MZox1/fAW1jifZYRswyC9PxB0Bzu53L6t7jodB2fjuqD7oxljXT92ovIOIGEKMeswRu8PhB3CViy/31Z3nY79q/Hdz93RDLKuH9tR+SeQL4WYfRij9wcCT+E9lt9vq7tOx+7ROO/n/mzGWNcPhOW7D2P0/kDoLbzkcv+muut07B6N73rujmaUdf3YjCoggXwpxCzEGL0/EHwN26ibzseuo3zXc4uh1vVjK6qIBPKlELQRQ/T+SPA5bKLuOR9Hnoqez1DrGiGsMY42aCWG6P2R+IN4R91zPk5kFTqfodZ1I6qQBPKlELQSQ/T+SPxBvKPuOR9ntur023rzr2uIsIb43WhBK4GwClD3nA+hrwYTVkgyIxwtwvpGICx1ywkR+mq0v3UfYfkuxQi9P9HdVwjrHaWvENYYn2YgrG8QVgKUvhpbWFER5IsBYX2DsBKAsIpZ9moR1jfdhaVuOCNKXyGsMa42ai8GaP0ZhKVHKqyx/iG2mGwGuFqE9UOvs7ih7jcjCKsYhNVBWOpeD+ksLHW7KcklLHUahwRlk/9qwxYjf+vPICw9CKuYoGjyXy3C+gFh6UFYxURFk/9qEdY3CEuPVlivK6tO4xCE5d59/tafQVhyUglLHcYxCMu/+/StP9NVWOpmkyL11bDCCgwhYQ79hKXu9ASEped7AgjrnG7CUjf6TtxmpG/9CXyVgIcBdPfVUMIK+5fpXm2g7vOdwB/KhtqAj36+SrgFSdhdnx5TGW1bY7J5kYG6zw0Q1jf8Nnc9e/vTZSqDbWtQNi8PVve5QZywxloBz8aPUXeal5396TOWobY1zuXPj1X3uUWcsP4ZaAMQVgK216fTWEYTVmAO92+o+9wkTlj/jLMB/Yyl7jMxm+vTay4j+arfFzDUjW4S5qt/ftdA3WIB7q1vo24zM51GsM1QwvpYWljR//TxEBvQ6VrUXaamzwh2GEtYvVJR97lDrLDy9v1IVOuDxSClxwz2QFhbqPvcA1912AF1g/kJH8EBQ/lqeWEF/05vdXdFhHU/UghaokdwBMLaQt3nPggrrPuBMlATPIMjhvIVv0QU+yul6t7KiOt/nAzExI7gEIS1gbrNQxZt+05cAONkICZ0BMcM5SuE9YGwEJae0BEcg7A2ULd5yKJt34kLYJwM1ITO4IixfIWwLiza9p2wAMaJQE3gCE4YylcI68KaXT8QFcBIGagJnMExQ/mKP5dxZdG27wQFMFQGYuJGcMZIvuqUk7rJM9bs+pGYBAYLQUvYCM4YylcI68qaXT9DBlpC8i9iJF8hrCtLNv0KIWiJyH++IRHIhTW7foMQlPinP+OQCOTKkk2/QwhKvNOfckoEcmXJpt8hBCXe6c85JfK4smTT75CCEuf055wSeVxZsecNSEGJb/qTTok8rqzY8xbEoMQ1/VmnRB5XVux5A1KQ4hn/rGMijisr9rwFMShxTH/eMRHHlRV73oIUlLilP/OcCOPCml2/QwpK3NKfeU5kcWXRtt8gBSVe6U89J6K4smjbb/ik8Ffdxpj4hF+DuuMWSOLCqn2/4RHCX4TVhEf2dag7boEkrqza9ysOGfxFWI04hF+HuuEmCOLCqn2/Yc7g71+M1YrDClah7rcNcriwbOOvWDNAWAY8lrACdbuNkMMHwvrlry2Cv38xVjsuW1iOut1GiOHCup0/859tDAn8/YuxTHisYTHqZlshhQ+EdeOqm9YA/iIsIx5rWIq612aI4cK6nT/y45u2AP7+xVhW7GtYjLrVdkjhA2F989dgrL8IywH7Hpai7rQdUriwcOt37sap7/9JWH/UnYyKeQ2LUXdqgBA+ENZ//Pnzt91YfxGWB+Y1LEbdqQVC+LCGMMPPgp6EVWmsvy/CwliN2NawAnWjFsjgwxjCDF+2+fMsrCpj/X0TFsZqw7SGNagbNUEEdmGNbqw/r8KqMRbC8sL4W3eLUfdpgww+TCHM8Ctj78Iqv51XX12EhbGaaPjyYRPqPo0QgSWDvxMI649BWG++QljN1P5kvBV1n0aIwJDB7UrVDZjYElbZ5bzr6ltYGKuF2p+MN6Ju0wwRtGfwK6zPT3ULzfzZFFbJ5ez6CmO1gLAKIYLWCH7P9PMJdTdV/Bim3lhbuvoVFsaqp+JTWwvqNu2Y/5S+ugEH2pvfENZI3vqzK6zjtdjWFcIyUP6prQl1m0a+V7W1+b9TCOuql9bm94X17azc3vpzIKyDtdjT1V1YGKuaoh8ozKi7tHJb1abm/04grF+5NDZ/KKzsn2n9ORbWzlbs6+rvwwPVvQ3H+Y8THqi7NHJf1YbebwGrm2jn0SuNzZ8LK7GzzoS1sRcHtnoWFsqq5OzHCR/UXRp5WtXK1h+2VN1GGy9WaWu+SFhJnfWnQFh1/MFYzTzEWHmJFaibtPK8qlWtP22puo8GXo1S1//LoRYYK5+znu3i76sx90LGY5B1FqpA3aSRt01tOtgRN/NdJ1X9v15qkbCSKevFLSHCGnAzZDwl2WSjc9Q9Wnlf1ZaDHW8tt2RSE8DbpRYKK5OyXsUSJazhlkPFc5TtUjpC3aORrVVtONjRlnJbJTUBvJ9qsbGyKOtdK2G+Gmw9VLyEaRLTDuoWrWxuav29DrWRuyIpD2DrVsuFlcJYW04JFdZAG6LiNU27n15Rd2hlb1Or73WcfdzXiKX/SwQVxtIra1Mo7aJ6XAOk1cp7oF6i+kHdoJndTa0910GW8VAixYuyd63DGGvXJvWK2loDnNXIRqL46pHDRa261hFW8UQhxV/I273WGmEJlXXkknI3HWwB0mpkK1N89cDJnlYca/5PsU4FUviFvINzrROWSFnHHjl10iEVwkq+LRo2Q0VYvxTsadmp3pdV3dIeBfoo+kLecQT5jXUqkeMhn1AprMwLI2E7VXz1g9uePiyruqdNiuRR8NernEZQK6zOyioRiH0FkFYze8F66GrgP+/7g9uePm2ruqt3yszhEcJHZmOV2sO+AlirkYNozboa+S8o+MZvT5+2Vd3WK4XecMngv0zrjdXFWRXiMG8A1mrlMF2jroYXlt+evuyrurEnSp3hk8FHo7GClVUpDeP8zcSGkZmTgC22+ju6sNzWdGth1c39UC4MpwwuL20zVpy06oVhHr+doCyyc5qxQVdjC8txT3c2Vt1hja4+3SL4aP4cK8ZabbqwT98D5yhGoCDmdl+NLCy/NT1YWWmHdZpwi+AarkVYjtIyuMI8eyeckhiGoqgbdTWwsPzW9GRpVQ3WGsItge+dMBvLQVo2UdhH74V5F4aiMO2lfOW6p2dbK+mwWg5uAdyWQmwsB0+YJ++Iy1KMQXHg9boaU1i+a1qytr07bHGDWwCewmpVlpMlzIN3xW8/clORea2uBhSW95oW7m3PF
pvE4Nb/fSlUxvKQQ3UUji/dx3VN0lK3bHW+Gk1YRWtRF1jx5nZqsdELbv0/LIWLsCqNZVRC6yr4vnYX72VJSe26VehqLGEVr0VtYqWr26HHViu4tf+0FN2N1SoC8yq4v3gP/5XJR+PanbnqeTfTU7EUjYF9o1u4T4Mg3Np/WgofYRUbq/78vXYh4s17hOxOKswLWLCa6anaCUMiso0zKcGr+9e16Gmsqgk7r0LMu3eI2R8598bsC3i+mcmp3Yn2RDRbZxSCV/Pva+HiqxJj1U64GM+hO+G7O0l4aMy+gAWbmZiWnWhORLB3dh9cinHofWMt7LVdCZlxIb5Dd8Frc/Lw1JjLvwx5tplZad2J1kR6b56HDn4Ksba+uRYe9X2eGat1yIU4D90B+94k4rUxn3/K9mwxc2LYibZE+q6ekw2eyvDYFv8SD41lmHIxrkO3Y1qbTGz25bCCp4uZEONONCXSc/m8XBAqrA7GMs65Br+hW2ldmlTstuWwg6eLmQyHnWhJpO1NDf15eeDz+0vuD8E5L4ZXlZGTrsJr6EYadiYZB1157ODJXmbCayfqA2l+VW2LXhq48Pz+rMLaM5bXsCtwm7qF2o3JxnFXHkt4vJdp8FyK2jwMr6rq0U0CV55f774YbnWGj7scr6lbqD+NPJw35bKGh3uZA+elqIvD9q7yJt0UcOXl5e6L4VZoh3kX4zf2ZtoOJAMlPbms4SvZQgvYiro4jBS26WaAb15e7bIZMfV2mXghnnNvo/1OpJT25LKHL6TKLGgtKtJwoKRRt/v/5vXFLpsRVHCnmZfhOfcmjAcjobwnlz08WkspgWtRmIXT285b9Tv/b15fG7AZfsV2m3oJroNvwHw1vanryWUR99dSmWPYSlwpisLvdSe9+h3/N2/vdN+MMGH5Zd6I0lYXPG6nH7UtuSzi7lrqkgxYhFdOg/B93VG3frf/zfv7vDfDt+rOk8+Ny/10oqEll03cWUtZmp7z3+ckB+/X7bfrd/k/vL/NeTO8y+49+sx4HVE8bS25rOL2VqoS9Rn8OYc5BLxvp13Hw/9m41Xeq+Fdd+/RZ8bzlAJp7chlFbe3UhOpfeKlHMUQ8sLNfj3v/putFzmvhnfh/YefF89jisLSkscq7mylIlXLqGvZTyHmfVv9ep79D1HCenmua839h58X95vyxtiRxy7eKS8sRRQ29k8zhveGXa/+h433uPxZwpcGfItWjD8pEYfliL0jj2X8paa0DFmY2L/MIF4b9j36H7aE5WGslwZ8i1aMPynuZ+WJS0cOy/hLZXX6MEzsHmYUzw373vyNIGG91u9cNb76xfWoXPFqyL6Nv1TXJ0/DxN5hhvHYsPPJ33ifyXajlbzW7102wrrhd1K+eHZk38cfGiqUx2Fh5y7DeGjY++JvvE1ku9FK3uoPqBuuOB2UL94tWffxRluR8kAMbN5lHPeO3S/+9/K3ojXuxXv9/mXDDw7n5ExAS8aFvNFapjyRdrbOMpBbx+4Hf7/8rWiNe/FWPsKKw3pMzgS1ZNzIHwyVykNp5f0qI/lp2P3eH9iM1rQWb+UH/YltuGC6JG8Ce7LK6oKpVnkqzfTz1S0l73t/YitaH1/FfYbYKf0haD8kb4Jb8hVWv6TdYhmBa8fe5/7MZrY+vkJYHWg8I2969CQWVlvWrsHk59Ky97m/HP9mtghrGFquyJtOLTkKq7FkdTLp8T71rePfzNbDV1HC6pj/AFTfkDv9epILqzpuz1RGwPnUN49/O1uENQiVF+RN56bcjBVQW7d0MuN861vHv5Ot3VcIqws1B+SNoCm5sC61lYbulscwON/65u1vZ+snrJCi4Ua1ZdzQdCUX1n/FFabulMVIeB/71u3vhJtVWB1SH4oW1Xgg60ovrJMC+ySUE+dj3zz+vXStvkJYfWjSjRlhVx7C6mEshxTGw/nat45/N12ENQRNvjGibatVWI/PDC2wV0YJcb72rePfjzejrxDWK83WaUbeVgphnQXv8IYB8T73jds/iDehsELTHpNm8TSibyuHsE6C93jDgDjf+8bxH8Vr8NXvcyNqhgcs8qknRVt2YYUby+P5I+J77lu3f5SvwVcIqxcW/VSToy+ElRbfc9+6/cN8rcLy7iAo5bGxKaiKLG01+cpdWEclujx/QFzPffP2j/Nt9tWvsFxbCEh4fGwOqiFNY/W+en1gcIEujx8Sz3PfvP2ThBFWesweKiVRZ02+chfWfoE+jx8Rz3PfvP2zhBt9hbD6YVdREZk6a/JVR2P5PH1EPM998/TPErYIy7sF32inwUVHZ+TqrM1X/sLaqdDp4SPid+07t3+acZuvEFY/fIx0TLbWmnwVIKztCr0ePiJ+5759++cZNwnr4ZkBRcMTPk46Il1rTb56fFxogV4PHxG/c9++/YKMG3z1KCy3FvxCnQwvLe2Tr7Uswtqs0O3hA+J17bu3XxByva8QVk+8tLRLxuZafBXxc8KNAt0ePSRe5753+wUp1/sKYfXEz0zb5OyuwVcIqwNe5753+yUp24Tl1IJHmJPi56YtkjbX4KsQYb0X6PfoEfG59v3jL0q5yVcIqxd+ctoia3f1vkJYPfA5993jL0u5zlevf+DHu2Z4xk9OG6ibsxjr4EmB9Tk+ekRcrn3/9gtTrvJVhLBMGU6Oo57eUPf2xyCsoycFFuj56AHxuPaD4y9N2fAJlksPhgTnx9NQCY9vr7haXyGsLjhc+9Hxl8bc/gkWworGU1E5b2+7vCTCeq7O9ckj4nDtR7dfnHOlrxBWP5w1lfH4Nsur9FWYsPz/rq2Rcbj2o9svzhlhpcVdVAlvb7NAhJUQh2s/vP3inOt85SuspuSWwd1UGU9vs8QqX8UJK/DJ42G/9uPjL8+5yFcIS4C/rPJd3naRNb7qIiznB4+I/dyPb7846CJfISwBAbpKeHoVwjp9QFht3g8eEPu5H99+RdAFvkJYApbw1Z/NPpMIq/xrwfNjP/fj268KuthXrsKqSGtN1vBVsbHOPzqqNP/njoeDpI5vvy7pY18hLAkLC2vDWCUfHVOb/1MHxMFSx7fvOsCthYgoGh5ZxFeFxir54J5FL4aDpY5vH2GNzyK+KvvNDSUf27XqtXCw1PHppxeWV20Ts4qwSj7FKvrQnjUvhoenjm8fYQ3PKr4qMVbRR/YseTE8PHV8+54T3NqHkKLhiVV8dW6ssg/sWPBqeHjq+PQR1vgs46tTY5V9XMd6V8NDVMen7y8s1w6cKpubhYR1/LuxCj+sX7XL4WKqw9tHWOOzkK+OP8Uq/LB+1S6Hi6kOT99zhAhLw0K+OjRW6Ud1q3VBXFx1dPruwnJtwKeu6VldWB8IKws+sjq4fYQ1AQv5aq/X4z80hrB64SOrg9P3nCHCErGSsPaarfigPnWuiY+tDk7fc4juwvIoagWWEtZOtxUf06fMNXHS1bmwHKa4+ZyQouGZpXzVZKwxGx0QJ131E5ZrA/aaFmEtYW23W/4RnapcEiddHdx+sLAsHdhLWoW1fNVgrGE7HY3xhWVowV7SKqwmrGpjjdvpYPQTlnmMe09BWPGs
5qtqY43c6lhE+ypeWO0tWCtaiNV8ZTJWvyIXZBhh7T8FYcWznrC2ey76zh1rXI9wYXl9EevgKfgqnAV9Vfe7G4bvdhRmEFZjE8aClmJFYVUZa/xuB2EUYR0+BGGFs6Cvqow1Q7tDEC8snz9OiLC0LCmsCmNN0e4QjCGsk1VAWNEs6as/Fb9YOEm/+UFYUMCqwio21iz9pifcVx7GOlsFhBXOor4qNtY8DSdnWWEZylmRZYVVaKyJGs5NB2HZ/9rR01VAWNEs66tCY03VcWaGEpZnF4ZyVmRhYVUbS1HjMowgrPNNQFjRLOyrMmNN1nJaEBYUsbKwSow1W8tZQVhQxMq+KjHWfD3npIOvdv/uvVLOFwFfhbO2sM6NNWHPKRlIWK5dtFezKGv76txYUzadEIQFRSCs4/6mbDohCAvKWNtXZ8aatet09PCVj7B822ivZlVWF9axsabtOhv9hNU+xwBhNdeyLqv76tBYM7edC4QFZSCsfWPN3XYqhhGWbxvNtSzM8r4qjUBd5dx08BXCmgKEVRqBusypQVhQBsLiU6wE9BNW6xyLPhhhhYOv/hSGoC5yahAWFIKwEJaeDr7KJ6zGShYHYf0pC0Fd49T0EJbNWAgrCQjrwrqd52AQYfm20VbI6iCsb9btPAM9fGX7558RVhYQ1jfrdp6A9MIq/EiEFQ/C+mHdzvUgLCgFYd1Yt3M5XYXVMkqElQaEdWPdzuX08BXCmgOE9cu6navJLqziD0RY4SCsO+t2LqaLsAxfxEJYiUBYd9btXEtyYZV/IL6KB2E9sm7nSvoKq3qWCCsTCOuJdTsX0sVXzZ9i1ewAwgoHYT2zbudC+gir8U/nIKxUIKwX1u1cxxDC8m2ksgi4gbBeWbdzGZ2E1fbXjiKsVCCsN9btXEUnX8ULq7yTyiLgBsJ6Z93ORWQWVt0KIKxoENY763YuopewWv795xhh1dUAdxDWBss2LqKvsOqmWbcCCCsahLXBso2LQFhQCsLaYtnGNfTylUFYzp1U1g2/IKxNlm1cAsKCUhDWJss2LqGbsBqMhbBygbC2WbZxBfmF5d1Jbd1wA2HtsGzjAhAWlIKw9li28f5081XDHyes+/74KhyEtcOyjfcnsbAqvz/CCgdh7bBs4/3pLqzygSKsbCCsPZZtvDvdfIWwxgdh7bFs493pL6ziiYb4CmEZQFi7LNt4b/IKK+YTLIRlAGHtsmzjvVlNWPV1wy8Ia59lG+9MN1/VTrR2/AgrHoS1z7KNd2YWYQUWDr8grH2Wbbwv/YRVZ6za8SOsHiCsfZZtvC9JhVW99/iqBwjrgHU77wnCgnIQ1gHrdt6Tfr6qkVD93iOsLiCsfdbtvCcKYZ2PFGElBWHts3Dr/WjzVT9heTfSVDj8grD2Wbj1fmiEdTbShrVHWF1AWPus3Hs3FhNWU91wB2EdsHTznejoq/e/SHZvv1vWHmF1AWEdsHTznegorPIdb9l7hNUHhLXP0s13IpWw3j/v8v4nc5oKhwcQ1j5rd9+HZMJq3nuE1QmEtc/a3XehzVe23+ceYix81QmEtc/i7fegn68Q1hwgrH0Wb78H/YTV6iuElQuEtc/i7fcAYUEdCGuf1fvvQDdftQurbPII6wynE0JY+6zefwcQ1jIgrHCWDyCcJl8hrBHxuiCEtc/yAYTTz1cIS4zbBSGsXZYPIJzFhNVW+RwgrHiWDyCcfr5CWFr8Lghh7bJ8AOEgrFVAWB1YPoBw+vkKYWnxOyGEtcvyAYQzj7CKWmmtfQIQVg+WDyCaEXyFsBxwPCGEtc/yAQRT7yuENSaeJ4Swdlk+gGAQ1iogrC4sH0Aw/XyVQljNxY8PwurC8gEEM5OwCpppLn58EFYXlg8gGIS1CgirCycBrBqLG/18hbC0IKwuHAWwdjI+TCWs827aqx8d30tBWLvsBUA0LiCsRUBYndhOgGycGMJXCMsOwurEVgJk4wbCWgTfQ0FYuzwnQDjOdPNVF2GdtmOof3B878R5bBPhv9PwCMJaBN874SZ3CNhpeARhLYLvoXCT20TsNDwymbBO+jGUPzjOh8JNbhG00/BAN18hLCnOl8JNPhC90/BAN2G1TbV+uAhrE+dT4SbvxO80/FLnK4Q1LM6nwk3e6LHT8AvCWgTnU+Em/xhXevZwgkBYi+B8K2vfpGmXZw8nlvmEddiSof7Bcb6VlW/StMmzhxMNwloE51tZ+CZNizx9OtF08xXC0uJ8KwufpGmRp08nmgmFddCTpf7Bcb6VdU/StMfTpxMOwloE51tZ9yRNezx9OuEgrEVwvpVlT9K0xtOnE083XyEsLb63suxJmrZ4+nQ6MKOw9psyNTA2vsey7Ematnj+eOIpt9X//me8936TRVjv+N7Kqhdp2OEV4ulAoau+Mb6r22QR1ju+t7LqRbav8BLxdKBCV3Zj9fqLOBDWBq63supFtu3vMvF0oMpXdmFdiZ8rwnrH9VZWvcja1V0snniqdOUlrA2cx4qw3nG9lVUvstFMy+QTTpWuIo3lC8J6B2E5gLDEVPpqcGGpy5KCsDxAWFoqfTWKsRDWOwjLA4SlpVJXoxgLYb3jeinLHiTC0lLtqzGUha/eQVge+AprwoCCafDVEMpCWG8gLBcQlpQmX41qLHVNWhCWCwhLSpuvBjDWv98grF8Qlg8IS0mjr9Ib699f8NUPCMsHV2FNmVAkrb7Kbax/n0BYVxCWDwhLSbOvEhvr3zcQ1h+E5QbCEtLuq6zGetfVj7HUhakR+GrOc0RYQgy+SmmsTV19G0tdmhqE5YSrsOaMKAyTr/IZa09XF2Gpa5ODsLxAWDJsvsomrH1f/WcsdXFyEJYXCEuGzVe5jHWkq3//VVcnx/VK1r5GhCVjImEd+wpheV7J6teIsFQYfZXJWAjrGITlCMYSYfVVHmGd+AphISxHEJYG8ydYaYR15iuEhbAcQVgaENY6ICxPEJaEZb6ChbA8hcUxIiwN0/wSIcI6BWF54iiseUPyxyAsdemvIKwTEJYrCEtBq7DUdW+AsE5AWK4gLAGtX3NPGTHCOgZhuYKwBDT/ImHCjPkM6wSE5YqjsCZOyZnPZmPl+0khwjoBYfmCsPrzKqyRjYWwTkBYviCs/liElcxYZ75CWAjLF4TVnTdfjWusU18hLL8T4RQvOApr5pg82RDWoD8pxFfnICxfEFZ3jMLKY6xzXyEshOUNwurNlrBGNFaBrxAWwvLGUVhT5+THprDGM1aJr+qF9fUVUKoSha+mPkSE1ZltXyGsK19fsxkLYXmDsfqyI6zBjFWkq2phfX1NZyyE5Q3C6suesKqMJc87RFhfN2JqluB1HtzhDS9ZTR+UE57CkkUe+QnWXMbyug7u8IaPqhYIyoldYdUYSzyEQl+1foI1k7K8BsMd/tJipiWD8iFcWB0m0kFYsyjLaRbc4Z26LBYOyod9YVX8zVjayZT6qk5YX++4Vq3BaQTc4Z367V8zJyc0wiocUNkHdRPWf8oafamcroNDvNO6/YvF5MWBsIqN5T+rmu8d46ttYX2NvldO52Gb7mRUb/uSKXlxJKx
SYzlNrHG4HT/Bugpr6N1yuo/2aU2I/0rDPnZh+cyrabxXhwiENe56+dxH07CmxXGh4ZRDYZUYy2Vc9Tw4RCGsUTfMp/zKWTnVnhWPdVb3MA7Hwjo3lse0GniUiEZYYy6ZT/GVw3KqPSv2bVZ3MBJGYdmH1cZXvbGqYtnx1ddLGUFDCcOp8Mph+dSeF9sqq6sfjBNhnRjLNioDzxqRCWuwdfMqu3JYPsXnpXmL1YWPyJmwDo3VPCkzLx7RCWukrXMrunJYPtXnpWmD1UWPikVYTYNy4U0kQmF9jLJ8fhVXTsun/LzU76+64oE5Fda+sern5MWGSfr4akdYHyOsoF+1tePyqT8x5NGPc2HtGat2bf3YMolaWB/Z99Cx1Np5+TSQGNLoR4GwNo1Vu7SObKtEL6yPxMvoWmftwHxaSAxJ9KNEWJ/nqiq5Zid2VNLFV+ctBo3JiG+VtRPz6SExxNCNIl89GmvzbMvv2c6uS3II6yPjYjqXWDsynyYSQwbdKBTW55OvXg63+qIN7Kskj7AunL6i8Lt50FJe5fOMYYzO8gH0o1RYnz8/G3y73eaTbuLAV8fGqkvFLqyP4y0t/G4eVNfW9sTWJKZg8fZ7Uiysz89r+IfGCDbW6ZsfBPUqsKpQHJvbenzJ93Fhv17TY6sH59ROYlbuvS8BwgpSVtGbf2X1IrC6UNxbuz/69Dv4cFax6eGG9mdl3c47U+GrCmH5G6vstW9kEVYBTgPdiMv9bareErNo2/0JEpbrXTfK6sqPrz6rQunV2CuWQR4EFvmq+MbGYMWeJUQJy+28KwW1zecYwrpSO8HzyDzecvxG757GY72ONdT46nPrGM6oX+5Hat+2i5+wXPoq417Qxv+3+1FlxVqWxtLJrKzWr4pwYbVedvV7TqgyVmxrsRRWalma+qIsbxuExdpVES+surtueHwZEcKq7K0LpWValqa+KsvbRmGpZmV8fMQLq/iqW55dSo2wqh5cf72RFFdp2plqDG8DeOCjxlgHB2G+6sYHl1MRSu2jGy44iPIabTtTi+FtAHeu29RBWEenbXxkKRWptDy+4YoDKC/QuDOVtL8N4IHrNnUUlpCKVJrfYdSNnfLabCtTS/PbAB743qYKYTWfsp6KWEzvsRnHSHldxp2ppPltAA/8rBPCesbjdSbvtFJRkXFlKml9G8ADt3UqFpbHJcsoz8XphRb1tFFRjnVn6mh9G8Cd33VCWM+4vdLgnhZqirHuTB2NbwO487BPCOsJx5e2uqeBulLMO1NF29sA7jzuU6GwHA9ZQHk0nm9tPfESDJU4LE0Nja8D+OVpoRDWI66vbb3xU0yFOOxMDW2vA/jleaEQ1gPub26+83e8qnDYmRqaXgdw53mhENYDES9vv/UnvEpwWJkqmt4HcOdloxDWnZC3G679jlsFDhtTRcv7AB542agFhFVsrJi3W+79imMFDgtTR8sLAe68rRTC+iXq/ZaLd32/y8JU0fJGgDtvK4WwfgkroPXcvV/vsC6V1L8R4JH3nUJYN+IqaDj1gLd7bEsl9a8EeOR9p+YXVqmx4gqovfOQ13ssSy317wR4ZGOpENY3kRUIbXV7u8uuICzoC8LaJbQEna2+WoXl4CuEBUa2tgphXQkuQiCqx1e7bArCgs4sKawyY/UoRGCr79f6bArCgt4grD061tPVVpf3uewJwoL+IKxd1FXGgbBgVJYUVrpPsTrjsiYICwQgrF3URcbhsSUICyQ0GEt9b3aKklEXGYfHkiAskICw9lAXGYfDjiAsEIGwdlAXGYd9RRAWyEBY26iLDMO8IAgrG//+q66gJwhrE3WRYVj3A2Gl4t9v1GV0ZD1hfZbEoi4yDON6IKw8/HtHXUpHENYm6iKjMG4HwsrBv6+oC+oHwtpEXWQUxu1AWHreZIWwENaswjIuB8JSsmkqhIWwLqiLDMK4HAhLxZGs1hbWmbHUJ2enSFhzGsu6GwhLw5muFhLW+14hrCvqKkMwLwfCUnDuK4Q1s7DW/TmhdTcQloICXyEshDUh5t1AWAoQ1gMIawd1lRGYdwNhKcBXD2wsFsK6oK4yAvNqICwBfIJ1Z/NfQjgWVpe/dzyUz2WNVbccCCsHCOvGZYfXE9YnwioDYeUAX125LTHCOgloHio3BGGlgE+wLty3eGOz5hfWWsZq3GuElQKE9efpFhFWQUjj0r7Y3r5CWG3gq+dLRFhFMQ2JabcRVg5WF9bbVlcaax1hDW4s63IjrBysLayNxV5SWLN/imVfb3dfIawmlvbV5m4jrKq4RsBhwf19hbCaWFhYO9u9tVsI6yCw7LisOMJKwrrC2ttvhFWbWG4KNvx8yQN8hbCaWNVXBxteKazRjfVZbCx1pU2U+epk0SN8hbCaWFJYxyu+tVwI6zS2lJTr6mjTQ3yFsJpYT1inS46wWpNLR5Wvdnc9xlcIqwXLjzsjUrLlm9s1vbAKjKWutJpaX+1sO8LKw1LCKlxzhGXLLw8NwtrY9yBfIawW1vFV+ZojLHOEOWjy1fvGI6xErCKsmj1HWA4hpqDRWC87j7ASsYKwatd8e72mFdb2V93/98PDt9YR1tPWR/kKYTVQ/aPNcLTs+ZrC+vyV1B7qQuvxMBbCSsTkvmpc80WFdearAY3VLqzfzQ/zFcJqYGZhta/59n5NK6yvUl8tZqzr7sf5CmE1MK2wTFu+mrC+5hWW2VgIKxVTCsu85DsLNrewSnw1orFMygrUFcJqoOwHmZFwWXGENZOwDMoK9RXCqmcqYfkt+N6GIawxhdWsLISVDBdfPS5GeMWnFXiwnLC+ioW1lrEQVjIchLW1HR0qP3q9md0Vm1dYHx9zC6vNWAgrGXZh7W9IcOmRy72/Y7MK66NYWCsZK9ZXCKuagpmdPKFoVVxrDl7rKwgLYSGshDQJ67K0t//dtjrVdfpu7jkHSzansD4QFsIagKKZPX7AfW0v31KvYBhHWzarsEp9tZCxgn2FsGopHNrtuz+v7by6OhbWm7IQ1iAgrNEpHtrlO8+ytiWcLNpswvpAWAhrBCqGNs/alnC2abMJa3vCswmr1liRrrqivv/RqJjaTGtbwOmqTSWs3REjrFjUAhiN8qnNtLUlnO8awhoQhDU0xVOba2tLKFi2hYU17OyT+QphFfITVenY5lraIkq2DWGNB8IakVtcRVObb2lLKNm2aYT10w/CQlgpuaVVNrXpdraIkm2bRVi3fhBWb18hrBJuYRWObbqdLaJg2ab5GtatIYSFsEQc5HEPC2EdcL5r0/wq4W9HCwirxlcIqw/HqTz83zZfDbuzZZzvGsIakGS+Qlh7/8LHxn8uHNtsK1vK2ao9/cZRdbEWfjuq8NWw059cWJfROD8ylKqsENYhJ+l9IqwBqfFVF2F5Kus2G78nhlIbVPnYplrZYo7T+0RYI5LPV9+Yr/9pOOl/otkSUc3YZlrZYg7T+0RYQ5JVWN80nv/7dIwPDMAeDsI64Ti+eYR172l+YdX4SiCsj2rF7I2n9XkhuARTNbWJVraY0wQfhaUu1sK9I4Ql99WVMg0cjqf2YWF4ZVI3tY
lWtpiSFGcQ1kM7NcIacvw1vhIK6+NYM0XjKXxWIK55VE5tmo2toDDJ0YX12MvswqrylVZY25qpGM/ps4Zx1YXaoc2ysRXUBaqutp3HLiYX1lC+ulqmaiBHxvp53oCqulI9tVlWtoK6RNXVNvPcxtTCqvOVXliVP4CcCuuFUWT10eArhHWKutxWnruYWViVvhpeWKfGeiCpqH5oGNocK1tFZajqcht56WJiYQ3oq37C2iGBrD6qfIWwilGX28hLF/MKq9ZXCOsXgap+hnb9301Dm2FlK2mLeDRe25hVWEP6yigsN2N15nFuHwirEEvK4/DWxqTCGtNXwV91z4lpdr9PmWBnKzHnPARvbcwprGpfIaz+OAzv/rDxd7YWt7xT897GjMIa1lcmY6lLL8ZvevdnDr+z1XjHnpKNNiYUVr2v8gir3Vjqwgtxnd/9scMvbTUh0Wdjq43phDW0r5qNpS67BO8BPjx69KWtJyz+RGy2MZuwGnyVSlhtxlIXfU7ACB+ePvjSNhA6gSRs9zGVsVp0lctXTcZSl3xCzBQf3zD00jYRPwU5O31MJKwmXaUTVr2x1AWfEDTIp3eMu7SNdJmDlr0+ZhFWo67S+araWOpyj4ma5fNbRl3aZnpNQsduH3MIq1VX4wtLXe0xYeN8ec+YW9tOv1GcFRLz2KMOZxDWTL6qMpa61GPiBvr2qhG31kDXYRzWEfLYr8mFNZevaoylrvSQwJm+v2y8rbXQexoHdYQ896jB8YU1m6/KjaWu85DAqW6+b7CtNdF/HLt1hDz3sMHRhdXuq7TCKpyJusojQge7886RttaGaCJbdUQ89qTBsYU1o6/KRqIu8oDY2R68eJi1tSGcyWsdEY+dWVhT+qpoJuoSd4ge71fVNvtWkwb9YH7riHjsWX8DC2tSX52PRF3gBuEzvv0fJ3UMsLZWsownaupnvSCsdKCrA85qSb+2VrIMKGru5QMeTVjtxmqZeUeadaWxWd+hn5aTfm2NZBlR1OTLBzyar2b5E4TvtH1yVfBdAug99POK0u+tjSxTipp9+YAXEVbTwPvS9MlVkdR8UQy9pK7ke2sjy6Si5l88X4SVh9pPrj7Kv6cTqpmXVZd7b21kGVfQEpTPdzxhjf939u1SZavn7x5dmnbkpVVmXlsbWYYWtAql4x3RV0P/qxMnVBmoUm/tqOddc67qSqPIMriwpxYxqLCG/Hfpy6jQz+vEgipSz/rKYOUGkGV2MY8t7mhMX1Uaq2nWKord8z4z/2LUY/5lwJKdyTK/kMdWtLSAsJpGraPZV77GUk/4mVHr9iPLFEOeWtPSmMJq+rehJmLvxxmfp6uH+87ItfuQZJKBTy1lTGGVGqttztnZ85XdWOqx7jB6/XaSjDPwqcUM6asyYzWOOTv7vjIpSz3RA2bowUaSkYY8tbqrIYV1bqyv1jFn51BYjc5ST/OYSdow0DLTgDziHlrZ1IDCOjHW9bs0jjk3Z77aUdZuGOIxFlCejbrSKOq3JCaPuIdWNzWesI6M9fM9WuecmQJfvTvrJQ/VxBopzkZdaBityxJTR8hD65sazVdXDmy1tK+elSUckAfF4agLDaN1W2LqiHhmU1PD6eqbHV3NKKxyXT04SzYYL4rjURcaRvPChNQR8czGnsaz1ZVNXU0orEpffUtLNBNHSuNR1xlH88aElBHyUFVPIt5t9TWfsFp8NeBnzG+U5qOuM47mlQkpI+ShopZS0TzmpCCsY9R1xtG+MxFluD4TYT3QPueMtPlqHWGpywykfWkiyoh4pqijZLTPOSGNvkJYE9C+NSFlRDxT01E22gedjlZfIawJaF+bkDIinqnpKBvtg85Gs6/GF1ZpROo6A2nfm5AyIp6p6Sgd7ZPOxcK+QlgIa5Exf00jrHZfIawZMKxORBkRz5Q0lA/DpBNh8BXCmgHD7kSUEfFMSUP5MEw6DRZdTeArvuaeTlieD5U2lBDDqHNg0tUMvkJYCGuVOX8NLyybrlby1dSLbFmhiDIinsmcr1hGLceoqyl8hbC+ZhaWtJ+MWEatxuorhDULli2KKCPgkYp+MmIZtRp0daE0LXWdkVi2KKKKiGcy5yuWUavBVxdK01LXGYlliyKqiHgmc75iGbUafHWhNC11nZGY1iigCrdHIqxXTKNWg6+++F0NF0xbFFCF2yMR1iumUatBV3yCdcW0RQFVuD0SYb1iGrWYt38vckFfIawLxjVyr8LtkQjrFdOoxXy1GUsduS8VYc2LeY+cq/B6IsJ6wzZqKd8NLO4rhHXBY5E8i3B64hfCesU2aS23Hha21RfCuuKzSH5VeD0RYb1im7SWexfr6op/RPWK2yZ5VeH0xJbGvF6dE+OklTy2saquENY3jqvkU4XTExHWK8ZJK3luZEVZXWiMazJcV8mjCqcnIqxXjJNW8trKgrb6QljfOK+SvQqnJyKsV4yTFrLZzlqyumDLaxZClslShc8DEdYb1knrUCeXA/K6kmOXAtLWNZMT46CVqKPLAYFdybFLAWHrmsmJcdBK1NHlgMCu5NilgLB1zeTEOGgh6uSSQGJXcixT6BOZ8wXjoIWok0sCiV3JsUyhT2TOF4yDFqJOLglEdiXHMoU+kTFfMA5aiDq5HBDZNzm2yT9rXS9JsQ5ahjq4JJDZNznWyT9rXS9JsQ5ahjq4JJDZNzn2yT9rWStZMQ9ahTq4JBDaNzn2yT1qXStZMQ9ahTq4JBDalST75B61rpWs2CetQZ1bFkjtSpKNco9a1klaHCYtQZ1bEkjtmyQb5R61rJOsOAxagzq4JBDbN0k2yj1qWSdZcRi0BnVwSSC2b5JslHvUsk6y4jBoDergkkBs3yTZKPeoZZ1kxWHQGtTBJYHYvkmyUe5RyzrJisOgJahzywLBfZNkpdyTlnWSFYdBS1DnlgSC+yHJTrknrWokLR6DVqDOLQkk90OSnXIPWtVIWjwGrUCdWxJI7occO+WftKiRvDjMWYI6tyQQ3Q85dso/aFEjeXGYswR1bkkguh9y7JR/0KJG8uIwZwnq3JJAdD/k2Cn/oEWN5MVhzgrUsWWB8H7IsVT+OYsayYvDnBWoY8sC4f2QY6n8cxY1kheHOStQx5YE0ruRY6n8YxY1khaHMUtQ55YE0ruRY6vccxb1kRfzjEWoc0sC6d3IsVXuOYv6yIt5xiLUuSWB+G7k2Cr3mEV95MU8YxHq3JJAfDdybJV7zKI+8mKesQh1bkkgvxs5tir8gZ36yIt1xCrUuSWB/G6kWCv/mCVtZMY4YBnq3HJAgL+kWCv3lDVtZMY2Xx3q3HJAgL+kWCv3lDVtZMY2Xx3q3HJAgL+kWKsOD+zRRmZM4xWizi0HJPhLirXq8MAebWTGNF4h6txykDzBrmPKsFfuKUu6SI1pukLUueUgeYQ9x5Rir+Kf16OL1FiGq0SdWwqyR9hzTi5bFSAYTVO2NjLTPlot6txSkD3CnoPyWCpzud4PFLWRmfbRalHnloLkGXYdlH2l7OV2eWB8G6lpjkSMOrcUJM+w66DsK2Uvt8sDO/SRmfZIxKiDS0D2DLsOyhyGQ
7XeD1T1kRlDJlrUwSUge4ZdB2UOwyGTHs/r0UdmLJlIUQeXgOwhdh2UPQx7ud4PlDWSF1MkUtTJJSB5iH0nZQ/DXG2nB3boJC+2SJSok0tA8hT7TsohDGu1nR7YoZO82CJRok4uAclT7DsqhzCsxXZ6YI9W0mKMRIg6uQTkTrHzqBzCsBbb6YFdesmKNRId6uT05I6x96hc0rAV6/5EaTc5sUeiQp2cntQxdh+VSxqmYvs9sU8/KfGIRIM6OT2ZY+w/Kpc0TMX2e2KvjvLhk4gEdXR6EucoGJVPGhFfcZI3ZSggGz6BSFBHJydzjoJR6eMIeKa+qWQ4BSJBnZ2azDEKJqWPI+CZ+qaS4RSIBHV2ahLHqBiUPI6+z+zVVTK8AlGgzk5N4hgVg5LHERKxvKtkOOUhQZ2dmsQxKgYljyMkYnlXyXDKQ4I6OzGZY5QMSp1G54f2aisXTnlIUGcnJnOMkkGp04iJ2KWreW7FJw4N6uzEJI5RMyh1GjEJu3Q1z634xKFBnZ2WzDlqBqUOIyZgl66muRWfNESow9OSOUfNpNRhxATs0tU0t+KThgh1eFoy56iZlDqMoIDVbaXCJQwV6vCkZM5RNCl1GEEBi7vKhUcYMtThScmco2hU6iyCAla3lQqPMGSow5OSOUfRqNRZBAWsbisVHmHIUIenJHWOolGpswgKWN1WKjzC0KFOT0jmGFWTUmcRlK+6rVR4hKFDnZ6QzDGqRqWOIipfdV+ZcMhCiDo9IZlzVI1KnURUvOq+EuEQhRJ1fDpS56galTqJqHjVfSXCIQol6vh0ZM5RNip1ElHxqvtKhEMUStTx6cico2xW6iCi4rX3Nc2lOEQhRZ2fjMwxykYlDyIoXntfsxyKQxJa1AGqSB2jbFTyIKLilTeWBXsQYtQBqsgco25U8hyiWjc3Nsuh2IMQow5QReYcdaOSxxDVurmxWQ7FHoQYdYAqMueom5U+hq6P7dpZCuw5yFFHKCJzjLJRJYih62P7tpYBhxxc+H+K+Be4j8BiOAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Image.open('./mhp_extension/demo/demo_global_human_parsing.png')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABLAAAAOECAMAAACGszjIAAADAFBMVEUAAACAAAAAgACAgAAAAICAAIAAgICAgIBAAADAAABAgADAgABAAIDAAIBAgIDAgIAAQACAQAAAwACAwAAAQICAQIAAwICAwIBAQADAQABAwADAwABAQIDAQIBAwIDAwIAAAECAAEAAgECAgEAAAMCAAMAAgMCAgMBAAEDAAEBAgEDAgEBAAMDAAMBAgMDAgMAAQECAQEAAwECAwEAAQMCAQMAAwMCAwMBAQEDAQEBAwEDAwEBAQMDAQMBAwMDAwMAgAACgAAAggACggAAgAICgAIAggICggIBgAADgAABggADggABgAIDgAIBggIDggIAgQACgQAAgwACgwAAgQICgQIAgwICgwIBgQADgQABgwADgwABgQIDgQIBgwIDgwIAgAECgAEAggECggEAgAMCgAMAggMCggMBgAEDgAEBggEDggEBgAMDgAMBggMDggMAgQECgQEAgwECgwEAgQMCgQMAgwMCgwMBgQEDgQEBgwEDgwEBgQMDgQMBgwMDgwMAAIACAIAAAoACAoAAAIICAIIAAoICAoIBAIADAIABAoADAoABAIIDAIIBAoIDAoIAAYACAYAAA4ACA4AAAYICAYIAA4ICA4IBAYADAYABA4ADA4ABAYIDAYIBA4IDA4IAAIECAIEAAoECAoEAAIMCAIMAAoMCAoMBAIEDAIEBAoEDAoEBAIMDAIMBAoMDAoMAAYECAYEAA4ECA4EAAYMCAYMAA4MCA4MBAYEDAYEBA4EDA4EBAYMDAYMBA4MDA4MAgIACgIAAgoACgoAAgIICgIIAgoICgoIBgIADgIABgoADgoABgIIDgIIBgoIDgoIAgYACgYAAg4ACg4AAgYICgYIAg4ICg4IBgYADgYABg4ADg4ABgYIDgYIBg4IDg4IAgIECgIEAgoECgoEAgIMCgIMAgoMCgoMBgIEDgIEBgoEDgoEBgIMDgIMBgoMDgoMAgYECgYEAg4ECg4EAgYMCgYMAg4MCg4MBgYEDgYEBg4EDg4EBgYMDgYMBg4MDg4MCa7rFGAAA/tUlEQVR4nO3d7XrbtpaGYe003Z00badOstux286c/1lOJFm2RBIkPtZaL0A+z68mtUAAJO5LctP4dCIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIrr2X7fUEyEiWuu/0qmnRkR014pW0EVEHZWrFWYRkbgyrjCLiGRVcAVaRKSomivMIqLQ2rSCLCIKy4AryCKikIy8Qiwi8s6MK8QiIucsvUIsInLMlivEIiK/zL1CrJD+9T31HIiic/AKsBz7Vyr1xIgC8vAKsTxKSoVadJh8vEIs67K0Ai3aeV5eIZZpJVxBFu02P68Qy65SriCLdhpg9V8NV5BFe8zTK8QyqZYryDpov99ST8QhX68Aq70Wrg5I1v88pJ5NeL8vpJ6TZc5eIVZrrVwdiKz/WU49rcCWtNqZWdvi/HoOsDRZcHUEshJWHQqttFY7IiuPq9cQKzorr3Yu1hZXR0Brg6u9kJWvVTVb6i
WOmx1X+yYrz6s9o5Wh1T7IKueqAi/1IgfNlqv9klXA1R7NyrVqH2K1cZXJlnqRY2bv1T7FKvVqT2aVYbUDsky42iZLvcwR8/Bqj2TVeLUTs2q4GlwsI662xFIvc8CcvNqdWLVejU9WJVdDi2Xn1TpZ6nWOl5tXOyOrwavByar3amCxDLlaJ0u90NHy9GpHYrVxNTRZLV4NS5axVytkqVc6Vr5c7Uesdq9GJauRq1HFMvcKsUxyB2sfYpl4NSRZ7V6NKZa9V0mx1EsdKX+vdkGWlVcDimUB1u/qRZTn4RVvsZoL8Wp8sey8Gk4sE68GFMvDK95iNRcE1uBiWXo1mFhGXo1HlotXiNVYlFeDi2UL1khi2Xk1mFhOXgFWU3FeDS2WsVcDiWXp1VhiAVaPRYI1rljmXg0jlq1XQ4nlBRZiNRTq1bBiOXh1VLB+Vy8oPy+vAKu+YK8GFcvDq0HEMvdqHLHc3mABVn3hYA0p1nHBcvBqGLHcvEqJpV7wAMV7NaJYef78888/+xPLBazf1avKKxwsxNpMAdZwYmVq9c8OxfLxahCx/LwCrMokXg0nVjZXxWKpV7aVl1djiAVYvSXyajCwSrx6EyvTLvXa1vPzagixHMHim1g1qbwaTKwir/55+K3BxfIE63f14rZz9AqwKtJ5NZRY2+ikwcogS728lVy9+l29uu0Aq6+UYA0kVplX/0x/a1yxfL3qXyzPT4QJsdRL7jqpVzsC6x/AAiwjsBArndarccQq9GoG1rBieXvVvViuXgFWaYCVV6FXc7A2xVKvMBFguYLFZ8Ky1F6NIlapV7sBy9+r3sVyBmtRLPWa+03N1T7AmntVAVafYkWA9bt6kat5g7UklnrN3abW6px6D7Iq9WoBrCHfYoV49bt6lat5ewVYBblr9OOPP+5BrOI3WIC1G7HcwVoQS73mXnOFatpuwVryaidgBXn1u3qdawFWPwVRNb5YxV5deVpCDLAAC7AqC3FqWyz1NmxX7lUFWP2JFeVV12L5gzUXS73mToujavC3WBVeAdYuwXLxCrAyi4Jq+LdYRmAN95kQsE4hYM3EUq+5z2KxWhNLvRNbVXj1z9K/PBZY/7k1tliA1UvRWo37FqvCq7NOy2+7xgHLAqtctNRrTRcB1q94tV08V6O+xarh6jtOy2+7DgDWTKsMstRrTRfhFWBlJOBqX2Cte7UI1rpY6nVOs+Rqkyz1YpPFg/Xrr+o1d5mAq6RY6r1Yr8qrFGM7BitNVYZY6sUmCwfr/Gv1ojtMoVVSLPVmrAdYFTiViqVebLIIr36dXkS96A7TcDXiWyxLr4YSy1irdbHUi00WAtav02uoV91fEq0Aa1Us9Uofs+dqTSz1apPFgnX7tXrV3SXiKiWWejvWsvVqTSz1Sh/z8Cotlnq1yUK8+nV6CfWqu0vF1U7AavBqRSz1Sh9z8QqwAKsqmVfLYqm3Yy3AMvQKsNbEev+letXdJeNqF2A1eZUWS73Sx1y8Aqys1IvuL51XOwCr0atDg5USS73adIDVQzKulsVSb8da9mClxFKv9DEfrwALsGoCrOzsvQIswAKssoReAVZKLPVKH/PxCrAAqyahV4OB5eEVYAEWXhUl9GpJLPV2rAVYpmAlxFKvdiXeYHWQ0ivAGvSbWIAFWKqUXo0NloVXg77FAizAUgVY2QEWYPEtLHVKrwDr0GAti6Ve7FqApU8K1lws9Xas5eFVQiz1Uh8DrNei32Kp19tjgJUdYAWApV7rasFgqZfbZX2Bpd6N1XzAWhJLvdJJc1b8xFKvdTXA0gdY2fl4BViABVj5AVZ2TmAtiKVe6TTAugZY+rRgTcVS78ZqgGUN1oJY6qWuB1jyugJLvRnrAZY5WHOx1EtdLxQs9WI7TerVsGAZerUglnql0+LAUq90I8DSd5UJsLYLA0u90HluYE3FUi90I7zqoH+l+AjwaiiwTkFg/aNe5zw/sP4zFFgnvoOlLwlIOFjqndjKC6xHsf7pEKypWAcGK8orwEqVEiTCq1HBsvXqn8nA6nUu5AfWf4YCK4wrvEqWICTEq0HBMvbqJtbtV+p1LuUH1n8G8gqwOmiZkBivhgPLnKp3sd5/oV7nYn5g/WcgsMLEUq+z4xYNCfLqx5G8Ovl5NUm90MXcvHonS73EjPBKXpRN44N1OjRYJ0ewXtFSrzAjvJIHWNkFedUpWCdfsL6nXmBGcKUPsHI7OFgnvAoQS73A/usELPU2ZHR0sE6uXgEWXmUFWLkdHqwTYOGVvi7AUm9CTof36iyWm1djgOUslnp1QwRYmQHW6XR0r3zBUi9ujHoAS70HWQEWYAGWPsDKDLAAC7A6CK/yAqyTo1jqheWGV/JkYP04lFeAde7oXgFWB4nBUi8/N7w6d3SwHMVSr2yYZGD9OJJXQWKpF7nV0b1yFEu9sHFSgqVee0GAde7wYJ2czFIvaqBkYP04kleAdQmvTj5iqdc0UjKw1AsvCq/OAdYlwFIGWDkB1iW8uoRXygArI8C6BFiXAEsZYOWEV5fw6hJgKQOsjADrEl5dwitleJURXl3Cq2uApQywMgKsS4B1Ca+kAdZ2eHUJr64BljLAygiwLgHWNbxSBlgZwdU5wLoGWMoAKyO8ugRY1wBLGWBlhFeX8OqajVe/qJcxZoCVE16dA6zXTLwCrKoAKyfAugRY10y8Aqy6ACsnvDqHV69ZeIVYlQFWTnh1DrBeAyxhgJUTYJ0A661fDLxCrMoAKye8OgdY175r0+4VYtUGWBnh1Qmwbl24afYKsCrDq4wA6xxenXv1ptUrxKoNsDLCqxNgXfulQaxfAMsgwNoOsM4B1ukdrBqyHsD6pF7JqAFWRnh1Aqzvffr0S71YvwCWRYCVE2CdWsX69k09//YewCoU65cJWIhVGWBlhFenRrC+7QCsT49gFYn1ywwsxKoLsHLCq3awRhfr0xSsErEAy6qPeJURYJ2axPq2S7DyxZp6dQYLsar6+DGGLPU6G8OrFrC+7QCsTw1gzbwCrOo+BomlXmdjgNUA1rdvOxBrCaw8seZcXcFCrJo+xoilXmZzeFUv1htYX76ol1Ddp0WwcsRKeoVYNQFWZoBVC9bNq29fHlKvpqhXYcrFWuLqDSzEKu9jjFjqZbb3d4NVf59TL8CgNq8mYI3k1qckWOtiLXMFWA19jBFLvczGnp+fv5PT5NXwYF14afFqGayrWX279WkFrBWyUly9g4VYxX0MEUu9ytauYFWS9fcOwHrDpcGrJFi9v9P6tA5WQqw0V7/cDahe23B9DBFLvcrGnm9g1Yj19/Bg3bvS4NU6WB2btQXWglkrWj2CBVmFfQwRS73Kxt7BKjfr7XXPz+pl1DVRpd6rTbA6NetTBlhlfUKs6j5GiKVeZGsPYJWJ9fcdWCOKNRXluzvVYOWI1Z9Zj7rYewVZRX2MEEu9yMaeH8EqIOv+Rc8DijXn5AJPpVd5YHVG1qcAsDArv48BYqnX2NpzrVh/jw3WEiav9FR5lQtWT2RNYfECC7My+xgglnqNjT3PwcoT6+8ZWCOJtUzJtwKxZmDli9ULW
XNW3LzCrKw++oulXmJrS2DliPX3wGAlIfmWLdacqxKwuhBryRRXsDBrs4/uYqlX2NrzIlibZE2/fCSx0ozc4VPh1benArH0ZC2C4uwVaG00BcucLPUCm0uBtSrWnKtXsPoXaxWRb7liLXo1klhJTQLAwqx0c7BsxVKvr7nnJFhps5a+9AZW32JtEPL0LVOshFdlYAnJWrMkwivQSrUAlqVY6uW1twrWIlnLXzjCW6xNQB7BSoqV4qoYLBFZ646EgQVaCy2BZSiWennNPW+ANTEr+UUDiJXBx9MUoDKuxhBrE5FIsEBr0iJYZmKpV9fc83OGWNl1DFYWHk8zsGZkrWpVA1YwWTmABHsFWvctg2VD1kf14pp79gCrQ7Hy5Hh6WhKrrFPPYuXqIQALta6lvDIg6/sQ6tW19vxsK1anYGW68WQDVoVYIWYVwCHyCrVWwWoj6zKAenWNPVuD1aVYuWY82YBVKZYzWYVoCL06OFqrYFWTdXu5enVtTb2yA6sfs/LBeLIC61TzqdAVrXIwxF4d2KwNsKrIen+xenUtzbgy/EzYi1kFVjwZgXUWq/I9lo9adVzIvbpkvBUjtAlWOVl3L1WvrqElr4zBEpNVxoQlWPVvsYzRarCiA64uGe3EMGWAVUbWwwvVq6tukSsDsWYDqhZYKsSTKVjtYhmg1QZFJ159OhpZWWAVkLUHr1JaGYg1H1CywmIcnozBUotl4EQvXp0zeSjGKBOsTLImr1EvrqY1rTzAiierxoYuwaoly0iJfrw6Z/d89F02WBlkzV6hXlxxG1q1i7U8ZuQSq2B4MgdLJpahER1xdc70Mem2ArC2zJp/uXpxhWVw5QNWnFmVLjiAVTuVJrFshejLq4OIVQbWCllLX6xeXElZWjWLtTJuwBprVXjyACteLHMhOgPrGGSVirUAV/KL1GvLL5srP7Cc0frSAETPYGWL5QFEZ159OgJZlWDlpF5afgVeNYmVMbrPAptIeDIFSyOWDw/9gbVXsT58uP0TYJVx1SRW7hVs19cIwpMXWHFiufHQnVc7FevDhzexjg5WqVZNYhVcw2p97R58V+o0NliOPPQH1g7F+nDt9RdHBqtGqxaxyq7Svj4LDs5enU4nD7BixHL2oTuw9iXWh7uuvz6uV7VcVYtVfJmm5Rlp8ArWNQOvHMBaFSuCiK682pFYHx67/uZBwWrQqlqsmgvVrs/KAlewAsQKdKIbsPYh1odZr//igGA1alUrVt2VKtZn5cCX67fc3we2ACv0LVY0FX2AtQOx5lwdFSwDrCrFqr5U6RKtGDg3BlgpsQRWAJZBS1y5/8kG5YJTWWl1LsyrQrHMELj0CJaFVy6fCZfFkmDxSe/V2GIta3UHlpNYwiUnMrPqWphXzyVkmRFw6WlksERYfAKshpJcvXvlA9b9BXrIBqmH4rx6zibLTIBr3mB5iiWy4tODWKIZ1J8TaWmtHsByEasrrwx0WirQq+c8sszO/7WnccCaiSWi4prYqzHFWuXK/S2Wbt3TLLBIFMjVc45Ydsf/2sQrB7DcxJJRcU3s1XhgbWnl/hZreTLBu3By5eo5Uyy7y22s1e7wX5t6NRBYOipeU2p1zuLsxJXBVSBYycu6Z0dFskCuzq2t1u7sX5ty5QKWk1haLTrI5PwElcXVoxyeYG1c2i9bKlKFerUmlt3Jf23mVedg3Yml5kKf1SHyL5OriRp+XqUm5L4R1lSkCvXqOUmW4cG/Nveqd7DexFJr0UOWR8mxbK7kYDmT5SBFomCvEmJZnvtrc69sxHKc+G1MNRY9ZHmYvCrQyluszGm5bYWLFImCvVoEy/LYv+YF1mRc0zlfh1Rb0UfmZ8q6Qq6mWkjAcjLLB4pksV4tiWV66l9bAMvk/yWcfNS0nfRlSDUVfeRxsAwr5mpGhQgsB7K8oEgV7NVcLNtD/9oSWBZiTf7jo+2kzyOqpegk82NlWQVXcyi8wMqYnelm+EGRKtirqVi2Z/6WE1jTP9xlPGu8esv0UJlWxdWCEjqwLMnyhCJVsFePYhkf+VtzsEz+iuTp/+5jPW3AumV3pGyr5GoRCR1YZmT5QpEq2Kt7sKxP/K0ZWCZ/qfuTM1g//6x2opuMDpRt9Vr5ilU3SYMNcYYiXahX92IZn/j3PMB6+/E7TtP/GbDeMzhOxrVx5SlW7TRbd8QdinSRXD2/i2V74O+bgGXyY3MAK7DWw2Rcs1bLPmjBajUrgopUoV7dwLI97489bq3FD/p6cgYLr+5rOknWWXCV0EEMVgtZIVSsFOfVTSzT8z7tfmstfjLh/c+Qvo1rOmHAeqj+IFlnxFXKBluwzKa1WRAVfXRZselxn3W/twY//PkJsGKrPEbWmWm1QoMYrDqy1IQEd17yi+l5n3b/A3Oe2sVyB+tnwHqs5hRZZ8pVGgZDsCqnXLwzakCiezlned5nvbPyYI2FV15gqYnoq+IzZJ6tVmsuyMEqJksNSHTuYD0lwKoUC7DCKzxB1lk6lcGCmVgOc1tI7Ud8cWA9PbWL9eQOFp8IZ5UcIOvsiMpWQQ7WeW65aqn5iM/5LdadK35gWU4YsGYVK2OWGU/LKiSSg/V9cpnvs9R6CNKBVSNWCFhqIHqrhhqLjGRKmZBOD9bGBG+p8VAkBKtcrOkAr/fNcsKANa+Km+YMUKoFwQKsCLHUeEgSglUsFmBJqvKmMQORGkCoBet+TNcJXlLToUkJVqFYAV4B1kLV6lTXftgbPegCrC2x1HRocgXrHpYlsMrE4g2WqGp4KjM4640e9AHWhlhqOkRJwSoRa/ba27iGEwaspVrwKc/ipDdz0A6Wu1hqOFQ5gjWBpQ2s+Wtv4xrOGLCWauGnOItz3s4BYHWbFqx8sRJgPRmDpcahy9oIKsrimFtwUOWVOVhrU1TDocrvm1jTd0JNYi288m1YuykD1mJtBpVkcsotPCj3ajqg8wTVbugKAqtRrBCw+ES4XLNDuZkcchsQqrwyBys9QTUbutRgZYm19Lq3Uc1mDFiJ2inKyuaM24BQ5VWgWGo2dHmB9QiLE1jXf7KaMl4lMuFoK5sDbuRBIVhL47nOUK2GsCiwWsRafNn7mFZTBqxENiKtZ3S+bTw4lYm1OJzrDNVqKJODtS1WyivACsrGpLWsjreNB6cisJaHc52gGg1lTm+xCsDaEiv5Bus2ps2M8SqZFUvprI63jQfnegFrcYZqNJT5gDWl5VQv1vJL7oe0mTJgJbNiKZnZ6TYC4VyNVx6fCRcmqDZDW+dgJV5yP6TNlAErmZ1My5mdbSMQrlV4BVgBubzFKgNrTawMsEzEwquV7GxayuxoW4FwrcIrF7DmE1SToa0HsNJirXkFWFHZ4bSU3dE2AuG1cq8AK6IewEqJlfryyZAGMwaslexwWsjuZJuJ8FonYM3mpxZDnANYc1u2wFoWK/nVkyHbZ4xXaxnyNMvwYNeWmlqxV05gTSeoFkOcw2fCCrAWxcr0ykAswFrNUqjHLM91danJlXoFWCH5gtUgVvJLZwM2zxiwVrMk6iHLY93U8vQ6Aetxdmov5JmDtaTLNlgzsTa8
sgSL//F5PWOmnI51U4vTK/TKDayH2am9kAdYgLWROVQup7qtxQkCVoc5g7X5FzYkxNryyhAsvNrIXCqPQ93a4hSLvPID635ktRfyzL/rXgvWU5ZXgCXIHiv7M93a8iRLvAoBS81FBxmDlRCmDKyVr7rdOTOwfgasrRy46g6sZbJKvHIE631otRYd1A1Y72StfQ1gxXcIrz4sktUJWG9jq7XooI7AeiUrxytTsNQgdN8xvMoWa/vVXlNTY9FDtmCljMkE60zW6r82B4s3WBkdGKwFsXJe7TM3tRVd1BlYGy14BVjuHcSrTLFyXuwyOzUVfWT7nwkBa5cdxKu8P9yQ81qX2amp6CNTsJLG9AyWWoMBOgpYOW+xsl7qMjk1FX3kDtbdz7jpDyzeYGV1FK9yxMp6pcvc1FT0UQxYVmIteAVYAR3Fq22x8l7oMjU1FX1kCdYKMoA1dIfxalOsvNe5zExNRR+NCNbj77XMGK/yOhBY638aK/NlLhNTU9FHhwaLN1iZHcir9bdYmS9zmZiaij4yBKsAGcAaqwN5tSpW7qtc5qWmopPGA2vye21gqSkYpKODdQKsXrJ7iwVY++1AXqX+lvc1raavcpmXWopOMgNrFRkjsQBL1JHASolV8CKXaaml6KQosEzEMgdLDcEoHQqshFgFr3GZlVqKTgoDy0CsxXFavAKszA7lVZVYAQtVU9FHw4E1/U3AiuhYYC2Llf8Kn0mpqegjd7AMPxMuD9MAlpqBcTqWVxViAVZQ44NVLRZg5Xc0sIrFAqyg4sBqFis1CmD5dzSvisUCrKiMwNpyxhGsWrEAq6CjedUkls+E1FJ00jBgpUcBLP+OB9YyWVlf7DQfNRV95A6W1TexVkapBEuNwEgd0KuyP90QsFo1FX20B7CqxOINVklHBKtIrIjVqq3oolHAWh0EsNw7oFdFYgFWUP5g2fzvhICl7ZBgFYgFWFGNAdZpfRDA8u6QXn0o+I+FEetVW9FFNm+xAGvnHRWsbLEAKygTsLK0MfAKsGQd1KtssQArqMOCpRZgsA4LVqZYIQtWY9FDAWC1/7WjW2BViAVYZR3Wq0yxACuoocBKfgFgeXdgsIrFcpuIGoseGgGstycBsGQd2Ks8sQArJsCirI4MVo5YgBUTYFFWR/YqRyzAiskCrDxx2sFKfwVeuXdssLbFAqyYBgJr5SsAy71je7UtFmDFBFiUFWCtry9k0WotOgiwKK9je7UlVsyq1Vp0kAFYmeK0grX6JYDl3tHBWhcLsIIy+DEUJuIEg6U+/QN2dK9WxQpatlqLDgIsyguw0mKFLVvNhb5hwFr/EsDy7/BepcQKXLeaiw5qBitXnJ7A4ltYFQFWJlh8JvQMsCgvwNK/xVJr0UFxYNWKlfViwHIPrz5kiuV4fTUX+gCLMgMswNLXClY+Of2AhVdVAdaHPLEcL6/mQl8EWG1iAVYnAdY5wNI2CFhbXwRY/gHWNenK1V7IawSrwJxasczB4ifmVAZY1wBLWOOfHPUHK/OVgOUfYL0GWLoAi3IDrFuAJSsUrBqxzMH6GbAqA6xbgCWrDawidABr8ADrLcBS1TtY2S8s8Aqw6gKs92QrV4OhLgSshm9iAVZHAdZ7gKWpc7DyX4hX/gHWfYClKBasYrEAq6cA6yHAEtQElgc8iy/LeV22V4BVG2A9BliCYsCq/L9zAKurAGuSYuVqMNQNAVbO1wKWe4A1DbDCCwKr7q8dBayuAqxZgBVdA1gV9jiClSPWz4DVEmDNi1+5WgxxPYP19ggAVhcB1jzACi4KrJqf/+wDlvrYjxtgLRS/cDUZ2mLBKhOryCvAcg+wFhIsXG2GtCOBxSfCtgBrKcAKrR6sMq8awMr8asDyDrAWi1+4Gg1lgEW5AdZigBVZGFgVYgFWXwHWcvELV6shrH+wcr8asLwDrESAFRdgUW6AlSp84Wo2dFWDVepVxf9OWPb1mWCpT/3IAVYiwAqrY7AKvx6w3AOsRIAVVjhY+QIBVm8BVqrwhavdkFULVrFXgDV+gJUKsKKKByubIGuvAKs1wEoGWEH1C5bPGyzAagiwkgFWUEcDS33mhw6w0gFWTJVglXtVClaZboAVEWCli164Gg5VewErY86A1RxgpQOskOLAKhOrTDfAigmw0gFWSJ2CVfh2LBss9YkfPMBaKXblajhUARblB1grAVZEdWDVeFWC0KngawErMMBKB1gRxb3BKlEIsDoNsNLFLl0Nh6jAT4RVYGUPDlghAVY6wApIA9aWQ+VvsAArJsBKF7x2NR2aDgaW+rwPH2CtFLt4NR2aAr/n/vTwPE9/I/GFgNVXgLUSYPmn+I+EyZa+ErD6CrDSAZZ/XYE1f99l+CNzAMskwEoXu3o1HZo6A2uZMMDqJ8BKB1juvcSBVeeV3c+pP4OlPu07CLDSxS5fbYekOrBqvAKsfQRY6YKXr8ZDURxYtV4BVl8BVjrAcg+wqCzAShe8fjUeiqrAqvGqHqw8sQBrq9PJaBjASgVY7gHWYQIs94I3QI2HoJcjgXXsP9VwAiz3AMu7KrCqvAIscVZgZYvlfl77K3oD1HzEdzCwbE7smAGWf4DlXQ1YdV4BlrYTYPkHWN4B1lECrIAAy7sKsCq9AixtJzOxACtZ+Aao/QhvP2Bl/eBnkwM7ZoAVEWA5Vw5WvFeAZdAJsCICLN+GeIMFWAYZgpUrVsB57S7A8g2wjhJghRS+AWpBgov7RNgFWDbndcgAKyTA8m1PYG2LBViWYwHWQoDlG2AdJcAKaWMDHLZFTUhsxWBVewVY2gArpLUN8NkZNSGx7QqsTbEODNbJEqxMsSyP5TClNsBva9SGhAZYBwmwglreAc+9URsSWilYEq8Aqz3ACmppB5z3Ro1IZIB1kG5baTsaYM163IGQzVEjElkhWPVehYC1JRZgAZZz2c+04TXViEQGWAfptpW2o4WdyVHKf6Ytr6pWJDDAOkhve2k7WtiZHKOCZ9rysmpFAtsZWOti4dUJsBwreqYtL6xWJLAysBq8Aixpd5tpPFzQmey4umfadApqRuIKA6vurr5WcB3AWux+N42HCzqT3Vb5TJvOQc1IWC+AdYzud9N4uKAz2WnVz7TpLNSOhAVYB+l+N42HCzqT/dX0SANWVYB1kB6203i4mDPZUU3PssvmqB0Ja39grYkFWNeMh4s5k93U9CR7bY4akqgA6yA9bKfxcEFnspOaHmS33VFDElUZWA1e9QAWf6P7LePhQo5kLzU9yG67o4Ykqh2ClRbrwF4BllVNz7Hf7qghiQqwDtLjfhoPF3IkO6npOfbbHTUkUQHWQXrcT+PhQo5kHzU9xo67o4YkqiKwWrwCLG2P+2k7WsyR7KKmp9hzd9SQRLVHsFJiHfl77lNibEcLOZJd1PQUe26PGpKo8sH68882sCL+SnfASjbZUdvRIk5kFzU8w87bo4Ykqiyw/nytDawGsQCrucmO2o4WcSK7qP4R9t4eNSRRZYD153uNYtWaVXoVwFrocUtNBws5kV1U9/xGbI8akqi2wfrTEqwqtcqvAFjzHvfUdLCQE9l
FpY9u4PaoJYnpZQusP/+0B2uhtdtaMVzCK8B6y3SwmBPZQ5UyReyPmpKYNsD6c5q5VD4B1jzAMgiwxK2DNfNqcLBMDv6oAZZFgKVtFay5V6OIBVjzAMsiwNK2AtYSV6OIxSfCeZZeARZgaUqDlfBqDLJ4gzUPsCyyBYuf/lxahVdDkAVYswDLJMCSlgJr1ashxTr6J0LAsgmwpCXA2vBqALH+uAZYbwGWTYClbBmsTa+6F+uPt/DqNcCyyRQswx1SUxLTIlgZXvUt1h8PAdYlwLIJsJQtgZXlVcdi/TELsD4BllmAJWwBrEyvehVrztVFrMN7Zfqjn+OPY0cBlrA5WNledSnWIlff4w0WYFllCpbdFqkpCWn+50YLvOpPrBRXZ7AszvzQAZZVgCXrZSpWkVe9gZX26o/fAAuwrAIsWVOwyrzqS6wVrv747TeLIz90D6fEdDDAAqyodgTWmleAZQtW/GnsK8BSNQGr1KuexAKs9QDLsB7FUlsS0iNY5V71A9aqV4AFWKYBlqaX3YC17hVgAZZpPYJ1BLGOAtZvgAVYpgGWpEewxuUKsLYzBEtwGHsLsCS9PIg1LleAtR1gWWYIFt/Eyq8BLLVQ0wBrI8AyDbAU1YKl1mkhwNoIsEwDLEEvlWDV/Cxm9wBrPcAyDbAE1YLVo1i8w9oIsEwzBMtql9Sc+PdSLVZ/Hwo3vAIswLINsOKbgjWyWIC1EWDZBljxtYDVmVhbnwgBC7BsA6zwXmZgDSvWqleAdc4OrPij2GOGYBltk9oT917axFIjdde6V3zP/Rxg2QZY4TWC1Y9YG14B1jnAMg6wolsCa0SxtrwCrHOAZZwhWDb7pPbEvZd9iLXpVRVYnz83Her+UngFWIBl1wtgpfv8eW9iAZZ1vYmlBsW7l12Itc1VDVifP+9OLMCyDrBiS4FVJNbrfu8MrM+3mk52X90fD6txQs5ht1lhZbVRalC8swTrpDIrx6vyvyD58w7FujsdVuPEnMNus6HKbqPUoHg3BatKrLWb0ItXxWB9vq/pdHfU3Y2xGifmHPZbjUyOG6UGxTt3sB4aFqy9kHV3L4yGCTqH/Va2F+4bpQbFuzRYBX8zVnECr0rB+jyv5Yh30vstMBom6hz2W/nT77pPalC8m4FV8Rar9gZtUpT3ojCwvpPV/OPdxb3vo9EwYQex22qffqdtUoPi3QpY2WK136scqVJfnenVH22fCF87Nf9wLG3vm2g0TE42J7Hbip92111Sg+LdGli5YhndsYL8wVr26vP18k2HXdr7FhoNk5PRUew1m0faajZqULybg1X6FsvmfhV2terrOQFY45J1MllA4c2yOoudZvJAm81GDYp3q2DliGVyu8r7+tZJAdaoZNlMv/BemR3GPrN4nO1mowbFuwWwisSyuFsVfb1LA9aYZNlMvvBm2Z3GLmt/mi1nowbFu0aw2m9WXV/LxSo6kgmvPk+m0XTsBRlNvPBmWZ7HHmt7lI0nowbFuyWw8sVqu1UNfS0Xq+hIZoI1GFlW0y68WcZHsruqn2KPyahB8W4LrFWxqu9Uc1/LxSo6ktlgjUSW2aQLb5bHueypqifYazJqULxbBCvzLVbVjTLp6zQhWKdRzLKbceHd8jqbvVT+/DpORg2Kd5tgpcUqv09WzbzKEKvoRKa8SoB1GsEsu9mW3i7H49lHPe2HGhTvlsHKEav0sbVrwSs5WKfezTKcaun9cj2gPdTTbqhB8S4B1qZYpQ+tYUtebYtVdCSrwDp1bJbpPEtvmPMR1dfTTqhB8S4HrC/bVF3/CGdIy15tgVV0IpNebYHVK1m2syy9YwHHVFtP26AGxbmUV0mxFmV6U6P0Ua4o4dWWWEUnsgGsU49mGU+x9JbFnFRhPe2BWhTnMsH68uDVRKYHNkof5sKSXHUE1rnMQ19LRlE10yscb72ww6qqpw1Qi+JcGqwlsR7AuN6LmRvFj3NJK16ti1V2JNvBOq2jkPllFhXPrW7EtSLPq6Selq8WxbkVsCZifbls/qoYzmJtXvkOqClgRScy7VUJWJdyDnyTHhvrKJhWdsU3LvbEKupo7WpRnHMAy4msrCu/YTUBrPigW4F1bfOwNwGyvgL76xUvP/zQhtfRytWi+LbmVT1Y9mLlXXbZrw7AyqiJkCkmm1M2uEZBgmMbXEfLVpPi2ypYL9VgmYpVidW9WF+KDuSKV35gnex+jE3GlK0ulZfm5EbW0ZrVpPjmBZaZWy1avfVlDLAutfORM+XSq6xfcSvZ4Q2roxWrSXFt3asFsEqpKH+47zPR6iJW0YlcAyvKrdPyN7621cibLGDZ1s961aT45g5WrVhmVNWIlQFWFFrFZc4UsIzrZrlqUnzbAOulHawysqydessDrB7Nyp0mYFnXy2LVpPh2OvmDlS2WNVL3lYCV71V3ZGXPMtQr9Rk+VGpSfDttiDUFq5KL7UfaVKelvMDqyqz8OQLWXlOT4trlaQoA69bSw2zC0XaOYPWDVv4EQ70CrMDUprh2eZqy32KdwnRxyBmsPtQKAKtqXupDfKTUpnh2fZoKwFKr01AEWHK0AIvUqHj2+jgBliFYSrcKZhTqFWAFpkbFsdvjlPsWa2SvSsSyAEtiFmDRjsF6e5wAywWscLJKJhPqFWAFpmbFrbvnCbB8wAolq2wqoV4BVmBqV7y6f54ywVKT05YELFexGmZS5xVgDZAaFq8eHqg8sdTktKUBy0+sponEegVYgalhcerxgQIsL6+MzbKaRaxXgBWXGhavHh8owPIEa6pFfVZTCPYKsOJSw+LV5Ik6AFhPSrBsxDKbQbBXgBWXGhavJk9U1lssNTmNScFqF8twBtFeAVZcalicmj1SgOUMVhtZptcP9wqw4lLL4tTskQIsd7CqxbK+fDRXgBWYWhan5s9UhlhqcRqTg1UjlsPV470CrLDUsHg1f6b2D1auWIZGJMjIzeXyAq8AKyw1LF4tPFSA5e1VmVhOV1d4BVhRqV1xC7AkYGWL5XdxhVeAFZXaFbeWnirA8gdriyz3S0u8Aqyg1Kz4dUiw8sRyNeOOjmCtrpct9gqwRkrNimOApQTrDZBArc7XE3kFWDGpUfEMsHoQKzjA2nVqU1w7JFifDw6WyivAikhNim+ABVhhXgFWQGpRvCsQC7B2EmDtODUo3gEWYEV5BVj+qT3x74BgdfefCYMDrP2m5iQgwDoYWDKvAMurf//79R/UmIQEWIAFWMP272vXX6gtCel4YP0MWAqvAMu6f793+bWakphywXoBrD2k8wqwDPv3tPNvqimJCbAAC7BGaobVK1hqSYICrCOBJfQKsFpblAqwAAuwAKu31rA6Nlhb33VXc9NeFlj7FEvpFWDVt8XVGSw1JEHNnyvAAizA6qptrwBrz2Ad9zOh0ivAqi3DK8ACrB1W5hVgdVIWWGpIogIswAKsvuMN1l0LDxZgAZaDV4BVGZ8I3/vrr2KwTmpvmvv5sGIB1ogB1q2//joiWD8DFmCNVJZX+wfrr9cA6zBglXkFWH3EG6xzf/2VBish1p7AOpZYv90CrAEDrOd7rwBr32
D99pDSK8Cqi0+E91yVfxMLsMbpt3mANVpHB+uvadliHQ2swcVa0KpILMDqo2N/IpxxdTywvhzhLVZCqwKyzL0CrKoO/QZrgSvA2iFYa1xlimXvFWBVdWCwFrlaBGtZLMAaow2u8sgCrE46LlgJrwBrZ2BleLVNloNXgFXVUb1KcVXwmfAVrNHFuoKVI5aanqryvNowy8MrwKrqkGCtaFXwFguw+i+fqzWxXLwCrKqOB9a6VoC1J7CKvEqS5eMVYNW07dW+/lDDplYJsJbE2hdYGWKp+Smu1KsEWYDVT4cCK0crwDo2WAtkOXkFWDUd5xNhplYFYL0AVt9VeTUXC7A66ihvsPK5AqzdgFUr1oQswOqoI4BVglUSrORnwuHB+roI1n+/dver44D1QJaXV4BV0bZXo4NVqlXJW6x9gfXzG1Kp1P6UZyEWYHVUjlcDg1Wj1XHB2vJqQLHqwXojy80rwKpoz2BVapUCa0GsnYD1Nderg4n1m69XgFXRbj8R1mt1QLC+7hesZrEAq6t2CVYTVmmw5mLtCqwcr0YUq4ksR64Aq6Jtr0YDqxkrwNobWA1kuXoFWOXtCiwTq9bAmokFWKMEWPvIxKv7ox7g0sYMIsF62QtYX7PBOpZYgNVZBmAtHfgQpdKXdwNr8S3WPsA6nfYNVp1YgNVZ7WClT/2IUm2BNRVrN2CdssE6kli+XgFWcdtebYGVdf6HgeoWYAEWYHVYFVj/9702PbpE6r6Vh2yfYJ0AC7AGKAOs02mK1TWFI2GtPWV7BSvXqwOJ5ewVYJWW4dV3sE5zrS5iqVVxbP05A6wxA6zRywTrStb/TVKj4tnGg7Y3sE6ABVgjlA3W6TTl6tBgPYq1A7DO7R+sUrE8rbqkPv+jlePVK1hzrw4N1mlXYF2XBFiA1Xn5YB3Mq22wToA1YIA1dFlencFa4OrwYJ0ODNawYnXmFWBl9rpVWV79b8Krw4N1AqzxAqwRu21XFljLWgHWjsB6XQ9gAVaX3XYr7xPhIb06FFi39QBWtFeAldNts/K8AqxUu/ke1m1BgAVYolb2432z8rz6X8Babjf/lfBtRQcAq7NPhID14V6kpV25++22N1iA9S4WYI1SZ14B1gJXDxtz91t8Ilxt61G7/6Puh/MKsIwyPv0/fc94SNeK9opPhKtt7N4LYA1Yb58IT5Zk/fSa3YiulW5U5husY/4prL8K/roGwBqn/ry61nz6f7qv+w+aNVuU6dXl/8sBrGkvgDVkvYJ1rfL4/zSrcUCH2jcHsDZa375HsUYG631N+werw0+EkwodmFv1KFYXZJlsTIlXhwRrcwfvwVKj09L7igBL7tWlPAZSVj2CpRbLak+KvAKsRHsA6245JWANKVb/b7BurRmwKtVcLBVZpvuR6xVgbXUVS61Offdr2TtYRV5pwVpmJouqGVjxZJlvRqFXRwSrbEPV7NR3v4qdgzWUVxdlCoBaF+t1vAGpupTt1SpY+xarbEfV7FT3uIxdg1XmlR6s06nFqzlYk0bB6lThFWBtpoantsdV7BmsQq+GB2tTrLs6heq1fK8AKzs1PJVNVrFjsAb0Kg6sRB1gdSryCrCyU8tT2WQV+wWr1CvAektA1Q/XLv9c4xVgbaaWp7LpMvYK1pBeNYJlJlZwP9x1AqzMSndZLU9ds2XsFKwxvXL+rnuf/TDNEKw9i1W6z2p66potY59gFXsFWPHNpCoH630wwNpMbU9V82XsEaxhvWoSSz317FJSAVZJxduutqemhWXsEKxyr/oBq14s9cQz29CqTKz3YRNg7Vis8q1X61PR0jJ2B9bQXlWLpZ52ThlYFYF1NzRgbafWp7zFZewNrAqvugKrTiz1pLfL1KpErLvRAWs7NT/lLa9jV2LVcNWXV1Viqae8UQFWBWLdXyEF1n7FKr8Lan6KS6xjR2BVcdUdWOViqSe8UalXmWI9XAOwtlMDVFpqHXsBq5Kr7rwqFks93fXKucoT6/EqgLWdGqDCkuvYB1i1XI0Plnq261V5lSPW5DpHE6viVji54jPsCli5YqlJWm1PXhWJpZ7qepVcZYg1uxRgbeXEisuwX3cO1r68KhFLPdPV6r3aJGt+McDayIsVl3FXvNoBWHvzKl8s9TxXa/JqXazF6wHWal6uuIy7BlamWGqV0tV71S1YmWKpZ7lWI1frZCWuCVhrObniMew6WHliqVlKtkev8sBST3IlA65WyFq58EHEqrknTq54DLtnsHbpVZZY6ikmMrIqSdYPr3/VXyrASuTkisew617liaWGKdFOvdoGSz3BhWypWkDr9hsb8wCsxZxg8Rh2C6wcsdQyJdorWBtiqWe3kBdXS23NBbAWcoLFY9gdg1UvVs09D6yaK41mkVxtg3UCrHlOsHgMuwnWtlhql5Ltk6tTWqyMV8VM8K5YrjLAOgHWPB9YHEbNAGtTLLVLyfbqVQKsvJcETfFSNFZ5YN3IUsviVNWd8oHFYdQcsLbEUruUbLdgLYiV/4KQCUqsyvXqVSy1LE5V3S4fWBxGzfFqSyy1S+n26tVUrKIv956aiqoSsM5kqWHxquqm+cjiMGoeWKtiqVVaa69eFb5lKuStPi1WJWCdTmpYvKq6cT6y+Iya1aBgDflz6fMq4Kfw42Ntaqsu5U9XDYtXVffOiRafUbMa06tCsarutapse4q/4VWR2qm38qeshsWrqvtnL4sPWAVLOgBYVbdaV7VXtmKpiXosf95qWLyqu4vmtMjBSomlFmmrvXKV2RJXdmSpeZqXP3c1LF7V3UkfWnxGzW1MsHLFqrvPvZfyql0stUyJsuevdsWtutvpQ4vPqNkN6VWeWJ/rbnPvpb1qIkut0krZa1C74lbdLfWhxWXQgj4PCda2WN+/pu42994qWJVmqUlaL3sZalfcqrmnJydbXAYt6Hz6BwRrQ6zLl1Te5r7b8ipBVvLUqzXaLn9v1K54Vf6UXHOxxWXQgq7Hfzyw1sR6/Yra+9xzGV7NzZqcfCk/5WXvjdoVt2ofFh9bXAbN73b+R/Pq0opWh/bqkSy1OI1lb47aFbdqnxZjWlzAKl7U++kfjqtrCa72CFY+V3dmqb1pLnt71K64Vf3A2NrSG1jfyQq2xqhFrnYIVqFXV7TU3LSXuz1qVvyqfmJcbHEZND8RMrbNtfq8P7BqvPrpJzU37eXuj5oVv6ofGRdbXAbNTsFLUNW3udMAaz01K37VPzMetpiOCVh31d/nHqvz6jhgqVVxrP6h8bDFY8z81Kh4Vn+fO6zSK8DaQfVPjQsuHmNmp0bFtfob3V21XgHWDqp/bCxtASz36m90b1V7NT5YuVukVsWx+ufG0hbA8q/+TvfVgb0CLMC6T02Kb/V3uqvqvQKsPdTw6Hjg4jFmdmpSfGu40x3V4BVg7aGGZ8cDF48xs1OT4lvDne6mFq524BXfc+8OLMtBixekJsW5hlvdR01c7cErwAKs+9SiONdwq3uojasjeQVYiTxw8RgzN7UozrXcanmNXO3CK8D6a89gla9HLYpzLbdaXatXgLWXWp4iO1wAK6CWW60Ors7l7pYaF
c9aniIPXDzGzE0tinMtt1odXp3L3S01Kp61PEUeuHiMmZtaFOdabrU6vDqXu1tqVDxreowccDEbErCmNd1qdXj1A3+q4VzTU+SAi9mQgDWt6VargyveYF1qeooccDEbErCmNd1qcT/8gFeAda7pMXLAxWxIwJrWdKvF/VAnlpoY23I3S22Ka23PkT0uViMC1qy2Wy3temAP7hVgnWt7kOxtMRrxK2BNa7vT2m5H9sBa/QBYl9oeJHtcrEYErGltd1rb+6E9Llf8ENVLjU+SPS5GI1Z4BVi9dn9sj8oVYF1rfJTsdTEaEbCmNd5pZY8H94hYncveLrUprjU+Sva6GI0IWNMa77Sy6dE9oFY/ANa1xkfJXhejEQFrWuOdFrZ4fI+F1bns/VKb4lrrw2Sui82AgDWr9U7rUkvRR/n7pTbFtcZnyV4XoxEBa1LjjVampqKPCjZMjYpnjc+SvS5GIwLWpMYbrUxNRR8VbJgaFc8anyV7XYxGBKxJjTdamFqKTirYMTUqnjU+TPa62I+YndoU1xpvtDC1FJ1UsGNqVDxrfJjsebEfMTu1Ka413mhhaik6qWTL1Ko41vgw2fNiP2J2alNca7zRwtRS9FHRlqlVcaz1aTLnxWZAwJrWeqNlqaXopKI9U6viWOvjZM6LzYCANa31RstSS9FJRXumVsWx5ufJmheT8QBrVvONVqWWopPKNk3Nil/Nz5M1LybjVXkFWF2mlqKTyjZNzYpb7c+TtS8m4wHWrPY7rUkNRS+V7ZraFbcMnihjXyyGA6x5BndakhqKTircNbUrbhk8Uca+WAwHWLMMbrQmtRSdVLptali8MniirH2xGA+wphncaE1qKTqpdNvUsHhl8ERZ+2IxHmBNM7jRmtRSdFLptqlh8crgibL2xWI8wJpmcKM1qaXopNJtU8PilcETZe2LxXiANc3gRktSQ9FLxRunlsUpg0fK2heL8QBrmsGNlqSGopPKN04ti1MWz5SxLwbDAdYsixutSC1FJ1XsnJoWnyyeKWNfDIYDrFkWN1qRWopOqtg5NS0+WTxT1ry0jwdYswzusyS1FJ1Us3VqW1yyeKaseWkfD7BmGdxnSWopOqlm69S2uGTxTFnz0j4eYM0yuM+S1FJ0Us3WqW1xyeKZsualfTzAmmVwnxWpoeilqs1T4+KRxUNlzUv7eIA1y+A+K1JD0UtVm6fGxSOLh8qal/bxAGuWwX1WpIaikyp3T62LQxYPlTUv7eMB1jSD2yxJLUUnVe6eWheHTJ4qa15EXgFWf6ml6KTK3VPr4pDJUwVY3dd8j0Wppeik2u1T82KfyVMFWN3XfI9FqaXopNrtU/Nin8lTBVjd13yPRaml6KTq/VP7Yp7JU2XuC2BZ13qLVaml6KTq/VP7Yp7NY2XNC2BZ13iDZaml6KOGDVQDY53NY9UbWJXLUKviWNv91aWmoo8aNlANjHU2jxVg9V7b/dWlpqKPGjZQDYx1No+VuS+AZVzT7RWmpqKPWnZQLYxxNo8VYPVe0+0Vpqaij1p2MASRgIu8X8wiwOq8prsrTE1FHzVtYYQh/td4v5ZJfYFVuwq1Ko613Fxlaiq6qG0LQxDxv8j7tSwyB0YC1o7Fqr+12tRWdFHbFsYg4n+V92sZBFidV39rtamt6KLGPQwxxPsiDxdrz9wXwLKtekvEqa3oosY9DDHE+yIPF2tvJ2DtV6z6LRGnxqKDmvcwxBDnizxerLmuwGpYh9oVtxr2RJtaiw5q3sMQQ5wv8nix9nYC1m7FatkTaWotOqh9EyMM8b3G5GLtmQMjAmunYjVtiTS1Fh3UvokhhLheZHa11sx9UYG1T7HatkSZWosOMtjFCEI8rzG/WmuA1XVtW6JMrUUHGexiCCGeF5lfrbH9gLVLsRq3RJhaiw4y2MUQQRwvsnS5tnYE1h7Fat0SXWot9JlsYwQgbtdIXK8pe190YO2QrPYtUaXmQp/JNkb44XWN5AVbsudFCdbuyLLYEk1qLvSZbGMEH07XWLliQ3sDa19k2eyIJDUX+mz2MUAPl0usX7K6el0cwEKsaTYbIknNhTyjfQzQw+US65eszkMXNVg7EstoQySpwVBntI0BerhcYv2S1QFW1xltiCQ1GOqMttEfD48rbF2zOg9c5GDtRyyrDVGkBkOd0Tb64+Fxha1rVtcXWLzFmmS0H5LUYKgz2kZ/PDyusHXN6gCr64z2Q5IaDHFW2+iPh8cVNi9amYstgGWW0X5IUoshzmob/e3wuMLmRSvrDSz+YMNjNtuhSS2GOKtt9KfD4QoZV63Lg5YWrwDrMZvt0KQWQ5vdPrrTYX+BnKvWBVhdZ7MbotRkaLPbR3867K+Qc9WqAKvrbHZDlJoMbXb76E+H/RVyrlpVd2DZiKWGxiqTzVClJkOa4T76y2F+hbzL1rRPsNTOmGWxGbLUZkgz3McAOcwvkXfZ8lxkASyzLDZDltoMaYb7GCCH+SXyLlseYPWdxWbIUpuhzHIfA+Qwv0TeZcsDrL6z2AxdajWEWW5jABzWl8i9bnEesrR5BVj3WWyGLrUawiy3MQIO62vkXrcwF1kawbIQS+2MWQZ7IUythjDTfQyAw/gS2dctDLD6zmArlKnV0GW7jwFwGF8i+7qFAVbfGWyFMjUbumz3McAN20sUXLgswOo7g61QpmZDl+0+Rrhhe42CC5fkAwtgmWWwFdLUbsiy3cYIN2yvUXDhknxkASyrDHZCm9oNVcbbGOGG7TUKLlxSn2Dx859vtW+EODUcqoy3MYIN02sUXTk/J1gAy6r2jRCnhkOV9T4GsGF5ibIr5wdYnde+EeLUcKiy3scINiyvUXbl7JxkASyj2vdBnloOUdbbGMGG5TWKLpzffsHah1gG+2DS/wOD1EtKBACiBQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Image.open('./mhp_extension/demo/demo_multiple_human_parsing.png')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.7.6" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "metadata": { + "collapsed": false + }, + "source": [ + "## COCO style annotation transfer" + ] + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/preprocess/mhp_extension/demo/demo_global_human_parsing.png b/preprocess/mhp_extension/demo/demo_global_human_parsing.png new file mode 100644 index 0000000000000000000000000000000000000000..afc5b8fee781e0cf5c505b0e99a86849742d9ef0 Binary files /dev/null and b/preprocess/mhp_extension/demo/demo_global_human_parsing.png differ diff --git a/preprocess/mhp_extension/demo/demo_instance_human_mask.png b/preprocess/mhp_extension/demo/demo_instance_human_mask.png new file mode 100644 index 0000000000000000000000000000000000000000..9cd5b1b2223d2bd302e347806a42e6aa09c2c5b7 Binary files /dev/null and b/preprocess/mhp_extension/demo/demo_instance_human_mask.png differ diff --git a/preprocess/mhp_extension/demo/demo_multiple_human_parsing.png b/preprocess/mhp_extension/demo/demo_multiple_human_parsing.png new file mode 100644 index 0000000000000000000000000000000000000000..28875d8dc700464b3841cfb79d7e10428684d69a Binary files /dev/null and b/preprocess/mhp_extension/demo/demo_multiple_human_parsing.png differ diff --git a/preprocess/mhp_extension/detectron2/.circleci/config.yml b/preprocess/mhp_extension/detectron2/.circleci/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..6c605889cf4ac01d3ed63f62d65a0d6ae1f6edd0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.circleci/config.yml @@ -0,0 +1,179 @@ +# Python CircleCI 2.0 configuration file +# +# Check https://circleci.com/docs/2.0/language-python/ for more details +# +version: 2 + +# ------------------------------------------------------------------------------------- +# Environments to run the jobs in +# ------------------------------------------------------------------------------------- +cpu: &cpu + docker: + - image: circleci/python:3.6.8-stretch + resource_class: medium + +gpu: &gpu + machine: + image: ubuntu-1604:201903-01 + docker_layer_caching: true + resource_class: gpu.small + +# ------------------------------------------------------------------------------------- +# Re-usable commands +# ------------------------------------------------------------------------------------- +install_python: &install_python + - run: + name: Install Python + working_directory: ~/ + command: | + pyenv install 3.6.1 + pyenv global 3.6.1 + +setup_venv: &setup_venv + - run: + name: Setup Virtual Env + working_directory: ~/ + command: | + python -m venv ~/venv + echo ". ~/venv/bin/activate" >> $BASH_ENV + . 
~/venv/bin/activate + python --version + which python + which pip + pip install --upgrade pip + +install_dep: &install_dep + - run: + name: Install Dependencies + command: | + pip install --progress-bar off -U 'git+https://github.com/facebookresearch/fvcore' + pip install --progress-bar off cython opencv-python + pip install --progress-bar off 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' + pip install --progress-bar off torch torchvision + +install_detectron2: &install_detectron2 + - run: + name: Install Detectron2 + command: | + gcc --version + pip install -U --progress-bar off -e .[dev] + python -m detectron2.utils.collect_env + +install_nvidia_driver: &install_nvidia_driver + - run: + name: Install nvidia driver + working_directory: ~/ + command: | + wget -q 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run' + sudo /bin/bash ./NVIDIA-Linux-x86_64-430.40.run -s --no-drm + nvidia-smi + +run_unittests: &run_unittests + - run: + name: Run Unit Tests + command: | + python -m unittest discover -v -s tests + +# ------------------------------------------------------------------------------------- +# Jobs to run +# ------------------------------------------------------------------------------------- +jobs: + cpu_tests: + <<: *cpu + + working_directory: ~/detectron2 + + steps: + - checkout + - <<: *setup_venv + + # Cache the venv directory that contains dependencies + - restore_cache: + keys: + - cache-key-{{ .Branch }}-ID-20200425 + + - <<: *install_dep + + - save_cache: + paths: + - ~/venv + key: cache-key-{{ .Branch }}-ID-20200425 + + - <<: *install_detectron2 + + - run: + name: isort + command: | + isort -c -sp . + - run: + name: black + command: | + black --check -l 100 . + - run: + name: flake8 + command: | + flake8 . + + - <<: *run_unittests + + gpu_tests: + <<: *gpu + + working_directory: ~/detectron2 + + steps: + - checkout + - <<: *install_nvidia_driver + + - run: + name: Install nvidia-docker + working_directory: ~/ + command: | + curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - + distribution=$(. /etc/os-release;echo $ID$VERSION_ID) + curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | \ + sudo tee /etc/apt/sources.list.d/nvidia-docker.list + sudo apt-get update && sudo apt-get install -y nvidia-docker2 + # reload the docker daemon configuration + sudo pkill -SIGHUP dockerd + + - run: + name: Launch docker + working_directory: ~/detectron2/docker + command: | + nvidia-docker build -t detectron2:v0 -f Dockerfile-circleci . 
+ nvidia-docker run -itd --name d2 detectron2:v0 + docker exec -it d2 nvidia-smi + + - run: + name: Build Detectron2 + command: | + docker exec -it d2 pip install 'git+https://github.com/facebookresearch/fvcore' + docker cp ~/detectron2 d2:/detectron2 + # This will build d2 for the target GPU arch only + docker exec -it d2 pip install -e /detectron2 + docker exec -it d2 python3 -m detectron2.utils.collect_env + docker exec -it d2 python3 -c 'import torch; assert(torch.cuda.is_available())' + + - run: + name: Run Unit Tests + command: | + docker exec -e CIRCLECI=true -it d2 python3 -m unittest discover -v -s /detectron2/tests + +workflows: + version: 2 + regular_test: + jobs: + - cpu_tests + - gpu_tests + + #nightly_test: + #jobs: + #- gpu_tests + #triggers: + #- schedule: + #cron: "0 0 * * *" + #filters: + #branches: + #only: + #- master diff --git a/preprocess/mhp_extension/detectron2/.clang-format b/preprocess/mhp_extension/detectron2/.clang-format new file mode 100644 index 0000000000000000000000000000000000000000..a757d4fff0c2f065d7d51719b52aef35ec48d04e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.clang-format @@ -0,0 +1,85 @@ +AccessModifierOffset: -1 +AlignAfterOpenBracket: AlwaysBreak +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: true +AlignOperands: false +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: false +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ForEachMacros: [ FOR_EACH, FOR_EACH_ENUMERATE, FOR_EACH_KV, FOR_EACH_R, FOR_EACH_RANGE, ] +IncludeCategories: + - Regex: '^<.*\.h(pp)?>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 +IndentCaseLabels: true +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Left +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true 
+SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never diff --git a/preprocess/mhp_extension/detectron2/.flake8 b/preprocess/mhp_extension/detectron2/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..0cc61b77a7e7005b3499394c36288dc8f3bcad39 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.flake8 @@ -0,0 +1,9 @@ +# This is an example .flake8 config, used when developing *Black* itself. +# Keep in sync with setup.cfg which is used for source packages. + +[flake8] +ignore = W503, E203, E221, C901, C408, E741 +max-line-length = 100 +max-complexity = 18 +select = B,C,E,F,W,T4,B9 +exclude = build,__init__.py diff --git a/preprocess/mhp_extension/detectron2/.github/CODE_OF_CONDUCT.md b/preprocess/mhp_extension/detectron2/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..0f7ad8bfc173eac554f0b6ef7c684861e8014bbe --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +Facebook has adopted a Code of Conduct that we expect project participants to adhere to. +Please read the [full text](https://code.fb.com/codeofconduct/) +so that you can understand what actions will and will not be tolerated. diff --git a/preprocess/mhp_extension/detectron2/.github/CONTRIBUTING.md b/preprocess/mhp_extension/detectron2/.github/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..81936dfedb495dd5cd21da2bfcf9819b97ed1dff --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/CONTRIBUTING.md @@ -0,0 +1,49 @@ +# Contributing to detectron2 + +## Issues +We use GitHub issues to track public bugs and questions. +Please make sure to follow one of the +[issue templates](https://github.com/facebookresearch/detectron2/issues/new/choose) +when reporting any issues. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## Pull Requests +We actively welcome your pull requests. + +However, if you're adding any significant features (e.g. > 50 lines), please +make sure to have a corresponding issue to discuss your motivation and proposals, +before sending a PR. We do not always accept new features, and we take the following +factors into consideration: + +1. Whether the same feature can be achieved without modifying detectron2. +Detectron2 is designed so that you can implement many extensions from the outside, e.g. +those in [projects](https://github.com/facebookresearch/detectron2/tree/master/projects). +If some part is not as extensible, you can also bring up the issue to make it more extensible. +2. Whether the feature is potentially useful to a large audience, or only to a small portion of users. +3. Whether the proposed solution has a good design / interface. +4. Whether the proposed solution adds extra mental/practical overhead to users who don't + need such feature. +5. Whether the proposed solution breaks existing APIs. + +When sending a PR, please do: + +1. If a PR contains multiple orthogonal changes, split it to several PRs. +2. If you've added code that should be tested, add tests. +3. For PRs that need experiments (e.g. adding a new model or new methods), + you don't need to update model zoo, but do provide experiment results in the description of the PR. +4. 
If APIs are changed, update the documentation. +5. Make sure your code lints with `./dev/linter.sh`. + + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## License +By contributing to detectron2, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree.
diff --git a/preprocess/mhp_extension/detectron2/.github/Detectron2-Logo-Horz.svg b/preprocess/mhp_extension/detectron2/.github/Detectron2-Logo-Horz.svg new file mode 100644 index 0000000000000000000000000000000000000000..eb2d643ddd940cd8bdb5eaad093029969ff2364c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/Detectron2-Logo-Horz.svg @@ -0,0 +1 @@ +Detectron2-Logo-Horz \ No newline at end of file
diff --git a/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000000000000000000000000000000000..5e8aaa2d3722e7e73a3d94b2b7dfc4f751d7a240 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,5 @@ + +Please select an issue template from +https://github.com/facebookresearch/detectron2/issues/new/choose . + +Otherwise your issue will be closed.
diff --git a/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md new file mode 100644 index 0000000000000000000000000000000000000000..52d299886a457480d27c54a27734a704786a1d28 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md @@ -0,0 +1,36 @@ +--- +name: "🐛 Bugs" +about: Report bugs in detectron2 +title: Please read & provide the following + +--- + +## Instructions To Reproduce the 🐛 Bug: + +1. what changes you made (`git diff`) or what code you wrote +``` + +``` +2. what exact command you run: +3. what you observed (including __full logs__): +``` + +``` +4. please simplify the steps as much as possible so they do not require additional resources to + run, such as a private dataset. + +## Expected behavior: + +If there are no obvious errors in "what you observed" provided above, +please tell us the expected behavior. + +## Environment: + +Provide your environment information using the following command: +``` +wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py +``` + +If your issue looks like an installation issue / environment issue, +please first try to solve it yourself with the instructions in +https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues
diff --git a/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/config.yml b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..c19e2490a71893c516b2bd54b887399493fadcd4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,9 @@ +# require an issue template to be chosen +blank_issues_enabled: false + +# Unexpected behaviors & bugs are split to two templates. +# When they are one template, users think "it's not a bug" and don't choose the template. +# +# But the file name is still "unexpected-problems-bugs.md" so that old references +# to this issue template still work. 
+# It's ok since this template should be a superset of "bugs.md" (unexpected behaviors is a superset of bugs) diff --git a/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/feature-request.md b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000000000000000000000000000000000..dd69a33478c85068cdd7b8b90161f97cc55c1621 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,31 @@ +--- +name: "\U0001F680Feature Request" +about: Submit a proposal/request for a new detectron2 feature + +--- + +## ๐Ÿš€ Feature +A clear and concise description of the feature proposal. + + +## Motivation & Examples + +Tell us why the feature is useful. + +Describe what the feature would look like, if it is implemented. +Best demonstrated using **code examples** in addition to words. + +## Note + +We only consider adding new features if they are relevant to many users. + +If you request implementation of research papers -- +we only consider papers that have enough significance and prevalance in the object detection field. + +We do not take requests for most projects in the `projects/` directory, +because they are research code release that is mainly for other researchers to reproduce results. + +Instead of adding features inside detectron2, +you can implement many features by [extending detectron2](https://detectron2.readthedocs.io/tutorials/extend.html). +The [projects/](https://github.com/facebookresearch/detectron2/tree/master/projects/) directory contains many of such examples. + diff --git a/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md new file mode 100644 index 0000000000000000000000000000000000000000..081156136b709b1e0ec4d27404b9cb8fa9ba1d27 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md @@ -0,0 +1,26 @@ +--- +name: "โ“How to do something?" +about: How to do something using detectron2? What does an API do? + +--- + +## โ“ How to do something using detectron2 + +Describe what you want to do, including: +1. what inputs you will provide, if any: +2. what outputs you are expecting: + +## โ“ What does an API do and how to use it? +Please link to which API or documentation you're asking about from +https://detectron2.readthedocs.io/ + + +NOTE: + +1. Only general answers are provided. + If you want to ask about "why X did not work", please use the + [Unexpected behaviors](https://github.com/facebookresearch/detectron2/issues/new/choose) issue template. + +2. About how to implement new models / new dataloader / new training logic, etc., check documentation first. + +3. We do not answer general machine learning / computer vision questions that are not specific to detectron2, such as how a model works, how to improve your training/make it converge, or what algorithm/methods can be used to achieve X. 
diff --git a/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md new file mode 100644 index 0000000000000000000000000000000000000000..bafee7a1a3897903d26e68001d3d3d2b7686015b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md @@ -0,0 +1,45 @@ +--- +name: "Unexpected behaviors" +about: Run into unexpected behaviors when using detectron2 +title: Please read & provide the following + +--- + +If you do not know the root cause of the problem, and wish someone to help you, please +post according to this template: + +## Instructions To Reproduce the Issue: + +1. what changes you made (`git diff`) or what code you wrote +``` + +``` +2. what exact command you run: +3. what you observed (including __full logs__): +``` + +``` +4. please simplify the steps as much as possible so they do not require additional resources to + run, such as a private dataset. + +## Expected behavior: + +If there are no obvious error in "what you observed" provided above, +please tell us the expected behavior. + +If you expect the model to converge / work better, note that we do not give suggestions +on how to train a new model. +Only in one of the two conditions we will help with it: +(1) You're unable to reproduce the results in detectron2 model zoo. +(2) It indicates a detectron2 bug. + +## Environment: + +Provide your environment information using the following command: +``` +wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py +``` + +If your issue looks like an installation issue / environment issue, +please first try to solve it yourself with the instructions in +https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues diff --git a/preprocess/mhp_extension/detectron2/.github/pull_request_template.md b/preprocess/mhp_extension/detectron2/.github/pull_request_template.md new file mode 100644 index 0000000000000000000000000000000000000000..4ff5ea51776ff27b3e794e366a92a455e2f06a01 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.github/pull_request_template.md @@ -0,0 +1,9 @@ +Thanks for your contribution! + +If you're sending a large PR (e.g., >50 lines), +please open an issue first about the feature / bug, and indicate how you want to contribute. + +Before submitting a PR, please run `dev/linter.sh` to lint the code. + +See https://detectron2.readthedocs.io/notes/contributing.html#pull-requests +about how we handle PRs. 
diff --git a/preprocess/mhp_extension/detectron2/.gitignore b/preprocess/mhp_extension/detectron2/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e85df4cf713e2c4a6fc02885f2b2ff3d0f104763 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/.gitignore @@ -0,0 +1,46 @@ +# output dir +output +instant_test_output +inference_test_output + + +*.jpg +*.png +*.txt +*.json +*.diff + +# compilation and distribution +__pycache__ +_ext +*.pyc +*.so +detectron2.egg-info/ +build/ +dist/ +wheels/ + +# pytorch/python/numpy formats +*.pth +*.pkl +*.npy + +# ipython/jupyter notebooks +*.ipynb +**/.ipynb_checkpoints/ + +# Editor temporaries +*.swn +*.swo +*.swp +*~ + +# editor settings +.idea +.vscode + +# project dirs +/detectron2/model_zoo/configs +/datasets +/projects/*/datasets +/models diff --git a/preprocess/mhp_extension/detectron2/GETTING_STARTED.md b/preprocess/mhp_extension/detectron2/GETTING_STARTED.md new file mode 100644 index 0000000000000000000000000000000000000000..acaf13f02c906b45ffc2f49ee5a0ce01d82b4786 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/GETTING_STARTED.md @@ -0,0 +1,79 @@ +## Getting Started with Detectron2 + +This document provides a brief intro of the usage of builtin command-line tools in detectron2. + +For a tutorial that involves actual coding with the API, +see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +which covers how to run inference with an +existing model, and how to train a builtin model on a custom dataset. + +For more advanced tutorials, refer to our [documentation](https://detectron2.readthedocs.io/tutorials/extend.html). + + +### Inference Demo with Pre-trained Models + +1. Pick a model and its config file from + [model zoo](MODEL_ZOO.md), + for example, `mask_rcnn_R_50_FPN_3x.yaml`. +2. We provide `demo.py` that is able to run builtin standard models. Run it with: +``` +cd demo/ +python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + --input input1.jpg input2.jpg \ + [--other-options] + --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl +``` +The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation. +This command will run the inference and show visualizations in an OpenCV window. + +For details of the command line arguments, see `demo.py -h` or look at its source code +to understand its behavior. Some common arguments are: +* To run __on your webcam__, replace `--input files` with `--webcam`. +* To run __on a video__, replace `--input files` with `--video-input video.mp4`. +* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`. +* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`. + + +### Training & Evaluation in Command Line + +We provide a script in "tools/{,plain_}train_net.py", that is made to train +all the configs provided in detectron2. +You may want to use it as a reference to write your own training script. + +To train a model with "train_net.py", first +setup the corresponding datasets following +[datasets/README.md](./datasets/README.md), +then run: +``` +cd tools/ +./train_net.py --num-gpus 8 \ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml +``` + +The configs are made for 8-GPU training. 
+To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.: +``` +./train_net.py \ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ + --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 +``` + +For most models, CPU training is not supported. + +To evaluate a model's performance, use +``` +./train_net.py \ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ + --eval-only MODEL.WEIGHTS /path/to/checkpoint_file +``` +For more options, see `./train_net.py -h`. + +### Use Detectron2 APIs in Your Code + +See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +to learn how to use detectron2 APIs to: +1. run inference with an existing model +2. train a builtin model on a custom dataset + +See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/master/projects) +for more ways to build your project on detectron2. diff --git a/preprocess/mhp_extension/detectron2/INSTALL.md b/preprocess/mhp_extension/detectron2/INSTALL.md new file mode 100644 index 0000000000000000000000000000000000000000..3985f8ae4f5ecde26b310b4ab01c49b922f742e9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/INSTALL.md @@ -0,0 +1,184 @@ +## Installation + +Our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +has step-by-step instructions that install detectron2. +The [Dockerfile](docker) +also installs detectron2 with a few simple commands. + +### Requirements +- Linux or macOS with Python โ‰ฅ 3.6 +- PyTorch โ‰ฅ 1.4 +- [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. + You can install them together at [pytorch.org](https://pytorch.org) to make sure of this. +- OpenCV, optional, needed by demo and visualization +- pycocotools: `pip install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'` + + +### Build Detectron2 from Source + +gcc & g++ โ‰ฅ 5 are required. [ninja](https://ninja-build.org/) is recommended for faster build. +After having them, run: +``` +python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' +# (add --user if you don't have permission) + +# Or, to install it from a local clone: +git clone https://github.com/facebookresearch/detectron2.git +python -m pip install -e detectron2 + +# Or if you are on macOS +# CC=clang CXX=clang++ python -m pip install -e . +``` + +To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the +old build first. You often need to rebuild detectron2 after reinstalling PyTorch. + +### Install Pre-Built Detectron2 (Linux only) +``` +# for CUDA 10.1: +python -m pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html +``` +You can replace cu101 with "cu{100,92}" or "cpu". + +Note that: +1. Such installation has to be used with certain version of official PyTorch release. + See [releases](https://github.com/facebookresearch/detectron2/releases) for requirements. + It will not work with a different version of PyTorch or a non-official build of PyTorch. +2. Such installation is out-of-date w.r.t. master branch of detectron2. It may not be + compatible with the master branch of a research project that uses detectron2 (e.g. those in + [projects](projects) or [meshrcnn](https://github.com/facebookresearch/meshrcnn/)). 
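A minimal sketch, using only standard `torch` attributes, that prints the local versions these notes ask you to match before installing a pre-built wheel:

```python
import torch

# A pre-built detectron2 wheel must match both the PyTorch release and the
# CUDA version that PyTorch itself was built with (see the release notes above).
print("PyTorch:", torch.__version__)
print("CUDA used to build PyTorch:", torch.version.cuda)
print("CUDA available at runtime:", torch.cuda.is_available())
```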
+
+### Common Installation Issues
+
+If you run into issues using the pre-built detectron2, please uninstall it and try building it from source.
+
+Click each issue for its solutions:
+
+ +Undefined torch/aten/caffe2 symbols, or segmentation fault immediately when running the library. + +
+
+This usually happens when detectron2 or torchvision is not
+compiled with the version of PyTorch you're running.
+
+Pre-built torchvision or detectron2 must be used with the corresponding official release of pytorch.
+If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them
+following [pytorch.org](http://pytorch.org) so that the versions match.
+
+If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases)
+to see the corresponding pytorch version required for each pre-built detectron2.
+
+If the error comes from detectron2 or torchvision that you built manually from source,
+remove the files you built (`build/`, `**/*.so`) and rebuild them so they pick up the version of pytorch currently in your environment.
+
+If you cannot resolve this problem, please include the output of `gdb -ex "r" -ex "bt" -ex "quit" --args python -m detectron2.utils.collect_env`
+in your issue.
+ +
+ +Undefined C++ symbols (e.g. `GLIBCXX`) or C++ symbols not found. + +
+Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime.
+
+This often happens with an old anaconda installation.
+Try `conda update libgcc`, then rebuild detectron2.
+
+The fundamental solution is to run the code with a proper C++ runtime.
+One way is to use `LD_PRELOAD=/path/to/libstdc++.so`.
+
+ +
+ +"Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available". + +
+CUDA is not found when building detectron2.
+You should make sure
+
+```
+python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)'
+```
+
+prints valid outputs at the time you build detectron2.
+
+Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config.
+ +
+ +"invalid device function" or "no kernel image is available for execution". + +
+Two possibilities:
+
+* You build detectron2 with one version of CUDA but run it with a different version.
+
+  To check whether it is the case,
+  use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
+  In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
+  to contain cuda libraries of the same version.
+
+  When they are inconsistent,
+  you need to either install a different build of PyTorch (or build by yourself)
+  to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
+
+* Detectron2 or PyTorch/torchvision is not built for the correct GPU architecture (compute capability).
+
+  The GPU architecture for PyTorch/detectron2/torchvision is available in the "architecture flags" in
+  `python -m detectron2.utils.collect_env`.
+
+  The GPU architecture flags of detectron2/torchvision by default match the GPU model detected
+  during compilation. This means the compiled code may not work on a different GPU model.
+  To overwrite the GPU architecture for detectron2/torchvision, use the `TORCH_CUDA_ARCH_LIST` environment variable during compilation.
+
+  For example, `export TORCH_CUDA_ARCH_LIST=6.0,7.0` makes it compile for both P100s and V100s.
+  Visit [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus) to find out
+  the correct compute capability number for your device.
+
+ +
+ +Undefined CUDA symbols; cannot open libcudart.so; other nvcc failures. + +
+The version of NVCC you use to build detectron2 or torchvision does +not match the version of CUDA you are running with. +This often happens when using anaconda's CUDA runtime. + +Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. +In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" +to contain cuda libraries of the same version. + +When they are inconsistent, +you need to either install a different build of PyTorch (or build by yourself) +to match your local CUDA installation, or install a different version of CUDA to match PyTorch. +
+ + +
+ +"ImportError: cannot import name '_C'". + +
+Please build and install detectron2 following the instructions above. + +If you are running code from detectron2's root directory, `cd` to a different one. +Otherwise you may not import the code that you installed. +
+ +
+ +ONNX conversion segfault after some "TraceWarning". + +
+The ONNX package was compiled with a compiler that is too old.
+
+Please build and install ONNX from its source code using a compiler
+whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`).
diff --git a/preprocess/mhp_extension/detectron2/LICENSE b/preprocess/mhp_extension/detectron2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d4836895578c791dffd78d07d83a72a961e270a4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2019 - present, Facebook, Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/preprocess/mhp_extension/detectron2/MODEL_ZOO.md b/preprocess/mhp_extension/detectron2/MODEL_ZOO.md new file mode 100644 index 0000000000000000000000000000000000000000..07b81ffffa37d97b10f8d39f934b9f62bcb51cc1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/MODEL_ZOO.md @@ -0,0 +1,903 @@ +# Detectron2 Model Zoo and Baselines + +## Introduction + +This file documents a large collection of baselines trained +with detectron2 in Sep-Oct, 2019. +All numbers were obtained on [Big Basin](https://engineering.fb.com/data-center-engineering/introducing-big-basin-our-next-generation-ai-hardware/) +servers with 8 NVIDIA V100 GPUs & NVLink. The software in use were PyTorch 1.3, CUDA 9.2, cuDNN 7.4.2 or 7.6.3. +You can access these models from code using [detectron2.model_zoo](https://detectron2.readthedocs.io/modules/model_zoo.html) APIs. + +In addition to these official baseline models, you can find more models in [projects/](projects/). + +#### How to Read the Tables +* The "Name" column contains a link to the config file. Running `tools/train_net.py` with this config file + and 8 GPUs will reproduce the model. +* Training speed is averaged across the entire training. + We keep updating the speed with latest version of detectron2/pytorch/etc., + so they might be different from the `metrics` file. + Training speed for multi-machine jobs is not provided. +* Inference speed is measured by `tools/train_net.py --eval-only`, or [inference_on_dataset()](https://detectron2.readthedocs.io/modules/evaluation.html#detectron2.evaluation.inference_on_dataset), + with batch size 1 in detectron2 directly. + Measuring it with your own code will likely introduce other overhead. + Actual deployment in production should in general be faster than the given inference + speed due to more optimizations. +* The *model id* column is provided for ease of reference. + To check downloaded file integrity, any model on this page contains its md5 prefix in its file name. +* Training curves and other statistics can be found in `metrics` for each model. + +#### Common Settings for COCO Models +* All COCO models were trained on `train2017` and evaluated on `val2017`. +* The default settings are __not directly comparable__ with Detectron's standard settings. + For example, our default training data augmentation uses scale jittering in addition to horizontal flipping. + + To make fair comparisons with Detectron's settings, see + [Detectron1-Comparisons](configs/Detectron1-Comparisons/) for accuracy comparison, + and [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html) + for speed comparison. +* For Faster/Mask R-CNN, we provide baselines based on __3 different backbone combinations__: + * __FPN__: Use a ResNet+FPN backbone with standard conv and FC heads for mask and box prediction, + respectively. It obtains the best + speed/accuracy tradeoff, but the other two are still useful for research. + * __C4__: Use a ResNet conv4 backbone with conv5 head. The original baseline in the Faster R-CNN paper. 
+ * __DC5__ (Dilated-C5): Use a ResNet conv5 backbone with dilations in conv5, and standard conv and FC heads + for mask and box prediction, respectively. + This is used by the Deformable ConvNet paper. +* Most models are trained with the 3x schedule (~37 COCO epochs). + Although 1x models are heavily under-trained, we provide some ResNet-50 models with the 1x (~12 COCO epochs) + training schedule for comparison when doing quick research iteration. + +#### ImageNet Pretrained Models + +We provide backbone models pretrained on ImageNet-1k dataset. +These models have __different__ format from those provided in Detectron: we do not fuse BatchNorm into an affine layer. +* [R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl): converted copy of [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks) model. +* [R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl): converted copy of [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks) model. +* [X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl): ResNeXt-101-32x8d model trained with Caffe2 at FB. + +Pretrained models in Detectron's format can still be used. For example: +* [X-152-32x8d-IN5k.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl): + ResNeXt-152-32x8d model trained on ImageNet-5k with Caffe2 at FB (see ResNeXt paper for details on ImageNet-5k). +* [R-50-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47261647/R-50-GN.pkl): + ResNet-50 with Group Normalization. +* [R-101-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47592356/R-101-GN.pkl): + ResNet-101 with Group Normalization. + +Torchvision's ResNet models can be used after converted by [this script](tools/convert-torchvision-to-d2.py). + +#### License + +All models available for download through this document are licensed under the +[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/). + +### COCO Object Detection Baselines + +#### Faster R-CNN: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50-C4 | 1x | 0.551 | 0.102 | 4.8 | 35.7 | 137257644 | model \| metrics |
| R50-DC5 | 1x | 0.380 | 0.068 | 5.0 | 37.3 | 137847829 | model \| metrics |
| R50-FPN | 1x | 0.210 | 0.038 | 3.0 | 37.9 | 137257794 | model \| metrics |
| R50-C4 | 3x | 0.543 | 0.104 | 4.8 | 38.4 | 137849393 | model \| metrics |
| R50-DC5 | 3x | 0.378 | 0.070 | 5.0 | 39.0 | 137849425 | model \| metrics |
| R50-FPN | 3x | 0.209 | 0.038 | 3.0 | 40.2 | 137849458 | model \| metrics |
| R101-C4 | 3x | 0.619 | 0.139 | 5.9 | 41.1 | 138204752 | model \| metrics |
| R101-DC5 | 3x | 0.452 | 0.086 | 6.1 | 40.6 | 138204841 | model \| metrics |
| R101-FPN | 3x | 0.286 | 0.051 | 4.1 | 42.0 | 137851257 | model \| metrics |
| X101-FPN | 3x | 0.638 | 0.098 | 6.7 | 43.0 | 139173657 | model \| metrics |

#### RetinaNet:
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50 | 1x | 0.200 | 0.055 | 3.9 | 36.5 | 137593951 | model \| metrics |
| R50 | 3x | 0.201 | 0.055 | 3.9 | 37.9 | 137849486 | model \| metrics |
| R101 | 3x | 0.280 | 0.068 | 5.1 | 39.9 | 138363263 | model \| metrics |

#### RPN & Fast R-CNN:
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | prop. AR | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| RPN R50-C4 | 1x | 0.130 | 0.034 | 1.5 | | 51.6 | 137258005 | model \| metrics |
| RPN R50-FPN | 1x | 0.186 | 0.032 | 2.7 | | 58.0 | 137258492 | model \| metrics |
| Fast R-CNN R50-FPN | 1x | 0.140 | 0.029 | 2.6 | 37.8 | | 137635226 | model \| metrics |

### COCO Instance Segmentation Baselines with Mask R-CNN
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50-C4 | 1x | 0.584 | 0.110 | 5.2 | 36.8 | 32.2 | 137259246 | model \| metrics |
| R50-DC5 | 1x | 0.471 | 0.076 | 6.5 | 38.3 | 34.2 | 137260150 | model \| metrics |
| R50-FPN | 1x | 0.261 | 0.043 | 3.4 | 38.6 | 35.2 | 137260431 | model \| metrics |
| R50-C4 | 3x | 0.575 | 0.111 | 5.2 | 39.8 | 34.4 | 137849525 | model \| metrics |
| R50-DC5 | 3x | 0.470 | 0.076 | 6.5 | 40.0 | 35.9 | 137849551 | model \| metrics |
| R50-FPN | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 | model \| metrics |
| R101-C4 | 3x | 0.652 | 0.145 | 6.3 | 42.6 | 36.7 | 138363239 | model \| metrics |
| R101-DC5 | 3x | 0.545 | 0.092 | 7.6 | 41.9 | 37.3 | 138363294 | model \| metrics |
| R101-FPN | 3x | 0.340 | 0.056 | 4.6 | 42.9 | 38.6 | 138205316 | model \| metrics |
| X101-FPN | 3x | 0.690 | 0.103 | 7.2 | 44.3 | 39.5 | 139653917 | model \| metrics |

### COCO Person Keypoint Detection Baselines with Keypoint R-CNN
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | kp. AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50-FPN | 1x | 0.315 | 0.072 | 5.0 | 53.6 | 64.0 | 137261548 | model \| metrics |
| R50-FPN | 3x | 0.316 | 0.066 | 5.0 | 55.4 | 65.5 | 137849621 | model \| metrics |
| R101-FPN | 3x | 0.390 | 0.076 | 6.1 | 56.4 | 66.1 | 138363331 | model \| metrics |
| X101-FPN | 3x | 0.738 | 0.121 | 8.7 | 57.3 | 66.0 | 139686956 | model \| metrics |

### COCO Panoptic Segmentation Baselines with Panoptic FPN
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50-FPN | 1x | 0.304 | 0.053 | 4.8 | 37.6 | 34.7 | 39.4 | 139514544 | model \| metrics |
| R50-FPN | 3x | 0.302 | 0.053 | 4.8 | 40.0 | 36.5 | 41.5 | 139514569 | model \| metrics |
| R101-FPN | 3x | 0.392 | 0.066 | 6.0 | 42.4 | 38.5 | 43.0 | 139514519 | model \| metrics |

### LVIS Instance Segmentation Baselines with Mask R-CNN

Mask R-CNN baselines on the [LVIS dataset](https://lvisdataset.org), v0.5.
These baselines are described in Table 3(c) of the [LVIS paper](https://arxiv.org/abs/1908.03195).

NOTE: the 1x schedule here has the same number of __iterations__ as the COCO 1x baselines.
They are roughly 24 epochs of LVISv0.5 data.
The final results of these configs have large variance across different runs.
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50-FPN | 1x | 0.292 | 0.107 | 7.1 | 23.6 | 24.4 | 144219072 | model \| metrics |
| R101-FPN | 1x | 0.371 | 0.114 | 7.8 | 25.6 | 25.9 | 144219035 | model \| metrics |
| X101-FPN | 1x | 0.712 | 0.151 | 10.2 | 26.7 | 27.1 | 144219108 | model \| metrics |

### Cityscapes & Pascal VOC Baselines

Simple baselines for
* Mask R-CNN on Cityscapes instance segmentation (initialized from COCO pre-training, then trained on Cityscapes fine annotations only)
* Faster R-CNN on PASCAL VOC object detection (trained on VOC 2007 train+val + VOC 2012 train+val, tested on VOC 2007 using 11-point interpolated AP)
| Name | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | box AP50 | mask AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R50-FPN, Cityscapes | 0.240 | 0.078 | 4.4 | | | 36.5 | 142423278 | model \| metrics |
| R50-C4, VOC | 0.537 | 0.081 | 4.8 | 51.9 | 80.3 | | 142202221 | model \| metrics |

### Other Settings

Ablations for Deformable Conv and Cascade R-CNN:
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Baseline R50-FPN | 1x | 0.261 | 0.043 | 3.4 | 38.6 | 35.2 | 137260431 | model \| metrics |
| Deformable Conv | 1x | 0.342 | 0.048 | 3.5 | 41.5 | 37.5 | 138602867 | model \| metrics |
| Cascade R-CNN | 1x | 0.317 | 0.052 | 4.0 | 42.1 | 36.4 | 138602847 | model \| metrics |
| Baseline R50-FPN | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 | model \| metrics |
| Deformable Conv | 3x | 0.349 | 0.047 | 3.5 | 42.7 | 38.5 | 144998336 | model \| metrics |
| Cascade R-CNN | 3x | 0.328 | 0.053 | 4.0 | 44.3 | 38.5 | 144998488 | model \| metrics |

Ablations for normalization methods, and a few models trained from scratch following [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883).
(Note: The baseline uses `2fc` head while the others use [`4conv1fc` head](https://arxiv.org/abs/1803.08494))
| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Baseline R50-FPN | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 | model \| metrics |
| GN | 3x | 0.356 | 0.069 | 7.3 | 42.6 | 38.6 | 138602888 | model \| metrics |
| SyncBN | 3x | 0.371 | 0.053 | 5.5 | 41.9 | 37.8 | 169527823 | model \| metrics |
| GN (from scratch) | 3x | 0.400 | 0.069 | 9.8 | 39.9 | 36.6 | 138602908 | model \| metrics |
| GN (from scratch) | 9x | N/A | 0.070 | 9.8 | 43.7 | 39.6 | 183808979 | model \| metrics |
| SyncBN (from scratch) | 9x | N/A | 0.055 | 7.2 | 43.6 | 39.3 | 184226666 | model \| metrics |

A few very large models trained for a long time, for demo purposes. They are trained using multiple machines:
| Name | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id | download |
| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| Panoptic FPN R101 | 0.107 | 11.4 | 47.4 | 41.3 | 46.1 | 139797668 | model \| metrics |
| Mask R-CNN X152 | 0.242 | 15.1 | 50.2 | 44.0 | | 18131413 | model \| metrics |
| above + test-time aug. | | | 51.9 | 45.9 | | | |
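The `detectron2.model_zoo` API mentioned in the introduction can load any baseline above by its config path. A minimal inference sketch, assuming detectron2 is installed; the config name below is the R50-FPN 3x Mask R-CNN row (model id 137849600), and `input.jpg` is a placeholder image path:

```python
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

CONFIG = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(CONFIG))
# Pulls the checkpoint listed in the "download" column for this row.
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(CONFIG)
cfg.MODEL.DEVICE = "cpu"  # inference (not training) also runs without a GPU

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))  # expects a BGR image, as read by OpenCV
print(outputs["instances"].pred_classes)
```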
diff --git a/preprocess/mhp_extension/detectron2/README.md b/preprocess/mhp_extension/detectron2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1fbb95b39ce9e9c0eab83079319a9298fca438b1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/README.md @@ -0,0 +1,56 @@ + + +Detectron2 is Facebook AI Research's next generation software system +that implements state-of-the-art object detection algorithms. +It is a ground-up rewrite of the previous version, +[Detectron](https://github.com/facebookresearch/Detectron/), +and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/). + +
+ +
+ +### What's New +* It is powered by the [PyTorch](https://pytorch.org) deep learning framework. +* Includes more features such as panoptic segmentation, densepose, Cascade R-CNN, rotated bounding boxes, etc. +* Can be used as a library to support [different projects](projects/) on top of it. + We'll open source more research projects in this way. +* It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html). + +See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/) +to see more demos and learn about detectron2. + +## Installation + +See [INSTALL.md](INSTALL.md). + +## Quick Start + +See [GETTING_STARTED.md](GETTING_STARTED.md), +or the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5). + +Learn more at our [documentation](https://detectron2.readthedocs.org). +And see [projects/](projects/) for some projects that are built on top of detectron2. + +## Model Zoo and Baselines + +We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md). + + +## License + +Detectron2 is released under the [Apache 2.0 license](LICENSE). + +## Citing Detectron2 + +If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry. + +```BibTeX +@misc{wu2019detectron2, + author = {Yuxin Wu and Alexander Kirillov and Francisco Massa and + Wan-Yen Lo and Ross Girshick}, + title = {Detectron2}, + howpublished = {\url{https://github.com/facebookresearch/detectron2}}, + year = {2019} +} +``` diff --git a/preprocess/mhp_extension/detectron2/configs/Base-RCNN-C4.yaml b/preprocess/mhp_extension/detectron2/configs/Base-RCNN-C4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fbf34a0ea57a587e09997edd94c4012d69d0b6ad --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Base-RCNN-C4.yaml @@ -0,0 +1,18 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RPN: + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "Res5ROIHeads" +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml b/preprocess/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c0d6d16bdaf532f09e4976f0aa240a49e748da27 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml @@ -0,0 +1,31 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RESNETS: + OUT_FEATURES: ["res5"] + RES5_DILATION: 2 + RPN: + IN_FEATURES: ["res5"] + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["res5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/Base-RCNN-FPN.yaml b/preprocess/mhp_extension/detectron2/configs/Base-RCNN-FPN.yaml new file 
mode 100644 index 0000000000000000000000000000000000000000..3e020f2e7b2f26765be317f907126a1556621abf --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Base-RCNN-FPN.yaml @@ -0,0 +1,42 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. + POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/Base-RetinaNet.yaml b/preprocess/mhp_extension/detectron2/configs/Base-RetinaNet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..12ec9d2fc20cc0438f17bde2c5f6fbee9496c1b0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Base-RetinaNet.yaml @@ -0,0 +1,24 @@ +MODEL: + META_ARCHITECTURE: "RetinaNet" + BACKBONE: + NAME: "build_retinanet_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"] + FPN: + IN_FEATURES: ["res3", "res4", "res5"] + RETINANET: + IOU_THRESHOLDS: [0.4, 0.5] + IOU_LABELS: [0, -1, 1] +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..773ac10e87c626760d00d831bf664ce9ff073c49 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + LOAD_PROPOSALS: True + RESNETS: + DEPTH: 50 + PROPOSAL_GENERATOR: + NAME: "PrecomputedProposals" +DATASETS: + TRAIN: ("coco_2017_train",) + PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_train_box_proposals_21bc3a.pkl", ) + TEST: ("coco_2017_val",) + PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) +DATALOADER: + # proposals are part of the dataset_dicts, and take a lot of RAM + 
NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db142cd671c1841b4f64cf130bee7f7954ecdd28 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bceb6b343618d8cd9a6c414ff9eb86ab31cc230a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..57a098f53ee8c54ecfa354cc96efefd890dc1b72 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f96130105c3ba6ab393e0932870903875f5cb732 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc51bce390a85ee3529ffdcebde05748e1646be0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0fe96f57febdac5790ea4cec168fa4b97ac4807a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33fadeb87d1ef67ab2b55926b9a652ab4ac4a27d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3262019a1211b910d3b371569199ed1afaacf6a4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..41395182bf5c9dd8ab1241c4414068817298d554 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c9b5ab77157baa581d90d9847c045c19ed6ffa3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,13 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: False + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4abb1b9a547957aa6afc0b29129e00f89cf98d59 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..4a24ce3a9a108a8792e18c8aabfb7b712f0d3725 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b5412d4a7aef1d6c3f7c1e34f94007de639b833 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e04821156b0376ba5215d5ce5b7010a36b43e6a1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_C4_1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + META_ARCHITECTURE: "ProposalNetwork" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + RPN: + PRE_NMS_TOPK_TEST: 12000 + POST_NMS_TOPK_TEST: 2000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc9c95203b1c3c9cd9bb9876bb8d9a5dd9b31d9a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "ProposalNetwork" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + RPN: + POST_NMS_TOPK_TEST: 2000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1a94cc45a0f2aaa8c92e14871c553b736545e327 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67b70cf4be8c19f5dc735b6f55a8690698f34b69 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git 
a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1935a302d2d0fa7f69553b3fd50b5a7082c6c0d1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9aeb4eac38026dbb867e799f9fd3a8d8eb3af80 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..38ed867d897dfec839cbcf11a2e2dc8abb92f07c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b13eefab2a049c48d94d5051c82ceb6dbde40579 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d401016358f967f6619d88b1c9bd5673a1cdeba8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d50fb866ca7811a87b42555c7213f88e00bf6df1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ 
-0,0 +1,6 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..be7d06b8e0f032ee7fcaabd7c122158518489fd2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d14c63f74383bfc308750f51d51344398b02a239 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,13 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e03944a42d2e497da5ceca17c8fda797dac3f82 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml @@ -0,0 +1,15 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + KEYPOINT_ON: True + ROI_HEADS: + NUM_CLASSES: 1 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP improves) when using plain L1 loss + RPN: + # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. + # 1000 proposals per-image is found to hurt box AP. + # Therefore we increase it to 1500 per-image. 
+ POST_NMS_TOPK_TRAIN: 1500 +DATASETS: + TRAIN: ("keypoints_coco_2017_train",) + TEST: ("keypoints_coco_2017_val",) diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9309535c57a1aa7d23297aac80a9bd78a6c79fcc --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7bf85cf745b53b3e7ab28fe94b7f4f9e7fe6e335 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a07f243f650a497b9372501e3face75194cf0941 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4bfa20a98c0a65c6bd60e93b07e8f4b7d92a867 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..755c12018c5db8ca456d5e7fa8cbd18d90f97527 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + MASK_ON: True + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_separated",) + TEST: ("coco_2017_val_panoptic_separated",) diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml 
new file mode 100644 index 0000000000000000000000000000000000000000..0e01f6fb31e9b00b1857b7de3b5074184d1f4a21 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6afa2c1cc92495309ed1553a17359fe5d7d6566e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b956b3f673e78649184fe2c50e2700b3f1f14794 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml b/preprocess/mhp_extension/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1a7aaeb961581ed9492c4cfe5a69a1eb60495b3e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml @@ -0,0 +1,27 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + # WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + # For better, more stable performance initialize from COCO + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" + MASK_ON: True + ROI_HEADS: + NUM_CLASSES: 8 +# This is similar to the setting used in Mask R-CNN paper, Appendix A +# But there are some differences, e.g., we did not initialize the output +# layer using the corresponding classes from COCO +INPUT: + MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 2048 + MAX_SIZE_TEST: 2048 +DATASETS: + TRAIN: ("cityscapes_fine_instance_seg_train",) + TEST: ("cityscapes_fine_instance_seg_val",) +SOLVER: + BASE_LR: 0.01 + STEPS: (18000,) + MAX_ITER: 24000 + IMS_PER_BATCH: 8 +TEST: + EVAL_PERIOD: 8000 diff --git a/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/README.md b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a90ed9e433a00b8b9f43961d7a2696d5b9013127 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/README.md @@ -0,0 +1,83 @@ + +Detectron2 model zoo's experimental settings and a few implementation details are different from Detectron. + +The differences in implementation details are shared in +[Compatibility with Other Libraries](../../docs/notes/compatibility.md). 
+ +The differences in the model zoo's experimental settings include:
+* Use scale augmentation during training. This improves AP with lower training cost.
+* Use L1 loss instead of smooth L1 loss for simplicity. This sometimes improves box AP but may affect other AP.
+* Use `POOLER_SAMPLING_RATIO=0` instead of 2. This does not significantly affect AP.
+* Use `ROIAlignV2`. This does not significantly affect AP.
+ +In this directory, we provide a few configs that __do not__ have the above changes.
+They mimic Detectron's behavior as closely as possible and provide a fair comparison of accuracy and speed against Detectron (a short sketch of the reverted config keys follows the table below).
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | kp. AP | model id | download |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| Faster R-CNN | 1x | 0.219 | 0.038 | 3.1 | 36.9 | | | 137781054 | model \| metrics |
+| Keypoint R-CNN | 1x | 0.313 | 0.071 | 5.0 | 53.1 | | 64.2 | 137781195 | model \| metrics |
+| Mask R-CNN | 1x | 0.273 | 0.043 | 3.4 | 37.8 | 34.9 | | 137781281 | model \| metrics |
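For orientation, the bulleted differences above correspond to the following config keys. This is an illustrative sketch assembled from the `*_noaug_1x.yaml` and `keypoint_rcnn_R_50_FPN_1x.yaml` files added below, not an additional shipped config:

```yaml
# Settings the Detectron1-comparison configs use to revert detectron2's
# defaults back to Detectron1 behavior.
MODEL:
  RPN:
    SMOOTH_L1_BETA: 0.1111      # Detectron1's smooth L1 beta (detectron2 defaults to plain L1)
  ROI_BOX_HEAD:
    SMOOTH_L1_BETA: 1.0
    POOLER_SAMPLING_RATIO: 2    # detectron2's default is 0
    POOLER_TYPE: "ROIAlign"     # instead of ROIAlignV2
INPUT:
  MIN_SIZE_TRAIN: (800, )       # no scale augmentation
```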
+ +## Comparisons:
+ +* Faster R-CNN: Detectron's AP is 36.7, similar to ours.
+* Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a [bug](https://github.com/facebookresearch/Detectron/issues/459) in Detectron leads to a drop in box AP, which can be compensated for with some parameter tuning.
+* Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. Ours is 1 AP higher in mask AP, due to a more correct implementation.
+ +For speed comparison, see [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html).
diff --git a/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ce77f137fa2c4e5254a62b58c18b8b76096f2aa --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. + RPN: + SMOOTH_L1_BETA: 0.1111 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, )
diff --git a/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aacf868ba5290c752031c130a2081af48afc0808 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,27 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + RPN: + SMOOTH_L1_BETA: 0.1111 + # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2 + # 1000 proposals per-image is found to hurt box AP. + # Therefore we increase it to 1500 per-image. + POST_NMS_TOPK_TRAIN: 1500 +DATASETS: + TRAIN: ("keypoints_coco_2017_train",) + TEST: ("keypoints_coco_2017_val",)
diff --git a/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4ea86a8d8e2cd3e51cbc7311b0d00710c07d01f6 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. 
+ RPN: + SMOOTH_L1_BETA: 0.1111 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_MASK_HEAD: + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0c3a1bbc0a09e1384de522f30c443ba1e36fafa --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64b4caa4ef2b284782367ea702e1ae6653472630 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8b822c6c006ba642f4caf9b55e7983f6797427a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml @@ -0,0 +1,23 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..abb33b618932e94b66239945ac892f4c84a6e8f8 --- /dev/null +++ 
b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + RPN: + POST_NMS_TOPK_TRAIN: 2000 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e2201ad5c46ded91ccfa47b7698a521625c5e447 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,15 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc117f6b5e3e51558ec2f01b73c5365622e5ce25 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml @@ -0,0 +1,36 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 152 + DEFORM_ON_PER_STAGE: [False, True, True, True] + ROI_HEADS: + NAME: "CascadeROIHeads" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NUM_CONV: 8 + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + IMS_PER_BATCH: 128 + STEPS: (35000, 45000) + MAX_ITER: 50000 + BASE_LR: 0.16 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 + CROP: + ENABLED: True +TEST: + EVAL_PERIOD: 2500 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv_parsing.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv_parsing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..544f58f620607ba6eb592593a2f85243c8670451 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv_parsing.yaml @@ -0,0 +1,42 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True +# WEIGHTS: "catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" + WEIGHTS: "model_0039999_e76410.pkl" + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 152 + DEFORM_ON_PER_STAGE: [False, True, True, True] + ROI_HEADS: + NAME: "CascadeROIHeads" + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NUM_CONV: 8 + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: +# IMS_PER_BATCH: 128 + IMS_PER_BATCH: 1 + STEPS: (35000, 45000) + MAX_ITER: 50000 + BASE_LR: 0.16 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 
+ CROP: + ENABLED: True +TEST: + EVAL_PERIOD: 2500 +DATASETS: + TRAIN: ("CIHP_train","VIP_trainval") + TEST: ("CIHP_val",) diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/demo.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/demo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bbf9685f5921c7aa1c967b4e7da88aaf061a72e2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/demo.yaml @@ -0,0 +1,25 @@ +_BASE_: "cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml" +MODEL: + MASK_ON: True + ROI_HEADS: + NMS_THRESH_TEST: 0.95 + SCORE_THRESH_TEST: 0.5 + NUM_CLASSES: 1 +SOLVER: + IMS_PER_BATCH: 1 + STEPS: (30000, 45000) + MAX_ITER: 50000 + BASE_LR: 0.02 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 + CROP: + ENABLED: True +TEST: + AUG: + ENABLED: True +DATASETS: + TRAIN: ("demo_train",) + TEST: ("demo_val",) +OUTPUT_DIR: "../../data/DemoDataset/detectron2_prediction" diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c3b767ff473bbab7225cc8a4a92608543d78246 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + CLS_AGNOSTIC_MASK: True diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..04ff988d073ef9169ee4ca2cbce0d6f030c15232 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 + DEFORM_MODULATED: False diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..68c0ca58d7df97ca728c339da0ca9828fe6be318 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 + DEFORM_MODULATED: False +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..74d274e5a529b5a8afe186940868f9d48c6112b3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,21 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-50-GN" + MASK_ON: True + RESNETS: + DEPTH: 50 + NORM: 
"GN" + STRIDE_IN_1X1: False + FPN: + NORM: "GN" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + ROI_MASK_HEAD: + NORM: "GN" +SOLVER: + # 3x schedule + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..11ebb076ba529f26c71a0d972e96ca4c2d6a830b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml @@ -0,0 +1,24 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + STRIDE_IN_1X1: True + FPN: + NORM: "SyncBN" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "SyncBN" + ROI_MASK_HEAD: + NORM: "SyncBN" +SOLVER: + # 3x schedule + STEPS: (210000, 250000) + MAX_ITER: 270000 +TEST: + PRECISE_BN: + ENABLED: True diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..34016cea3ca9d7fb69ef4fe01d6b47ee8690a13b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml @@ -0,0 +1,26 @@ +# A large PanopticFPN for demo purposes. +# Use GN on backbone to support semantic seg. +# Use Cascade + Deform Conv to improve localization. +_BASE_: "../COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-101-GN" + RESNETS: + DEPTH: 101 + NORM: "GN" + DEFORM_ON_PER_STAGE: [False, True, True, True] + STRIDE_IN_1X1: False + FPN: + NORM: "GN" + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + STEPS: (105000, 125000) + MAX_ITER: 135000 + IMS_PER_BATCH: 32 + BASE_LR: 0.04 diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/parsing_finetune_cihp.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/parsing_finetune_cihp.yaml new file mode 100644 index 0000000000000000000000000000000000000000..766f46aa0cd3a80efb330052bdb695bebb5efb7d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/parsing_finetune_cihp.yaml @@ -0,0 +1,24 @@ +_BASE_: "cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "model_0039999_e76410.pkl" + ROI_HEADS: + NUM_CLASSES: 1 +SOLVER: + IMS_PER_BATCH: 16 + STEPS: (140000, 180000) + MAX_ITER: 200000 + BASE_LR: 0.02 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 + CROP: + ENABLED: True +TEST: + EVAL_PERIOD: 0 +DATASETS: + TRAIN: ("CIHP_train") + TEST: ("CIHP_val",) +OUTPUT_DIR: "./finetune_output" + diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/parsing_inference.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/parsing_inference.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d6a529b1eff2ddf553b1ba32f7b65172f03fae1f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/parsing_inference.yaml @@ -0,0 +1,26 @@ +_BASE_: "cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "./finetune_ouput/model_final.pth" + ROI_HEADS: + 
NMS_THRESH_TEST: 0.95 + SCORE_THRESH_TEST: 0.5 + NUM_CLASSES: 1 +SOLVER: + IMS_PER_BATCH: 1 + STEPS: (30000, 45000) + MAX_ITER: 50000 + BASE_LR: 0.02 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 + CROP: + ENABLED: True +TEST: + AUG: + ENABLED: True +DATASETS: + TRAIN: ("CIHP_trainval",) + TEST: ("CIHP_test",) +OUTPUT_DIR: "./inference_output" diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f3400288cde242fcf66eef7f63b5a9165ca663c5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,13 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" +MODEL: + # Train from random initialization. + WEIGHTS: "" + # It makes sense to divide by STD when training from scratch + # But it seems to make no difference on the results and C2's models didn't do this. + # So we keep things consistent with C2. + # PIXEL_STD: [57.375, 57.12, 58.395] + MASK_ON: True + BACKBONE: + FREEZE_AT: 0 +# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 +# to learn what you need for training from scratch. diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d90c9ff0ef4573252ee165b4c958ec5f74178176 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml @@ -0,0 +1,19 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" +MODEL: + PIXEL_STD: [57.375, 57.12, 58.395] + WEIGHTS: "" + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False + BACKBONE: + FREEZE_AT: 0 +SOLVER: + # 9x schedule + IMS_PER_BATCH: 64 # 4x the standard + STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k + MAX_ITER: 202500 # 90k * 9 / 4 + BASE_LR: 0.08 +TEST: + EVAL_PERIOD: 2500 +# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 +# to learn what you need for training from scratch. diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..60d4e42330e396a1901437df8e17b262d5ad547a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml @@ -0,0 +1,19 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_syncbn.yaml" +MODEL: + PIXEL_STD: [57.375, 57.12, 58.395] + WEIGHTS: "" + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False + BACKBONE: + FREEZE_AT: 0 +SOLVER: + # 9x schedule + IMS_PER_BATCH: 64 # 4x the standard + STEPS: (187500, 197500) # last 60/4==15k and last 20/4==5k + MAX_ITER: 202500 # 90k * 9 / 4 + BASE_LR: 0.08 +TEST: + EVAL_PERIOD: 2500 +# NOTE: Please refer to Rethinking ImageNet Pre-training https://arxiv.org/abs/1811.08883 +# to learn what you need for training from scratch. 
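As an aside, the arithmetic behind the 9x schedules above can be spelled out as follows. This is a sketch of what the inline comments imply, assuming the standard 1x baseline of 90k iterations at `IMS_PER_BATCH: 16` with `BASE_LR: 0.02`:

```yaml
# 9x schedule at the reference batch size: 9 * 90000 = 810000 iterations.
# Quadrupling the batch size (16 -> 64) divides the iteration counts by 4
# and, by the linear scaling rule, multiplies the learning rate by 4.
SOLVER:
  IMS_PER_BATCH: 64          # 4x the standard 16
  MAX_ITER: 202500           # 810000 / 4
  STEPS: (187500, 197500)    # LR drops 60000/4 = 15000 and 20000/4 = 5000 iterations before the end
  BASE_LR: 0.08              # 0.02 * 4
```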
diff --git a/preprocess/mhp_extension/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ac256e1372770ab3d9ae522c962de0fd0dbceeb5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/Misc/semantic_R_50_FPN_1x.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/preprocess/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml b/preprocess/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea2a6baaebd1a186db18f2904430ffb25901898e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (12000, 16000) + MAX_ITER: 18000 # 17.4 epochs + WARMUP_ITERS: 100 diff --git a/preprocess/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml b/preprocess/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e554cab18a358a27b630c1ab0c2359666b0e1514 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/PascalVOC-Detection/faster_rcnn_R_50_FPN.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (12000, 16000) + MAX_ITER: 18000 # 17.4 epochs + WARMUP_ITERS: 100 diff --git a/preprocess/mhp_extension/detectron2/configs/my_Base-RCNN-FPN.yaml b/preprocess/mhp_extension/detectron2/configs/my_Base-RCNN-FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d649eed7f333dfb07d7a096c6267dc0066e847c1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/my_Base-RCNN-FPN.yaml @@ -0,0 +1,42 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default 
batch size for FPN is 2. + POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 2 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/README.md b/preprocess/mhp_extension/detectron2/configs/quick_schedules/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a278199b8557a1e2fb341fe6757786a6cecb82b3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/README.md @@ -0,0 +1 @@ +These are quick configs for performance or accuracy regression tracking purposes. diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc5a4116cb096278823049c1f823e99f8e16e97e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/model_final_480dd8.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 50.18, 0.02], ["segm", "AP", 43.87, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e41a0fe7ffe9c3531741df49e546aa45cfe4fdee --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/cascade_mask_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2f37e5e2cc2a9e195e13703e9930e67e0f9a896 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 45.70, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52fc0ec03c8b87ab2be1dda97bec1e8c93e6bb5c --- /dev/null 
+++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,15 @@ +_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) + TEST: ("coco_2017_val_100",) + PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..14cf2aa82aec52ad44e28ead0665dad811d55457 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl" +DATASETS: + TEST: ("keypoints_coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 52.47, 0.02], ["keypoints", "AP", 67.36, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc09034bdd3db9d3e0dc62a017a3883dbe79c649 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True +DATASETS: + TRAIN: ("keypoints_coco_2017_val_100",) + TEST: ("keypoints_coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b92392f1c4457033ae4c87a521e339fe9e184ce --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml @@ -0,0 +1,30 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: False + LOSS_WEIGHT: 4.0 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss + RPN: + SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss +DATASETS: + TRAIN: ("keypoints_coco_2017_val",) + TEST: ("keypoints_coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + WARMUP_FACTOR: 0.33333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 55.35, 1.0], 
["keypoints", "AP", 76.91, 1.0]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9bd962878fea64035887c48981beeb8d41bfdbd0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,28 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss + RPN: + SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss +DATASETS: + TRAIN: ("keypoints_coco_2017_val",) + TEST: ("keypoints_coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + WARMUP_FACTOR: 0.33333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 53.5, 1.0], ["keypoints", "AP", 72.4, 1.0]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ab6e69812b94ea7e071f29d9a6937d5c70805b5b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_GCV_instant_test.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.001 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "value" + CLIP_VALUE: 1.0 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2d5b7ff87e069f8c774a230bdfd47b8c12d18a3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.37, 0.02], ["segm", "AP", 40.99, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c4f1214efa520944fd941daec082ad45c164a23 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.001 + STEPS: 
(30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f68dd8f96c7896b5fc95d694a399f2ce417c1deb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml @@ -0,0 +1,22 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val",) + TEST: ("coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (600,) + MAX_SIZE_TRAIN: 1000 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1000 +SOLVER: + IMS_PER_BATCH: 8 # base uses 16 + WARMUP_FACTOR: 0.33333 + WARMUP_ITERS: 100 + STEPS: (11000, 11600) + MAX_ITER: 12000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3ce6cf922ae07fba5b5e01edbac19bf58a8e9dd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], ["segm", "AP", 42.94, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5454bfd95cc37749c50aec7866f32d9a80ca2b7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,10 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02], ["bbox_TTA", "AP", 49.11, 0.02], ["segm_TTA", "AP", 45.04, 0.02]] + AUG: + ENABLED: True + MIN_SIZES: (700, 800) # to save some time diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6dbfcde0bf837990634d419a6dda1e2909c3cd7f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git 
a/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffca550461035967a565dca39bca039658a68eed --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,21 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val",) + TEST: ("coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (600,) + MAX_SIZE_TRAIN: 1000 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1000 +SOLVER: + WARMUP_FACTOR: 0.3333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 42.0, 1.6], ["segm", "AP", 35.4, 1.25]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70874e3a92c9034d75cbbebb145b61084ba15e42 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl" +DATASETS: + TEST: ("coco_2017_val_100_panoptic_separated",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7cdee7bfcf6dc75dda52602a0d9177ad0a9cc6ed --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_val_100_panoptic_separated",) + TEST: ("coco_2017_val_100_panoptic_separated",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 1 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..05816316f851690e60ee54b852b6f49ede73c886 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_val_panoptic_separated",) + TEST: ("coco_2017_val_panoptic_separated",) +SOLVER: + BASE_LR: 0.01 + WARMUP_FACTOR: 0.001 + WARMUP_ITERS: 500 + STEPS: 
(5500,) + MAX_ITER: 7000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 46.70, 1.1], ["segm", "AP", 38.73, 0.7], ["sem_seg", "mIoU", 64.73, 1.2], ["panoptic_seg", "PQ", 48.13, 0.8]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36b998833bac04c830d5ab9f44d5773b0437ac0b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/137849486/model_final_4cafe0.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 44.36, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d95c1f614296716374686b22055a587ccd052b9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c7c3f908a9e80e98b2d25b6d384a60acaba9d4f8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..402d432477507dc36f04c4a9777cb80fe06b2809 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + STEPS: (30,) + MAX_ITER: 40 + BASE_LR: 0.005 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bca74987d5218736983617883e0fe37f79d219b7 --- /dev/null +++ 
b/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TEST: ("coco_2017_val_100_panoptic_stuffonly",) +TEST: + EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]] diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..14ab606f219b462fe37fcc7d5fbdbe65cb5c2642 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_val_100_panoptic_stuffonly",) + TEST: ("coco_2017_val_100_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f78d775889b11e9e76743de5ddb8139198edf61 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_val_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +SOLVER: + BASE_LR: 0.01 + WARMUP_FACTOR: 0.001 + WARMUP_ITERS: 300 + STEPS: (5500,) + MAX_ITER: 7000 +TEST: + EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]] +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/preprocess/mhp_extension/detectron2/demo/README.md b/preprocess/mhp_extension/detectron2/demo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..caa755f6f0f472a04a419deec4a6acfdb949023b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/demo/README.md @@ -0,0 +1,8 @@ + +## Detectron2 Demo + +We provide a command line tool to run a simple demo of builtin models. +The usage is explained in [GETTING_STARTED.md](../GETTING_STARTED.md). + +See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-) +for a high-quality demo generated with this tool. diff --git a/preprocess/mhp_extension/detectron2/demo/demo.py b/preprocess/mhp_extension/detectron2/demo/demo.py new file mode 100755 index 0000000000000000000000000000000000000000..1fd8df8f539cfe4a4f003fb820f49ffad0f54f80 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/demo/demo.py @@ -0,0 +1,161 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import argparse +import glob +import multiprocessing as mp +import os +import time +import cv2 +import tqdm + +from detectron2.config import get_cfg +from detectron2.data.detection_utils import read_image +from detectron2.utils.logger import setup_logger + +from predictor import VisualizationDemo + +# constants +WINDOW_NAME = "COCO detections" + + +def setup_cfg(args): + # load config from file and command-line arguments + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + # Set score_threshold for builtin models + cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold + cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold + cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold + cfg.freeze() + return cfg + + +def get_parser(): + parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models") + parser.add_argument( + "--config-file", + default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml", + metavar="FILE", + help="path to config file", + ) + parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.") + parser.add_argument("--video-input", help="Path to video file.") + parser.add_argument( + "--input", + nargs="+", + help="A list of space separated input images; " + "or a single glob pattern such as 'directory/*.jpg'", + ) + parser.add_argument( + "--output", + help="A file or directory to save output visualizations. " + "If not given, will show output in an OpenCV window.", + ) + + parser.add_argument( + "--confidence-threshold", + type=float, + default=0.5, + help="Minimum score for instance predictions to be shown", + ) + parser.add_argument( + "--opts", + help="Modify config options using the command-line 'KEY VALUE' pairs", + default=[], + nargs=argparse.REMAINDER, + ) + return parser + + +if __name__ == "__main__": + mp.set_start_method("spawn", force=True) + args = get_parser().parse_args() + setup_logger(name="fvcore") + logger = setup_logger() + logger.info("Arguments: " + str(args)) + + cfg = setup_cfg(args) + + demo = VisualizationDemo(cfg) + + if args.input: + if len(args.input) == 1: + args.input = glob.glob(os.path.expanduser(args.input[0])) + assert args.input, "The input path(s) was not found" + for path in tqdm.tqdm(args.input, disable=not args.output): + # use PIL, to be consistent with evaluation + img = read_image(path, format="BGR") + start_time = time.time() + predictions, visualized_output = demo.run_on_image(img) + logger.info( + "{}: {} in {:.2f}s".format( + path, + "detected {} instances".format(len(predictions["instances"])) + if "instances" in predictions + else "finished", + time.time() - start_time, + ) + ) + + if args.output: + if os.path.isdir(args.output): + assert os.path.isdir(args.output), args.output + out_filename = os.path.join(args.output, os.path.basename(path)) + else: + assert len(args.input) == 1, "Please specify a directory with args.output" + out_filename = args.output + visualized_output.save(out_filename) + else: + cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) + cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1]) + if cv2.waitKey(0) == 27: + break # esc to quit + elif args.webcam: + assert args.input is None, "Cannot have both --input and --webcam!" + assert args.output is None, "output not yet supported with --webcam!" 
+ cam = cv2.VideoCapture(0) + for vis in tqdm.tqdm(demo.run_on_video(cam)): + cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL) + cv2.imshow(WINDOW_NAME, vis) + if cv2.waitKey(1) == 27: + break # esc to quit + cam.release() + cv2.destroyAllWindows() + elif args.video_input: + video = cv2.VideoCapture(args.video_input) + width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) + frames_per_second = video.get(cv2.CAP_PROP_FPS) + num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + basename = os.path.basename(args.video_input) + + if args.output: + if os.path.isdir(args.output): + output_fname = os.path.join(args.output, basename) + output_fname = os.path.splitext(output_fname)[0] + ".mkv" + else: + output_fname = args.output + assert not os.path.isfile(output_fname), output_fname + output_file = cv2.VideoWriter( + filename=output_fname, + # some installation of opencv may not support x264 (due to its license), + # you can try other format (e.g. MPEG) + fourcc=cv2.VideoWriter_fourcc(*"x264"), + fps=float(frames_per_second), + frameSize=(width, height), + isColor=True, + ) + assert os.path.isfile(args.video_input) + for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames): + if args.output: + output_file.write(vis_frame) + else: + cv2.namedWindow(basename, cv2.WINDOW_NORMAL) + cv2.imshow(basename, vis_frame) + if cv2.waitKey(1) == 27: + break # esc to quit + video.release() + if args.output: + output_file.release() + else: + cv2.destroyAllWindows() diff --git a/preprocess/mhp_extension/detectron2/demo/predictor.py b/preprocess/mhp_extension/detectron2/demo/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..689fa85436d928858e652df665f5e7460a1f3154 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/demo/predictor.py @@ -0,0 +1,220 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import atexit +import bisect +import multiprocessing as mp +from collections import deque +import cv2 +import torch + +from detectron2.data import MetadataCatalog +from detectron2.engine.defaults import DefaultPredictor +from detectron2.utils.video_visualizer import VideoVisualizer +from detectron2.utils.visualizer import ColorMode, Visualizer + + +class VisualizationDemo(object): + def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): + """ + Args: + cfg (CfgNode): + instance_mode (ColorMode): + parallel (bool): whether to run the model in different processes from visualization. + Useful since the visualization logic can be slow. + """ + self.metadata = MetadataCatalog.get( + cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused" + ) + self.cpu_device = torch.device("cpu") + self.instance_mode = instance_mode + + self.parallel = parallel + if parallel: + num_gpu = torch.cuda.device_count() + self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) + else: + self.predictor = DefaultPredictor(cfg) + + def run_on_image(self, image): + """ + Args: + image (np.ndarray): an image of shape (H, W, C) (in BGR order). + This is the format used by OpenCV. + + Returns: + predictions (dict): the output of the model. + vis_output (VisImage): the visualized image output. + """ + vis_output = None + predictions = self.predictor(image) + # Convert image from OpenCV BGR format to Matplotlib RGB format. 
+ image = image[:, :, ::-1] + visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode) + if "panoptic_seg" in predictions: + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_output = visualizer.draw_panoptic_seg_predictions( + panoptic_seg.to(self.cpu_device), segments_info + ) + else: + if "sem_seg" in predictions: + vis_output = visualizer.draw_sem_seg( + predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) + ) + if "instances" in predictions: + instances = predictions["instances"].to(self.cpu_device) + vis_output = visualizer.draw_instance_predictions(predictions=instances) + + return predictions, vis_output + + def _frame_from_video(self, video): + while video.isOpened(): + success, frame = video.read() + if success: + yield frame + else: + break + + def run_on_video(self, video): + """ + Visualizes predictions on frames of the input video. + + Args: + video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be + either a webcam or a video file. + + Yields: + ndarray: BGR visualizations of each video frame. + """ + video_visualizer = VideoVisualizer(self.metadata, self.instance_mode) + + def process_predictions(frame, predictions): + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + if "panoptic_seg" in predictions: + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_frame = video_visualizer.draw_panoptic_seg_predictions( + frame, panoptic_seg.to(self.cpu_device), segments_info + ) + elif "instances" in predictions: + predictions = predictions["instances"].to(self.cpu_device) + vis_frame = video_visualizer.draw_instance_predictions(frame, predictions) + elif "sem_seg" in predictions: + vis_frame = video_visualizer.draw_sem_seg( + frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) + ) + + # Converts Matplotlib RGB format to OpenCV BGR format + vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR) + return vis_frame + + frame_gen = self._frame_from_video(video) + if self.parallel: + buffer_size = self.predictor.default_buffer_size + + frame_data = deque() + + for cnt, frame in enumerate(frame_gen): + frame_data.append(frame) + self.predictor.put(frame) + + if cnt >= buffer_size: + frame = frame_data.popleft() + predictions = self.predictor.get() + yield process_predictions(frame, predictions) + + while len(frame_data): + frame = frame_data.popleft() + predictions = self.predictor.get() + yield process_predictions(frame, predictions) + else: + for frame in frame_gen: + yield process_predictions(frame, self.predictor(frame)) + + +class AsyncPredictor: + """ + A predictor that runs the model asynchronously, possibly on >1 GPUs. + Because rendering the visualization takes considerably amount of time, + this helps improve throughput when rendering videos. 
+ """ + + class _StopToken: + pass + + class _PredictWorker(mp.Process): + def __init__(self, cfg, task_queue, result_queue): + self.cfg = cfg + self.task_queue = task_queue + self.result_queue = result_queue + super().__init__() + + def run(self): + predictor = DefaultPredictor(self.cfg) + + while True: + task = self.task_queue.get() + if isinstance(task, AsyncPredictor._StopToken): + break + idx, data = task + result = predictor(data) + self.result_queue.put((idx, result)) + + def __init__(self, cfg, num_gpus: int = 1): + """ + Args: + cfg (CfgNode): + num_gpus (int): if 0, will run on CPU + """ + num_workers = max(num_gpus, 1) + self.task_queue = mp.Queue(maxsize=num_workers * 3) + self.result_queue = mp.Queue(maxsize=num_workers * 3) + self.procs = [] + for gpuid in range(max(num_gpus, 1)): + cfg = cfg.clone() + cfg.defrost() + cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" + self.procs.append( + AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) + ) + + self.put_idx = 0 + self.get_idx = 0 + self.result_rank = [] + self.result_data = [] + + for p in self.procs: + p.start() + atexit.register(self.shutdown) + + def put(self, image): + self.put_idx += 1 + self.task_queue.put((self.put_idx, image)) + + def get(self): + self.get_idx += 1 # the index needed for this request + if len(self.result_rank) and self.result_rank[0] == self.get_idx: + res = self.result_data[0] + del self.result_data[0], self.result_rank[0] + return res + + while True: + # make sure the results are returned in the correct order + idx, res = self.result_queue.get() + if idx == self.get_idx: + return res + insert = bisect.bisect(self.result_rank, idx) + self.result_rank.insert(insert, idx) + self.result_data.insert(insert, res) + + def __len__(self): + return self.put_idx - self.get_idx + + def __call__(self, image): + self.put(image) + return self.get() + + def shutdown(self): + for _ in self.procs: + self.task_queue.put(AsyncPredictor._StopToken()) + + @property + def default_buffer_size(self): + return len(self.procs) * 5 diff --git a/preprocess/mhp_extension/detectron2/detectron2/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..41816af2e8e538fa2ef4dc7b34f5667e0e823b90 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +from .utils.env import setup_environment + +setup_environment() + + +# This line will be programatically read/write by setup.py. +# Leave them at the bottom of this file and don't touch them. +__version__ = "0.1.3" diff --git a/preprocess/mhp_extension/detectron2/detectron2/checkpoint/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e17a9df03d886b379ffbb1c4ec41e03c5025410f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# File: + + +from . 
import catalog as _UNUSED # register the handler +from .detection_checkpoint import DetectionCheckpointer +from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer + +__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] diff --git a/preprocess/mhp_extension/detectron2/detectron2/checkpoint/c2_model_loading.py b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/c2_model_loading.py new file mode 100644 index 0000000000000000000000000000000000000000..e27ba8463c744438d44f04f23fd4975525eba667 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/c2_model_loading.py @@ -0,0 +1,313 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging +import re +import torch +from fvcore.common.checkpoint import ( + get_missing_parameters_message, + get_unexpected_parameters_message, +) + + +def convert_basic_c2_names(original_keys): + """ + Apply some basic name conversion to names in C2 weights. + It only deals with typical backbone models. + + Args: + original_keys (list[str]): + Returns: + list[str]: The same number of strings matching those in original_keys. + """ + layer_keys = copy.deepcopy(original_keys) + layer_keys = [ + {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys + ] # some hard-coded mappings + + layer_keys = [k.replace("_", ".") for k in layer_keys] + layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] + layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] + # Uniform both bn and gn names to "norm" + layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] + + # stem + layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] + # to avoid mis-matching with "conv1" in other components (e.g. 
detection head) + layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] + + # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) + # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] + # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] + # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] + # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] + + # blocks + layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] + layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] + layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] + layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] + + # DensePose substitutions + layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] + layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] + layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] + layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] + layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] + return layer_keys + + +def convert_c2_detectron_names(weights): + """ + Map Caffe2 Detectron weight names to Detectron2 names. + + Args: + weights (dict): name -> tensor + + Returns: + dict: detectron2 names -> tensor + dict: detectron2 names -> C2 names + """ + logger = logging.getLogger(__name__) + logger.info("Remapping C2 weights ......") + original_keys = sorted(weights.keys()) + layer_keys = copy.deepcopy(original_keys) + + layer_keys = convert_basic_c2_names(layer_keys) + + # -------------------------------------------------------------------------- + # RPN hidden representation conv + # -------------------------------------------------------------------------- + # FPN case + # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then + # shared for all other levels, hence the appearance of "fpn2" + layer_keys = [ + k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys + ] + # Non-FPN case + layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # RPN box transformation conv + # -------------------------------------------------------------------------- + # FPN case (see note above about "fpn2") + layer_keys = [ + k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") + for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + # Non-FPN case + layer_keys = [ + k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + + # -------------------------------------------------------------------------- + # Fast R-CNN box head + # -------------------------------------------------------------------------- + layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] + layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] + layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] + layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] + # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s + layer_keys = 
[re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # FPN lateral and output convolutions + # -------------------------------------------------------------------------- + def fpn_map(name): + """ + Look for keys with the following patterns: + 1) Starts with "fpn.inner." + Example: "fpn.inner.res2.2.sum.lateral.weight" + Meaning: These are lateral pathway convolutions + 2) Starts with "fpn.res" + Example: "fpn.res2.2.sum.weight" + Meaning: These are FPN output convolutions + """ + splits = name.split(".") + norm = ".norm" if "norm" in splits else "" + if name.startswith("fpn.inner."): + # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] + stage = int(splits[2][len("res") :]) + return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) + elif name.startswith("fpn.res"): + # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] + stage = int(splits[1][len("res") :]) + return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) + return name + + layer_keys = [fpn_map(k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # Mask R-CNN mask head + # -------------------------------------------------------------------------- + # roi_heads.StandardROIHeads case + layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] + layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] + layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] + # roi_heads.Res5ROIHeads case + layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Keypoint R-CNN head + # -------------------------------------------------------------------------- + # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" + layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] + layer_keys = [ + k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys + ] + layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Done with replacements + # -------------------------------------------------------------------------- + assert len(set(layer_keys)) == len(layer_keys) + assert len(original_keys) == len(layer_keys) + + new_weights = {} + new_keys_to_original_keys = {} + for orig, renamed in zip(original_keys, layer_keys): + new_keys_to_original_keys[renamed] = orig + if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): + # remove the meaningless prediction weight for background class + new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 + new_weights[renamed] = weights[orig][new_start_idx:] + logger.info( + "Remove prediction weight for background class in {}. 
The shape changes from " + "{} to {}.".format( + renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) + ) + ) + elif renamed.startswith("cls_score."): + # move weights of bg class from original index 0 to last index + logger.info( + "Move classification weights for background class in {} from index 0 to " + "index {}.".format(renamed, weights[orig].shape[0] - 1) + ) + new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) + else: + new_weights[renamed] = weights[orig] + + return new_weights, new_keys_to_original_keys + + +# Note the current matching is not symmetric. +# it assumes model_state_dict will have longer names. +def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): + """ + Match names between the two state-dict, and update the values of model_state_dict in-place with + copies of the matched tensor in ckpt_state_dict. + If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 + model and will be renamed at first. + + Strategy: suppose that the models that we will create will have prefixes appended + to each of its keys, for example due to an extra level of nesting that the original + pre-trained weights from ImageNet won't contain. For example, model.state_dict() + might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains + res2.conv1.weight. We thus want to match both parameters together. + For that, we look for each model weight, look among all loaded keys if there is one + that is a suffix of the current weight name, and use it if that's the case. + If multiple matches exist, take the one with longest size + of the corresponding name. For example, for the same model as before, the pretrained + weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, + we want to match backbone[0].body.conv1.weight to conv1.weight, and + backbone[0].body.res2.conv1.weight to res2.conv1.weight. + """ + model_keys = sorted(model_state_dict.keys()) + if c2_conversion: + ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) + # original_keys: the name in the original dict (before renaming) + else: + original_keys = {x: x for x in ckpt_state_dict.keys()} + ckpt_keys = sorted(ckpt_state_dict.keys()) + + def match(a, b): + # Matched ckpt_key should be a complete (starts with '.') suffix. + # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, + # but matches whatever_conv1 or mesh_head.whatever_conv1. + return a == b or a.endswith("." 
+ b) + + # get a matrix of string matches, where each (i, j) entry correspond to the size of the + # ckpt_key string, if it matches + match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] + match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) + # use the matched one with longest size in case of multiple matches + max_match_size, idxs = match_matrix.max(1) + # remove indices that correspond to no-match + idxs[max_match_size == 0] = -1 + + # used for logging + max_len_model = max(len(key) for key in model_keys) if model_keys else 1 + max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1 + log_str_template = "{: <{}} loaded from {: <{}} of shape {}" + logger = logging.getLogger(__name__) + # matched_pairs (matched checkpoint key --> matched model key) + matched_keys = {} + for idx_model, idx_ckpt in enumerate(idxs.tolist()): + if idx_ckpt == -1: + continue + key_model = model_keys[idx_model] + key_ckpt = ckpt_keys[idx_ckpt] + value_ckpt = ckpt_state_dict[key_ckpt] + shape_in_model = model_state_dict[key_model].shape + + if shape_in_model != value_ckpt.shape: + logger.warning( + "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( + key_ckpt, value_ckpt.shape, key_model, shape_in_model + ) + ) + logger.warning( + "{} will not be loaded. Please double check and see if this is desired.".format( + key_ckpt + ) + ) + continue + + model_state_dict[key_model] = value_ckpt.clone() + if key_ckpt in matched_keys: # already added to matched_keys + logger.error( + "Ambiguity found for {} in checkpoint!" + "It matches at least two keys in the model ({} and {}).".format( + key_ckpt, key_model, matched_keys[key_ckpt] + ) + ) + raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") + + matched_keys[key_ckpt] = key_model + logger.info( + log_str_template.format( + key_model, + max_len_model, + original_keys[key_ckpt], + max_len_ckpt, + tuple(shape_in_model), + ) + ) + matched_model_keys = matched_keys.values() + matched_ckpt_keys = matched_keys.keys() + # print warnings about unmatched keys on both side + unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys] + if len(unmatched_model_keys): + logger.info(get_missing_parameters_message(unmatched_model_keys)) + + unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys] + if len(unmatched_ckpt_keys): + logger.info( + get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys) + ) diff --git a/preprocess/mhp_extension/detectron2/detectron2/checkpoint/catalog.py b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/catalog.py new file mode 100644 index 0000000000000000000000000000000000000000..62f81f3c1531e2726400cba4c97b60d744670da5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/catalog.py @@ -0,0 +1,134 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +from fvcore.common.file_io import PathHandler, PathManager + + +class ModelCatalog(object): + """ + Store mappings from names to third-party models. + """ + + S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron" + + # MSRA models have STRIDE_IN_1X1=True. False otherwise. + # NOTE: all BN models here have fused BN into an affine layer. + # As a result, you should only load them to a model with "FrozenBN". + # Loading them to a model with regular BN or SyncBN is wrong. 
+ # Even when loaded to FrozenBN, it is still different from affine by an epsilon, + # which should be negligible for training. + # NOTE: all models here uses PIXEL_STD=[1,1,1] + # NOTE: Most of the BN models here are no longer used. We use the + # re-converted pre-trained models under detectron2 model zoo instead. + C2_IMAGENET_MODELS = { + "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", + "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", + "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", + "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", + "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", + "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl", + "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl", + } + + C2_DETECTRON_PATH_FORMAT = ( + "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950 + ) + + C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival" + C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival" + + # format: {model_name} -> part of the url + C2_DETECTRON_MODELS = { + "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950 + "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950 + "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950 + "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950 + "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950 + "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950 + "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950 + "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950 + "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950 + "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950 + "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950 + "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950 + "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950 + } + + @staticmethod + def get(name): + if name.startswith("Caffe2Detectron/COCO"): + return ModelCatalog._get_c2_detectron_baseline(name) + if name.startswith("ImageNetPretrained/"): + return ModelCatalog._get_c2_imagenet_pretrained(name) + raise RuntimeError("model not present in the catalog: {}".format(name)) + + @staticmethod + def _get_c2_imagenet_pretrained(name): + prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX + name = name[len("ImageNetPretrained/") :] + name = ModelCatalog.C2_IMAGENET_MODELS[name] + url = "/".join([prefix, name]) + return url + + @staticmethod + def _get_c2_detectron_baseline(name): + name = name[len("Caffe2Detectron/COCO/") :] + url = 
ModelCatalog.C2_DETECTRON_MODELS[name] + if "keypoint_rcnn" in name: + dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS + else: + dataset = ModelCatalog.C2_DATASET_COCO + + if "35998355/rpn_R-50-C4_1x" in name: + # this one model is somehow different from others .. + type = "rpn" + else: + type = "generalized_rcnn" + + # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`. + url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format( + prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset + ) + return url + + +class ModelCatalogHandler(PathHandler): + """ + Resolve URL like catalog://. + """ + + PREFIX = "catalog://" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path): + logger = logging.getLogger(__name__) + catalog_path = ModelCatalog.get(path[len(self.PREFIX) :]) + logger.info("Catalog entry {} points to {}".format(path, catalog_path)) + return PathManager.get_local_path(catalog_path) + + def _open(self, path, mode="r", **kwargs): + return PathManager.open(self._get_local_path(path), mode, **kwargs) + + +class Detectron2Handler(PathHandler): + """ + Resolve anything that's in Detectron2 model zoo. + """ + + PREFIX = "detectron2://" + S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path): + name = path[len(self.PREFIX) :] + return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name) + + def _open(self, path, mode="r", **kwargs): + return PathManager.open(self._get_local_path(path), mode, **kwargs) + + +PathManager.register_handler(ModelCatalogHandler()) +PathManager.register_handler(Detectron2Handler()) diff --git a/preprocess/mhp_extension/detectron2/detectron2/checkpoint/detection_checkpoint.py b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/detection_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..06e6739f7b2070cf3e2d34099188e5ea1f7cf622 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/checkpoint/detection_checkpoint.py @@ -0,0 +1,73 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import pickle +from fvcore.common.checkpoint import Checkpointer +from fvcore.common.file_io import PathManager + +import detectron2.utils.comm as comm + +from .c2_model_loading import align_and_update_state_dicts + + +class DetectionCheckpointer(Checkpointer): + """ + Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2 + model zoo, and apply conversions for legacy models. 
+ """ + + def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): + is_main_process = comm.is_main_process() + super().__init__( + model, + save_dir, + save_to_disk=is_main_process if save_to_disk is None else save_to_disk, + **checkpointables, + ) + + def _load_file(self, filename): + if filename.endswith(".pkl"): + with PathManager.open(filename, "rb") as f: + data = pickle.load(f, encoding="latin1") + if "model" in data and "__author__" in data: + # file is in Detectron2 model zoo format + self.logger.info("Reading a file from '{}'".format(data["__author__"])) + return data + else: + # assume file is from Caffe2 / Detectron1 model zoo + if "blobs" in data: + # Detection models have "blobs", but ImageNet models don't + data = data["blobs"] + data = {k: v for k, v in data.items() if not k.endswith("_momentum")} + return {"model": data, "__author__": "Caffe2", "matching_heuristics": True} + + loaded = super()._load_file(filename) # load native pth checkpoint + if "model" not in loaded: + loaded = {"model": loaded} + return loaded + + def _load_model(self, checkpoint): + if checkpoint.get("matching_heuristics", False): + self._convert_ndarray_to_tensor(checkpoint["model"]) + # convert weights by name-matching heuristics + model_state_dict = self.model.state_dict() + align_and_update_state_dicts( + model_state_dict, + checkpoint["model"], + c2_conversion=checkpoint.get("__author__", None) == "Caffe2", + ) + checkpoint["model"] = model_state_dict + # for non-caffe2 models, use standard ways to load it + incompatible = super()._load_model(checkpoint) + if incompatible is None: # support older versions of fvcore + return None + + model_buffers = dict(self.model.named_buffers(recurse=False)) + for k in ["pixel_mean", "pixel_std"]: + # Ignore missing key message about pixel_mean/std. + # Though they may be missing in old checkpoints, they will be correctly + # initialized from config anyway. + if k in model_buffers: + try: + incompatible.missing_keys.remove(k) + except ValueError: + pass + return incompatible diff --git a/preprocess/mhp_extension/detectron2/detectron2/config/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f996ecd74947c504f86e3e6854a45bd74ad32c1c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/config/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .compat import downgrade_config, upgrade_config +from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable + +__all__ = [ + "CfgNode", + "get_cfg", + "global_cfg", + "set_global_cfg", + "downgrade_config", + "upgrade_config", + "configurable", +] diff --git a/preprocess/mhp_extension/detectron2/detectron2/config/compat.py b/preprocess/mhp_extension/detectron2/detectron2/config/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..41fe3a00ca05885abf28106808fe7f8d862b5036 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/config/compat.py @@ -0,0 +1,229 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Backward compatibility of configs. + +Instructions to bump version: ++ It's not needed to bump version if new keys are added. + It's only needed when backward-incompatible changes happen + (i.e., some existing keys disappear, or the meaning of a key changes) ++ To bump version, do the following: + 1. Increment _C.VERSION in defaults.py + 2. 
Add a converter in this file. + + Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X, + and a function "downgrade" which in-place downgrades config from X to X-1 + + In each function, VERSION is left unchanged. + + Each converter assumes that its input has the relevant keys + (i.e., the input is not a partial config). + 3. Run the tests (test_config.py) to make sure the upgrade & downgrade + functions are consistent. +""" + +import logging +from typing import List, Optional, Tuple + +from .config import CfgNode as CN +from .defaults import _C + +__all__ = ["upgrade_config", "downgrade_config"] + + +def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: + """ + Upgrade a config from its current version to a newer version. + + Args: + cfg (CfgNode): + to_version (int): defaults to the latest version. + """ + cfg = cfg.clone() + if to_version is None: + to_version = _C.VERSION + + assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version): + converter = globals()["ConverterV" + str(k + 1)] + converter.upgrade(cfg) + cfg.VERSION = k + 1 + return cfg + + +def downgrade_config(cfg: CN, to_version: int) -> CN: + """ + Downgrade a config from its current version to an older version. + + Args: + cfg (CfgNode): + to_version (int): + + Note: + A general downgrade of arbitrary configs is not always possible due to the + different functionalities in different versions. + The purpose of downgrade is only to recover the defaults in old versions, + allowing it to load an old partial yaml config. + Therefore, the implementation only needs to fill in the default values + in the old version when a general downgrade is not possible. + """ + cfg = cfg.clone() + assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version, -1): + converter = globals()["ConverterV" + str(k)] + converter.downgrade(cfg) + cfg.VERSION = k - 1 + return cfg + + +def guess_version(cfg: CN, filename: str) -> int: + """ + Guess the version of a partial config where the VERSION field is not specified. + Returns the version, or the latest if cannot make a guess. + + This makes it easier for users to migrate. + """ + logger = logging.getLogger(__name__) + + def _has(name: str) -> bool: + cur = cfg + for n in name.split("."): + if n not in cur: + return False + cur = cur[n] + return True + + # Most users' partial configs have "MODEL.WEIGHT", so guess on it + ret = None + if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): + ret = 1 + + if ret is not None: + logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) + else: + ret = _C.VERSION + logger.warning( + "Config '{}' has no VERSION. 
Assuming it to be compatible with latest v{}.".format( + filename, ret + ) + ) + return ret + + +def _rename(cfg: CN, old: str, new: str) -> None: + old_keys = old.split(".") + new_keys = new.split(".") + + def _set(key_seq: List[str], val: str) -> None: + cur = cfg + for k in key_seq[:-1]: + if k not in cur: + cur[k] = CN() + cur = cur[k] + cur[key_seq[-1]] = val + + def _get(key_seq: List[str]) -> CN: + cur = cfg + for k in key_seq: + cur = cur[k] + return cur + + def _del(key_seq: List[str]) -> None: + cur = cfg + for k in key_seq[:-1]: + cur = cur[k] + del cur[key_seq[-1]] + if len(cur) == 0 and len(key_seq) > 1: + _del(key_seq[:-1]) + + _set(new_keys, _get(old_keys)) + _del(old_keys) + + +class _RenameConverter: + """ + A converter that handles simple rename. + """ + + RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name) + + @classmethod + def upgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME: + _rename(cfg, old, new) + + @classmethod + def downgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME[::-1]: + _rename(cfg, new, old) + + +class ConverterV1(_RenameConverter): + RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")] + + +class ConverterV2(_RenameConverter): + """ + A large bulk of rename, before public release. + """ + + RENAME = [ + ("MODEL.WEIGHT", "MODEL.WEIGHTS"), + ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"), + ( + "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT", + "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH", + ), + ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"), + ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"), + ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"), + ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"), + ("TEST.AUG_ON", "TEST.AUG.ENABLED"), + ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"), + ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"), + ("TEST.AUG_FLIP", "TEST.AUG.FLIP"), + ] + + @classmethod + def upgrade(cls, cfg: CN) -> None: + super().upgrade(cfg) + + if cfg.MODEL.META_ARCHITECTURE == "RetinaNet": + _rename( + cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS" + ) + _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"] + else: + _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS") + _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"] + + @classmethod + def downgrade(cls, cfg: CN) -> None: + super().downgrade(cfg) + + _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS") + _rename(cfg, 
"MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES") + cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS + cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES + cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version diff --git a/preprocess/mhp_extension/detectron2/detectron2/config/config.py b/preprocess/mhp_extension/detectron2/detectron2/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..14ad524f00e706ddba567a62f805481c2f185a8e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/config/config.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import functools +import inspect +import logging +from fvcore.common.config import CfgNode as _CfgNode +from fvcore.common.file_io import PathManager + + +class CfgNode(_CfgNode): + """ + The same as `fvcore.common.config.CfgNode`, but different in: + + 1. Use unsafe yaml loading by default. + Note that this may lead to arbitrary code execution: you must not + load a config file from untrusted sources before manually inspecting + the content of the file. + 2. Support config versioning. + When attempting to merge an old config, it will convert the old config automatically. + """ + + # Note that the default value of allow_unsafe is changed to True + def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: + assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" + loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) + loaded_cfg = type(self)(loaded_cfg) + + # defaults.py needs to import CfgNode + from .defaults import _C + + latest_ver = _C.VERSION + assert ( + latest_ver == self.VERSION + ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" + + logger = logging.getLogger(__name__) + + loaded_ver = loaded_cfg.get("VERSION", None) + if loaded_ver is None: + from .compat import guess_version + + loaded_ver = guess_version(loaded_cfg, cfg_filename) + assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( + loaded_ver, self.VERSION + ) + + if loaded_ver == self.VERSION: + self.merge_from_other_cfg(loaded_cfg) + else: + # compat.py needs to import CfgNode + from .compat import upgrade_config, downgrade_config + + logger.warning( + "Loading an old v{} config file '{}' by automatically upgrading to v{}. " + "See docs/CHANGELOG.md for instructions to update your files.".format( + loaded_ver, cfg_filename, self.VERSION + ) + ) + # To convert, first obtain a full config at an old version + old_self = downgrade_config(self, to_version=loaded_ver) + old_self.merge_from_other_cfg(loaded_cfg) + new_config = upgrade_config(old_self) + self.clear() + self.update(new_config) + + def dump(self, *args, **kwargs): + """ + Returns: + str: a yaml string representation of the config + """ + # to make it show up in docs + return super().dump(*args, **kwargs) + + +global_cfg = CfgNode() + + +def get_cfg() -> CfgNode: + """ + Get a copy of the default config. + + Returns: + a detectron2 CfgNode instance. + """ + from .defaults import _C + + return _C.clone() + + +def set_global_cfg(cfg: CfgNode) -> None: + """ + Let the global config point to the given cfg. + + Assume that the given "cfg" has the key "KEY", after calling + `set_global_cfg(cfg)`, the key can be accessed by: + + .. 
code-block:: python + + from detectron2.config import global_cfg + print(global_cfg.KEY) + + By using a hacky global config, you can access these configs anywhere, + without having to pass the config object or the values deep into the code. + This is a hacky feature introduced for quick prototyping / research exploration. + """ + global global_cfg + global_cfg.clear() + global_cfg.update(cfg) + + +def configurable(init_func): + """ + Decorate a class's __init__ method so that it can be called with a CfgNode + object using the class's from_config classmethod. + + Examples: + + .. code-block:: python + + class A: + @configurable + def __init__(self, a, b=2, c=3): + pass + + @classmethod + def from_config(cls, cfg): + # Returns kwargs to be passed to __init__ + return {"a": cfg.A, "b": cfg.B} + + a1 = A(a=1, b=2) # regular construction + a2 = A(cfg) # construct with a cfg + a3 = A(cfg, b=3, c=4) # construct with extra overwrite + """ + assert init_func.__name__ == "__init__", "@configurable should only be used for __init__!" + if init_func.__module__.startswith("detectron2."): + assert ( + init_func.__doc__ is not None and "experimental" in init_func.__doc__ + ), f"configurable {init_func} should be marked experimental" + + @functools.wraps(init_func) + def wrapped(self, *args, **kwargs): + try: + from_config_func = type(self).from_config + except AttributeError: + raise AttributeError("Class with @configurable must have a 'from_config' classmethod.") + if not inspect.ismethod(from_config_func): + raise TypeError("Class with @configurable must have a 'from_config' classmethod.") + + if _called_with_cfg(*args, **kwargs): + explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) + init_func(self, **explicit_args) + else: + init_func(self, *args, **kwargs) + + return wrapped + + +def _get_args_from_config(from_config_func, *args, **kwargs): + """ + Use `from_config` to obtain explicit arguments. + + Returns: + dict: arguments to be used for cls.__init__ + """ + signature = inspect.signature(from_config_func) + if list(signature.parameters.keys())[0] != "cfg": + raise TypeError( + f"{from_config_func.__self__}.from_config must take 'cfg' as the first argument!" + ) + support_var_arg = any( + param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] + for param in signature.parameters.values() + ) + if support_var_arg: # forward all arguments to from_config, if from_config accepts them + ret = from_config_func(*args, **kwargs) + else: + # forward supported arguments to from_config + supported_arg_names = set(signature.parameters.keys()) + extra_kwargs = {} + for name in list(kwargs.keys()): + if name not in supported_arg_names: + extra_kwargs[name] = kwargs.pop(name) + ret = from_config_func(*args, **kwargs) + # forward the other arguments to __init__ + ret.update(extra_kwargs) + return ret + + +def _called_with_cfg(*args, **kwargs): + """ + Returns: + bool: whether the arguments contain CfgNode and should be considered + forwarded to from_config. + """ + if len(args) and isinstance(args[0], _CfgNode): + return True + if isinstance(kwargs.pop("cfg", None), _CfgNode): + return True + # `from_config`'s first argument is forced to be "cfg". + # So the above check covers all cases. 
+ return False diff --git a/preprocess/mhp_extension/detectron2/detectron2/config/defaults.py b/preprocess/mhp_extension/detectron2/detectron2/config/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..b9ad62f5f01606438082e012ba5a4a68381c3b3c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/config/defaults.py @@ -0,0 +1,598 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .config import CfgNode as CN + +# ----------------------------------------------------------------------------- +# Convention about Training / Test specific parameters +# ----------------------------------------------------------------------------- +# Whenever an argument can be either used for training or for testing, the +# corresponding name will be post-fixed by a _TRAIN for a training parameter, +# or _TEST for a test-specific parameter. +# For example, the number of images during training will be +# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be +# IMAGES_PER_BATCH_TEST + +# ----------------------------------------------------------------------------- +# Config definition +# ----------------------------------------------------------------------------- + +_C = CN() + +# The version number, to upgrade from old configs to new ones if any +# changes happen. It's recommended to keep a VERSION in your config file. +_C.VERSION = 2 + +_C.MODEL = CN() +_C.MODEL.LOAD_PROPOSALS = False +_C.MODEL.MASK_ON = False +_C.MODEL.KEYPOINT_ON = False +_C.MODEL.DEVICE = "cuda" +_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" + +# Path (possibly with schema like catalog:// or detectron2://) to a checkpoint file +# to be loaded to the model. You can find available models in the model zoo. +_C.MODEL.WEIGHTS = "" + +# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR). +# To train on images of different number of channels, just set different mean & std. +# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] +_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] +# When using pre-trained models in Detectron1 or any MSRA models, +# std has been absorbed into its conv1 weights, so the std needs to be set 1. +# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) +_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] + + +# ----------------------------------------------------------------------------- +# INPUT +# ----------------------------------------------------------------------------- +_C.INPUT = CN() +# Size of the smallest side of the image during training +_C.INPUT.MIN_SIZE_TRAIN = (800,) +# Sample size of smallest side by choice or random selection from range give by +# INPUT.MIN_SIZE_TRAIN +_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" +# Maximum size of the side of the image during training +_C.INPUT.MAX_SIZE_TRAIN = 1333 +# Size of the smallest side of the image during testing. Set to zero to disable resize in testing. +_C.INPUT.MIN_SIZE_TEST = 800 +# Maximum size of the side of the image during testing +_C.INPUT.MAX_SIZE_TEST = 1333 + +# `True` if cropping is used for data augmentation during training +_C.INPUT.CROP = CN({"ENABLED": False}) +# Cropping type: +# - "relative" crop (H * CROP.SIZE[0], W * CROP.SIZE[1]) part of an input of size (H, W) +# - "relative_range" uniformly sample relative crop size from between [CROP.SIZE[0], [CROP.SIZE[1]]. +# and [1, 1] and use it as in "relative" scenario. 
+# - "absolute" crop part of an input with absolute size: (CROP.SIZE[0], CROP.SIZE[1]). +_C.INPUT.CROP.TYPE = "relative_range" +# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of +# pixels if CROP.TYPE is "absolute" +_C.INPUT.CROP.SIZE = [0.9, 0.9] + + +# Whether the model needs RGB, YUV, HSV etc. +# Should be one of the modes defined here, as we use PIL to read the image: +# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes +# with BGR being the one exception. One can set image format to BGR, we will +# internally use RGB for conversion and flip the channels over +_C.INPUT.FORMAT = "BGR" +# The ground truth mask format that the model will use. +# Mask R-CNN supports either "polygon" or "bitmask" as ground truth. +_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask" + + +# ----------------------------------------------------------------------------- +# Dataset +# ----------------------------------------------------------------------------- +_C.DATASETS = CN() +# List of the dataset names for training. Must be registered in DatasetCatalog +_C.DATASETS.TRAIN = () +# List of the pre-computed proposal files for training, which must be consistent +# with data listed in DATASETS.TRAIN. +_C.DATASETS.PROPOSAL_FILES_TRAIN = () +# Number of top scoring precomputed proposals to keep for training +_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 +# List of the dataset names for testing. Must be registered in DatasetCatalog +_C.DATASETS.TEST = () +# List of the pre-computed proposal files for test, which must be consistent +# with data listed in DATASETS.TEST. +_C.DATASETS.PROPOSAL_FILES_TEST = () +# Number of top scoring precomputed proposals to keep for test +_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 + +# ----------------------------------------------------------------------------- +# DataLoader +# ----------------------------------------------------------------------------- +_C.DATALOADER = CN() +# Number of data loading threads +_C.DATALOADER.NUM_WORKERS = 4 +# If True, each batch should contain only images for which the aspect ratio +# is compatible. This groups portrait images together, and landscape images +# are not batched with portrait images. +_C.DATALOADER.ASPECT_RATIO_GROUPING = True +# Options: TrainingSampler, RepeatFactorTrainingSampler +_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" +# Repeat threshold for RepeatFactorTrainingSampler +_C.DATALOADER.REPEAT_THRESHOLD = 0.0 +# if True, the dataloader will filter out images that have no associated +# annotations at train time. +_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True + +# ---------------------------------------------------------------------------- # +# Backbone options +# ---------------------------------------------------------------------------- # +_C.MODEL.BACKBONE = CN() + +_C.MODEL.BACKBONE.NAME = "build_resnet_backbone" +# Freeze the first several stages so they are not trained. +# There are 5 stages in ResNet. The first is a convolution, and the following +# stages are each group of residual blocks. 
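# Minimal usage sketch of how these defaults are consumed (assumes the standard
# `get_cfg()` helper is exposed by detectron2.config, as in upstream detectron2):
from detectron2.config import get_cfg

cfg = get_cfg()                              # a clone of the `_C` defaults defined in this file
# cfg.merge_from_file("my_experiment.yaml")  # hypothetical experiment config file
cfg.merge_from_list([
    "INPUT.MIN_SIZE_TRAIN", (640,),          # train with a smaller short side
    "INPUT.CROP.ENABLED", True,              # turn on random cropping
    "DATALOADER.NUM_WORKERS", 2,
])
cfg.freeze()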
+_C.MODEL.BACKBONE.FREEZE_AT = 2 + + +# ---------------------------------------------------------------------------- # +# FPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.FPN = CN() +# Names of the input feature maps to be used by FPN +# They must have contiguous power of 2 strides +# e.g., ["res2", "res3", "res4", "res5"] +_C.MODEL.FPN.IN_FEATURES = [] +_C.MODEL.FPN.OUT_CHANNELS = 256 + +# Options: "" (no norm), "GN" +_C.MODEL.FPN.NORM = "" + +# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg" +_C.MODEL.FPN.FUSE_TYPE = "sum" + + +# ---------------------------------------------------------------------------- # +# Proposal generator options +# ---------------------------------------------------------------------------- # +_C.MODEL.PROPOSAL_GENERATOR = CN() +# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals" +_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" +# Proposal height and width both need to be greater than MIN_SIZE +# (a the scale used during training or inference) +_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 + + +# ---------------------------------------------------------------------------- # +# Anchor generator options +# ---------------------------------------------------------------------------- # +_C.MODEL.ANCHOR_GENERATOR = CN() +# The generator can be any name in the ANCHOR_GENERATOR registry +_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" +# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input. +# Format: list[list[float]]. SIZES[i] specifies the list of sizes +# to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true, +# or len(SIZES) == 1 is true and size list SIZES[0] is used for all +# IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] +# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect +# ratios are generated by an anchor generator. +# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W) +# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true, +# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used +# for all IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] +# Anchor angles. +# list[list[float]], the angle in degrees, for each input feature map. +# ANGLES[i] specifies the list of angles for IN_FEATURES[i]. +_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] +# Relative offset between the center of the first anchor and the top-left corner of the image +# Value has to be in [0, 1). Recommend to use 0.5, which means half stride. +# The value is not expected to affect model accuracy. +_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 + +# ---------------------------------------------------------------------------- # +# RPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.RPN = CN() +_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY + +# Names of the input feature maps to be used by RPN +# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN +_C.MODEL.RPN.IN_FEATURES = ["res4"] +# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels +# Set to -1 or a large value, e.g. 
100000, to disable pruning anchors +_C.MODEL.RPN.BOUNDARY_THRESH = -1 +# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD] +# Minimum overlap required between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD +# ==> positive RPN example: 1) +# Maximum overlap allowed between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD +# ==> negative RPN example: 0) +# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD) +# are ignored (-1) +_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] +_C.MODEL.RPN.IOU_LABELS = [0, -1, 1] +# Total number of RPN examples per image +_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 +# Target fraction of foreground (positive) examples per RPN minibatch +_C.MODEL.RPN.POSITIVE_FRACTION = 0.5 +# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets +_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 +_C.MODEL.RPN.LOSS_WEIGHT = 1.0 +# Number of top scoring RPN proposals to keep before applying NMS +# When FPN is used, this is *per FPN level* (not total) +_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 +_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 +# Number of top scoring RPN proposals to keep after applying NMS +# When FPN is used, this limit is applied per level and then again to the union +# of proposals from all levels +# NOTE: When FPN is used, the meaning of this config is different from Detectron1. +# It means per-batch topk in Detectron1, but per-image topk here. +# See "modeling/rpn/rpn_outputs.py" for details. +_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 +_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 +# NMS threshold used on RPN proposals +_C.MODEL.RPN.NMS_THRESH = 0.7 + +# ---------------------------------------------------------------------------- # +# ROI HEADS options +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_HEADS = CN() +_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" +# Number of foreground classes +_C.MODEL.ROI_HEADS.NUM_CLASSES = 80 +# Names of the input feature maps to be used by ROI heads +# Currently all heads (box, mask, ...) use the same input feature map list +# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN +_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] +# IOU overlap ratios [IOU_THRESHOLD] +# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD) +# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD) +_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] +_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] +# RoI minibatch size *per image* (number of regions of interest [ROIs]) +# Total number of RoIs per training minibatch = +# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH +# E.g., a common configuration is: 512 * 16 = 8192 +_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 +# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0) +_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 + +# Only used on test mode + +# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to +# balance obtaining high recall with not having too many low precision +# detections that will slow down inference post processing steps (like NMS) +# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down +# inference. 
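# Worked illustration of the IOU_THRESHOLDS / IOU_LABELS convention described above.
# This is a simplified, standalone stand-in for detectron2's Matcher, not the actual code:
import numpy as np

def label_anchors(max_ious, bg_thresh=0.3, fg_thresh=0.7):
    """Map each anchor's best IoU with ground truth to a label: 1 = positive, 0 = negative, -1 = ignored."""
    labels = np.full(max_ious.shape, -1, dtype=np.int64)  # in-between IoUs are ignored
    labels[max_ious < bg_thresh] = 0                       # below the background threshold
    labels[max_ious >= fg_thresh] = 1                      # at/above the foreground threshold
    return labels

print(label_anchors(np.array([0.1, 0.5, 0.9])))            # -> [ 0 -1  1]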
+_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 +# Overlap threshold used for non-maximum suppression (suppress boxes with +# IoU >= this threshold) +_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 +# If True, augment proposals with ground-truth boxes before sampling proposals to +# train ROI heads. +_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True + +# ---------------------------------------------------------------------------- # +# Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_HEAD = CN() +# C4 don't use head name option +# Options for non-C4 models: FastRCNNConvFCHead, +_C.MODEL.ROI_BOX_HEAD.NAME = "" +# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets +# These are empirically chosen to approximately lead to unit variance targets +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 +_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + +_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 +# Hidden layer dimension for FC layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 +_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 +# Channel dimension for Conv layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. +# Options: "" (no norm), "GN", "SyncBN". +_C.MODEL.ROI_BOX_HEAD.NORM = "" +# Whether to use class agnostic for bbox regression +_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False +# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes. +_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False + +# ---------------------------------------------------------------------------- # +# Cascaded Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_CASCADE_HEAD = CN() +# The number of cascade stages is implicitly defined by the length of the following two configs. +_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( + (10.0, 10.0, 5.0, 5.0), + (20.0, 20.0, 10.0, 10.0), + (30.0, 30.0, 15.0, 15.0), +) +_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) + + +# ---------------------------------------------------------------------------- # +# Mask Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_MASK_HEAD = CN() +_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" +_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head +_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. +# Options: "" (no norm), "GN", "SyncBN". 
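# Worked sketch of what BBOX_REG_WEIGHTS scales: the standard Fast R-CNN (dx, dy, dw, dh)
# regression targets. This is a simplified stand-in for detectron2's Box2BoxTransform:
import math

def box_deltas(proposal, gt, weights=(10.0, 10.0, 5.0, 5.0)):
    """(x1, y1, x2, y2) proposal / ground-truth boxes -> weighted regression targets."""
    pw, ph = proposal[2] - proposal[0], proposal[3] - proposal[1]
    gw, gh = gt[2] - gt[0], gt[3] - gt[1]
    px, py = proposal[0] + 0.5 * pw, proposal[1] + 0.5 * ph
    gx, gy = gt[0] + 0.5 * gw, gt[1] + 0.5 * gh
    wx, wy, ww, wh = weights
    return (wx * (gx - px) / pw, wy * (gy - py) / ph,
            ww * math.log(gw / pw), wh * math.log(gh / ph))

# Larger weights push the (already small) targets toward unit variance, which is why the
# later cascade stages use progressively larger weights: the boxes they refine are already
# close to the ground truth, so the raw deltas shrink.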
+_C.MODEL.ROI_MASK_HEAD.NORM = "" +# Whether to use class agnostic for mask prediction +_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" + + +# ---------------------------------------------------------------------------- # +# Keypoint Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_KEYPOINT_HEAD = CN() +_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) +_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO. + +# Images with too few (or no) keypoints are excluded from training. +_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 +# Normalize by the total number of visible keypoints in the minibatch if True. +# Otherwise, normalize by the total number of keypoints that could ever exist +# in the minibatch. +# The keypoint softmax loss is only calculated on visible keypoints. +# Since the number of visible keypoints can vary significantly between +# minibatches, this has the effect of up-weighting the importance of +# minibatches with few visible keypoints. (Imagine the extreme case of +# only one visible keypoint versus N: in the case of N, each one +# contributes 1/N to the gradient compared to the single keypoint +# determining the gradient direction). Instead, we can normalize the +# loss by the total number of keypoints, if it were the case that all +# keypoints were visible in a full minibatch. (Returning to the example, +# this means that the one visible keypoint contributes as much as each +# of the N keypoints.) +_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True +# Multi-task loss weight to use for keypoints +# Recommended values: +# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True +# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False +_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" + +# ---------------------------------------------------------------------------- # +# Semantic Segmentation Head +# ---------------------------------------------------------------------------- # +_C.MODEL.SEM_SEG_HEAD = CN() +_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" +_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] +# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for +# the correposnding pixel. +_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 +# Number of classes in the semantic segmentation head +_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 +# Number of channels in the 3x3 convs inside semantic-FPN heads. +_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 +# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride. +_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 +# Normalization method for the convolution layers. Options: "" (no norm), "GN". +_C.MODEL.SEM_SEG_HEAD.NORM = "GN" +_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 + +_C.MODEL.PANOPTIC_FPN = CN() +# Scaling of all losses from instance detection / segmentation head. 
+_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 + +# options when combining instance & semantic segmentation outputs +_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) +_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 +_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 +_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 + + +# ---------------------------------------------------------------------------- # +# RetinaNet Head +# ---------------------------------------------------------------------------- # +_C.MODEL.RETINANET = CN() + +# This is the number of foreground classes. +_C.MODEL.RETINANET.NUM_CLASSES = 80 + +_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] + +# Convolutions to use in the cls and bbox tower +# NOTE: this doesn't include the last conv for logits +_C.MODEL.RETINANET.NUM_CONVS = 4 + +# IoU overlap ratio [bg, fg] for labeling anchors. +# Anchors with < bg are labeled negative (0) +# Anchors with >= bg and < fg are ignored (-1) +# Anchors with >= fg are labeled positive (1) +_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] +_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] + +# Prior prob for rare case (i.e. foreground) at the beginning of training. +# This is used to set the bias for the logits layer of the classifier subnet. +# This improves training stability in the case of heavy class imbalance. +_C.MODEL.RETINANET.PRIOR_PROB = 0.01 + +# Inference cls score threshold, only anchors with score > INFERENCE_TH are +# considered for inference (to improve speed) +_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 +_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 +_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 + +# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets +_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) + +# Loss parameters +_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 +_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 +_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 + + +# ---------------------------------------------------------------------------- # +# ResNe[X]t options (ResNets = {ResNet, ResNeXt} +# Note that parts of a resnet may be used for both the backbone and the head +# These options apply to both +# ---------------------------------------------------------------------------- # +_C.MODEL.RESNETS = CN() + +_C.MODEL.RESNETS.DEPTH = 50 +_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone + +# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt +_C.MODEL.RESNETS.NUM_GROUPS = 1 + +# Options: FrozenBN, GN, "SyncBN", "BN" +_C.MODEL.RESNETS.NORM = "FrozenBN" + +# Baseline width of each group. +# Scaling this parameters will scale the width of all bottleneck layers. +_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 + +# Place the stride 2 conv on the 1x1 filter +# Use True only for the original MSRA ResNet; use False for C2 and Torch models +_C.MODEL.RESNETS.STRIDE_IN_1X1 = True + +# Apply dilation in stage "res5" +_C.MODEL.RESNETS.RES5_DILATION = 1 + +# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet +# For R18 and R34, this needs to be set to 64 +_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 +_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 + +# Apply Deformable Convolution in stages +# Specify if apply deform_conv on Res2, Res3, Res4, Res5 +_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] +# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168); +# Use False for DeformableV1. 
+_C.MODEL.RESNETS.DEFORM_MODULATED = False +# Number of groups in deformable conv. +_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 + + +# ---------------------------------------------------------------------------- # +# Solver +# ---------------------------------------------------------------------------- # +_C.SOLVER = CN() + +# See detectron2/solver/build.py for LR scheduler options +_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" + +_C.SOLVER.MAX_ITER = 40000 + +_C.SOLVER.BASE_LR = 0.001 + +_C.SOLVER.MOMENTUM = 0.9 + +_C.SOLVER.NESTEROV = False + +_C.SOLVER.WEIGHT_DECAY = 0.0001 +# The weight decay that's applied to parameters of normalization layers +# (typically the affine transformation) +_C.SOLVER.WEIGHT_DECAY_NORM = 0.0 + +_C.SOLVER.GAMMA = 0.1 +# The iteration number to decrease learning rate by GAMMA. +_C.SOLVER.STEPS = (30000,) + +_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 +_C.SOLVER.WARMUP_ITERS = 1000 +_C.SOLVER.WARMUP_METHOD = "linear" + +# Save a checkpoint after every this number of iterations +_C.SOLVER.CHECKPOINT_PERIOD = 5000 + +# Number of images per batch across all machines. +# If we have 16 GPUs and IMS_PER_BATCH = 32, +# each GPU will see 2 images per batch. +_C.SOLVER.IMS_PER_BATCH = 16 + +# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for +# biases. This is not useful (at least for recent models). You should avoid +# changing these and they exist only to reproduce Detectron v1 training if +# desired. +_C.SOLVER.BIAS_LR_FACTOR = 1.0 +_C.SOLVER.WEIGHT_DECAY_BIAS = _C.SOLVER.WEIGHT_DECAY + +# Gradient clipping +_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) +# Type of gradient clipping, currently 2 values are supported: +# - "value": the absolute values of elements of each gradients are clipped +# - "norm": the norm of the gradient for each parameter is clipped thus +# affecting all elements in the parameter +_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" +# Maximum absolute value used for clipping gradients +_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 +# Floating point number p for L-p norm to be used with the "norm" +# gradient clipping type; for L-inf, please specify .inf +_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 + +# ---------------------------------------------------------------------------- # +# Specific test options +# ---------------------------------------------------------------------------- # +_C.TEST = CN() +# For end-to-end tests to verify the expected accuracy. +# Each item is [task, metric, value, tolerance] +# e.g.: [['bbox', 'AP', 38.5, 0.2]] +_C.TEST.EXPECTED_RESULTS = [] +# The period (in terms of steps) to evaluate the model during training. +# Set to 0 to disable. +_C.TEST.EVAL_PERIOD = 0 +# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval +# When empty it will use the defaults in COCO. +# Otherwise it should have the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. +_C.TEST.KEYPOINT_OKS_SIGMAS = [] +# Maximum number of detections to return per image during inference (100 is +# based on the limit established for the COCO dataset). 
+_C.TEST.DETECTIONS_PER_IMAGE = 100 + +_C.TEST.AUG = CN({"ENABLED": False}) +_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) +_C.TEST.AUG.MAX_SIZE = 4000 +_C.TEST.AUG.FLIP = True + +_C.TEST.PRECISE_BN = CN({"ENABLED": False}) +_C.TEST.PRECISE_BN.NUM_ITER = 200 + +# ---------------------------------------------------------------------------- # +# Misc options +# ---------------------------------------------------------------------------- # +# Directory where output files are written +_C.OUTPUT_DIR = "./output" +# Set seed to negative to fully randomize everything. +# Set seed to positive to use a fixed seed. Note that a fixed seed increases +# reproducibility but does not guarantee fully deterministic behavior. +# Disabling all parallelism further increases reproducibility. +_C.SEED = -1 +# Benchmark different cudnn algorithms. +# If input images have very different sizes, this option will have large overhead +# for about 10k iterations. It usually hurts total time, but can benefit for certain models. +# If input images have the same or similar sizes, benchmark is often helpful. +_C.CUDNN_BENCHMARK = False +# The period (in terms of steps) for minibatch visualization at train time. +# Set to 0 to disable. +_C.VIS_PERIOD = 0 + +# global config is for quick hack purposes. +# You can set them in command line or config files, +# and access it with: +# +# from detectron2.config import global_cfg +# print(global_cfg.HACK) +# +# Do not commit any configs into it. +_C.GLOBAL = CN() +_C.GLOBAL.HACK = 1.0 diff --git a/preprocess/mhp_extension/detectron2/detectron2/engine/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a4538da3e66593e4ef8916cd9cbca3c83b8c14e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/engine/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +from .launch import * +from .train_loop import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) +# but still make them available here +from .hooks import * +from .defaults import * diff --git a/preprocess/mhp_extension/detectron2/detectron2/engine/defaults.py b/preprocess/mhp_extension/detectron2/detectron2/engine/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..db9ab68f21d77b9e3be730c4784abe665df3d96a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/engine/defaults.py @@ -0,0 +1,531 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +This file contains components with some default boilerplate logic user may need +in training / testing. They will not work for everyone, but many users may find them useful. + +The behavior of functions/classes in this file is subject to change, +since they are meant to represent the "common default behavior" people need in their projects. 
+""" + +import argparse +import logging +import os +import sys +from collections import OrderedDict +import torch +from fvcore.common.file_io import PathManager +from fvcore.nn.precise_bn import get_bn_modules +from torch.nn.parallel import DistributedDataParallel + +import detectron2.data.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.data import ( + MetadataCatalog, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.evaluation import ( + DatasetEvaluator, + inference_on_dataset, + print_csv_format, + verify_results, +) +from detectron2.modeling import build_model +from detectron2.solver import build_lr_scheduler, build_optimizer +from detectron2.utils import comm +from detectron2.utils.collect_env import collect_env_info +from detectron2.utils.env import seed_all_rng +from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter +from detectron2.utils.logger import setup_logger + +from . import hooks +from .train_loop import SimpleTrainer + +__all__ = ["default_argument_parser", "default_setup", "DefaultPredictor", "DefaultTrainer"] + + +def default_argument_parser(epilog=None): + """ + Create a parser with some common arguments used by detectron2 users. + + Args: + epilog (str): epilog passed to ArgumentParser describing the usage. + + Returns: + argparse.ArgumentParser: + """ + parser = argparse.ArgumentParser( + epilog=epilog + or f""" +Examples: + +Run on single machine: + $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth + +Run on multiple machines: + (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url [--other-flags] + (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url [--other-flags] +""", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument( + "--resume", + action="store_true", + help="whether to attempt to resume from the checkpoint directory", + ) + parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") + parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") + parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") + parser.add_argument( + "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" + ) + + # PyTorch still may leave orphan processes in multi-gpu training. + # Therefore we use a deterministic way to obtain port, + # so that users are aware of orphan processes by seeing the port occupied. + port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14 + parser.add_argument( + "--dist-url", + default="tcp://127.0.0.1:{}".format(port), + help="initialization URL for pytorch distributed backend. See " + "https://pytorch.org/docs/stable/distributed.html for details.", + ) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser + + +def default_setup(cfg, args): + """ + Perform some basic common setups at the beginning of a job, including: + + 1. Set up the detectron2 logger + 2. Log basic information about environment, cmdline arguments, and config + 3. 
Backup the config to the output directory + + Args: + cfg (CfgNode): the full config to be used + args (argparse.NameSpace): the command line arguments to be logged + """ + output_dir = cfg.OUTPUT_DIR + if comm.is_main_process() and output_dir: + PathManager.mkdirs(output_dir) + + rank = comm.get_rank() + setup_logger(output_dir, distributed_rank=rank, name="fvcore") + logger = setup_logger(output_dir, distributed_rank=rank) + + logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size())) + logger.info("Environment info:\n" + collect_env_info()) + + logger.info("Command line arguments: " + str(args)) + if hasattr(args, "config_file") and args.config_file != "": + logger.info( + "Contents of args.config_file={}:\n{}".format( + args.config_file, PathManager.open(args.config_file, "r").read() + ) + ) + + logger.info("Running with full config:\n{}".format(cfg)) + if comm.is_main_process() and output_dir: + # Note: some of our scripts may expect the existence of + # config.yaml in output directory + path = os.path.join(output_dir, "config.yaml") + with PathManager.open(path, "w") as f: + f.write(cfg.dump()) + logger.info("Full config saved to {}".format(path)) + + # make sure each worker has a different, yet deterministic seed if specified + seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank) + + # cudnn benchmark has large overhead. It shouldn't be used considering the small size of + # typical validation set. + if not (hasattr(args, "eval_only") and args.eval_only): + torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK + + +class DefaultPredictor: + """ + Create a simple end-to-end predictor with the given config that runs on + single device for a single input image. + + Compared to using the model directly, this class does the following additions: + + 1. Load checkpoint from `cfg.MODEL.WEIGHTS`. + 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`. + 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`. + 4. Take one input image and produce a single output, instead of a batch. + + If you'd like to do anything more fancy, please refer to its source code + as examples to build and use the model manually. + + Attributes: + metadata (Metadata): the metadata of the underlying dataset, obtained from + cfg.DATASETS.TEST. + + Examples: + + .. code-block:: python + + pred = DefaultPredictor(cfg) + inputs = cv2.imread("input.jpg") + outputs = pred(inputs) + """ + + def __init__(self, cfg): + self.cfg = cfg.clone() # cfg can be modified by model + self.model = build_model(self.cfg) + self.model.eval() + self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) + + checkpointer = DetectionCheckpointer(self.model) + checkpointer.load(cfg.MODEL.WEIGHTS) + + self.transform_gen = T.ResizeShortestEdge( + [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST + ) + + self.input_format = cfg.INPUT.FORMAT + assert self.input_format in ["RGB", "BGR"], self.input_format + + def __call__(self, original_image): + """ + Args: + original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). + + Returns: + predictions (dict): + the output of the model for one image only. + See :doc:`/tutorials/models` for details about the format. + """ + with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 + # Apply pre-processing to image. 
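# The steps below implement the pre-processing promised in the class docstring:
# 1. The input is assumed to be BGR (e.g. from cv2.imread); it is flipped to RGB only
#    if cfg.INPUT.FORMAT says the model expects RGB.
# 2. ResizeShortestEdge rescales the shorter side to INPUT.MIN_SIZE_TEST, capped by
#    INPUT.MAX_SIZE_TEST.
# 3. The HxWxC uint8 array becomes a CxHxW float32 tensor, and the original
#    height/width are passed along so predictions can be mapped back to the input size.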
+ if self.input_format == "RGB": + # whether the model expects BGR inputs or RGB + original_image = original_image[:, :, ::-1] + height, width = original_image.shape[:2] + image = self.transform_gen.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) + + inputs = {"image": image, "height": height, "width": width} + predictions = self.model([inputs])[0] + return predictions + + +class DefaultTrainer(SimpleTrainer): + """ + A trainer with default training logic. Compared to `SimpleTrainer`, it + contains the following logic in addition: + + 1. Create model, optimizer, scheduler, dataloader from the given config. + 2. Load a checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when + `resume_or_load` is called. + 3. Register a few common hooks. + + It is created to simplify the **standard model training workflow** and reduce code boilerplate + for users who only need the standard training workflow, with standard features. + It means this class makes *many assumptions* about your training logic that + may easily become invalid in a new research. In fact, any assumptions beyond those made in the + :class:`SimpleTrainer` are too much for research. + + The code of this class has been annotated about restrictive assumptions it mades. + When they do not work for you, you're encouraged to: + + 1. Overwrite methods of this class, OR: + 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and + nothing else. You can then add your own hooks if needed. OR: + 3. Write your own training loop similar to `tools/plain_train_net.py`. + + Also note that the behavior of this class, like other functions/classes in + this file, is not stable, since it is meant to represent the "common default behavior". + It is only guaranteed to work well with the standard models and training workflow in detectron2. + To obtain more stable behavior, write your own training logic with other public APIs. + + Examples: + + .. code-block:: python + + trainer = DefaultTrainer(cfg) + trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS + trainer.train() + + Attributes: + scheduler: + checkpointer (DetectionCheckpointer): + cfg (CfgNode): + """ + + def __init__(self, cfg): + """ + Args: + cfg (CfgNode): + """ + logger = logging.getLogger("detectron2") + if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2 + setup_logger() + # Assume these objects must be constructed in this order. + model = self.build_model(cfg) + optimizer = self.build_optimizer(cfg, model) + data_loader = self.build_train_loader(cfg) + + # For training, wrap with DDP. But don't need this for inference. + if comm.get_world_size() > 1: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + super().__init__(model, data_loader, optimizer) + + self.scheduler = self.build_lr_scheduler(cfg, optimizer) + # Assume no other objects need to be checkpointed. + # We can later make it checkpoint the stateful hooks + self.checkpointer = DetectionCheckpointer( + # Assume you want to save checkpoints together with logs/statistics + model, + cfg.OUTPUT_DIR, + optimizer=optimizer, + scheduler=self.scheduler, + ) + self.start_iter = 0 + self.max_iter = cfg.SOLVER.MAX_ITER + self.cfg = cfg + + self.register_hooks(self.build_hooks()) + + def resume_or_load(self, resume=True): + """ + If `resume==True`, and last checkpoint exists, resume from it and load all + checkpointables (eg. optimizer and scheduler). 
+ + Otherwise, load the model specified by the config (skip all checkpointables). + + Args: + resume (bool): whether to do resume or not + """ + checkpoint = self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) + self.start_iter = checkpoint.get("iteration", -1) if resume else -1 + # The checkpoint stores the training iteration that just finished, thus we start + # at the next iteration (or iter zero if there's no checkpoint). + self.start_iter += 1 + + def build_hooks(self): + """ + Build a list of default hooks, including timing, evaluation, + checkpointing, lr scheduling, precise BN, writing events. + + Returns: + list[HookBase]: + """ + cfg = self.cfg.clone() + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN + + ret = [ + hooks.IterationTimer(), + hooks.LRScheduler(self.optimizer, self.scheduler), + hooks.PreciseBN( + # Run at the same freq as (but before) evaluation. + cfg.TEST.EVAL_PERIOD, + self.model, + # Build a new data loader to not affect training + self.build_train_loader(cfg), + cfg.TEST.PRECISE_BN.NUM_ITER, + ) + if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) + else None, + ] + + # Do PreciseBN before checkpointer, because it updates the model and need to + # be saved by checkpointer. + # This is not always the best: if checkpointing has a different frequency, + # some checkpoints may have more precise statistics than others. + if comm.is_main_process(): + ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) + + def test_and_save_results(): + self._last_eval_results = self.test(self.cfg, self.model) + return self._last_eval_results + + # Do evaluation after checkpointer, because then if it fails, + # we can use the saved checkpoint to debug. + ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) + + if comm.is_main_process(): + # run writers in the end, so that evaluation metrics are written + ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) + return ret + + def build_writers(self): + """ + Build a list of writers to be used. By default it contains + writers that write metrics to the screen, + a json file, and a tensorboard event file respectively. + If you'd like a different list of writers, you can overwrite it in + your trainer. + + Returns: + list[EventWriter]: a list of :class:`EventWriter` objects. + + It is now implemented by: + + .. code-block:: python + + return [ + CommonMetricPrinter(self.max_iter), + JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")), + TensorboardXWriter(self.cfg.OUTPUT_DIR), + ] + + """ + # Here the default print/log frequency of each writer is used. + return [ + # It may not always print what you want to see, since it prints "common" metrics only. + CommonMetricPrinter(self.max_iter), + JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")), + TensorboardXWriter(self.cfg.OUTPUT_DIR), + ] + + def train(self): + """ + Run training. + + Returns: + OrderedDict of results, if evaluation is enabled. Otherwise None. + """ + super().train(self.start_iter, self.max_iter) + if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): + assert hasattr( + self, "_last_eval_results" + ), "No evaluation results obtained during training!" + verify_results(self.cfg, self._last_eval_results) + return self._last_eval_results + + @classmethod + def build_model(cls, cfg): + """ + Returns: + torch.nn.Module: + + It now calls :func:`detectron2.modeling.build_model`. 
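# Customization sketch enabled by these overridable classmethods: subclass the
# DefaultTrainer defined above and replace only the pieces you need, e.g. the evaluator
# that the base class leaves unimplemented. COCOEvaluator is detectron2's standard
# COCO-style evaluator; treat the exact constructor arguments as an assumption of this sketch.
import os
from detectron2.evaluation import COCOEvaluator

class MyTrainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        return COCOEvaluator(dataset_name, cfg, True, os.path.join(cfg.OUTPUT_DIR, "inference"))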
+ Overwrite it if you'd like a different model. + """ + model = build_model(cfg) + logger = logging.getLogger(__name__) + logger.info("Model:\n{}".format(model)) + return model + + @classmethod + def build_optimizer(cls, cfg, model): + """ + Returns: + torch.optim.Optimizer: + + It now calls :func:`detectron2.solver.build_optimizer`. + Overwrite it if you'd like a different optimizer. + """ + return build_optimizer(cfg, model) + + @classmethod + def build_lr_scheduler(cls, cfg, optimizer): + """ + It now calls :func:`detectron2.solver.build_lr_scheduler`. + Overwrite it if you'd like a different scheduler. + """ + return build_lr_scheduler(cfg, optimizer) + + @classmethod + def build_train_loader(cls, cfg): + """ + Returns: + iterable + + It now calls :func:`detectron2.data.build_detection_train_loader`. + Overwrite it if you'd like a different data loader. + """ + return build_detection_train_loader(cfg) + + @classmethod + def build_test_loader(cls, cfg, dataset_name): + """ + Returns: + iterable + + It now calls :func:`detectron2.data.build_detection_test_loader`. + Overwrite it if you'd like a different data loader. + """ + return build_detection_test_loader(cfg, dataset_name) + + @classmethod + def build_evaluator(cls, cfg, dataset_name): + """ + Returns: + DatasetEvaluator or None + + It is not implemented by default. + """ + raise NotImplementedError( + """ +If you want DefaultTrainer to automatically run evaluation, +please implement `build_evaluator()` in subclasses (see train_net.py for example). +Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example). +""" + ) + + @classmethod + def test(cls, cfg, model, evaluators=None): + """ + Args: + cfg (CfgNode): + model (nn.Module): + evaluators (list[DatasetEvaluator] or None): if None, will call + :meth:`build_evaluator`. Otherwise, must have the same length as + `cfg.DATASETS.TEST`. + + Returns: + dict: a dict of result metrics + """ + logger = logging.getLogger(__name__) + if isinstance(evaluators, DatasetEvaluator): + evaluators = [evaluators] + if evaluators is not None: + assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( + len(cfg.DATASETS.TEST), len(evaluators) + ) + + results = OrderedDict() + for idx, dataset_name in enumerate(cfg.DATASETS.TEST): + data_loader = cls.build_test_loader(cfg, dataset_name) + # When evaluators are passed in as arguments, + # implicitly assume that evaluators can be created before data_loader. + if evaluators is not None: + evaluator = evaluators[idx] + else: + try: + evaluator = cls.build_evaluator(cfg, dataset_name) + except NotImplementedError: + logger.warn( + "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " + "or implement its `build_evaluator` method." + ) + results[dataset_name] = {} + continue + results_i = inference_on_dataset(model, data_loader, evaluator) + results[dataset_name] = results_i + if comm.is_main_process(): + assert isinstance( + results_i, dict + ), "Evaluator must return a dict on the main process. 
Got {} instead.".format( + results_i + ) + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + + if len(results) == 1: + results = list(results.values())[0] + return results diff --git a/preprocess/mhp_extension/detectron2/detectron2/engine/hooks.py b/preprocess/mhp_extension/detectron2/detectron2/engine/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..e5085b4561302d2328ab505568dec4e9fc5ee0ad --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/engine/hooks.py @@ -0,0 +1,427 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import datetime +import itertools +import logging +import os +import tempfile +import time +from collections import Counter +import torch +from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer +from fvcore.common.file_io import PathManager +from fvcore.common.timer import Timer +from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats + +import detectron2.utils.comm as comm +from detectron2.evaluation.testing import flatten_results_dict +from detectron2.utils.events import EventStorage, EventWriter + +from .train_loop import HookBase + +__all__ = [ + "CallbackHook", + "IterationTimer", + "PeriodicWriter", + "PeriodicCheckpointer", + "LRScheduler", + "AutogradProfiler", + "EvalHook", + "PreciseBN", +] + + +""" +Implement some common hooks. +""" + + +class CallbackHook(HookBase): + """ + Create a hook using callback functions provided by the user. + """ + + def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None): + """ + Each argument is a function that takes one argument: the trainer. + """ + self._before_train = before_train + self._before_step = before_step + self._after_step = after_step + self._after_train = after_train + + def before_train(self): + if self._before_train: + self._before_train(self.trainer) + + def after_train(self): + if self._after_train: + self._after_train(self.trainer) + # The functions may be closures that hold reference to the trainer + # Therefore, delete them to avoid circular reference. + del self._before_train, self._after_train + del self._before_step, self._after_step + + def before_step(self): + if self._before_step: + self._before_step(self.trainer) + + def after_step(self): + if self._after_step: + self._after_step(self.trainer) + + +class IterationTimer(HookBase): + """ + Track the time spent for each iteration (each run_step call in the trainer). + Print a summary in the end of training. + + This hook uses the time between the call to its :meth:`before_step` + and :meth:`after_step` methods. + Under the convention that :meth:`before_step` of all hooks should only + take negligible amount of time, the :class:`IterationTimer` hook should be + placed at the beginning of the list of hooks to obtain accurate timing. + """ + + def __init__(self, warmup_iter=3): + """ + Args: + warmup_iter (int): the number of iterations at the beginning to exclude + from timing. 
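# Usage sketch for the CallbackHook defined above: it attaches ad-hoc logic without writing
# a Hook subclass. Each callback receives the trainer; `trainer` here is assumed to be an
# existing TrainerBase / DefaultTrainer instance.
trainer.register_hooks([
    CallbackHook(after_step=lambda t: print("finished iteration", t.iter)),
])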
+ """ + self._warmup_iter = warmup_iter + self._step_timer = Timer() + self._start_time = time.perf_counter() + self._total_timer = Timer() + + def before_train(self): + self._start_time = time.perf_counter() + self._total_timer.reset() + self._total_timer.pause() + + def after_train(self): + logger = logging.getLogger(__name__) + total_time = time.perf_counter() - self._start_time + total_time_minus_hooks = self._total_timer.seconds() + hook_time = total_time - total_time_minus_hooks + + num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter + + if num_iter > 0 and total_time_minus_hooks > 0: + # Speed is meaningful only after warmup + # NOTE this format is parsed by grep in some scripts + logger.info( + "Overall training speed: {} iterations in {} ({:.4f} s / it)".format( + num_iter, + str(datetime.timedelta(seconds=int(total_time_minus_hooks))), + total_time_minus_hooks / num_iter, + ) + ) + + logger.info( + "Total training time: {} ({} on hooks)".format( + str(datetime.timedelta(seconds=int(total_time))), + str(datetime.timedelta(seconds=int(hook_time))), + ) + ) + + def before_step(self): + self._step_timer.reset() + self._total_timer.resume() + + def after_step(self): + # +1 because we're in after_step + iter_done = self.trainer.iter - self.trainer.start_iter + 1 + if iter_done >= self._warmup_iter: + sec = self._step_timer.seconds() + self.trainer.storage.put_scalars(time=sec) + else: + self._start_time = time.perf_counter() + self._total_timer.reset() + + self._total_timer.pause() + + +class PeriodicWriter(HookBase): + """ + Write events to EventStorage periodically. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def __init__(self, writers, period=20): + """ + Args: + writers (list[EventWriter]): a list of EventWriter objects + period (int): + """ + self._writers = writers + for w in writers: + assert isinstance(w, EventWriter), w + self._period = period + + def after_step(self): + if (self.trainer.iter + 1) % self._period == 0 or ( + self.trainer.iter == self.trainer.max_iter - 1 + ): + for writer in self._writers: + writer.write() + + def after_train(self): + for writer in self._writers: + writer.close() + + +class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase): + """ + Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook. + + Note that when used as a hook, + it is unable to save additional data other than what's defined + by the given `checkpointer`. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def before_train(self): + self.max_iter = self.trainer.max_iter + + def after_step(self): + # No way to use **kwargs + self.step(self.trainer.iter) + + +class LRScheduler(HookBase): + """ + A hook which executes a torch builtin LR scheduler and summarizes the LR. + It is executed after every iteration. 
+ """ + + def __init__(self, optimizer, scheduler): + """ + Args: + optimizer (torch.optim.Optimizer): + scheduler (torch.optim._LRScheduler) + """ + self._optimizer = optimizer + self._scheduler = scheduler + + # NOTE: some heuristics on what LR to summarize + # summarize the param group with most parameters + largest_group = max(len(g["params"]) for g in optimizer.param_groups) + + if largest_group == 1: + # If all groups have one parameter, + # then find the most common initial LR, and use it for summary + lr_count = Counter([g["lr"] for g in optimizer.param_groups]) + lr = lr_count.most_common()[0][0] + for i, g in enumerate(optimizer.param_groups): + if g["lr"] == lr: + self._best_param_group_id = i + break + else: + for i, g in enumerate(optimizer.param_groups): + if len(g["params"]) == largest_group: + self._best_param_group_id = i + break + + def after_step(self): + lr = self._optimizer.param_groups[self._best_param_group_id]["lr"] + self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False) + self._scheduler.step() + + +class AutogradProfiler(HookBase): + """ + A hook which runs `torch.autograd.profiler.profile`. + + Examples: + + .. code-block:: python + + hooks.AutogradProfiler( + lambda trainer: trainer.iter > 10 and trainer.iter < 20, self.cfg.OUTPUT_DIR + ) + + The above example will run the profiler for iteration 10~20 and dump + results to ``OUTPUT_DIR``. We did not profile the first few iterations + because they are typically slower than the rest. + The result files can be loaded in the ``chrome://tracing`` page in chrome browser. + + Note: + When used together with NCCL on older version of GPUs, + autograd profiler may cause deadlock because it unnecessarily allocates + memory on every device it sees. The memory management calls, if + interleaved with NCCL calls, lead to deadlock on GPUs that do not + support `cudaLaunchCooperativeKernelMultiDevice`. + """ + + def __init__(self, enable_predicate, output_dir, *, use_cuda=True): + """ + Args: + enable_predicate (callable[trainer -> bool]): a function which takes a trainer, + and returns whether to enable the profiler. + It will be called once every step, and can be used to select which steps to profile. + output_dir (str): the output directory to dump tracing files. + use_cuda (bool): same as in `torch.autograd.profiler.profile`. + """ + self._enable_predicate = enable_predicate + self._use_cuda = use_cuda + self._output_dir = output_dir + + def before_step(self): + if self._enable_predicate(self.trainer): + self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda) + self._profiler.__enter__() + else: + self._profiler = None + + def after_step(self): + if self._profiler is None: + return + self._profiler.__exit__(None, None, None) + PathManager.mkdirs(self._output_dir) + out_file = os.path.join( + self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter) + ) + if "://" not in out_file: + self._profiler.export_chrome_trace(out_file) + else: + # Support non-posix filesystems + with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d: + tmp_file = os.path.join(d, "tmp.json") + self._profiler.export_chrome_trace(tmp_file) + with open(tmp_file) as f: + content = f.read() + with PathManager.open(out_file, "w") as f: + f.write(content) + + +class EvalHook(HookBase): + """ + Run an evaluation function periodically, and at the end of training. + + It is executed every ``eval_period`` iterations and after the last iteration. 
+ """ + + def __init__(self, eval_period, eval_function): + """ + Args: + eval_period (int): the period to run `eval_function`. + eval_function (callable): a function which takes no arguments, and + returns a nested dict of evaluation metrics. + + Note: + This hook must be enabled in all or none workers. + If you would like only certain workers to perform evaluation, + give other workers a no-op function (`eval_function=lambda: None`). + """ + self._period = eval_period + self._func = eval_function + + def _do_eval(self): + results = self._func() + + if results: + assert isinstance( + results, dict + ), "Eval function must return a dict. Got {} instead.".format(results) + + flattened_results = flatten_results_dict(results) + for k, v in flattened_results.items(): + try: + v = float(v) + except Exception: + raise ValueError( + "[EvalHook] eval_function should return a nested dict of float. " + "Got '{}: {}' instead.".format(k, v) + ) + self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False) + + # Evaluation may take different time among workers. + # A barrier make them start the next iteration together. + comm.synchronize() + + def after_step(self): + next_iter = self.trainer.iter + 1 + is_final = next_iter == self.trainer.max_iter + if is_final or (self._period > 0 and next_iter % self._period == 0): + self._do_eval() + + def after_train(self): + # func is likely a closure that holds reference to the trainer + # therefore we clean it to avoid circular reference in the end + del self._func + + +class PreciseBN(HookBase): + """ + The standard implementation of BatchNorm uses EMA in inference, which is + sometimes suboptimal. + This class computes the true average of statistics rather than the moving average, + and put true averages to every BN layer in the given model. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def __init__(self, period, model, data_loader, num_iter): + """ + Args: + period (int): the period this hook is run, or 0 to not run during training. + The hook will always run in the end of training. + model (nn.Module): a module whose all BN layers in training mode will be + updated by precise BN. + Note that user is responsible for ensuring the BN layers to be + updated are in training mode when this hook is triggered. + data_loader (iterable): it will produce data to be run by `model(data)`. + num_iter (int): number of iterations used to compute the precise + statistics. + """ + self._logger = logging.getLogger(__name__) + if len(get_bn_modules(model)) == 0: + self._logger.info( + "PreciseBN is disabled because model does not contain BN layers in training mode." + ) + self._disabled = True + return + + self._model = model + self._data_loader = data_loader + self._num_iter = num_iter + self._period = period + self._disabled = False + + self._data_iter = None + + def after_step(self): + next_iter = self.trainer.iter + 1 + is_final = next_iter == self.trainer.max_iter + if is_final or (self._period > 0 and next_iter % self._period == 0): + self.update_stats() + + def update_stats(self): + """ + Update the model with precise statistics. Users can manually call this method. + """ + if self._disabled: + return + + if self._data_iter is None: + self._data_iter = iter(self._data_loader) + + def data_loader(): + for num_iter in itertools.count(1): + if num_iter % 100 == 0: + self._logger.info( + "Running precise-BN ... 
{}/{} iterations.".format(num_iter, self._num_iter) + ) + # This way we can reuse the same iterator + yield next(self._data_iter) + + with EventStorage(): # capture events in a new storage to discard them + self._logger.info( + "Running precise-BN for {} iterations... ".format(self._num_iter) + + "Note that this could produce different statistics every time." + ) + update_bn_stats(self._model, data_loader(), self._num_iter) diff --git a/preprocess/mhp_extension/detectron2/detectron2/engine/launch.py b/preprocess/mhp_extension/detectron2/detectron2/engine/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..9efbb0395d2c788d8cfe2cbbf66cde6ddc053585 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/engine/launch.py @@ -0,0 +1,89 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +from detectron2.utils import comm + +__all__ = ["launch"] + + +def _find_free_port(): + import socket + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # Binding to port 0 will cause the OS to find an available port for us + sock.bind(("", 0)) + port = sock.getsockname()[1] + sock.close() + # NOTE: there is still a chance the port could be taken by other processes. + return port + + +def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()): + """ + Args: + main_func: a function that will be called by `main_func(*args)` + num_machines (int): the total number of machines + machine_rank (int): the rank of this machine (one per machine) + dist_url (str): url to connect to for distributed jobs, including protocol + e.g. "tcp://127.0.0.1:8686". + Can be set to "auto" to automatically select a free port on localhost + args (tuple): arguments passed to main_func + """ + world_size = num_machines * num_gpus_per_machine + if world_size > 1: + # https://github.com/pytorch/pytorch/pull/14391 + # TODO prctl in spawned processes + + if dist_url == "auto": + assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." + port = _find_free_port() + dist_url = f"tcp://127.0.0.1:{port}" + if num_machines > 1 and dist_url.startswith("file://"): + logger = logging.getLogger(__name__) + logger.warning( + "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" + ) + + mp.spawn( + _distributed_worker, + nprocs=num_gpus_per_machine, + args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args), + daemon=False, + ) + else: + main_func(*args) + + +def _distributed_worker( + local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args +): + assert torch.cuda.is_available(), "cuda is not available. Please check your installation." 
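# Usage sketch of `launch`, following the pattern of detectron2's tools/train_net.py
# (the body of `main` and the `setup` helper are hypothetical stand-ins):
from detectron2.engine import DefaultTrainer, default_argument_parser, launch

def main(args):
    cfg = setup(args)  # hypothetical helper: build and freeze a cfg from args
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
# Inside each spawned worker, global_rank = machine_rank * num_gpus_per_machine + local_rank,
# e.g. machine 1, local GPU 3, with 8 GPUs per machine -> global rank 11.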
+ global_rank = machine_rank * num_gpus_per_machine + local_rank + try: + dist.init_process_group( + backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank + ) + except Exception as e: + logger = logging.getLogger(__name__) + logger.error("Process group URL: {}".format(dist_url)) + raise e + # synchronize is needed here to prevent a possible timeout after calling init_process_group + # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 + comm.synchronize() + + assert num_gpus_per_machine <= torch.cuda.device_count() + torch.cuda.set_device(local_rank) + + # Setup the local process group (which contains ranks within the same machine) + assert comm._LOCAL_PROCESS_GROUP is None + num_machines = world_size // num_gpus_per_machine + for i in range(num_machines): + ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine)) + pg = dist.new_group(ranks_on_i) + if i == machine_rank: + comm._LOCAL_PROCESS_GROUP = pg + + main_func(*args) diff --git a/preprocess/mhp_extension/detectron2/detectron2/engine/train_loop.py b/preprocess/mhp_extension/detectron2/detectron2/engine/train_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..453c9acfde2d65a182fbf18a6bce4b4583df5ca5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/engine/train_loop.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging +import numpy as np +import time +import weakref +import torch + +import detectron2.utils.comm as comm +from detectron2.utils.events import EventStorage + +__all__ = ["HookBase", "TrainerBase", "SimpleTrainer"] + + +class HookBase: + """ + Base class for hooks that can be registered with :class:`TrainerBase`. + + Each hook can implement 4 methods. The way they are called is demonstrated + in the following snippet: + + .. code-block:: python + + hook.before_train() + for iter in range(start_iter, max_iter): + hook.before_step() + trainer.run_step() + hook.after_step() + hook.after_train() + + Notes: + 1. In the hook method, users can access `self.trainer` to access more + properties about the context (e.g., current iteration). + + 2. A hook that does something in :meth:`before_step` can often be + implemented equivalently in :meth:`after_step`. + If the hook takes non-trivial time, it is strongly recommended to + implement the hook in :meth:`after_step` instead of :meth:`before_step`. + The convention is that :meth:`before_step` should only take negligible time. + + Following this convention will allow hooks that do care about the difference + between :meth:`before_step` and :meth:`after_step` (e.g., timer) to + function properly. + + Attributes: + trainer: A weak reference to the trainer object. Set by the trainer when the hook is + registered. + """ + + def before_train(self): + """ + Called before the first iteration. + """ + pass + + def after_train(self): + """ + Called after the last iteration. + """ + pass + + def before_step(self): + """ + Called before each iteration. + """ + pass + + def after_step(self): + """ + Called after each iteration. + """ + pass + + +class TrainerBase: + """ + Base class for iterative trainer with hooks. + + The only assumption we made here is: the training runs in a loop. + A subclass can implement what the loop is. + We made no assumptions about the existence of dataloader, optimizer, model, etc. + + Attributes: + iter(int): the current iteration. 
+ + start_iter(int): The iteration to start with. + By convention the minimum possible value is 0. + + max_iter(int): The iteration to end training. + + storage(EventStorage): An EventStorage that's opened during the course of training. + """ + + def __init__(self): + self._hooks = [] + + def register_hooks(self, hooks): + """ + Register hooks to the trainer. The hooks are executed in the order + they are registered. + + Args: + hooks (list[Optional[HookBase]]): list of hooks + """ + hooks = [h for h in hooks if h is not None] + for h in hooks: + assert isinstance(h, HookBase) + # To avoid circular reference, hooks and trainer cannot own each other. + # This normally does not matter, but will cause memory leak if the + # involved objects contain __del__: + # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/ + h.trainer = weakref.proxy(self) + self._hooks.extend(hooks) + + def train(self, start_iter: int, max_iter: int): + """ + Args: + start_iter, max_iter (int): See docs above + """ + logger = logging.getLogger(__name__) + logger.info("Starting training from iteration {}".format(start_iter)) + + self.iter = self.start_iter = start_iter + self.max_iter = max_iter + + with EventStorage(start_iter) as self.storage: + try: + self.before_train() + for self.iter in range(start_iter, max_iter): + self.before_step() + self.run_step() + self.after_step() + except Exception: + logger.exception("Exception during training:") + raise + finally: + self.after_train() + + def before_train(self): + for h in self._hooks: + h.before_train() + + def after_train(self): + for h in self._hooks: + h.after_train() + + def before_step(self): + for h in self._hooks: + h.before_step() + + def after_step(self): + for h in self._hooks: + h.after_step() + # this guarantees, that in each hook's after_step, storage.iter == trainer.iter + self.storage.step() + + def run_step(self): + raise NotImplementedError + + +class SimpleTrainer(TrainerBase): + """ + A simple trainer for the most common type of task: + single-cost single-optimizer single-data-source iterative optimization. + It assumes that every step, you: + + 1. Compute the loss with a data from the data_loader. + 2. Compute the gradients with the above loss. + 3. Update the model with the optimizer. + + If you want to do anything fancier than this, + either subclass TrainerBase and implement your own `run_step`, + or write your own training loop. + """ + + def __init__(self, model, data_loader, optimizer): + """ + Args: + model: a torch Module. Takes a data from data_loader and returns a + dict of losses. + data_loader: an iterable. Contains data to be used to call model. + optimizer: a torch optimizer. + """ + super().__init__() + + """ + We set the model to training mode in the trainer. + However it's valid to train a model that's in eval mode. + If you want your model (or a submodule of it) to behave + like evaluation during training, you can overwrite its train() method. + """ + model.train() + + self.model = model + self.data_loader = data_loader + self._data_loader_iter = iter(data_loader) + self.optimizer = optimizer + + def run_step(self): + """ + Implement the standard training logic described above. + """ + assert self.model.training, "[SimpleTrainer] model was changed to eval mode!" + start = time.perf_counter() + """ + If you want to do something with the data, you can wrap the dataloader. 
+ """ + data = next(self._data_loader_iter) + data_time = time.perf_counter() - start + + """ + If you want to do something with the losses, you can wrap the model. + """ + loss_dict = self.model(data) + losses = sum(loss_dict.values()) + self._detect_anomaly(losses, loss_dict) + + metrics_dict = loss_dict + metrics_dict["data_time"] = data_time + self._write_metrics(metrics_dict) + + """ + If you need to accumulate gradients or something similar, you can + wrap the optimizer with your custom `zero_grad()` method. + """ + self.optimizer.zero_grad() + losses.backward() + + """ + If you need gradient clipping/scaling or other processing, you can + wrap the optimizer with your custom `step()` method. + """ + self.optimizer.step() + + def _detect_anomaly(self, losses, loss_dict): + if not torch.isfinite(losses).all(): + raise FloatingPointError( + "Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format( + self.iter, loss_dict + ) + ) + + def _write_metrics(self, metrics_dict: dict): + """ + Args: + metrics_dict (dict): dict of scalar metrics + """ + metrics_dict = { + k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v) + for k, v in metrics_dict.items() + } + # gather metrics among all workers for logging + # This assumes we do DDP-style training, which is currently the only + # supported method in detectron2. + all_metrics_dict = comm.gather(metrics_dict) + + if comm.is_main_process(): + if "data_time" in all_metrics_dict[0]: + # data_time among workers can have high variance. The actual latency + # caused by data_time is the maximum among workers. + data_time = np.max([x.pop("data_time") for x in all_metrics_dict]) + self.storage.put_scalar("data_time", data_time) + + # average the rest metrics + metrics_dict = { + k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys() + } + total_losses_reduced = sum(loss for loss in metrics_dict.values()) + + self.storage.put_scalar("total_loss", total_losses_reduced) + if len(metrics_dict) > 1: + self.storage.put_scalars(**metrics_dict) diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d2f1001af2eb46060db362a94d9dae26e3fb4e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator +from .coco_evaluation import COCOEvaluator +from .rotated_coco_evaluation import RotatedCOCOEvaluator +from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset +from .lvis_evaluation import LVISEvaluator +from .panoptic_evaluation import COCOPanopticEvaluator +from .pascal_voc_evaluation import PascalVOCDetectionEvaluator +from .sem_seg_evaluation import SemSegEvaluator +from .testing import print_csv_format, verify_results + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/cityscapes_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/cityscapes_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..f6287a8980b10d9d13f0f0e6a0f0e1a16ff3566c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/cityscapes_evaluation.py @@ -0,0 +1,187 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import glob +import logging +import numpy as np +import os +import tempfile +from collections import OrderedDict +import torch +from fvcore.common.file_io import PathManager +from PIL import Image + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm + +from .evaluator import DatasetEvaluator + + +class CityscapesEvaluator(DatasetEvaluator): + """ + Base class for evaluation using cityscapes API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): the name of the dataset. + It must have the following metadata associated with it: + "thing_classes", "gt_dir". + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") + self._temp_dir = self._working_dir.name + # All workers will write to the same results directory + # TODO this does not work in distributed training + self._temp_dir = comm.all_gather(self._temp_dir)[0] + if self._temp_dir != self._working_dir.name: + self._working_dir.cleanup() + self._logger.info( + "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) + ) + + +class CityscapesInstanceEvaluator(CityscapesEvaluator): + """ + Evaluate instance segmentation results using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. 
+ """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import name2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") + + output = output["instances"].to(self._cpu_device) + num_instances = len(output) + with open(pred_txt, "w") as fout: + for i in range(num_instances): + pred_class = output.pred_classes[i] + classes = self._metadata.thing_classes[pred_class] + class_id = name2label[classes].id + score = output.scores[i] + mask = output.pred_masks[i].numpy().astype("uint8") + png_filename = os.path.join( + self._temp_dir, basename + "_{}_{}.png".format(i, classes) + ) + + Image.fromarray(mask * 255).save(png_filename) + fout.write("{} {} {}\n".format(os.path.basename(png_filename), class_id, score)) + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP" and "AP50". + """ + comm.synchronize() + if comm.get_rank() > 0: + return + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + )["averages"] + + ret = OrderedDict() + ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} + self._working_dir.cleanup() + return ret + + +class CityscapesSemSegEvaluator(CityscapesEvaluator): + """ + Evaluate semantic segmentation results using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. 
+ """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import trainId2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") + + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() + pred = 255 * np.ones(output.shape, dtype=np.uint8) + for train_id, label in trainId2label.items(): + if label.ignoreInEval: + continue + pred[output == train_id] = label.id + Image.fromarray(pred).save(pred_filename) + + def evaluate(self): + comm.synchronize() + if comm.get_rank() > 0: + return + # Load the Cityscapes eval script *after* setting the required env var, + # since the script reads CITYSCAPES_DATASET into global variables at load time. + import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + ) + ret = OrderedDict() + ret["sem_seg"] = { + "IoU": 100.0 * results["averageScoreClasses"], + "iIoU": 100.0 * results["averageScoreInstClasses"], + "IoU_sup": 100.0 * results["averageScoreCategories"], + "iIoU_sup": 100.0 * results["averageScoreInstCategories"], + } + self._working_dir.cleanup() + return ret diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/coco_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..64b0903a43187db785113267ed16e82be6f5b28c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/coco_evaluation.py @@ -0,0 +1,512 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import pycocotools.mask as mask_util +import torch +from fvcore.common.file_io import PathManager +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import detectron2.utils.comm as comm +from detectron2.data import MetadataCatalog +from detectron2.data.datasets.coco import convert_to_coco_json +from detectron2.structures import Boxes, BoxMode, pairwise_iou +from detectron2.utils.logger import create_small_table + +from .evaluator import DatasetEvaluator + + +class COCOEvaluator(DatasetEvaluator): + """ + Evaluate object proposal, instance detection/segmentation, keypoint detection + outputs using COCO's metrics and APIs. + """ + + def __init__(self, dataset_name, cfg, distributed, output_dir=None): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + It must have either the following corresponding metadata: + + "json_file": the path to the COCO format annotation + + Or it must be in detectron2's standard dataset format + so it can be converted to COCO format automatically. + cfg (CfgNode): config instance + distributed (True): if True, will collect results from all ranks and run evaluation + in the main process. + Otherwise, will evaluate the results in the current process. + output_dir (str): optional, an output directory to dump all + results predicted on the dataset. The dump contains two files: + + 1. "instance_predictions.pth" a file in torch serialization + format that contains all the raw original predictions. + 2. "coco_instances_results.json" a json file in COCO's result + format. + """ + self._tasks = self._tasks_from_config(cfg) + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + if not hasattr(self._metadata, "json_file"): + self._logger.warning( + f"json_file was not found in MetaDataCatalog for '{dataset_name}'." + " Trying to convert it to COCO format ..." + ) + + cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") + self._metadata.json_file = cache_path + convert_to_coco_json(dataset_name, cache_path) + + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS + # Test set json files do not contain annotations (evaluation must be + # performed using the COCO evaluation server). + self._do_evaluation = "annotations" in self._coco_api.split_name + + def reset(self): + self._predictions = [] + + def _tasks_from_config(self, cfg): + """ + Returns: + tuple[str]: tasks that can be evaluated under the given configuration. + """ + tasks = ("bbox",) + if cfg.MODEL.MASK_ON: + tasks = tasks + ("segm",) + if cfg.MODEL.KEYPOINT_ON: + tasks = tasks + ("keypoints",) + return tasks + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. 
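+                Each dict may additionally contain a "proposals" field holding an
+                :class:`Instances` of proposal boxes, consumed by the proposal evaluation path.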
+ """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + # TODO this is ugly + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def evaluate(self): + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return {} + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "instances" in predictions[0]: + self._eval_predictions(set(self._tasks), predictions) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _eval_predictions(self, tasks, predictions): + """ + Evaluate predictions on the given tasks. + Fill self._results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in coco_results: + category_id = result["category_id"] + assert ( + category_id in reverse_id_mapping + ), "A prediction has category_id={}, which is not available in the dataset.".format( + category_id + ) + result["category_id"] = reverse_id_mapping[category_id] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + coco_eval = ( + _evaluate_predictions_on_coco( + self._coco_api, coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas + ) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. 
+ bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + def _derive_coco_results(self, coco_eval, iou_type, class_names=None): + """ + Derive the desired score numbers from summarized COCOeval. + + Args: + coco_eval (None or COCOEval): None represents no predictions from model. + iou_type (str): + class_names (None or list[str]): if provided, will use it to predict + per-category AP. + + Returns: + a dict of {metric name: score} + """ + + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], + }[iou_type] + + if coco_eval is None: + self._logger.warn("No predictions from the model!") + return {metric: float("nan") for metric in metrics} + + # the standard metrics + results = { + metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") + for idx, metric in enumerate(metrics) + } + self._logger.info( + "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) + ) + if not np.isfinite(sum(results.values())): + self._logger.info("Note that some metrics cannot be computed.") + + if class_names is None or len(class_names) <= 1: + return results + # Compute per-category AP + # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa + precisions = coco_eval.eval["precision"] + # precision has dims (iou, recall, cls, area range, max dets) + assert len(class_names) == precisions.shape[2] + + results_per_category = [] + for idx, name in enumerate(class_names): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + ap = np.mean(precision) if precision.size else float("nan") + results_per_category.append(("{}".format(name), float(ap * 100))) + + # tabulate it + N_COLS = min(6, len(results_per_category) * 2) + results_flatten = list(itertools.chain(*results_per_category)) + results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) + table = tabulate( + results_2d, + tablefmt="pipe", + floatfmt=".3f", + headers=["category", "AP"] * (N_COLS // 2), + numalign="left", + ) + self._logger.info("Per-category {} AP: \n".format(iou_type) + table) + + results.update({"AP-" + name: ap for name, ap in results_per_category}) + return results + 
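+# Usage sketch (added comment, not part of the upstream file): a COCOEvaluator is normally
+# driven by `inference_on_dataset` from detectron2.evaluation, e.g.
+#
+#     evaluator = COCOEvaluator("coco_2017_val", cfg, distributed=True, output_dir="./output")
+#     results = inference_on_dataset(model, val_loader, evaluator)
+#
+# where `cfg`, `model` and `val_loader` are assumed to come from the caller's own setup.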
+ +def instances_to_coco_json(instances, img_id): + """ + Dump an "Instances" object to a COCO-format json that's used for evaluation. + + Args: + instances (Instances): + img_id (int): the image id + + Returns: + list[dict]: list of json annotations in COCO format. + """ + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + has_mask = instances.has("pred_masks") + if has_mask: + # use RLE to encode the masks, because they are too large and takes memory + # since this evaluator stores outputs of the entire dataset + rles = [ + mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] + for mask in instances.pred_masks + ] + for rle in rles: + # "counts" is an array encoded by mask_util as a byte-stream. Python3's + # json writer which always produces strings cannot serialize a bytestream + # unless you decode it. Thankfully, utf-8 works out (which is also what + # the pycocotools/_mask.pyx does). + rle["counts"] = rle["counts"].decode("utf-8") + + has_keypoints = instances.has("pred_keypoints") + if has_keypoints: + keypoints = instances.pred_keypoints + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + if has_mask: + result["segmentation"] = rles[k] + if has_keypoints: + # In COCO annotations, + # keypoints coordinates are pixel indices. + # However our predictions are floating point coordinates. + # Therefore we subtract 0.5 to be consistent with the annotation format. + # This is the inverse of data loading logic in `data/coco.py`. + keypoints[k][:, :2] -= 0.5 + result["keypoints"] = keypoints[k].flatten().tolist() + results.append(result) + return results + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official COCO API recall evaluation code. However, + it produces slightly different results. 
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0 ** 2, 1e5 ** 2], # all + [0 ** 2, 32 ** 2], # small + [32 ** 2, 96 ** 2], # medium + [96 ** 2, 1e5 ** 2], # large + [96 ** 2, 128 ** 2], # 96-128 + [128 ** 2, 256 ** 2], # 128-256 + [256 ** 2, 512 ** 2], # 256-512 + [512 ** 2, 1e5 ** 2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) + anno = coco_api.loadAnns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + for obj in anno + if obj["iscrowd"] == 0 + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None): + """ + Evaluate the coco results using COCOEval API. 
+ """ + assert len(coco_results) > 0 + + if iou_type == "segm": + coco_results = copy.deepcopy(coco_results) + # When evaluating mask AP, if the results contain bbox, cocoapi will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in coco_results: + c.pop("bbox", None) + + coco_dt = coco_gt.loadRes(coco_results) + coco_eval = COCOeval(coco_gt, coco_dt, iou_type) + # Use the COCO default keypoint OKS sigmas unless overrides are specified + if kpt_oks_sigmas: + coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) + + if iou_type == "keypoints": + num_keypoints = len(coco_results[0]["keypoints"]) // 3 + assert len(coco_eval.params.kpt_oks_sigmas) == num_keypoints, ( + "[COCOEvaluator] The length of cfg.TEST.KEYPOINT_OKS_SIGMAS (default: 17) " + "must be equal to the number of keypoints. However the prediction has {} " + "keypoints! For more information please refer to " + "http://cocodataset.org/#keypoints-eval.".format(num_keypoints) + ) + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/evaluator.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb98043a1ededb3925d0ecbba3914d6409dc022 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/evaluator.py @@ -0,0 +1,196 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import datetime +import logging +import time +from collections import OrderedDict +from contextlib import contextmanager +import torch + +from detectron2.utils.comm import get_world_size, is_main_process +from detectron2.utils.logger import log_every_n_seconds + + +class DatasetEvaluator: + """ + Base class for a dataset evaluator. + + The function :func:`inference_on_dataset` runs the model over + all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. + + This class will accumulate information of the inputs/outputs (by :meth:`process`), + and produce evaluation results in the end (by :meth:`evaluate`). + """ + + def reset(self): + """ + Preparation for a new round of evaluation. + Should be called before starting a round of evaluation. + """ + pass + + def process(self, inputs, outputs): + """ + Process the pair of inputs and outputs. + If they contain batches, the pairs can be consumed one-by-one using `zip`: + + .. code-block:: python + + for input_, output in zip(inputs, outputs): + # do evaluation on single input/output pair + ... + + Args: + inputs (list): the inputs that's used to call the model. + outputs (list): the return value of `model(inputs)` + """ + pass + + def evaluate(self): + """ + Evaluate/summarize the performance, after processing all input/output pairs. + + Returns: + dict: + A new evaluator class can return a dict of arbitrary format + as long as the user can process the results. + In our train_net.py, we expect the following format: + + * key: the name of the task (e.g., bbox) + * value: a dict of {metric name: score}, e.g.: {"AP50": 80} + """ + pass + + +class DatasetEvaluators(DatasetEvaluator): + """ + Wrapper class to combine multiple :class:`DatasetEvaluator` instances. + + This class dispatches every evaluation call to + all of its :class:`DatasetEvaluator`. 
+ """ + + def __init__(self, evaluators): + """ + Args: + evaluators (list): the evaluators to combine. + """ + super().__init__() + self._evaluators = evaluators + + def reset(self): + for evaluator in self._evaluators: + evaluator.reset() + + def process(self, inputs, outputs): + for evaluator in self._evaluators: + evaluator.process(inputs, outputs) + + def evaluate(self): + results = OrderedDict() + for evaluator in self._evaluators: + result = evaluator.evaluate() + if is_main_process() and result is not None: + for k, v in result.items(): + assert ( + k not in results + ), "Different evaluators produce results with the same key {}".format(k) + results[k] = v + return results + + +def inference_on_dataset(model, data_loader, evaluator): + """ + Run model on the data_loader and evaluate the metrics with evaluator. + Also benchmark the inference speed of `model.forward` accurately. + The model will be used in eval mode. + + Args: + model (nn.Module): a module which accepts an object from + `data_loader` and returns some outputs. It will be temporarily set to `eval` mode. + + If you wish to evaluate a model in `training` mode instead, you can + wrap the given model and override its behavior of `.eval()` and `.train()`. + data_loader: an iterable object with a length. + The elements it generates will be the inputs to the model. + evaluator (DatasetEvaluator): the evaluator to run. Use `None` if you only want + to benchmark, but don't want to do any evaluation. + + Returns: + The return value of `evaluator.evaluate()` + """ + num_devices = get_world_size() + logger = logging.getLogger(__name__) + logger.info("Start inference on {} images".format(len(data_loader))) + + total = len(data_loader) # inference data loader must have a fixed length + if evaluator is None: + # create a no-op evaluator + evaluator = DatasetEvaluators([]) + evaluator.reset() + + num_warmup = min(5, total - 1) + start_time = time.perf_counter() + total_compute_time = 0 + with inference_context(model), torch.no_grad(): + for idx, inputs in enumerate(data_loader): + if idx == num_warmup: + start_time = time.perf_counter() + total_compute_time = 0 + + start_compute_time = time.perf_counter() + outputs = model(inputs) + if torch.cuda.is_available(): + torch.cuda.synchronize() + total_compute_time += time.perf_counter() - start_compute_time + evaluator.process(inputs, outputs) + + iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) + seconds_per_img = total_compute_time / iters_after_start + if idx >= num_warmup * 2 or seconds_per_img > 5: + total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start + eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1))) + log_every_n_seconds( + logging.INFO, + "Inference done {}/{}. {:.4f} s / demo. 
ETA={}".format( + idx + 1, total, seconds_per_img, str(eta) + ), + n=5, + ) + + # Measure the time only for this worker (before the synchronization barrier) + total_time = time.perf_counter() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + # NOTE this format is parsed by grep + logger.info( + "Total inference time: {} ({:.6f} s / demo per device, on {} devices)".format( + total_time_str, total_time / (total - num_warmup), num_devices + ) + ) + total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) + logger.info( + "Total inference pure compute time: {} ({:.6f} s / demo per device, on {} devices)".format( + total_compute_time_str, total_compute_time / (total - num_warmup), num_devices + ) + ) + + results = evaluator.evaluate() + # An evaluator may return None when not in main process. + # Replace it by an empty dict instead to make it easier for downstream code to handle + if results is None: + results = {} + return results + + +@contextmanager +def inference_context(model): + """ + A context where the model is temporarily changed to eval mode, + and restored to previous mode afterwards. + + Args: + model: a torch Module + """ + training_mode = model.training + model.eval() + yield + model.train(training_mode) diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/lvis_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/lvis_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..e55f50fb9d1fa7ccb685f812b603c10f9a1ffea0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/lvis_evaluation.py @@ -0,0 +1,350 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import itertools +import json +import logging +import os +import pickle +from collections import OrderedDict +import torch +from fvcore.common.file_io import PathManager + +import detectron2.utils.comm as comm +from detectron2.data import MetadataCatalog +from detectron2.structures import Boxes, BoxMode, pairwise_iou +from detectron2.utils.logger import create_small_table + +from .coco_evaluation import instances_to_coco_json +from .evaluator import DatasetEvaluator + + +class LVISEvaluator(DatasetEvaluator): + """ + Evaluate object proposal and instance detection/segmentation outputs using + LVIS's metrics and evaluation API. + """ + + def __init__(self, dataset_name, cfg, distributed, output_dir=None): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + It must have the following corresponding metadata: + "json_file": the path to the LVIS format annotation + cfg (CfgNode): config instance + distributed (True): if True, will collect results from all ranks for evaluation. + Otherwise, will evaluate the results in the current process. + output_dir (str): optional, an output directory to dump results. + """ + from lvis import LVIS + + self._tasks = self._tasks_from_config(cfg) + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + json_file = PathManager.get_local_path(self._metadata.json_file) + self._lvis_api = LVIS(json_file) + # Test set json files do not contain annotations (evaluation must be + # performed using the LVIS evaluation server). 
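+        # If the json provides no annotation ids, evaluation is skipped later on and only
+        # the prediction files are written to `output_dir`.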
+ self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0 + + def reset(self): + self._predictions = [] + + def _tasks_from_config(self, cfg): + """ + Returns: + tuple[str]: tasks that can be evaluated under the given configuration. + """ + tasks = ("bbox",) + if cfg.MODEL.MASK_ON: + tasks = tasks + ("segm",) + return tasks + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a LVIS model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def evaluate(self): + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[LVISEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "instances" in predictions[0]: + self._eval_predictions(set(self._tasks), predictions) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _eval_predictions(self, tasks, predictions): + """ + Evaluate predictions on the given tasks. + Fill self._results with the metrics of the tasks. + + Args: + predictions (list[dict]): list of outputs from the model + """ + self._logger.info("Preparing results in the LVIS format ...") + lvis_results = list(itertools.chain(*[x["instances"] for x in predictions])) + + # LVIS evaluator can be used to evaluate results for COCO dataset categories. + # In this case `_metadata` variable will have a field with COCO-specific category mapping. 
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in lvis_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + else: + # unmap the category ids for LVIS (from 0-indexed to 1-indexed) + for result in lvis_results: + result["category_id"] += 1 + + if self._output_dir: + file_path = os.path.join(self._output_dir, "lvis_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(lvis_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + res = _evaluate_predictions_on_lvis( + self._lvis_api, lvis_results, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. + bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official LVIS API recall evaluation code. However, + it produces slightly different results. 
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0 ** 2, 1e5 ** 2], # all + [0 ** 2, 32 ** 2], # small + [32 ** 2, 96 ** 2], # medium + [96 ** 2, 1e5 ** 2], # large + [96 ** 2, 128 ** 2], # 96-128 + [128 ** 2, 256 ** 2], # 128-256 + [256 ** 2, 512 ** 2], # 256-512 + [512 ** 2, 1e5 ** 2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]]) + anno = lvis_api.load_anns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type, class_names=None): + """ + Args: + iou_type (str): + kpt_oks_sigmas (list[float]): + class_names (None or list[str]): if provided, will use it to predict + per-category AP. 
+ + Returns: + a dict of {metric name: score} + """ + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + }[iou_type] + + logger = logging.getLogger(__name__) + + if len(lvis_results) == 0: # TODO: check if needed + logger.warn("No predictions from the model!") + return {metric: float("nan") for metric in metrics} + + if iou_type == "segm": + lvis_results = copy.deepcopy(lvis_results) + # When evaluating mask AP, if the results contain bbox, LVIS API will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in lvis_results: + c.pop("bbox", None) + + from lvis import LVISEval, LVISResults + + lvis_results = LVISResults(lvis_gt, lvis_results) + lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type) + lvis_eval.run() + lvis_eval.print_results() + + # Pull the standard metrics from the LVIS results + results = lvis_eval.get_results() + results = {metric: float(results[metric] * 100) for metric in metrics} + logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results)) + return results diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/panoptic_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/panoptic_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..fb5e7ab87b1dd5bb3e0c5d1e405e321c48d9e6a0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/panoptic_evaluation.py @@ -0,0 +1,167 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import contextlib +import io +import itertools +import json +import logging +import os +import tempfile +from collections import OrderedDict +from fvcore.common.file_io import PathManager +from PIL import Image +from tabulate import tabulate + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm + +from .evaluator import DatasetEvaluator + +logger = logging.getLogger(__name__) + + +class COCOPanopticEvaluator(DatasetEvaluator): + """ + Evaluate Panoptic Quality metrics on COCO using PanopticAPI. + It saves panoptic segmentation prediction in `output_dir` + + It contains a synchronize call and has to be called from all workers. + """ + + def __init__(self, dataset_name, output_dir): + """ + Args: + dataset_name (str): name of the dataset + output_dir (str): output directory to save results for evaluation + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._thing_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + self._stuff_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items() + } + + self._predictions_json = os.path.join(output_dir, "predictions.json") + + def reset(self): + self._predictions = [] + + def _convert_category_id(self, segment_info): + isthing = segment_info.pop("isthing", None) + if isthing is None: + # the model produces panoptic category id directly. 
No more conversion needed + return segment_info + if isthing is True: + segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + return segment_info + + def process(self, inputs, outputs): + from panopticapi.utils import id2rgb + + for input, output in zip(inputs, outputs): + panoptic_img, segments_info = output["panoptic_seg"] + panoptic_img = panoptic_img.cpu().numpy() + + file_name = os.path.basename(input["file_name"]) + file_name_png = os.path.splitext(file_name)[0] + ".png" + with io.BytesIO() as out: + Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG") + segments_info = [self._convert_category_id(x) for x in segments_info] + self._predictions.append( + { + "image_id": input["image_id"], + "file_name": file_name_png, + "png_string": out.getvalue(), + "segments_info": segments_info, + } + ) + + def evaluate(self): + comm.synchronize() + + self._predictions = comm.gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not comm.is_main_process(): + return + + # PanopticApi requires local files + gt_json = PathManager.get_local_path(self._metadata.panoptic_json) + gt_folder = PathManager.get_local_path(self._metadata.panoptic_root) + + with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir: + logger.info("Writing all panoptic predictions to {} ...".format(pred_dir)) + for p in self._predictions: + with open(os.path.join(pred_dir, p["file_name"]), "wb") as f: + f.write(p.pop("png_string")) + + with open(gt_json, "r") as f: + json_data = json.load(f) + json_data["annotations"] = self._predictions + with PathManager.open(self._predictions_json, "w") as f: + f.write(json.dumps(json_data)) + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + gt_json, + PathManager.get_local_path(self._predictions_json), + gt_folder=gt_folder, + pred_folder=pred_dir, + ) + + res = {} + res["PQ"] = 100 * pq_res["All"]["pq"] + res["SQ"] = 100 * pq_res["All"]["sq"] + res["RQ"] = 100 * pq_res["All"]["rq"] + res["PQ_th"] = 100 * pq_res["Things"]["pq"] + res["SQ_th"] = 100 * pq_res["Things"]["sq"] + res["RQ_th"] = 100 * pq_res["Things"]["rq"] + res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] + res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] + res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] + + results = OrderedDict({"panoptic_seg": res}) + _print_panoptic_results(pq_res) + + return results + + +def _print_panoptic_results(pq_res): + headers = ["", "PQ", "SQ", "RQ", "#categories"] + data = [] + for name in ["All", "Things", "Stuff"]: + row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] + data.append(row) + table = tabulate( + data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" + ) + logger.info("Panoptic Evaluation Results:\n" + table) + + +if __name__ == "__main__": + from detectron2.utils.logger import setup_logger + + logger = setup_logger() + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--gt-json") + parser.add_argument("--gt-dir") + parser.add_argument("--pred-json") + parser.add_argument("--pred-dir") + args = parser.parse_args() + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + args.gt_json, args.pred_json, gt_folder=args.gt_dir, 
pred_folder=args.pred_dir + ) + _print_panoptic_results(pq_res) diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/pascal_voc_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/pascal_voc_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..22d2e523d23c695e06e5da5cb3a210a6d1945dfb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/pascal_voc_evaluation.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging +import numpy as np +import os +import tempfile +import xml.etree.ElementTree as ET +from collections import OrderedDict, defaultdict +from functools import lru_cache +import torch +from fvcore.common.file_io import PathManager + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm + +from .evaluator import DatasetEvaluator + + +class PascalVOCDetectionEvaluator(DatasetEvaluator): + """ + Evaluate Pascal VOC AP. + It contains a synchronization, therefore has to be called from all ranks. + + Note that this is a rewrite of the official Matlab API. + The results should be similar, but not identical to the one produced by + the official API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): name of the dataset, e.g., "voc_2007_test" + """ + self._dataset_name = dataset_name + meta = MetadataCatalog.get(dataset_name) + self._anno_file_template = os.path.join(meta.dirname, "Annotations", "{}.xml") + self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt") + self._class_names = meta.thing_classes + assert meta.year in [2007, 2012], meta.year + self._is_2007 = meta.year == 2007 + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._predictions = defaultdict(list) # class name -> list of prediction strings + + def process(self, inputs, outputs): + for input, output in zip(inputs, outputs): + image_id = input["image_id"] + instances = output["instances"].to(self._cpu_device) + boxes = instances.pred_boxes.tensor.numpy() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + for box, score, cls in zip(boxes, scores, classes): + xmin, ymin, xmax, ymax = box + # The inverse of data loading logic in `data/pascal_voc.py` + xmin += 1 + ymin += 1 + self._predictions[cls].append( + f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}" + ) + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75". + """ + all_predictions = comm.gather(self._predictions, dst=0) + if not comm.is_main_process(): + return + predictions = defaultdict(list) + for predictions_per_rank in all_predictions: + for clsid, lines in predictions_per_rank.items(): + predictions[clsid].extend(lines) + del all_predictions + + self._logger.info( + "Evaluating {} using {} metric. 
" + "Note that results do not use the official Matlab API.".format( + self._dataset_name, 2007 if self._is_2007 else 2012 + ) + ) + + with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname: + res_file_template = os.path.join(dirname, "{}.txt") + + aps = defaultdict(list) # iou -> ap per class + for cls_id, cls_name in enumerate(self._class_names): + lines = predictions.get(cls_id, [""]) + + with open(res_file_template.format(cls_name), "w") as f: + f.write("\n".join(lines)) + + for thresh in range(50, 100, 5): + rec, prec, ap = voc_eval( + res_file_template, + self._anno_file_template, + self._image_set_path, + cls_name, + ovthresh=thresh / 100.0, + use_07_metric=self._is_2007, + ) + aps[thresh].append(ap * 100) + + ret = OrderedDict() + mAP = {iou: np.mean(x) for iou, x in aps.items()} + ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]} + return ret + + +############################################################################## +# +# Below code is modified from +# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py +# -------------------------------------------------------- +# Fast/er R-CNN +# Licensed under The MIT License [see LICENSE for details] +# Written by Bharath Hariharan +# -------------------------------------------------------- + +"""Python implementation of the PASCAL VOC devkit's AP evaluation code.""" + + +@lru_cache(maxsize=None) +def parse_rec(filename): + """Parse a PASCAL VOC xml file.""" + with PathManager.open(filename) as f: + tree = ET.parse(f) + objects = [] + for obj in tree.findall("object"): + obj_struct = {} + obj_struct["name"] = obj.find("name").text + obj_struct["pose"] = obj.find("pose").text + obj_struct["truncated"] = int(obj.find("truncated").text) + obj_struct["difficult"] = int(obj.find("difficult").text) + bbox = obj.find("bndbox") + obj_struct["bbox"] = [ + int(bbox.find("xmin").text), + int(bbox.find("ymin").text), + int(bbox.find("xmax").text), + int(bbox.find("ymax").text), + ] + objects.append(obj_struct) + + return objects + + +def voc_ap(rec, prec, use_07_metric=False): + """Compute VOC AP given precision and recall. If use_07_metric is true, uses + the VOC 07 11-point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0.0 + for t in np.arange(0.0, 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11.0 + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.0], rec, [1.0])) + mpre = np.concatenate(([0.0], prec, [0.0])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False): + """rec, prec, ap = voc_eval(detpath, + annopath, + imagesetfile, + classname, + [ovthresh], + [use_07_metric]) + + Top level function that does the PASCAL VOC evaluation. + + detpath: Path to detections + detpath.format(classname) should produce the detection results file. + annopath: Path to annotations + annopath.format(imagename) should be the xml annotations file. + imagesetfile: Text file containing the list of images, one image per line. 
+ classname: Category name (duh) + [ovthresh]: Overlap threshold (default = 0.5) + [use_07_metric]: Whether to use VOC07's 11 point AP computation + (default False) + """ + # assumes detections are in detpath.format(classname) + # assumes annotations are in annopath.format(imagename) + # assumes imagesetfile is a text file with each line an image name + + # first load gt + # read list of images + with PathManager.open(imagesetfile, "r") as f: + lines = f.readlines() + imagenames = [x.strip() for x in lines] + + # load annots + recs = {} + for imagename in imagenames: + recs[imagename] = parse_rec(annopath.format(imagename)) + + # extract gt objects for this class + class_recs = {} + npos = 0 + for imagename in imagenames: + R = [obj for obj in recs[imagename] if obj["name"] == classname] + bbox = np.array([x["bbox"] for x in R]) + difficult = np.array([x["difficult"] for x in R]).astype(np.bool) + # difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT + det = [False] * len(R) + npos = npos + sum(~difficult) + class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} + + # read dets + detfile = detpath.format(classname) + with open(detfile, "r") as f: + lines = f.readlines() + + splitlines = [x.strip().split(" ") for x in lines] + image_ids = [x[0] for x in splitlines] + confidence = np.array([float(x[1]) for x in splitlines]) + BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + BB = BB[sorted_ind, :] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + R = class_recs[image_ids[d]] + bb = BB[d, :].astype(float) + ovmax = -np.inf + BBGT = R["bbox"].astype(float) + + if BBGT.size > 0: + # compute overlaps + # intersection + ixmin = np.maximum(BBGT[:, 0], bb[0]) + iymin = np.maximum(BBGT[:, 1], bb[1]) + ixmax = np.minimum(BBGT[:, 2], bb[2]) + iymax = np.minimum(BBGT[:, 3], bb[3]) + iw = np.maximum(ixmax - ixmin + 1.0, 0.0) + ih = np.maximum(iymax - iymin + 1.0, 0.0) + inters = iw * ih + + # union + uni = ( + (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) + - inters + ) + + overlaps = inters / uni + ovmax = np.max(overlaps) + jmax = np.argmax(overlaps) + + if ovmax > ovthresh: + if not R["difficult"][jmax]: + if not R["det"][jmax]: + tp[d] = 1.0 + R["det"][jmax] = 1 + else: + fp[d] = 1.0 + else: + fp[d] = 1.0 + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/rotated_coco_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/rotated_coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..30746e1aaac9a1feb0c7994d9229423e9f04bb51 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/rotated_coco_evaluation.py @@ -0,0 +1,204 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import itertools +import json +import numpy as np +import os +import torch +from fvcore.common.file_io import PathManager +from pycocotools.cocoeval import COCOeval, maskUtils + +from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated + +from .coco_evaluation import COCOEvaluator + + +class RotatedCOCOeval(COCOeval): + @staticmethod + def is_rotated(box_list): + if type(box_list) == np.ndarray: + return box_list.shape[1] == 5 + elif type(box_list) == list: + if box_list == []: # cannot decide the box_dim + return False + return np.all( + np.array( + [ + (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray)) + for obj in box_list + ] + ) + ) + return False + + @staticmethod + def boxlist_to_tensor(boxlist, output_box_dim): + if type(boxlist) == np.ndarray: + box_tensor = torch.from_numpy(boxlist) + elif type(boxlist) == list: + if boxlist == []: + return torch.zeros((0, output_box_dim), dtype=torch.float32) + else: + box_tensor = torch.FloatTensor(boxlist) + else: + raise Exception("Unrecognized boxlist type") + + input_box_dim = box_tensor.shape[1] + if input_box_dim != output_box_dim: + if input_box_dim == 4 and output_box_dim == 5: + box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) + else: + raise Exception( + "Unable to convert from {}-dim box to {}-dim box".format( + input_box_dim, output_box_dim + ) + ) + return box_tensor + + def compute_iou_dt_gt(self, dt, gt, is_crowd): + if self.is_rotated(dt) or self.is_rotated(gt): + # TODO: take is_crowd into consideration + assert all(c == 0 for c in is_crowd) + dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5)) + gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5)) + return pairwise_iou_rotated(dt, gt) + else: + # This is the same as the classical COCO evaluation + return maskUtils.iou(dt, gt, is_crowd) + + def computeIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + assert p.iouType == "bbox", "unsupported iouType for iou computation" + + g = [g["bbox"] for g in gt] + d = [d["bbox"] for d in dt] + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in gt] + + # Note: this function is copied from cocoeval.py in cocoapi + # and the major difference is here. + ious = self.compute_iou_dt_gt(d, g, iscrowd) + return ious + + +class RotatedCOCOEvaluator(COCOEvaluator): + """ + Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs, + with rotated boxes support. + Note: this uses IOU only and does not consider angle differences. + """ + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. 
+ """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + + prediction["instances"] = self.instances_to_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def instances_to_json(self, instances, img_id): + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + if boxes.shape[1] == 4: + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + + results.append(result) + return results + + def _eval_predictions(self, tasks, predictions): + """ + Evaluate predictions on the given tasks. + Fill self._results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in coco_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + assert task == "bbox", "Task {} is not supported".format(task) + coco_eval = ( + self._evaluate_predictions_on_coco(self._coco_api, coco_results) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _evaluate_predictions_on_coco(self, coco_gt, coco_results): + """ + Evaluate the coco results using COCOEval API. + """ + assert len(coco_results) > 0 + + coco_dt = coco_gt.loadRes(coco_results) + + # Only bbox is supported for now + coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox") + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/sem_seg_evaluation.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/sem_seg_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..fb3b28d79284a5eeb335fc8ee8d859b4e46510ef --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/sem_seg_evaluation.py @@ -0,0 +1,168 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import itertools +import json +import logging +import numpy as np +import os +from collections import OrderedDict +import PIL.Image as Image +import pycocotools.mask as mask_util +import torch +from fvcore.common.file_io import PathManager + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.utils.comm import all_gather, is_main_process, synchronize + +from .evaluator import DatasetEvaluator + + +class SemSegEvaluator(DatasetEvaluator): + """ + Evaluate semantic segmentation + """ + + def __init__(self, dataset_name, distributed, num_classes, ignore_label=255, output_dir=None): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + distributed (True): if True, will collect results from all ranks for evaluation. + Otherwise, will evaluate the results in the current process. + num_classes (int): number of classes + ignore_label (int): value in semantic segmentation ground truth. Predictions for the + corresponding pixels should be ignored. + output_dir (str): an output directory to dump results. + """ + self._dataset_name = dataset_name + self._distributed = distributed + self._output_dir = output_dir + self._num_classes = num_classes + self._ignore_label = ignore_label + self._N = num_classes + 1 + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self.input_file_to_gt_file = { + dataset_record["file_name"]: dataset_record["sem_seg_file_name"] + for dataset_record in DatasetCatalog.get(dataset_name) + } + + meta = MetadataCatalog.get(dataset_name) + # Dict that maps contiguous training ids to COCO category ids + try: + c2d = meta.stuff_dataset_id_to_contiguous_id + self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} + except AttributeError: + self._contiguous_id_to_dataset_id = None + self._class_names = meta.stuff_classes + + def reset(self): + self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64) + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a model. + It is a list of dicts. Each dict corresponds to an image and + contains keys like "height", "width", "file_name". + outputs: the outputs of a model. It is either list of semantic segmentation predictions + (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic + segmentation prediction in the same format. 
+ """ + for input, output in zip(inputs, outputs): + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) + pred = np.array(output, dtype=np.int) + with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f: + gt = np.array(Image.open(f), dtype=np.int) + + gt[gt == self._ignore_label] = self._num_classes + + self._conf_matrix += np.bincount( + self._N * pred.reshape(-1) + gt.reshape(-1), minlength=self._N ** 2 + ).reshape(self._N, self._N) + + self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) + + def evaluate(self): + """ + Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): + + * Mean intersection-over-union averaged across classes (mIoU) + * Frequency Weighted IoU (fwIoU) + * Mean pixel accuracy averaged across classes (mACC) + * Pixel Accuracy (pACC) + """ + if self._distributed: + synchronize() + conf_matrix_list = all_gather(self._conf_matrix) + self._predictions = all_gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not is_main_process(): + return + + self._conf_matrix = np.zeros_like(self._conf_matrix) + for conf_matrix in conf_matrix_list: + self._conf_matrix += conf_matrix + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(self._predictions)) + + acc = np.full(self._num_classes, np.nan, dtype=np.float) + iou = np.full(self._num_classes, np.nan, dtype=np.float) + tp = self._conf_matrix.diagonal()[:-1].astype(np.float) + pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) + class_weights = pos_gt / np.sum(pos_gt) + pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) + acc_valid = pos_gt > 0 + acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] + iou_valid = (pos_gt + pos_pred) > 0 + union = pos_gt + pos_pred - tp + iou[acc_valid] = tp[acc_valid] / union[acc_valid] + macc = np.sum(acc[acc_valid]) / np.sum(acc_valid) + miou = np.sum(iou[acc_valid]) / np.sum(iou_valid) + fiou = np.sum(iou[acc_valid] * class_weights[acc_valid]) + pacc = np.sum(tp) / np.sum(pos_gt) + + res = {} + res["mIoU"] = 100 * miou + res["fwIoU"] = 100 * fiou + for i, name in enumerate(self._class_names): + res["IoU-{}".format(name)] = 100 * iou[i] + res["mACC"] = 100 * macc + res["pACC"] = 100 * pacc + for i, name in enumerate(self._class_names): + res["ACC-{}".format(name)] = 100 * acc[i] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(res, f) + results = OrderedDict({"sem_seg": res}) + self._logger.info(results) + return results + + def encode_json_sem_seg(self, sem_seg, input_file_name): + """ + Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. 
+ See http://cocodataset.org/#format-results + """ + json_list = [] + for label in np.unique(sem_seg): + if self._contiguous_id_to_dataset_id is not None: + assert ( + label in self._contiguous_id_to_dataset_id + ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) + dataset_id = self._contiguous_id_to_dataset_id[label] + else: + dataset_id = int(label) + mask = (sem_seg == label).astype(np.uint8) + mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] + mask_rle["counts"] = mask_rle["counts"].decode("utf-8") + json_list.append( + {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} + ) + return json_list diff --git a/preprocess/mhp_extension/detectron2/detectron2/evaluation/testing.py b/preprocess/mhp_extension/detectron2/detectron2/evaluation/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..95addebc185111c572cb19aa98f7e055b21fc74e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/evaluation/testing.py @@ -0,0 +1,78 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +import pprint +import sys +from collections import OrderedDict +from collections.abc import Mapping + + +def print_csv_format(results): + """ + Print main metrics in a format similar to Detectron, + so that they are easy to copypaste into a spreadsheet. + + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + """ + assert isinstance(results, OrderedDict), results # unordered results cannot be properly printed + logger = logging.getLogger(__name__) + for task, res in results.items(): + # Don't print "AP-category" metrics since they are usually not tracked. + important_res = [(k, v) for k, v in res.items() if "-" not in k] + logger.info("copypaste: Task: {}".format(task)) + logger.info("copypaste: " + ",".join([k[0] for k in important_res])) + logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res])) + + +def verify_results(cfg, results): + """ + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + + Returns: + bool: whether the verification succeeds or not + """ + expected_results = cfg.TEST.EXPECTED_RESULTS + if not len(expected_results): + return True + + ok = True + for task, metric, expected, tolerance in expected_results: + actual = results[task][metric] + if not np.isfinite(actual): + ok = False + diff = abs(actual - expected) + if diff > tolerance: + ok = False + + logger = logging.getLogger(__name__) + if not ok: + logger.error("Result verification failed!") + logger.error("Expected Results: " + str(expected_results)) + logger.error("Actual Results: " + pprint.pformat(results)) + + sys.exit(1) + else: + logger.info("Results verification passed.") + return ok + + +def flatten_results_dict(results): + """ + Expand a hierarchical dict of scalars into a flat dict of scalars. + If results[k1][k2][k3] = v, the returned dict will have the entry + {"k1/k2/k3": v}. 
+ + Args: + results (dict): + """ + r = {} + for k, v in results.items(): + if isinstance(v, Mapping): + v = flatten_results_dict(v) + for kk, vv in v.items(): + r[k + "/" + kk] = vv + else: + r[k] = v + return r diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/README.md b/preprocess/mhp_extension/detectron2/detectron2/export/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9bd8b57c1a5f15e391eb63b690f1051b1ad79d21 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/README.md @@ -0,0 +1,10 @@ + +This directory contains code to prepare a detectron2 model for deployment. +Currently it supports exporting a detectron2 model to Caffe2 format through ONNX. + +Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. + + +### Acknowledgements + +Thanks to Mobile Vision team at Facebook for developing the conversion tools. diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/export/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e2bf4d0670ed0ccd73dbdb7ce27a8e617bbf6aa --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .api import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/api.py b/preprocess/mhp_extension/detectron2/detectron2/export/api.py new file mode 100644 index 0000000000000000000000000000000000000000..a7600714e1edb019def04f9d0d1a063668943101 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/api.py @@ -0,0 +1,277 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +import copy +import logging +import os +import torch +from caffe2.proto import caffe2_pb2 +from torch import nn + +from detectron2.config import CfgNode as CN + +from .caffe2_export import export_caffe2_detection_model +from .caffe2_export import export_onnx_model as export_onnx_model_impl +from .caffe2_export import run_and_save_graph +from .caffe2_inference import ProtobufDetectionModel +from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format +from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph + +__all__ = [ + "add_export_config", + "export_caffe2_model", + "Caffe2Model", + "export_onnx_model", + "Caffe2Tracer", +] + + +def add_export_config(cfg): + """ + Args: + cfg (CfgNode): a detectron2 config + + Returns: + CfgNode: an updated config with new options that will be used + by :class:`Caffe2Tracer`. + """ + is_frozen = cfg.is_frozen() + cfg.defrost() + cfg.EXPORT_CAFFE2 = CN() + cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False + if is_frozen: + cfg.freeze() + return cfg + + +class Caffe2Tracer: + """ + Make a detectron2 model traceable with caffe2 style. + + An original detectron2 model may not be traceable, or + cannot be deployed directly after being traced, due to some reasons: + 1. control flow in some ops + 2. custom ops + 3. complicated pre/post processing + + This class provides a traceable version of a detectron2 model by: + 1. Rewrite parts of the model using ops in caffe2. Note that some ops do + not have GPU implementation. + 2. Define the inputs "after pre-processing" as inputs to the model + 3. 
Remove post-processing and produce raw layer outputs + + More specifically about inputs: all builtin models take two input tensors. + (1) NCHW float "data" which is an image (usually in [0, 255]) + (2) Nx3 float "im_info", each row of which is (height, width, 1.0) + + After making a traceable model, the class provide methods to export such a + model to different deployment formats. + + The class currently only supports models using builtin meta architectures. + """ + + def __init__(self, cfg, model, inputs): + """ + Args: + cfg (CfgNode): a detectron2 config, with extra export-related options + added by :func:`add_export_config`. + model (nn.Module): a model built by + :func:`detectron2.modeling.build_model`. + inputs: sample inputs that the given model takes for inference. + Will be used to trace the model. + """ + assert isinstance(cfg, CN), cfg + assert isinstance(model, torch.nn.Module), type(model) + if "EXPORT_CAFFE2" not in cfg: + cfg = add_export_config(cfg) # will just the defaults + + self.cfg = cfg + self.model = model + self.inputs = inputs + + def _get_traceable(self): + # TODO how to make it extensible to support custom models + C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[self.cfg.MODEL.META_ARCHITECTURE] + traceable_model = C2MetaArch(self.cfg, copy.deepcopy(self.model)) + traceable_inputs = traceable_model.get_caffe2_inputs(self.inputs) + return traceable_model, traceable_inputs + + def export_caffe2(self): + """ + Export the model to Caffe2's protobuf format. + The returned object can be saved with `.save_protobuf()` method. + The result can be loaded and executed using Caffe2 runtime. + + Returns: + Caffe2Model + """ + model, inputs = self._get_traceable() + predict_net, init_net = export_caffe2_detection_model(model, inputs) + return Caffe2Model(predict_net, init_net) + + def export_onnx(self): + """ + Export the model to ONNX format. + Note that the exported model contains custom ops only available in caffe2, therefore it + cannot be directly executed by other runtime. Post-processing or transformation passes + may be applied on the model to accommodate different runtimes. + + Returns: + onnx.ModelProto: an onnx model. + """ + model, inputs = self._get_traceable() + return export_onnx_model_impl(model, (inputs,)) + + def export_torchscript(self): + """ + Export the model to a `torch.jit.TracedModule` by tracing. + The returned object can be saved to a file by ".save()". + + Returns: + torch.jit.TracedModule: a torch TracedModule + """ + model, inputs = self._get_traceable() + logger = logging.getLogger(__name__) + logger.info("Tracing the model with torch.jit.trace ...") + with torch.no_grad(): + return torch.jit.trace(model, (inputs,), optimize=True) + + +def export_caffe2_model(cfg, model, inputs): + """ + Export a detectron2 model to caffe2 format. + + Args: + cfg (CfgNode): a detectron2 config, with extra export-related options + added by :func:`add_export_config`. + model (nn.Module): a model built by + :func:`detectron2.modeling.build_model`. + It will be modified by this function. + inputs: sample inputs that the given model takes for inference. + Will be used to trace the model. + + Returns: + Caffe2Model + """ + return Caffe2Tracer(cfg, model, inputs).export_caffe2() + + +def export_onnx_model(cfg, model, inputs): + """ + Export a detectron2 model to ONNX format. + Note that the exported model contains custom ops only available in caffe2, therefore it + cannot be directly executed by other runtime. 
Post-processing or transformation passes + may be applied on the model to accommodate different runtimes. + Args: + cfg (CfgNode): a detectron2 config, with extra export-related options + added by :func:`add_export_config`. + model (nn.Module): a model built by + :func:`detectron2.modeling.build_model`. + It will be modified by this function. + inputs: sample inputs that the given model takes for inference. + Will be used to trace the model. + Returns: + onnx.ModelProto: an onnx model. + """ + return Caffe2Tracer(cfg, model, inputs).export_onnx() + + +class Caffe2Model(nn.Module): + """ + A wrapper around the traced model in caffe2's pb format. + """ + + def __init__(self, predict_net, init_net): + super().__init__() + self.eval() # always in eval mode + self._predict_net = predict_net + self._init_net = init_net + self._predictor = None + + @property + def predict_net(self): + """ + Returns: + core.Net: the underlying caffe2 predict net + """ + return self._predict_net + + @property + def init_net(self): + """ + Returns: + core.Net: the underlying caffe2 init net + """ + return self._init_net + + __init__.__HIDE_SPHINX_DOC__ = True + + def save_protobuf(self, output_dir): + """ + Save the model as caffe2's protobuf format. + + Args: + output_dir (str): the output directory to save protobuf files. + """ + logger = logging.getLogger(__name__) + logger.info("Saving model to {} ...".format(output_dir)) + os.makedirs(output_dir, exist_ok=True) + + with open(os.path.join(output_dir, "model.pb"), "wb") as f: + f.write(self._predict_net.SerializeToString()) + with open(os.path.join(output_dir, "model.pbtxt"), "w") as f: + f.write(str(self._predict_net)) + with open(os.path.join(output_dir, "model_init.pb"), "wb") as f: + f.write(self._init_net.SerializeToString()) + + def save_graph(self, output_file, inputs=None): + """ + Save the graph as SVG format. + + Args: + output_file (str): a SVG file + inputs: optional inputs given to the model. + If given, the inputs will be used to run the graph to record + shape of every tensor. The shape information will be + saved together with the graph. + """ + if inputs is None: + save_graph(self._predict_net, output_file, op_only=False) + else: + size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0) + device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii") + inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device) + inputs = [x.cpu().numpy() for x in inputs] + run_and_save_graph(self._predict_net, self._init_net, inputs, output_file) + + @staticmethod + def load_protobuf(dir): + """ + Args: + dir (str): a directory used to save Caffe2Model with + :meth:`save_protobuf`. + The files "model.pb" and "model_init.pb" are needed. + + Returns: + Caffe2Model: the caffe2 model loaded from this directory. + """ + predict_net = caffe2_pb2.NetDef() + with open(os.path.join(dir, "model.pb"), "rb") as f: + predict_net.ParseFromString(f.read()) + + init_net = caffe2_pb2.NetDef() + with open(os.path.join(dir, "model_init.pb"), "rb") as f: + init_net.ParseFromString(f.read()) + + return Caffe2Model(predict_net, init_net) + + def __call__(self, inputs): + """ + An interface that wraps around a caffe2 model and mimics detectron2's models' + input & output format. This is used to compare the outputs of caffe2 model + with its original torch model. + + Due to the extra conversion between torch/caffe2, + this method is not meant for benchmark. 
+ """ + if self._predictor is None: + self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net) + return self._predictor(inputs) diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/c10.py b/preprocess/mhp_extension/detectron2/detectron2/export/c10.py new file mode 100644 index 0000000000000000000000000000000000000000..6e3cbe3ce94d0c56596c645b8c85592ed5d31fe1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/c10.py @@ -0,0 +1,503 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import math +import torch +import torch.nn.functional as F + +from detectron2.layers import cat +from detectron2.layers.roi_align_rotated import ROIAlignRotated +from detectron2.modeling import poolers +from detectron2.modeling.proposal_generator import rpn +from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference +from detectron2.structures import Boxes, ImageList, Instances, Keypoints + +from .shared import alias, to_device + + +""" +This file contains caffe2-compatible implementation of several detectrno2 components. +""" + + +class Caffe2Boxes(Boxes): + """ + Representing a list of detectron2.structures.Boxes from minibatch, each box + is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector + (batch index + 5 coordinates) for RotatedBoxes. + """ + + def __init__(self, tensor): + assert isinstance(tensor, torch.Tensor) + assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size() + # TODO: make tensor immutable when dim is Nx5 for Boxes, + # and Nx6 for RotatedBoxes? + self.tensor = tensor + + +# TODO clean up this class, maybe just extend Instances +class InstancesList(object): + """ + Tensor representation of a list of Instances object for a batch of images. + + When dealing with a batch of images with Caffe2 ops, a list of bboxes + (instances) are usually represented by single Tensor with size + (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is + for providing common functions to convert between these two representations. + """ + + def __init__(self, im_info, indices, extra_fields=None): + # [N, 3] -> (H, W, Scale) + self.im_info = im_info + # [N,] -> indice of batch to which the instance belongs + self.indices = indices + # [N, ...] 
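+ # dict of per-instance fields (e.g. proposal_boxes, scores); each value has one entry per instance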
+ self.batch_extra_fields = extra_fields or {} + + self.image_size = self.im_info + + def get_fields(self): + """ like `get_fields` in the Instances object, + but return each field in tensor representations """ + ret = {} + for k, v in self.batch_extra_fields.items(): + # if isinstance(v, torch.Tensor): + # tensor_rep = v + # elif isinstance(v, (Boxes, Keypoints)): + # tensor_rep = v.tensor + # else: + # raise ValueError("Can't find tensor representation for: {}".format()) + ret[k] = v + return ret + + def has(self, name): + return name in self.batch_extra_fields + + def set(self, name, value): + data_len = len(value) + if len(self.batch_extra_fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self.batch_extra_fields[name] = value + + def __setattr__(self, name, val): + if name in ["im_info", "indices", "batch_extra_fields", "image_size"]: + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name): + if name not in self.batch_extra_fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self.batch_extra_fields[name] + + def __len__(self): + return len(self.indices) + + def flatten(self): + ret = [] + for _, v in self.batch_extra_fields.items(): + if isinstance(v, (Boxes, Keypoints)): + ret.append(v.tensor) + else: + ret.append(v) + return ret + + @staticmethod + def to_d2_instances_list(instances_list): + """ + Convert InstancesList to List[Instances]. The input `instances_list` can + also be a List[Instances], in this case this method is a non-op. + """ + if not isinstance(instances_list, InstancesList): + assert all(isinstance(x, Instances) for x in instances_list) + return instances_list + + ret = [] + for i, info in enumerate(instances_list.im_info): + instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())])) + + ids = instances_list.indices == i + for k, v in instances_list.batch_extra_fields.items(): + if isinstance(v, torch.Tensor): + instances.set(k, v[ids]) + continue + elif isinstance(v, Boxes): + instances.set(k, v[ids, -4:]) + continue + + target_type, tensor_source = v + assert isinstance(tensor_source, torch.Tensor) + assert tensor_source.shape[0] == instances_list.indices.shape[0] + tensor_source = tensor_source[ids] + + if issubclass(target_type, Boxes): + instances.set(k, Boxes(tensor_source[:, -4:])) + elif issubclass(target_type, Keypoints): + instances.set(k, Keypoints(tensor_source)) + elif issubclass(target_type, torch.Tensor): + instances.set(k, tensor_source) + else: + raise ValueError("Can't handle targe type: {}".format(target_type)) + + ret.append(instances) + return ret + + +class Caffe2Compatible(object): + def _get_tensor_mode(self): + return self._tensor_mode + + def _set_tensor_mode(self, v): + self._tensor_mode = v + + tensor_mode = property(_get_tensor_mode, _set_tensor_mode) + """ + If true, the model expects C2-style tensor only inputs/outputs format. 
+ """ + + +class Caffe2RPN(Caffe2Compatible, rpn.RPN): + def forward(self, images, features, gt_instances=None): + assert not self.training + + features = [features[f] for f in self.in_features] + objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features) + + assert isinstance(images, ImageList) + if self.tensor_mode: + im_info = images.image_sizes + else: + im_info = torch.Tensor( + [[im_sz[0], im_sz[1], torch.Tensor([1.0])] for im_sz in images.image_sizes] + ).to(images.tensor.device) + assert isinstance(im_info, torch.Tensor) + + rpn_rois_list = [] + rpn_roi_probs_list = [] + for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip( + objectness_logits_pred, + anchor_deltas_pred, + iter(self.anchor_generator.cell_anchors), + self.anchor_generator.strides, + ): + scores = scores.detach() + bbox_deltas = bbox_deltas.detach() + + rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals( + scores, + bbox_deltas, + im_info, + cell_anchors_tensor, + spatial_scale=1.0 / feat_stride, + pre_nms_topN=self.pre_nms_topk[self.training], + post_nms_topN=self.post_nms_topk[self.training], + nms_thresh=self.nms_thresh, + min_size=self.min_box_side_len, + # correct_transform_coords=True, # deprecated argument + angle_bound_on=True, # Default + angle_bound_lo=-180, + angle_bound_hi=180, + clip_angle_thresh=1.0, # Default + legacy_plus_one=False, + ) + rpn_rois_list.append(rpn_rois) + rpn_roi_probs_list.append(rpn_roi_probs) + + # For FPN in D2, in RPN all proposals from different levels are concated + # together, ranked and picked by top post_nms_topk. Then in ROIPooler + # it calculates level_assignments and calls the RoIAlign from + # the corresponding level. + + if len(objectness_logits_pred) == 1: + rpn_rois = rpn_rois_list[0] + rpn_roi_probs = rpn_roi_probs_list[0] + else: + assert len(rpn_rois_list) == len(rpn_roi_probs_list) + rpn_post_nms_topN = self.post_nms_topk[self.training] + + device = rpn_rois_list[0].device + input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)] + + # TODO remove this after confirming rpn_max_level/rpn_min_level + # is not needed in CollectRpnProposals. + feature_strides = list(self.anchor_generator.strides) + rpn_min_level = int(math.log2(feature_strides[0])) + rpn_max_level = int(math.log2(feature_strides[-1])) + assert (rpn_max_level - rpn_min_level + 1) == len( + rpn_rois_list + ), "CollectRpnProposals requires continuous levels" + + rpn_rois = torch.ops._caffe2.CollectRpnProposals( + input_list, + # NOTE: in current implementation, rpn_max_level and rpn_min_level + # are not needed, only the subtraction of two matters and it + # can be infer from the number of inputs. Keep them now for + # consistency. 
+ rpn_max_level=2 + len(rpn_rois_list) - 1, + rpn_min_level=2, + rpn_post_nms_topN=rpn_post_nms_topN, + ) + rpn_rois = to_device(rpn_rois, device) + rpn_roi_probs = [] + + proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode) + return proposals, {} + + @staticmethod + def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode): + proposals = InstancesList( + im_info=im_info, + indices=rpn_rois[:, 0], + extra_fields={ + "proposal_boxes": Caffe2Boxes(rpn_rois), + "objectness_logits": (torch.Tensor, rpn_roi_probs), + }, + ) + if not tensor_mode: + proposals = InstancesList.to_d2_instances_list(proposals) + else: + proposals = [proposals] + return proposals + + +class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler): + @staticmethod + def c2_preprocess(box_lists): + assert all(isinstance(x, Boxes) for x in box_lists) + if all(isinstance(x, Caffe2Boxes) for x in box_lists): + # input is pure-tensor based + assert len(box_lists) == 1 + pooler_fmt_boxes = box_lists[0].tensor + else: + pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists) + return pooler_fmt_boxes + + def forward(self, x, box_lists): + assert not self.training + + pooler_fmt_boxes = self.c2_preprocess(box_lists) + num_level_assignments = len(self.level_poolers) + + if num_level_assignments == 1: + if isinstance(self.level_poolers[0], ROIAlignRotated): + c2_roi_align = torch.ops._caffe2.RoIAlignRotated + aligned = True + else: + c2_roi_align = torch.ops._caffe2.RoIAlign + aligned = self.level_poolers[0].aligned + + out = c2_roi_align( + x[0], + pooler_fmt_boxes, + order="NCHW", + spatial_scale=float(self.level_poolers[0].spatial_scale), + pooled_h=int(self.output_size[0]), + pooled_w=int(self.output_size[1]), + sampling_ratio=int(self.level_poolers[0].sampling_ratio), + aligned=aligned, + ) + return out + + device = pooler_fmt_boxes.device + assert ( + self.max_level - self.min_level + 1 == 4 + ), "Currently DistributeFpnProposals only support 4 levels" + fpn_outputs = torch.ops._caffe2.DistributeFpnProposals( + to_device(pooler_fmt_boxes, "cpu"), + roi_canonical_scale=self.canonical_box_size, + roi_canonical_level=self.canonical_level, + roi_max_level=self.max_level, + roi_min_level=self.min_level, + legacy_plus_one=False, + ) + fpn_outputs = [to_device(x, device) for x in fpn_outputs] + + rois_fpn_list = fpn_outputs[:-1] + rois_idx_restore_int32 = fpn_outputs[-1] + + roi_feat_fpn_list = [] + for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers): + if isinstance(pooler, ROIAlignRotated): + c2_roi_align = torch.ops._caffe2.RoIAlignRotated + aligned = True + else: + c2_roi_align = torch.ops._caffe2.RoIAlign + aligned = bool(pooler.aligned) + + roi_feat_fpn = c2_roi_align( + x_level, + roi_fpn, + order="NCHW", + spatial_scale=float(pooler.spatial_scale), + pooled_h=int(self.output_size[0]), + pooled_w=int(self.output_size[1]), + sampling_ratio=int(pooler.sampling_ratio), + aligned=aligned, + ) + roi_feat_fpn_list.append(roi_feat_fpn) + + roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0) + roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32) + return roi_feat + + +class Caffe2FastRCNNOutputsInference: + def __init__(self, tensor_mode): + self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode + + def __call__(self, box_predictor, predictions, proposals): + """ equivalent to FastRCNNOutputLayers.inference """ + score_thresh = box_predictor.test_score_thresh + nms_thresh = box_predictor.test_nms_thresh + 
topk_per_image = box_predictor.test_topk_per_image + is_rotated = len(box_predictor.box2box_transform.weights) == 5 + + if is_rotated: + box_dim = 5 + assert box_predictor.box2box_transform.weights[4] == 1, ( + "The weights for Rotated BBoxTransform in C2 have only 4 dimensions," + + " thus enforcing the angle weight to be 1 for now" + ) + box2box_transform_weights = box_predictor.box2box_transform.weights[:4] + else: + box_dim = 4 + box2box_transform_weights = box_predictor.box2box_transform.weights + + class_logits, box_regression = predictions + class_prob = F.softmax(class_logits, -1) + + assert box_regression.shape[1] % box_dim == 0 + cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1 + + input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1 + + rois = type(proposals[0].proposal_boxes).cat([p.proposal_boxes for p in proposals]) + device, dtype = rois.tensor.device, rois.tensor.dtype + if input_tensor_mode: + im_info = proposals[0].image_size + rois = rois.tensor + else: + im_info = torch.Tensor( + [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]] + ) + batch_ids = cat( + [ + torch.full((b, 1), i, dtype=dtype, device=device) + for i, b in enumerate(len(p) for p in proposals) + ], + dim=0, + ) + rois = torch.cat([batch_ids, rois.tensor], dim=1) + + roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform( + to_device(rois, "cpu"), + to_device(box_regression, "cpu"), + to_device(im_info, "cpu"), + weights=box2box_transform_weights, + apply_scale=True, + rotated=is_rotated, + angle_bound_on=True, + angle_bound_lo=-180, + angle_bound_hi=180, + clip_angle_thresh=1.0, + legacy_plus_one=False, + ) + roi_pred_bbox = to_device(roi_pred_bbox, device) + roi_batch_splits = to_device(roi_batch_splits, device) + + nms_outputs = torch.ops._caffe2.BoxWithNMSLimit( + to_device(class_prob, "cpu"), + to_device(roi_pred_bbox, "cpu"), + to_device(roi_batch_splits, "cpu"), + score_thresh=float(score_thresh), + nms=float(nms_thresh), + detections_per_im=int(topk_per_image), + soft_nms_enabled=False, + soft_nms_method="linear", + soft_nms_sigma=0.5, + soft_nms_min_score_thres=0.001, + rotated=is_rotated, + cls_agnostic_bbox_reg=cls_agnostic_bbox_reg, + input_boxes_include_bg_cls=False, + output_classes_include_bg_cls=False, + legacy_plus_one=False, + ) + roi_score_nms = to_device(nms_outputs[0], device) + roi_bbox_nms = to_device(nms_outputs[1], device) + roi_class_nms = to_device(nms_outputs[2], device) + roi_batch_splits_nms = to_device(nms_outputs[3], device) + roi_keeps_nms = to_device(nms_outputs[4], device) + roi_keeps_size_nms = to_device(nms_outputs[5], device) + if not self.tensor_mode: + roi_class_nms = roi_class_nms.to(torch.int64) + + roi_batch_ids = cat( + [ + torch.full((b, 1), i, dtype=dtype, device=device) + for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms) + ], + dim=0, + ) + + roi_class_nms = alias(roi_class_nms, "class_nms") + roi_score_nms = alias(roi_score_nms, "score_nms") + roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms") + roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms") + roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms") + roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms") + + results = InstancesList( + im_info=im_info, + indices=roi_batch_ids[:, 0], + extra_fields={ + "pred_boxes": Caffe2Boxes(roi_bbox_nms), + "scores": roi_score_nms, + "pred_classes": roi_class_nms, + }, + ) + + if not self.tensor_mode: + results = InstancesList.to_d2_instances_list(results) + 
batch_splits = roi_batch_splits_nms.int().tolist() + kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits)) + else: + results = [results] + kept_indices = [roi_keeps_nms] + + return results, kept_indices + + +class Caffe2MaskRCNNInference: + def __call__(self, pred_mask_logits, pred_instances): + """ equivalent to mask_head.mask_rcnn_inference """ + if all(isinstance(x, InstancesList) for x in pred_instances): + assert len(pred_instances) == 1 + mask_probs_pred = pred_mask_logits.sigmoid() + mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs") + pred_instances[0].pred_masks = mask_probs_pred + else: + mask_rcnn_inference(pred_mask_logits, pred_instances) + + +class Caffe2KeypointRCNNInference: + def __init__(self, use_heatmap_max_keypoint): + self.use_heatmap_max_keypoint = use_heatmap_max_keypoint + + def __call__(self, pred_keypoint_logits, pred_instances): + # just return the keypoint heatmap for now, + # there will be option to call HeatmapMaxKeypointOp + output = alias(pred_keypoint_logits, "kps_score") + if all(isinstance(x, InstancesList) for x in pred_instances): + assert len(pred_instances) == 1 + if self.use_heatmap_max_keypoint: + device = output.device + output = torch.ops._caffe2.HeatmapMaxKeypoint( + to_device(output, "cpu"), + pred_instances[0].pred_boxes.tensor, + should_output_softmax=True, # worth make it configerable? + ) + output = to_device(output, device) + output = alias(output, "keypoints_out") + pred_instances[0].pred_keypoints = output + return pred_keypoint_logits diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_export.py b/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_export.py new file mode 100644 index 0000000000000000000000000000000000000000..ccac809d7bf49ab144b5f0a34f57e00c3534ad60 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_export.py @@ -0,0 +1,204 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import copy +import io +import logging +import numpy as np +from typing import List +import onnx +import torch +from caffe2.proto import caffe2_pb2 +from caffe2.python import core +from caffe2.python.onnx.backend import Caffe2Backend +from tabulate import tabulate +from termcolor import colored +from torch.onnx import OperatorExportTypes + +from .shared import ( + ScopedWS, + construct_init_net_from_params, + fuse_alias_placeholder, + fuse_copy_between_cpu_and_gpu, + get_params_from_init_net, + group_norm_replace_aten_with_caffe2, + infer_device_type, + remove_dead_end_ops, + remove_reshape_for_fc, + save_graph, +) + +logger = logging.getLogger(__name__) + + +def export_onnx_model(model, inputs): + """ + Trace and export a model to onnx format. 
+ + Args: + model (nn.Module): + inputs (tuple[args]): the model will be called by `model(*inputs)` + + Returns: + an onnx model + """ + assert isinstance(model, torch.nn.Module) + + # make sure all modules are in eval mode, onnx may change the training state + # of the module if the states are not consistent + def _check_eval(module): + assert not module.training + + model.apply(_check_eval) + + # Export the model to ONNX + with torch.no_grad(): + with io.BytesIO() as f: + torch.onnx.export( + model, + inputs, + f, + operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK, + # verbose=True, # NOTE: uncomment this for debugging + # export_params=True, + ) + onnx_model = onnx.load_from_string(f.getvalue()) + + # Apply ONNX's Optimization + all_passes = onnx.optimizer.get_available_passes() + passes = ["fuse_bn_into_conv"] + assert all(p in all_passes for p in passes) + onnx_model = onnx.optimizer.optimize(onnx_model, passes) + return onnx_model + + +def _op_stats(net_def): + type_count = {} + for t in [op.type for op in net_def.op]: + type_count[t] = type_count.get(t, 0) + 1 + type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet + type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count + return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list) + + +def _assign_device_option( + predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor] +): + """ + ONNX exported network doesn't have concept of device, assign necessary + device option for each op in order to make it runable on GPU runtime. + """ + + def _get_device_type(torch_tensor): + assert torch_tensor.device.type in ["cpu", "cuda"] + assert torch_tensor.device.index == 0 + return torch_tensor.device.type + + def _assign_op_device_option(net_proto, net_ssa, blob_device_types): + for op, ssa_i in zip(net_proto.op, net_ssa): + if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]: + op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) + else: + devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]] + assert all(d == devices[0] for d in devices) + if devices[0] == "cuda": + op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) + + # update ops in predict_net + predict_net_input_device_types = { + (name, 0): _get_device_type(tensor) + for name, tensor in zip(predict_net.external_input, tensor_inputs) + } + predict_net_device_types = infer_device_type( + predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch" + ) + predict_net_ssa, _ = core.get_ssa(predict_net) + _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types) + + # update ops in init_net + init_net_ssa, versions = core.get_ssa(init_net) + init_net_output_device_types = { + (name, versions[name]): predict_net_device_types[(name, 0)] + for name in init_net.external_output + } + init_net_device_types = infer_device_type( + init_net, known_status=init_net_output_device_types, device_name_style="pytorch" + ) + _assign_op_device_option(init_net, init_net_ssa, init_net_device_types) + + +def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]): + """ + Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX. + + Arg: + model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py + tensor_inputs: a list of tensors that caffe2 model takes as input. 
+ """ + model = copy.deepcopy(model) + assert isinstance(model, torch.nn.Module) + assert hasattr(model, "encode_additional_info") + + # Export via ONNX + logger.info("Exporting a {} model via ONNX ...".format(type(model).__name__)) + onnx_model = export_onnx_model(model, (tensor_inputs,)) + # Convert ONNX model to Caffe2 protobuf + init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) + ops_table = [[op.type, op.input, op.output] for op in predict_net.op] + table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe") + logger.info( + "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan") + ) + + # Apply protobuf optimization + fuse_alias_placeholder(predict_net, init_net) + if any(t.device.type != "cpu" for t in tensor_inputs): + fuse_copy_between_cpu_and_gpu(predict_net) + remove_dead_end_ops(init_net) + _assign_device_option(predict_net, init_net, tensor_inputs) + params, device_options = get_params_from_init_net(init_net) + predict_net, params = remove_reshape_for_fc(predict_net, params) + init_net = construct_init_net_from_params(params, device_options) + group_norm_replace_aten_with_caffe2(predict_net) + + # Record necessary information for running the pb model in Detectron2 system. + model.encode_additional_info(predict_net, init_net) + + logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net))) + logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net))) + + return predict_net, init_net + + +def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path): + """ + Run the caffe2 model on given inputs, recording the shape and draw the graph. + + predict_net/init_net: caffe2 model. + tensor_inputs: a list of tensors that caffe2 model takes as input. + graph_save_path: path for saving graph of exported model. + """ + + logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path)) + save_graph(predict_net, graph_save_path, op_only=False) + + # Run the exported Caffe2 net + logger.info("Running ONNX exported model ...") + with ScopedWS("__ws_tmp__", True) as ws: + ws.RunNetOnce(init_net) + initialized_blobs = set(ws.Blobs()) + uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs] + for name, blob in zip(uninitialized, tensor_inputs): + ws.FeedBlob(name, blob) + + try: + ws.RunNetOnce(predict_net) + except RuntimeError as e: + logger.warning("Encountered RuntimeError: \n{}".format(str(e))) + + ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()} + blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)} + + logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path)) + save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes) + + return ws_blobs diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_inference.py b/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..92718d04031b4513c2324ad596eae9cdbfa7c75e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_inference.py @@ -0,0 +1,136 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import collections +import logging +import numpy as np +import torch +from caffe2.proto import caffe2_pb2 +from caffe2.python import core + +from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format +from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type + +logger = logging.getLogger(__name__) + + +class ProtobufModel(torch.nn.Module): + """ + A class works just like nn.Module in terms of inference, but running + caffe2 model under the hood. Input/Output are Dict[str, tensor] whose keys + are in external_input/output. + """ + + def __init__(self, predict_net, init_net): + logger.info("Initializing ProtobufModel ...") + super().__init__() + assert isinstance(predict_net, caffe2_pb2.NetDef) + assert isinstance(init_net, caffe2_pb2.NetDef) + self.ws_name = "__ws_tmp__" + self.net = core.Net(predict_net) + + with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws: + ws.RunNetOnce(init_net) + for blob in self.net.Proto().external_input: + if blob not in ws.Blobs(): + ws.CreateBlob(blob) + ws.CreateNet(self.net) + + self._error_msgs = set() + + def forward(self, inputs_dict): + assert all(inp in self.net.Proto().external_input for inp in inputs_dict) + with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws: + for b, tensor in inputs_dict.items(): + ws.FeedBlob(b, tensor) + try: + ws.RunNet(self.net.Proto().name) + except RuntimeError as e: + if not str(e) in self._error_msgs: + self._error_msgs.add(str(e)) + logger.warning("Encountered new RuntimeError: \n{}".format(str(e))) + logger.warning("Catch the error and use partial results.") + + outputs_dict = collections.OrderedDict( + [(b, ws.FetchBlob(b)) for b in self.net.Proto().external_output] + ) + # Remove outputs of current run, this is necessary in order to + # prevent fetching the result from previous run if the model fails + # in the middle. + for b in self.net.Proto().external_output: + # Needs to create uninitialized blob to make the net runable. + # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b), + # but there'no such API. + ws.FeedBlob(b, "{}, a C++ native class of type nullptr (uninitialized).".format(b)) + + return outputs_dict + + +class ProtobufDetectionModel(torch.nn.Module): + """ + A class works just like a pytorch meta arch in terms of inference, but running + caffe2 model under the hood. + """ + + def __init__(self, predict_net, init_net, *, convert_outputs=None): + """ + Args: + predict_net, init_net (core.Net): caffe2 nets + convert_outptus (callable): a function that converts caffe2 + outputs to the same format of the original pytorch model. + By default, use the one defined in the caffe2 meta_arch. 
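+
+            Example (illustrative sketch; `image_chw_tensor` is a placeholder CHW
+            image tensor, and predict_net/init_net come from the export step):
+
+                model = ProtobufDetectionModel(predict_net, init_net)
+                outputs = model([{"image": image_chw_tensor}])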
+ """ + super().__init__() + self.protobuf_model = ProtobufModel(predict_net, init_net) + self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0) + self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii") + + if convert_outputs is None: + meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN") + meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")] + self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net) + else: + self._convert_outputs = convert_outputs + + def _infer_output_devices(self, inputs_dict): + def _get_device_type(torch_tensor): + assert torch_tensor.device.type in ["cpu", "cuda"] + assert torch_tensor.device.index == 0 + return torch_tensor.device.type + + predict_net = self.protobuf_model.net.Proto() + input_device_types = { + (name, 0): _get_device_type(tensor) for name, tensor in inputs_dict.items() + } + device_type_map = infer_device_type( + predict_net, known_status=input_device_types, device_name_style="pytorch" + ) + ssa, versions = core.get_ssa(predict_net) + versioned_outputs = [(name, versions[name]) for name in predict_net.external_output] + output_devices = [device_type_map[outp] for outp in versioned_outputs] + return output_devices + + def _convert_inputs(self, batched_inputs): + # currently all models convert inputs in the same way + data, im_info = convert_batched_inputs_to_c2_format( + batched_inputs, self.size_divisibility, self.device + ) + return {"data": data, "im_info": im_info} + + def forward(self, batched_inputs): + c2_inputs = self._convert_inputs(batched_inputs) + c2_results = self.protobuf_model(c2_inputs) + + if any(t.device.type != "cpu" for _, t in c2_inputs.items()): + output_devices = self._infer_output_devices(c2_inputs) + else: + output_devices = ["cpu" for _ in self.protobuf_model.net.Proto().external_output] + + def _cast_caffe2_blob_to_torch_tensor(blob, device): + return torch.Tensor(blob).to(device) if isinstance(blob, np.ndarray) else None + + c2_results = { + name: _cast_caffe2_blob_to_torch_tensor(c2_results[name], device) + for name, device in zip(self.protobuf_model.net.Proto().external_output, output_devices) + } + + return self._convert_outputs(batched_inputs, c2_inputs, c2_results) diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_modeling.py b/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..1732b322c75abc3ac178d61d31cdec4cdcd61dfd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/caffe2_modeling.py @@ -0,0 +1,493 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import functools +import io +import struct +import types +import torch + +from detectron2.modeling import meta_arch +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.meta_arch.panoptic_fpn import combine_semantic_and_instance_outputs +from detectron2.modeling.postprocessing import detector_postprocess, sem_seg_postprocess +from detectron2.modeling.roi_heads import keypoint_head +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes + +from .c10 import Caffe2Compatible +from .patcher import ROIHeadsPatcher, patch_generalized_rcnn +from .shared import ( + alias, + check_set_pb_arg, + get_pb_arg_floats, + get_pb_arg_valf, + get_pb_arg_vali, + get_pb_arg_vals, + mock_torch_nn_functional_interpolate, +) + + +def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False): + """ + A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor]) + to detectron2's format (i.e. list of Instances instance). + This only works when the model follows the Caffe2 detectron's naming convention. + + Args: + image_sizes (List[List[int, int]]): [H, W] of every image. + tensor_outputs (Dict[str, Tensor]): external_output to its tensor. + + force_mask_on (Bool): if true, the it make sure there'll be pred_masks even + if the mask is not found from tensor_outputs (usually due to model crash) + """ + + results = [Instances(image_size) for image_size in image_sizes] + + batch_splits = tensor_outputs.get("batch_splits", None) + if batch_splits: + raise NotImplementedError() + assert len(image_sizes) == 1 + result = results[0] + + bbox_nms = tensor_outputs["bbox_nms"] + score_nms = tensor_outputs["score_nms"] + class_nms = tensor_outputs["class_nms"] + # Detection will always success because Conv support 0-batch + assert bbox_nms is not None + assert score_nms is not None + assert class_nms is not None + if bbox_nms.shape[1] == 5: + result.pred_boxes = RotatedBoxes(bbox_nms) + else: + result.pred_boxes = Boxes(bbox_nms) + result.scores = score_nms + result.pred_classes = class_nms.to(torch.int64) + + mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None) + if mask_fcn_probs is not None: + # finish the mask pred + mask_probs_pred = mask_fcn_probs + num_masks = mask_probs_pred.shape[0] + class_pred = result.pred_classes + indices = torch.arange(num_masks, device=class_pred.device) + mask_probs_pred = mask_probs_pred[indices, class_pred][:, None] + result.pred_masks = mask_probs_pred + elif force_mask_on: + # NOTE: there's no way to know the height/width of mask here, it won't be + # used anyway when batch size is 0, so just set them to 0. + result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8) + + keypoints_out = tensor_outputs.get("keypoints_out", None) + kps_score = tensor_outputs.get("kps_score", None) + if keypoints_out is not None: + # keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob) + keypoints_tensor = keypoints_out + # NOTE: it's possible that prob is not calculated if "should_output_softmax" + # is set to False in HeatmapMaxKeypoint, so just using raw score, seems + # it doesn't affect mAP. TODO: check more carefully. 
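+        # e.g. keypoints_out of shape [N, 4, K] becomes pred_keypoints of shape
+        # [N, K, 3], keeping (x, y, score) for each of the K keypoints.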
+ keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]] + result.pred_keypoints = keypoint_xyp + elif kps_score is not None: + # keypoint heatmap to sparse data structure + pred_keypoint_logits = kps_score + keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result]) + + return results + + +def _cast_to_f32(f64): + return struct.unpack("f", struct.pack("f", f64))[0] + + +def set_caffe2_compatible_tensor_mode(model, enable=True): + def _fn(m): + if isinstance(m, Caffe2Compatible): + m.tensor_mode = enable + + model.apply(_fn) + + +def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device): + """ + See get_caffe2_inputs() below. + """ + assert all(isinstance(x, dict) for x in batched_inputs) + assert all(x["image"].dim() == 3 for x in batched_inputs) + + images = [x["image"] for x in batched_inputs] + images = ImageList.from_tensors(images, size_divisibility) + + im_info = [] + for input_per_image, image_size in zip(batched_inputs, images.image_sizes): + target_height = input_per_image.get("height", image_size[0]) + target_width = input_per_image.get("width", image_size[1]) # noqa + # NOTE: The scale inside im_info is kept as convention and for providing + # post-processing information if further processing is needed. For + # current Caffe2 model definitions that don't include post-processing inside + # the model, this number is not used. + # NOTE: There can be a slight difference between width and height + # scales, using a single number can results in numerical difference + # compared with D2's post-processing. + scale = target_height / image_size[0] + im_info.append([image_size[0], image_size[1], scale]) + im_info = torch.Tensor(im_info) + + return images.tensor.to(device), im_info.to(device) + + +class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module): + """ + Base class for caffe2-compatible implementation of a meta architecture. + The forward is traceable and its traced graph can be converted to caffe2 + graph through ONNX. + """ + + def __init__(self, cfg, torch_model): + """ + Args: + cfg (CfgNode): + torch_model (nn.Module): the detectron2 model (meta_arch) to be + converted. + """ + super().__init__() + self._wrapped_model = torch_model + self.eval() + set_caffe2_compatible_tensor_mode(self, True) + + def get_caffe2_inputs(self, batched_inputs): + """ + Convert pytorch-style structured inputs to caffe2-style inputs that + are tuples of tensors. + + Args: + batched_inputs (list[dict]): inputs to a detectron2 model + in its standard format. Each dict has "image" (CHW tensor), and optionally + "height" and "width". + + Returns: + tuple[Tensor]: + tuple of tensors that will be the inputs to the + :meth:`forward` method. For existing models, the first + is an NCHW tensor (padded and batched); the second is + a im_info Nx3 tensor, where the rows are + (height, width, unused legacy parameter) + """ + return convert_batched_inputs_to_c2_format( + batched_inputs, + self._wrapped_model.backbone.size_divisibility, + self._wrapped_model.device, + ) + + def encode_additional_info(self, predict_net, init_net): + """ + Save extra metadata that will be used by inference in the output protobuf. + """ + pass + + def forward(self, inputs): + """ + Run the forward in caffe2-style. It has to use caffe2-compatible ops + and the method will be used for tracing. + + Args: + inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`. + They will be the inputs of the converted caffe2 graph. + + Returns: + tuple[Tensor]: output tensors. 
They will be the outputs of the + converted caffe2 graph. + """ + raise NotImplementedError + + def _caffe2_preprocess_image(self, inputs): + """ + Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward. + It normalizes the input images, and the final caffe2 graph assumes the + inputs have been batched already. + """ + data, im_info = inputs + data = alias(data, "data") + im_info = alias(im_info, "im_info") + mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std + normalized_data = (data - mean) / std + normalized_data = alias(normalized_data, "normalized_data") + + # Pack (data, im_info) into ImageList which is recognized by self.inference. + images = ImageList(tensor=normalized_data, image_sizes=im_info) + return images + + @staticmethod + def get_outputs_converter(predict_net, init_net): + """ + Creates a function that converts outputs of the caffe2 model to + detectron2's standard format. + The function uses information in `predict_net` and `init_net` that are + available at inferene time. Therefore the function logic can be used in inference. + + The returned function has the following signature: + + def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs + + Where + + * batched_inputs (list[dict]): the original input format of the meta arch + * c2_inputs (dict[str, Tensor]): the caffe2 inputs. + * c2_results (dict[str, Tensor]): the caffe2 output format, + corresponding to the outputs of the :meth:`forward` function. + * detectron2_outputs: the original output format of the meta arch. + + This function can be used to compare the outputs of the original meta arch and + the converted caffe2 graph. + + Returns: + callable: a callable of the above signature. + """ + raise NotImplementedError + + +class Caffe2GeneralizedRCNN(Caffe2MetaArch): + def __init__(self, cfg, torch_model): + assert isinstance(torch_model, meta_arch.GeneralizedRCNN) + torch_model = patch_generalized_rcnn(torch_model) + super().__init__(cfg, torch_model) + + self.roi_heads_patcher = ROIHeadsPatcher(cfg, self._wrapped_model.roi_heads) + + def encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN") + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + if not self.tensor_mode: + return self._wrapped_model.inference(inputs) + images = self._caffe2_preprocess_image(inputs) + features = self._wrapped_model.backbone(images.tensor) + proposals, _ = self._wrapped_model.proposal_generator(images, features) + with self.roi_heads_patcher.mock_roi_heads(): + detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) + return tuple(detector_results[0].flatten()) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + def f(batched_inputs, c2_inputs, c2_results): + image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]] + results = assemble_rcnn_outputs_by_name(image_sizes, c2_results) + return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) + + return f + + +class Caffe2PanopticFPN(Caffe2MetaArch): + def __init__(self, cfg, torch_model): + assert isinstance(torch_model, meta_arch.PanopticFPN) + torch_model = 
patch_generalized_rcnn(torch_model) + super().__init__(cfg, torch_model) + + self.roi_heads_patcher = ROIHeadsPatcher(cfg, self._wrapped_model.roi_heads) + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + assert self.tensor_mode + images = self._caffe2_preprocess_image(inputs) + features = self._wrapped_model.backbone(images.tensor) + + sem_seg_results, _ = self._wrapped_model.sem_seg_head(features) + sem_seg_results = alias(sem_seg_results, "sem_seg") + + proposals, _ = self._wrapped_model.proposal_generator(images, features) + + with self.roi_heads_patcher.mock_roi_heads(self.tensor_mode): + detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) + + return tuple(detector_results[0].flatten()) + (sem_seg_results,) + + def encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"PanopticFPN") + + # Inference parameters: + check_set_pb_arg(predict_net, "combine_on", "i", self._wrapped_model.combine_on) + check_set_pb_arg( + predict_net, + "combine_overlap_threshold", + "f", + _cast_to_f32(self._wrapped_model.combine_overlap_threshold), + ) + check_set_pb_arg( + predict_net, + "combine_stuff_area_limit", + "i", + self._wrapped_model.combine_stuff_area_limit, + ) + check_set_pb_arg( + predict_net, + "combine_instances_confidence_threshold", + "f", + _cast_to_f32(self._wrapped_model.combine_instances_confidence_threshold), + ) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + combine_on = get_pb_arg_vali(predict_net, "combine_on", None) + combine_overlap_threshold = get_pb_arg_valf(predict_net, "combine_overlap_threshold", None) + combine_stuff_area_limit = get_pb_arg_vali(predict_net, "combine_stuff_area_limit", None) + combine_instances_confidence_threshold = get_pb_arg_valf( + predict_net, "combine_instances_confidence_threshold", None + ) + + def f(batched_inputs, c2_inputs, c2_results): + image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]] + detector_results = assemble_rcnn_outputs_by_name( + image_sizes, c2_results, force_mask_on=True + ) + sem_seg_results = c2_results["sem_seg"] + + # copied from meta_arch/panoptic_fpn.py ... 
+ processed_results = [] + for sem_seg_result, detector_result, input_per_image, image_size in zip( + sem_seg_results, detector_results, batched_inputs, image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) + detector_r = detector_postprocess(detector_result, height, width) + + processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) + + if combine_on: + panoptic_r = combine_semantic_and_instance_outputs( + detector_r, + sem_seg_r.argmax(dim=0), + combine_overlap_threshold, + combine_stuff_area_limit, + combine_instances_confidence_threshold, + ) + processed_results[-1]["panoptic_seg"] = panoptic_r + return processed_results + + return f + + +class Caffe2RetinaNet(Caffe2MetaArch): + def __init__(self, cfg, torch_model): + assert isinstance(torch_model, meta_arch.RetinaNet) + super().__init__(cfg, torch_model) + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + assert self.tensor_mode + images = self._caffe2_preprocess_image(inputs) + + # explicitly return the images sizes to avoid removing "im_info" by ONNX + # since it's not used in the forward path + return_tensors = [images.image_sizes] + + features = self._wrapped_model.backbone(images.tensor) + features = [features[f] for f in self._wrapped_model.in_features] + for i, feature_i in enumerate(features): + features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True) + return_tensors.append(features[i]) + + box_cls, box_delta = self._wrapped_model.head(features) + for i, (box_cls_i, box_delta_i) in enumerate(zip(box_cls, box_delta)): + return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i))) + return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i))) + + return tuple(return_tensors) + + def encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet") + + # Inference parameters: + check_set_pb_arg( + predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.score_threshold) + ) + check_set_pb_arg(predict_net, "topk_candidates", "i", self._wrapped_model.topk_candidates) + check_set_pb_arg( + predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.nms_threshold) + ) + check_set_pb_arg( + predict_net, + "max_detections_per_image", + "i", + self._wrapped_model.max_detections_per_image, + ) + + check_set_pb_arg( + predict_net, + "bbox_reg_weights", + "floats", + [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights], + ) + self._encode_anchor_generator_cfg(predict_net) + + def _encode_anchor_generator_cfg(self, predict_net): + # serialize anchor_generator for future use + serialized_anchor_generator = io.BytesIO() + torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator) + # Ideally we can put anchor generating inside the model, then we don't + # need to store this information. 
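+        # The pickled bytes are attached to predict_net as the
+        # "serialized_anchor_generator" argument below, so that
+        # get_outputs_converter() can rebuild the anchor generator at inference time.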
+ bytes = serialized_anchor_generator.getvalue() + check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + self = types.SimpleNamespace() + serialized_anchor_generator = io.BytesIO( + get_pb_arg_vals(predict_net, "serialized_anchor_generator", None) + ) + self.anchor_generator = torch.load(serialized_anchor_generator) + bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None) + self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights)) + self.score_threshold = get_pb_arg_valf(predict_net, "score_threshold", None) + self.topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None) + self.nms_threshold = get_pb_arg_valf(predict_net, "nms_threshold", None) + self.max_detections_per_image = get_pb_arg_vali( + predict_net, "max_detections_per_image", None + ) + + # hack to reuse inference code from RetinaNet + self.inference = functools.partial(meta_arch.RetinaNet.inference, self) + self.inference_single_image = functools.partial( + meta_arch.RetinaNet.inference_single_image, self + ) + + def f(batched_inputs, c2_inputs, c2_results): + image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]] + + num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")]) + box_cls = [c2_results["box_cls_{}".format(i)] for i in range(num_features)] + box_delta = [c2_results["box_delta_{}".format(i)] for i in range(num_features)] + + # For each feature level, feature should have the same batch size and + # spatial dimension as the box_cls and box_delta. + dummy_features = [box_delta[i].clone()[:, 0:0, :, :] for i in range(num_features)] + anchors = self.anchor_generator(dummy_features) + + # self.num_classess can be inferred + self.num_classes = box_cls[0].shape[1] // (box_delta[0].shape[1] // 4) + + results = self.inference(box_cls, box_delta, anchors, image_sizes) + return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) + + return f + + +META_ARCH_CAFFE2_EXPORT_TYPE_MAP = { + "GeneralizedRCNN": Caffe2GeneralizedRCNN, + "PanopticFPN": Caffe2PanopticFPN, + "RetinaNet": Caffe2RetinaNet, +} diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/patcher.py b/preprocess/mhp_extension/detectron2/detectron2/export/patcher.py new file mode 100644 index 0000000000000000000000000000000000000000..3f0b0fd8122d12c10d06cfc1b0720e3c3374c737 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/patcher.py @@ -0,0 +1,153 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import contextlib +import mock +import torch + +from detectron2.modeling import poolers +from detectron2.modeling.proposal_generator import rpn +from detectron2.modeling.roi_heads import keypoint_head, mask_head +from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers + +from .c10 import ( + Caffe2Compatible, + Caffe2FastRCNNOutputsInference, + Caffe2KeypointRCNNInference, + Caffe2MaskRCNNInference, + Caffe2ROIPooler, + Caffe2RPN, +) + + +class GenericMixin(object): + pass + + +class Caffe2CompatibleConverter(object): + """ + A GenericUpdater which implements the `create_from` interface, by modifying + module object and assign it with another class replaceCls. 
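+
+    Example (illustrative): the `patch` helper below uses this converter to swap
+    modules for their caffe2-compatible counterparts, e.g.
+    `patch(model, rpn.RPN, Caffe2CompatibleConverter(Caffe2RPN))`.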
+ """ + + def __init__(self, replaceCls): + self.replaceCls = replaceCls + + def create_from(self, module): + # update module's class to the new class + assert isinstance(module, torch.nn.Module) + if issubclass(self.replaceCls, GenericMixin): + # replaceCls should act as mixin, create a new class on-the-fly + new_class = type( + "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), + (self.replaceCls, module.__class__), + {}, # {"new_method": lambda self: ...}, + ) + module.__class__ = new_class + else: + # replaceCls is complete class, this allow arbitrary class swap + module.__class__ = self.replaceCls + + # initialize Caffe2Compatible + if isinstance(module, Caffe2Compatible): + module.tensor_mode = False + + return module + + +def patch(model, target, updater, *args, **kwargs): + """ + recursively (post-order) update all modules with the target type and its + subclasses, make a initialization/composition/inheritance/... via the + updater.create_from. + """ + for name, module in model.named_children(): + model._modules[name] = patch(module, target, updater, *args, **kwargs) + if isinstance(model, target): + return updater.create_from(model, *args, **kwargs) + return model + + +def patch_generalized_rcnn(model): + ccc = Caffe2CompatibleConverter + model = patch(model, rpn.RPN, ccc(Caffe2RPN)) + model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) + + return model + + +@contextlib.contextmanager +def mock_fastrcnn_outputs_inference( + tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers +): + with mock.patch.object( + box_predictor_type, + "inference", + autospec=True, + side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +@contextlib.contextmanager +def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): + with mock.patch( + "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +@contextlib.contextmanager +def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): + with mock.patch( + "{}.keypoint_rcnn_inference".format(patched_module), + side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint), + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +class ROIHeadsPatcher: + def __init__(self, cfg, heads): + self.heads = heads + + self.use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT + + @contextlib.contextmanager + def mock_roi_heads(self, tensor_mode=True): + """ + Patching several inference functions inside ROIHeads and its subclasses + + Args: + tensor_mode (bool): whether the inputs/outputs are caffe2's tensor + format or not. Default to True. + """ + # NOTE: this requries the `keypoint_rcnn_inference` and `mask_rcnn_inference` + # are called inside the same file as BaseXxxHead due to using mock.patch. 
+ kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__ + mask_head_mod = mask_head.BaseMaskRCNNHead.__module__ + + mock_ctx_managers = [ + mock_fastrcnn_outputs_inference( + tensor_mode=tensor_mode, + check=True, + box_predictor_type=type(self.heads.box_predictor), + ) + ] + if getattr(self.heads, "keypoint_on", False): + mock_ctx_managers += [ + mock_keypoint_rcnn_inference( + tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint + ) + ] + if getattr(self.heads, "mask_on", False): + mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)] + + with contextlib.ExitStack() as stack: # python 3.3+ + for mgr in mock_ctx_managers: + stack.enter_context(mgr) + yield diff --git a/preprocess/mhp_extension/detectron2/detectron2/export/shared.py b/preprocess/mhp_extension/detectron2/detectron2/export/shared.py new file mode 100644 index 0000000000000000000000000000000000000000..cb7ffeb098f21178660572830164126fab63e0e1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/export/shared.py @@ -0,0 +1,1034 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import collections +import contextlib +import copy +import functools +import logging +import mock +import numpy as np +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +import caffe2.python.utils as putils +import torch +import torch.nn.functional as F +from caffe2.proto import caffe2_pb2 +from caffe2.python import core, net_drawer, workspace +from torch.nn.functional import interpolate as interp + +logger = logging.getLogger(__name__) + + +# ==== torch/utils_toffee/cast.py ======================================= + + +def to_device(t, device_str): + """ + This function is a replacement of .to(another_device) such that it allows the + casting to be traced properly by explicitly calling the underlying copy ops. + It also avoids introducing unncessary op when casting to the same device. + """ + src = t.device + dst = torch.device(device_str) + + if src == dst: + return t + elif src.type == "cuda" and dst.type == "cpu": + return torch.ops._caffe2.CopyGPUToCPU(t) + elif src.type == "cpu" and dst.type == "cuda": + return torch.ops._caffe2.CopyCPUToGPU(t) + else: + raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst)) + + +# ==== torch/utils_toffee/interpolate.py ======================================= + + +# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py +def BilinearInterpolation(tensor_in, up_scale): + assert up_scale % 2 == 0, "Scale should be even" + + def upsample_filt(size): + factor = (size + 1) // 2 + if size % 2 == 1: + center = factor - 1 + else: + center = factor - 0.5 + + og = np.ogrid[:size, :size] + return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) + + kernel_size = int(up_scale) * 2 + bil_filt = upsample_filt(kernel_size) + + dim = int(tensor_in.shape[1]) + kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32) + kernel[range(dim), range(dim), :, :] = bil_filt + + tensor_out = F.conv_transpose2d( + tensor_in, + weight=to_device(torch.Tensor(kernel), tensor_in.device), + bias=None, + stride=int(up_scale), + padding=int(up_scale / 2), + ) + + return tensor_out + + +# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if +# using dynamic `scale_factor` rather than static `size`. (T43166860) +# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly. 
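+# Illustrative note on the helper below: during ONNX export,
+# mock_torch_nn_functional_interpolate() patches F.interpolate, so a call such as
+# F.interpolate(x, scale_factor=2, mode="nearest") on an NCHW tensor is routed to
+# onnx_compatibale_interpolate() and lowered to torch.ops._caffe2.ResizeNearest.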
+def onnx_compatibale_interpolate( + input, size=None, scale_factor=None, mode="nearest", align_corners=None +): + # NOTE: The input dimensions are interpreted in the form: + # `mini-batch x channels x [optional depth] x [optional height] x width`. + if size is None and scale_factor is not None: + if input.dim() == 4: + if isinstance(scale_factor, (int, float)): + height_scale, width_scale = (scale_factor, scale_factor) + else: + assert isinstance(scale_factor, (tuple, list)) + assert len(scale_factor) == 2 + height_scale, width_scale = scale_factor + + assert not align_corners, "No matching C2 op for align_corners == True" + if mode == "nearest": + return torch.ops._caffe2.ResizeNearest( + input, order="NCHW", width_scale=width_scale, height_scale=height_scale + ) + elif mode == "bilinear": + logger.warning( + "Use F.conv_transpose2d for bilinear interpolate" + " because there's no such C2 op, this may cause significant" + " slowdown and the boundary pixels won't be as same as" + " using F.interpolate due to padding." + ) + assert height_scale == width_scale + return BilinearInterpolation(input, up_scale=height_scale) + logger.warning("Output size is not static, it might cause ONNX conversion issue") + + return interp(input, size, scale_factor, mode, align_corners) + + +@contextlib.contextmanager +def mock_torch_nn_functional_interpolate(): + if torch.onnx.is_in_onnx_export(): + with mock.patch( + "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate + ): + yield + else: + yield + + +# ==== torch/utils_caffe2/ws_utils.py ========================================== + + +class ScopedWS(object): + def __init__(self, ws_name, is_reset, is_cleanup=False): + self.ws_name = ws_name + self.is_reset = is_reset + self.is_cleanup = is_cleanup + self.org_ws = "" + + def __enter__(self): + self.org_ws = workspace.CurrentWorkspace() + if self.ws_name is not None: + workspace.SwitchWorkspace(self.ws_name, True) + if self.is_reset: + workspace.ResetWorkspace() + + return workspace + + def __exit__(self, *args): + if self.is_cleanup: + workspace.ResetWorkspace() + if self.ws_name is not None: + workspace.SwitchWorkspace(self.org_ws) + + +def fetch_any_blob(name): + bb = None + try: + bb = workspace.FetchBlob(name) + except TypeError: + bb = workspace.FetchInt8Blob(name) + except Exception as e: + logger.error("Get blob {} error: {}".format(name, e)) + + return bb + + +# ==== torch/utils_caffe2/protobuf.py ========================================== + + +def get_pb_arg(pb, arg_name): + for x in pb.arg: + if x.name == arg_name: + return x + return None + + +def get_pb_arg_valf(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.f if arg is not None else default_val + + +def get_pb_arg_floats(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(map(float, arg.floats)) if arg is not None else default_val + + +def get_pb_arg_ints(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(map(int, arg.ints)) if arg is not None else default_val + + +def get_pb_arg_vali(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.i if arg is not None else default_val + + +def get_pb_arg_vals(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.s if arg is not None else default_val + + +def get_pb_arg_valstrings(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(arg.strings) if arg is not None else default_val + + +def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, 
allow_override=False): + arg = get_pb_arg(pb, arg_name) + if arg is None: + arg = putils.MakeArgument(arg_name, arg_value) + assert hasattr(arg, arg_attr) + pb.arg.extend([arg]) + if allow_override and getattr(arg, arg_attr) != arg_value: + logger.warning( + "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value) + ) + setattr(arg, arg_attr, arg_value) + else: + assert arg is not None + assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format( + getattr(arg, arg_attr), arg_value + ) + + +def _create_const_fill_op_from_numpy(name, tensor, device_option=None): + assert type(tensor) == np.ndarray + kTypeNameMapper = { + np.dtype("float32"): "GivenTensorFill", + np.dtype("int32"): "GivenTensorIntFill", + np.dtype("int64"): "GivenTensorInt64Fill", + np.dtype("uint8"): "GivenTensorStringFill", + } + + args_dict = {} + if tensor.dtype == np.dtype("uint8"): + args_dict.update({"values": [str(tensor.data)], "shape": [1]}) + else: + args_dict.update({"values": tensor, "shape": tensor.shape}) + + if device_option is not None: + args_dict["device_option"] = device_option + + return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict) + + +def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor): + assert type(int8_tensor) == workspace.Int8Tensor + kTypeNameMapper = { + np.dtype("int32"): "Int8GivenIntTensorFill", + np.dtype("uint8"): "Int8GivenTensorFill", + } + + tensor = int8_tensor.data + assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")] + values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor + + return core.CreateOperator( + kTypeNameMapper[tensor.dtype], + [], + [name], + values=values, + shape=tensor.shape, + Y_scale=int8_tensor.scale, + Y_zero_point=int8_tensor.zero_point, + ) + + +def create_const_fill_op( + name: str, + blob: Union[np.ndarray, workspace.Int8Tensor], + device_option: Optional[caffe2_pb2.DeviceOption] = None, +) -> caffe2_pb2.OperatorDef: + """ + Given a blob object, return the Caffe2 operator that creates this blob + as constant. Currently support NumPy tensor and Caffe2 Int8Tensor. + """ + + tensor_type = type(blob) + assert tensor_type in [ + np.ndarray, + workspace.Int8Tensor, + ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format( + name, type(blob) + ) + + if tensor_type == np.ndarray: + return _create_const_fill_op_from_numpy(name, blob, device_option) + elif tensor_type == workspace.Int8Tensor: + assert device_option is None + return _create_const_fill_op_from_c2_int8_tensor(name, blob) + + +def construct_init_net_from_params( + params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None +) -> caffe2_pb2.NetDef: + """ + Construct the init_net from params dictionary + """ + init_net = caffe2_pb2.NetDef() + device_options = device_options or {} + for name, blob in params.items(): + if isinstance(blob, str): + logger.warning( + ( + "Blob {} with type {} is not supported in generating init net," + " skipped.".format(name, type(blob)) + ) + ) + continue + init_net.op.extend( + [create_const_fill_op(name, blob, device_option=device_options.get(name, None))] + ) + init_net.external_output.append(name) + return init_net + + +def get_producer_map(ssa): + """ + Return dict from versioned blob to (i, j), + where i is index of producer op, j is the index of output of that op. 
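+    e.g. if op #3 produces versioned blob ("rois", 0) as its second output, the
+    returned dict contains ("rois", 0) -> (3, 1).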
+ """ + producer_map = {} + for i in range(len(ssa)): + outputs = ssa[i][1] + for j, outp in enumerate(outputs): + producer_map[outp] = (i, j) + return producer_map + + +def get_consumer_map(ssa): + """ + Return dict from versioned blob to list of (i, j), + where i is index of consumer op, j is the index of input of that op. + """ + consumer_map = collections.defaultdict(list) + for i in range(len(ssa)): + inputs = ssa[i][0] + for j, inp in enumerate(inputs): + consumer_map[inp].append((i, j)) + return consumer_map + + +def get_params_from_init_net( + init_net: caffe2_pb2.NetDef, +) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]: + """ + Take the output blobs from init_net by running it. + Outputs: + params: dict from blob name to numpy array + device_options: dict from blob name to the device option of its creating op + """ + # NOTE: this assumes that the params is determined by producer op with the + # only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor. + def _get_device_option(producer_op): + if producer_op.type == "CopyGPUToCPU": + return caffe2_pb2.DeviceOption() + else: + return producer_op.device_option + + with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws: + ws.RunNetOnce(init_net) + params = {b: fetch_any_blob(b) for b in init_net.external_output} + ssa, versions = core.get_ssa(init_net) + producer_map = get_producer_map(ssa) + device_options = { + b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]]) + for b in init_net.external_output + } + return params, device_options + + +def _updater_raise(op, input_types, output_types): + raise RuntimeError( + "Failed to apply updater for op {} given input_types {} and" + " output_types {}".format(op, input_types, output_types) + ) + + +def _generic_status_identifier( + predict_net: caffe2_pb2.NetDef, + status_updater: Callable, + known_status: Dict[Tuple[str, int], Any], +) -> Dict[Tuple[str, int], Any]: + """ + Statically infer the status of each blob, the status can be such as device type + (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here + is versioned blob (Tuple[str, int]) in the format compatible with ssa. + Inputs: + predict_net: the caffe2 network + status_updater: a callable, given an op and the status of its input/output, + it returns the updated status of input/output. `None` is used for + representing unknown status. + known_status: a dict containing known status, used as initialization. 
+ Outputs: + A dict mapping from versioned blob to its status + """ + ssa, versions = core.get_ssa(predict_net) + versioned_ext_input = [(b, 0) for b in predict_net.external_input] + versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output] + all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa]) + + allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output) + assert all(k in allowed_vbs for k in known_status) + assert all(v is not None for v in known_status.values()) + _known_status = copy.deepcopy(known_status) + + def _check_and_update(key, value): + assert value is not None + if key in _known_status: + if not _known_status[key] == value: + raise RuntimeError( + "Confilict status for {}, existing status {}, new status {}".format( + key, _known_status[key], value + ) + ) + _known_status[key] = value + + def _update_i(op, ssa_i): + versioned_inputs = ssa_i[0] + versioned_outputs = ssa_i[1] + + inputs_status = [_known_status.get(b, None) for b in versioned_inputs] + outputs_status = [_known_status.get(b, None) for b in versioned_outputs] + + new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status) + + for versioned_blob, status in zip( + versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status + ): + if status is not None: + _check_and_update(versioned_blob, status) + + for op, ssa_i in zip(predict_net.op, ssa): + _update_i(op, ssa_i) + for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)): + _update_i(op, ssa_i) + + # NOTE: This strictly checks all the blob from predict_net must be assgined + # a known status. However sometimes it's impossible (eg. having deadend op), + # we may relax this constraint if + for k in all_versioned_blobs: + if k not in _known_status: + raise NotImplementedError( + "Can not infer the status for {}. 
Currently only support the case where" + " a single forward and backward pass can identify status for all blobs.".format(k) + ) + + return _known_status + + +def infer_device_type( + predict_net: caffe2_pb2.NetDef, + known_status: Dict[Tuple[str, int], Any], + device_name_style: str = "caffe2", +) -> Dict[Tuple[str, int], str]: + """ Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob """ + + assert device_name_style in ["caffe2", "pytorch"] + _CPU_STR = "cpu" + _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda" + + def _copy_cpu_to_gpu_updater(op, input_types, output_types): + if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR: + _updater_raise(op, input_types, output_types) + return ([_CPU_STR], [_GPU_STR]) + + def _copy_gpu_to_cpu_updater(op, input_types, output_types): + if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR: + _updater_raise(op, input_types, output_types) + return ([_GPU_STR], [_CPU_STR]) + + def _other_ops_updater(op, input_types, output_types): + non_none_types = [x for x in input_types + output_types if x is not None] + if len(non_none_types) > 0: + the_type = non_none_types[0] + if not all(x == the_type for x in non_none_types): + _updater_raise(op, input_types, output_types) + else: + the_type = None + return ([the_type for _ in op.input], [the_type for _ in op.output]) + + def _device_updater(op, *args, **kwargs): + return { + "CopyCPUToGPU": _copy_cpu_to_gpu_updater, + "CopyGPUToCPU": _copy_gpu_to_cpu_updater, + }.get(op.type, _other_ops_updater)(op, *args, **kwargs) + + return _generic_status_identifier(predict_net, _device_updater, known_status) + + +# ==== torch/utils_caffe2/vis.py =============================================== + + +def _modify_blob_names(ops, blob_rename_f): + ret = [] + + def _replace_list(blob_list, replaced_list): + del blob_list[:] + blob_list.extend(replaced_list) + + for x in ops: + cur = copy.deepcopy(x) + _replace_list(cur.input, list(map(blob_rename_f, cur.input))) + _replace_list(cur.output, list(map(blob_rename_f, cur.output))) + ret.append(cur) + + return ret + + +def _rename_blob(name, blob_sizes, blob_ranges): + def _list_to_str(bsize): + ret = ", ".join([str(x) for x in bsize]) + ret = "[" + ret + "]" + return ret + + ret = name + if blob_sizes is not None and name in blob_sizes: + ret += "\n" + _list_to_str(blob_sizes[name]) + if blob_ranges is not None and name in blob_ranges: + ret += "\n" + _list_to_str(blob_ranges[name]) + + return ret + + +# graph_name could not contain word 'graph' +def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None): + blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges) + return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f) + + +def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None): + graph = None + ops = net.op + if blob_rename_func is not None: + ops = _modify_blob_names(ops, blob_rename_func) + if not op_only: + graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB") + else: + graph = net_drawer.GetPydotGraphMinimal( + ops, graph_name, rankdir="TB", minimal_dependency=True + ) + + try: + par_dir = os.path.dirname(file_name) + if not os.path.exists(par_dir): + os.makedirs(par_dir) + + format = os.path.splitext(os.path.basename(file_name))[-1] + if format == ".png": + graph.write_png(file_name) + elif format == ".pdf": + graph.write_pdf(file_name) + elif format == ".svg": + 
graph.write_svg(file_name) + else: + print("Incorrect format {}".format(format)) + except Exception as e: + print("Error when writing graph to image {}".format(e)) + + return graph + + +# ==== torch/utils_toffee/aten_to_caffe2.py ==================================== + + +def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef): + """ + For ONNX exported model, GroupNorm will be represented as ATen op, + this can be a drop in replacement from ATen to GroupNorm + """ + count = 0 + for op in predict_net.op: + if op.type == "ATen": + op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3 + if op_name and op_name.decode() == "group_norm": + op.arg.remove(get_pb_arg(op, "operator")) + + if get_pb_arg_vali(op, "cudnn_enabled", None): + op.arg.remove(get_pb_arg(op, "cudnn_enabled")) + + num_groups = get_pb_arg_vali(op, "num_groups", None) + if num_groups is not None: + op.arg.remove(get_pb_arg(op, "num_groups")) + check_set_pb_arg(op, "group", "i", num_groups) + + op.type = "GroupNorm" + count += 1 + if count > 1: + logger.info("Replaced {} ATen operator to GroupNormOp".format(count)) + + +# ==== torch/utils_toffee/alias.py ============================================= + + +def alias(x, name, is_backward=False): + if not torch.onnx.is_in_onnx_export(): + return x + assert isinstance(x, torch.Tensor) + return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward) + + +def fuse_alias_placeholder(predict_net, init_net): + """ Remove AliasWithName placeholder and rename the input/output of it """ + # First we finish all the re-naming + for i, op in enumerate(predict_net.op): + if op.type == "AliasWithName": + assert len(op.input) == 1 + assert len(op.output) == 1 + name = get_pb_arg_vals(op, "name", None).decode() + is_backward = bool(get_pb_arg_vali(op, "is_backward", 0)) + rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward) + rename_op_output(predict_net, i, 0, name) + + # Remove AliasWithName, should be very safe since it's a non-op + new_ops = [] + for op in predict_net.op: + if op.type != "AliasWithName": + new_ops.append(op) + else: + # safety check + assert op.input == op.output + assert op.input[0] == op.arg[0].s.decode() + del predict_net.op[:] + predict_net.op.extend(new_ops) + + +# ==== torch/utils_caffe2/graph_transform.py =================================== + + +class IllegalGraphTransformError(ValueError): + """ When a graph transform function call can't be executed. 
""" + + +def _rename_versioned_blob_in_proto( + proto: caffe2_pb2.NetDef, + old_name: str, + new_name: str, + version: int, + ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]], + start_versions: Dict[str, int], + end_versions: Dict[str, int], +): + """ In given proto, rename all blobs with matched version """ + # Operater list + for op, i_th_ssa in zip(proto.op, ssa): + versioned_inputs, versioned_outputs = i_th_ssa + for i in range(len(op.input)): + if versioned_inputs[i] == (old_name, version): + op.input[i] = new_name + for i in range(len(op.output)): + if versioned_outputs[i] == (old_name, version): + op.output[i] = new_name + # external_input + if start_versions.get(old_name, 0) == version: + for i in range(len(proto.external_input)): + if proto.external_input[i] == old_name: + proto.external_input[i] = new_name + # external_output + if end_versions.get(old_name, 0) == version: + for i in range(len(proto.external_output)): + if proto.external_output[i] == old_name: + proto.external_output[i] = new_name + + +def rename_op_input( + predict_net: caffe2_pb2.NetDef, + init_net: caffe2_pb2.NetDef, + op_id: int, + input_id: int, + new_name: str, + from_producer: bool = False, +): + """ + Rename the op_id-th operator in predict_net, change it's input_id-th input's + name to the new_name. It also does automatic re-route and change + external_input and init_net if necessary. + - It requires the input is only consumed by this op. + - This function modifies predict_net and init_net in-place. + - When from_producer is enable, this also updates other operators that consumes + the same input. Be cautious because may trigger unintended behavior. + """ + assert isinstance(predict_net, caffe2_pb2.NetDef) + assert isinstance(init_net, caffe2_pb2.NetDef) + + init_net_ssa, init_net_versions = core.get_ssa(init_net) + predict_net_ssa, predict_net_versions = core.get_ssa( + predict_net, copy.deepcopy(init_net_versions) + ) + + versioned_inputs, versioned_outputs = predict_net_ssa[op_id] + old_name, version = versioned_inputs[input_id] + + if from_producer: + producer_map = get_producer_map(predict_net_ssa) + if not (old_name, version) in producer_map: + raise NotImplementedError( + "Can't find producer, the input {} is probably from" + " init_net, this is not supported yet.".format(old_name) + ) + producer = producer_map[(old_name, version)] + rename_op_output(predict_net, producer[0], producer[1], new_name) + return + + def contain_targets(op_ssa): + return (old_name, version) in op_ssa[0] + + is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa] + if sum(is_consumer) > 1: + raise IllegalGraphTransformError( + ( + "Input '{}' of operator(#{}) are consumed by other ops, please use" + + " rename_op_output on the producer instead. Offending op: \n{}" + ).format(old_name, op_id, predict_net.op[op_id]) + ) + + # update init_net + _rename_versioned_blob_in_proto( + init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions + ) + # update predict_net + _rename_versioned_blob_in_proto( + predict_net, + old_name, + new_name, + version, + predict_net_ssa, + init_net_versions, + predict_net_versions, + ) + + +def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str): + """ + Rename the op_id-th operator in predict_net, change it's output_id-th input's + name to the new_name. It also does automatic re-route and change + external_output and if necessary. + - It allows multiple consumers of its output. 
+ - This function modifies predict_net in-place, doesn't need init_net. + """ + assert isinstance(predict_net, caffe2_pb2.NetDef) + + ssa, blob_versions = core.get_ssa(predict_net) + + versioned_inputs, versioned_outputs = ssa[op_id] + old_name, version = versioned_outputs[output_id] + + # update predict_net + _rename_versioned_blob_in_proto( + predict_net, old_name, new_name, version, ssa, {}, blob_versions + ) + + +def get_sub_graph_external_input_output( + predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int] +) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]: + """ + Return the list of external input/output of sub-graph, + each element is tuple of the name and corresponding version in predict_net. + + external input/output is defined the same way as caffe2 NetDef. + """ + ssa, versions = core.get_ssa(predict_net) + + all_inputs = [] + all_outputs = [] + for op_id in sub_graph_op_indices: + all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs] + all_outputs += list(ssa[op_id][1]) # ssa output won't repeat + + # for versioned blobs, external inputs are just those blob in all_inputs + # but not in all_outputs + ext_inputs = [inp for inp in all_inputs if inp not in all_outputs] + + # external outputs are essentially outputs of this subgraph that are used + # outside of this sub-graph (including predict_net.external_output) + all_other_inputs = sum( + (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices), + [(outp, versions[outp]) for outp in predict_net.external_output], + ) + ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)] + + return ext_inputs, ext_outputs + + +class DiGraph: + """ A DAG representation of caffe2 graph, each vertice is a versioned blob. """ + + def __init__(self): + self.vertices = set() + self.graph = collections.defaultdict(list) + + def add_edge(self, u, v): + self.graph[u].append(v) + self.vertices.add(u) + self.vertices.add(v) + + # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/ + def get_all_paths(self, s, d): + visited = {k: False for k in self.vertices} + path = [] + all_paths = [] + + def _get_all_paths_util(graph, u, d, visited, path): + visited[u] = True + path.append(u) + if u == d: + all_paths.append(copy.deepcopy(path)) + else: + for i in graph[u]: + if not visited[i]: + _get_all_paths_util(graph, i, d, visited, path) + path.pop() + visited[u] = False + + _get_all_paths_util(self.graph, s, d, visited, path) + return all_paths + + @staticmethod + def from_ssa(ssa): + graph = DiGraph() + for op_id in range(len(ssa)): + for inp in ssa[op_id][0]: + for outp in ssa[op_id][1]: + graph.add_edge(inp, outp) + return graph + + +def _get_dependency_chain(ssa, versioned_target, versioned_source): + """ + Return the index list of relevant operator to produce target blob from source blob, + if there's no dependency, return empty list. + """ + + # finding all paths between nodes can be O(N!), thus we can only search + # in the subgraph using the op starting from the first consumer of source blob + # to the producer of the target blob. 
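+    # The +/- 15 below is a heuristic margin around that op window, so that
+    # relevant ops interleaved with unrelated ones are still covered by the search.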
+ consumer_map = get_consumer_map(ssa) + producer_map = get_producer_map(ssa) + start_op = min(x[0] for x in consumer_map[versioned_source]) - 15 + end_op = ( + producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op + ) + sub_graph_ssa = ssa[start_op : end_op + 1] + if len(sub_graph_ssa) > 30: + logger.warning( + "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it" + " might take non-trival time to find all paths between them.".format( + versioned_source, versioned_target, start_op, end_op + ) + ) + + dag = DiGraph.from_ssa(sub_graph_ssa) + paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends + ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths] + return sorted(set().union(*[set(ops) for ops in ops_in_paths])) + + +def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]: + """ + Idenfity the reshape sub-graph in a protobuf. + The reshape sub-graph is defined as matching the following pattern: + + (input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -โ”€โ” + โ””-------------------------------------------> Reshape -> (output_blob) + + Return: + List of sub-graphs, each sub-graph is represented as a list of indices + of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape] + """ + + ssa, _ = core.get_ssa(predict_net) + + ret = [] + for i, op in enumerate(predict_net.op): + if op.type == "Reshape": + assert len(op.input) == 2 + input_ssa = ssa[i][0] + data_source = input_ssa[0] + shape_source = input_ssa[1] + op_indices = _get_dependency_chain(ssa, shape_source, data_source) + ret.append(op_indices + [i]) + return ret + + +def remove_reshape_for_fc(predict_net, params): + """ + In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape + a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping + doesn't work well with ONNX and Int8 tools, and cause using extra + ops (eg. ExpandDims) that might not be available on mobile. + Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape + after exporting ONNX model. + """ + from caffe2.python import core + + # find all reshape sub-graph that can be removed, which is now all Reshape + # sub-graph whose output is only consumed by FC. + # TODO: to make it safer, we may need the actually value to better determine + # if a Reshape before FC is removable. + reshape_sub_graphs = identify_reshape_sub_graph(predict_net) + sub_graphs_to_remove = [] + for reshape_sub_graph in reshape_sub_graphs: + reshape_op_id = reshape_sub_graph[-1] + assert predict_net.op[reshape_op_id].type == "Reshape" + ssa, _ = core.get_ssa(predict_net) + reshape_output = ssa[reshape_op_id][1][0] + consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]] + if all(predict_net.op[consumer].type == "FC" for consumer in consumers): + # safety check if the sub-graph is isolated, for this reshape sub-graph, + # it means it has one non-param external input and one external output. + ext_inputs, ext_outputs = get_sub_graph_external_input_output( + predict_net, reshape_sub_graph + ) + non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] + if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1: + sub_graphs_to_remove.append(reshape_sub_graph) + + # perform removing subgraph by: + # 1: rename the Reshape's output to its input, then the graph can be + # seen as in-place itentify, meaning whose external input/output are the same. + # 2: simply remove those ops. 
+    remove_op_ids = []
+    params_to_remove = []
+    for sub_graph in sub_graphs_to_remove:
+        logger.info(
+            "Remove Reshape sub-graph:\n{}".format(
+                "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
+            )
+        )
+        reshape_op_id = sub_graph[-1]
+        new_reshape_output = predict_net.op[reshape_op_id].input[0]
+        rename_op_output(predict_net, reshape_op_id, 0, new_reshape_output)
+        ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
+        non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
+        params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
+        assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
+        assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
+        assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
+        remove_op_ids.extend(sub_graph)
+        params_to_remove.extend(params_ext_inputs)
+
+    predict_net = copy.deepcopy(predict_net)
+    new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
+    del predict_net.op[:]
+    predict_net.op.extend(new_ops)
+    for versioned_params in params_to_remove:
+        name = versioned_params[0]
+        logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
+        del params[name]
+        predict_net.external_input.remove(name)
+
+    return predict_net, params
+
+
+def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
+    """
+    In-place fuse the extra copy ops between cpu/gpu for the following case:
+        a -CopyAToB-> b -CopyBToA-> c1 -NextOp1-> d1
+                      -CopyBToA-> c2 -NextOp2-> d2
+    The fused network will look like:
+        a -NextOp1-> d1
+          -NextOp2-> d2
+    """
+
+    _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
+
+    def _fuse_once(predict_net):
+        ssa, blob_versions = core.get_ssa(predict_net)
+        consumer_map = get_consumer_map(ssa)
+        versioned_external_output = [
+            (name, blob_versions[name]) for name in predict_net.external_output
+        ]
+
+        for op_id, op in enumerate(predict_net.op):
+            if op.type in _COPY_OPS:
+                fw_copy_versioned_output = ssa[op_id][1][0]
+                consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
+                reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
+
+                is_fusable = (
+                    len(consumer_ids) > 0
+                    and fw_copy_versioned_output not in versioned_external_output
+                    and all(
+                        predict_net.op[_op_id].type == reverse_op_type
+                        and ssa[_op_id][1][0] not in versioned_external_output
+                        for _op_id in consumer_ids
+                    )
+                )
+
+                if is_fusable:
+                    for rv_copy_op_id in consumer_ids:
+                        # make each NextOp use "a" directly, then remove the Copy ops
+                        rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
+                        next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
+                        predict_net.op[next_op_id].input[inp_id] = op.input[0]
+                    # remove the Copy ops
+                    new_ops = [
+                        op
+                        for i, op in enumerate(predict_net.op)
+                        if i != op_id and i not in consumer_ids
+                    ]
+                    del predict_net.op[:]
+                    predict_net.op.extend(new_ops)
+                    return True
+
+        return False
+
+    # _fuse_once returns False if nothing can be fused
+    while _fuse_once(predict_net):
+        pass
+
+
+def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
+    """Remove ops whose outputs are neither used nor listed in external_output."""
+    ssa, versions = core.get_ssa(net_def)
+    versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
+    consumer_map = get_consumer_map(ssa)
+    removed_op_ids = set()
+
+    def _is_dead_end(versioned_blob):
+        return not (
+            versioned_blob in versioned_external_output
+            or (
+                len(consumer_map[versioned_blob]) > 0
+                and all(x[0] not in removed_op_ids for x in
consumer_map[versioned_blob]) + ) + ) + + for i, ssa_i in reversed(list(enumerate(ssa))): + versioned_outputs = ssa_i[1] + if all(_is_dead_end(outp) for outp in versioned_outputs): + removed_op_ids.add(i) + + # simply removing those deadend ops should have no effect to external_output + new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids] + del net_def.op[:] + net_def.op.extend(new_ops) diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2753739a03659dff5bc5b87f8c8417056d319842 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm +from .deform_conv import DeformConv, ModulatedDeformConv +from .mask_ops import paste_masks_in_image +from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated +from .roi_align import ROIAlign, roi_align +from .roi_align_rotated import ROIAlignRotated, roi_align_rotated +from .shape_spec import ShapeSpec +from .wrappers import BatchNorm2d, Conv2d, ConvTranspose2d, cat, interpolate, Linear +from .blocks import CNNBlockBase + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/batch_norm.py b/preprocess/mhp_extension/detectron2/detectron2/layers/batch_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..1339c6eaedfbc65c9604043234b738382d07fd40 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/batch_norm.py @@ -0,0 +1,242 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import torch +import torch.distributed as dist +from torch import nn +from torch.autograd.function import Function +from torch.nn import functional as F + +from detectron2.utils import comm + +from .wrappers import BatchNorm2d + +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) + + +class FrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + It contains non-trainable buffers called + "weight" and "bias", "running_mean", "running_var", + initialized to perform identity transformation. + + The pre-trained backbone models from Caffe2 only contain "weight" and "bias", + which are computed from the original four parameters of BN. + The affine transform `x * weight + bias` will perform the equivalent + computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. + When loading a backbone model from Caffe2, "running_mean" and "running_var" + will be left unchanged as identity transformation. + + Other pre-trained backbone models may contain all 4 parameters. + + The forward is implemented by `F.batch_norm(..., training=False)`. 
+ """ + + _version = 3 + + def __init__(self, num_features, eps=1e-5): + super().__init__() + self.num_features = num_features + self.eps = eps + self.register_buffer("weight", torch.ones(num_features)) + self.register_buffer("bias", torch.zeros(num_features)) + self.register_buffer("running_mean", torch.zeros(num_features)) + self.register_buffer("running_var", torch.ones(num_features) - eps) + + def forward(self, x): + if x.requires_grad: + # When gradients are needed, F.batch_norm will use extra memory + # because its backward op computes gradients for weight/bias as well. + scale = self.weight * (self.running_var + self.eps).rsqrt() + bias = self.bias - self.running_mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + return x * scale + bias + else: + # When gradients are not needed, F.batch_norm is a single fused op + # and provide more optimization opportunities. + return F.batch_norm( + x, + self.running_mean, + self.running_var, + self.weight, + self.bias, + training=False, + eps=self.eps, + ) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + # No running_mean/var in early versions + # This will silent the warnings + if prefix + "running_mean" not in state_dict: + state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) + if prefix + "running_var" not in state_dict: + state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) + + if version is not None and version < 3: + logger = logging.getLogger(__name__) + logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip("."))) + # In version < 3, running_var are used without +eps. + state_dict[prefix + "running_var"] -= self.eps + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def __repr__(self): + return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) + + @classmethod + def convert_frozen_batchnorm(cls, module): + """ + Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. + + Args: + module (torch.nn.Module): + + Returns: + If module is BatchNorm/SyncBatchNorm, returns a new module. + Otherwise, in-place convert module and return it. + + Similar to convert_sync_batchnorm in + https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py + """ + bn_module = nn.modules.batchnorm + bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) + res = module + if isinstance(module, bn_module): + res = cls(module.num_features) + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for name, child in module.named_children(): + new_child = cls.convert_frozen_batchnorm(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def get_norm(norm, out_channels): + """ + Args: + norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; + or a callable that takes a channel number and returns + the normalization layer as a nn.Module. 
+ + Returns: + nn.Module or None: the normalization layer + """ + if isinstance(norm, str): + if len(norm) == 0: + return None + norm = { + "BN": BatchNorm2d, + # Fixed in https://github.com/pytorch/pytorch/pull/36382 + "SyncBN": NaiveSyncBatchNorm if TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm, + "FrozenBN": FrozenBatchNorm2d, + "GN": lambda channels: nn.GroupNorm(32, channels), + # for debugging: + "nnSyncBN": nn.SyncBatchNorm, + "naiveSyncBN": NaiveSyncBatchNorm, + }[norm] + return norm(out_channels) + + +class AllReduce(Function): + @staticmethod + def forward(ctx, input): + input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())] + # Use allgather instead of allreduce since I don't trust in-place operations .. + dist.all_gather(input_list, input, async_op=False) + inputs = torch.stack(input_list, dim=0) + return torch.sum(inputs, dim=0) + + @staticmethod + def backward(ctx, grad_output): + dist.all_reduce(grad_output, async_op=False) + return grad_output + + +class NaiveSyncBatchNorm(BatchNorm2d): + """ + In PyTorch<=1.5, `nn.SyncBatchNorm` has incorrect gradient + when the batch size on each worker is different. + (e.g., when scale augmentation is used, or when it is applied to mask head). + + This is a slower but correct alternative to `nn.SyncBatchNorm`. + + Note: + There isn't a single definition of Sync BatchNorm. + + When ``stats_mode==""``, this module computes overall statistics by using + statistics of each worker with equal weight. The result is true statistics + of all samples (as if they are all on one worker) only when all workers + have the same (N, H, W). This mode does not support inputs with zero batch size. + + When ``stats_mode=="N"``, this module computes overall statistics by weighting + the statistics of each worker by their ``N``. The result is true statistics + of all samples (as if they are all on one worker) only when all workers + have the same (H, W). It is slower than ``stats_mode==""``. + + Even though the result of this module may not be the true statistics of all samples, + it may still be reasonable because it might be preferrable to assign equal weights + to all workers, regardless of their (H, W) dimension, instead of putting larger weight + on larger images. From preliminary experiments, little difference is found between such + a simplified implementation and an accurate computation of overall mean & variance. + """ + + def __init__(self, *args, stats_mode="", **kwargs): + super().__init__(*args, **kwargs) + assert stats_mode in ["", "N"] + self._stats_mode = stats_mode + + def forward(self, input): + if comm.get_world_size() == 1 or not self.training: + return super().forward(input) + + B, C = input.shape[0], input.shape[1] + + mean = torch.mean(input, dim=[0, 2, 3]) + meansqr = torch.mean(input * input, dim=[0, 2, 3]) + + if self._stats_mode == "": + assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' 
+ vec = torch.cat([mean, meansqr], dim=0) + vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size()) + mean, meansqr = torch.split(vec, C) + momentum = self.momentum + else: + if B == 0: + vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) + vec = vec + input.sum() # make sure there is gradient w.r.t input + else: + vec = torch.cat( + [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0 + ) + vec = AllReduce.apply(vec * B) + + total_batch = vec[-1].detach() + momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 + total_batch = torch.max(total_batch, torch.ones_like(total_batch)) # avoid div-by-zero + mean, meansqr, _ = torch.split(vec / total_batch, C) + + var = meansqr - mean * mean + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + + self.running_mean += momentum * (mean.detach() - self.running_mean) + self.running_var += momentum * (var.detach() - self.running_var) + return input * scale + bias diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/blocks.py b/preprocess/mhp_extension/detectron2/detectron2/layers/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..1d06fec22e472febbc960c49f747acddd2ab7208 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/blocks.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from torch import nn + +from .batch_norm import FrozenBatchNorm2d + + +class CNNBlockBase(nn.Module): + """ + A CNN block is assumed to have input channels, output channels and a stride. + The input and output of `forward()` method must be NCHW tensors. + The method can perform arbitrary computation but must match the given + channels and stride specification. + + Attribute: + in_channels (int): + out_channels (int): + stride (int): + """ + + def __init__(self, in_channels, out_channels, stride): + """ + The `__init__` method of any subclass should also contain these arguments. + + Args: + in_channels (int): + out_channels (int): + stride (int): + """ + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + + def freeze(self): + """ + Make this block not trainable. + This method sets all parameters to `requires_grad=False`, + and convert all BatchNorm layers to FrozenBatchNorm + + Returns: + the block itself + """ + for p in self.parameters(): + p.requires_grad = False + FrozenBatchNorm2d.convert_frozen_batchnorm(self) + return self diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/README.md b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..778ed3da0bae89820831bcd8a72ff7b9cad8d4dd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/README.md @@ -0,0 +1,7 @@ + + +To add a new Op: + +1. Create a new directory +2. Implement new ops there +3. Delcare its Python interface in `vision.cpp`. 
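Aside: the layers introduced above (`get_norm`, `FrozenBatchNorm2d`, `CNNBlockBase`) are typically combined when defining backbone blocks. Below is a minimal sketch, assuming the vendored package is importable as `detectron2` with its extensions built; `ToyBlock` is a made-up example, not part of the library.

```python
import torch
from torch import nn

from detectron2.layers import CNNBlockBase, FrozenBatchNorm2d, get_norm


class ToyBlock(CNNBlockBase):
    """Hypothetical 3x3 conv block; get_norm picks the normalization layer by name."""

    def __init__(self, in_channels, out_channels, norm="BN"):
        super().__init__(in_channels, out_channels, stride=1)
        self.conv = nn.Conv2d(in_channels, out_channels, 3, padding=1, bias=False)
        self.norm = get_norm(norm, out_channels)  # "BN", "GN", "FrozenBN", or "" for no norm

    def forward(self, x):
        x = self.conv(x)
        if self.norm is not None:  # get_norm("") returns None
            x = self.norm(x)
        return x


block = ToyBlock(3, 8, norm="BN")
block.freeze()  # sets requires_grad=False and swaps BatchNorm children for FrozenBatchNorm2d
assert isinstance(block.norm, FrozenBatchNorm2d)
out = block(torch.randn(2, 3, 16, 16))  # (2, 8, 16, 16)
```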
diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h new file mode 100644 index 0000000000000000000000000000000000000000..2d95eac6e29d5e5624afbc6c545776d78ebc709c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h @@ -0,0 +1,130 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor ROIAlign_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned); + +at::Tensor ROIAlign_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned); + +#ifdef WITH_CUDA +at::Tensor ROIAlign_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned); + +at::Tensor ROIAlign_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned); +#endif + +// Interface for Python +inline at::Tensor ROIAlign_forward( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned) { + if (input.is_cuda()) { +#ifdef WITH_CUDA + return ROIAlign_forward_cuda( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlign_forward_cpu( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned); +} + +inline at::Tensor ROIAlign_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned) { + if (grad.is_cuda()) { +#ifdef WITH_CUDA + return ROIAlign_backward_cuda( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlign_backward_cpu( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..52fc83f8140b29de7b2ad3cb490b8cb672959e16 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp @@ -0,0 +1,508 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#include +#include "ROIAlign.h" + +namespace { + +// implementation taken from Caffe2 +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T x = xx; + T y = yy; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y <= 0) { + y = 0; + } + if (x <= 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indices + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void ROIAlignForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output, + bool aligned) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? 
(T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (aligned) { + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlign cannot have non-negative size!"); + } else { // for backward-compatibility only + roi_width = std::max(roi_width, (T)1.); + roi_height = std::max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros == 0/1, instead of NaN. + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 + + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +template +void ROIAlignBackward( + const int nthreads, + // may not be contiguous, and should be indexed using n_stride, etc + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride, + bool aligned) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (aligned) { + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlign do not have non-negative size!"); + } else { // for backward-compatibility only + roi_width = std::max(roi_width, (T)1.); + roi_height = std::max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignBackward + +} // namespace + +namespace detectron2 { + +at::Tensor ROIAlign_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) + return output; + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ROIAlign_forward", [&] { + ROIAlignForward( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr(), + aligned); + }); + return output; +} + +at::Tensor ROIAlign_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. 
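Aside: both the forward and backward CPU kernels rely on the same bilinear sampling and clamping rules. The following NumPy sketch is illustrative only (`bilinear_sample` is a made-up name); it mirrors the single-point case handled by `bilinear_interpolate` and the cached `PreCalc` weights above.

```python
import numpy as np

def bilinear_sample(feature, y, x):
    """Sample feature at (y, x) with bilinear interpolation, using the kernel's clamping."""
    height, width = feature.shape
    # points more than one pixel outside the feature map contribute nothing
    if y < -1.0 or y > height or x < -1.0 or x > width:
        return 0.0
    y, x = max(y, 0.0), max(x, 0.0)
    y_low, x_low = int(y), int(x)
    y_high, x_high = min(y_low + 1, height - 1), min(x_low + 1, width - 1)
    y_low, x_low = min(y_low, height - 1), min(x_low, width - 1)
    ly, lx = y - y_low, x - x_low
    hy, hx = 1.0 - ly, 1.0 - lx
    # the four weights below are exactly the w1..w4 stored in PreCalc
    return (hy * hx * feature[y_low, x_low] + hy * lx * feature[y_low, x_high]
            + ly * hx * feature[y_high, x_low] + ly * lx * feature[y_high, x_high])

feat = np.arange(16, dtype=np.float32).reshape(4, 4)
print(bilinear_sample(feat, 1.5, 2.5))  # average of feat[1:3, 2:4] -> 8.5
```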
+ int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ROIAlign_forward", [&] { + ROIAlignBackward( + grad.numel(), + grad.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr(), + n_stride, + c_stride, + h_stride, + w_stride, + aligned); + }); + return grad_input; +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..2e05953b03089203d29bc304726afbca7ee5d464 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu @@ -0,0 +1,430 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__device__ T bilinear_interpolate( + const T* bottom_data, + const int height, + const int width, + T y, + T x, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + // do bilinear interpolation + T v1 = bottom_data[y_low * width + x_low]; + T v2 = bottom_data[y_low * width + x_high]; + T v3 = bottom_data[y_high * width + x_low]; + T v4 = bottom_data[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__global__ void RoIAlignForward( + const int nthreads, + const T* bottom_data, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* bottom_rois, + T* top_data, + bool aligned) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? 
(T)0.5 : (T)0.0; + T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset; + T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset; + T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset; + T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { // for backward-compatibility only + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_bottom_data = + bottom_data + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros == 0/1, instead of NaN. + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T val = bilinear_interpolate( + offset_bottom_data, height, width, y, x, index); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // reference in forward + // T v1 = bottom_data[y_low * width + x_low]; + // T v2 = bottom_data[y_low * width + x_high]; + // T v3 = bottom_data[y_high * width + x_low]; + // T v4 = bottom_data[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +__global__ void RoIAlignBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* bottom_rois, + bool aligned) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset; + T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset; + T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset; + T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { // for backward-compatibility only + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignBackward + +namespace detectron2 { + +at::Tensor ROIAlign_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(output_size), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] { + RoIAlignForward<<>>( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr(), + aligned); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlign_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + 
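Aside: in the CUDA backward kernel above, several pooled bins can scatter gradient into the same input cell, so the four per-bin contributions are accumulated with `atomicAdd` (the single-threaded CPU kernel uses a plain `add`). Below is a NumPy analogue of that scatter-add accumulation, with made-up indices and values.

```python
import numpy as np

grad_input = np.zeros(6, dtype=np.float32)               # a flattened H*W plane
positions = np.array([0, 3, 3, 5])                       # two contributions hit index 3
contribs = np.array([0.1, 0.4, 0.2, 0.3], dtype=np.float32)

# np.add.at accumulates repeated indices, like atomicAdd does across CUDA threads;
# plain fancy-index assignment (grad_input[positions] += contribs) would silently
# drop one of the two writes to index 3.
np.add.at(grad_input, positions, contribs)
print(grad_input)  # [0.1 0.  0.  0.6 0.  0.3]
```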
at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(grad.numel()), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] { + RoIAlignBackwardFeature<<>>( + grad.numel(), + grad_.data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr(), + aligned); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h new file mode 100644 index 0000000000000000000000000000000000000000..a99c8ebddaa4936e26437b42d62e2b8355c655aa --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h @@ -0,0 +1,115 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); + +#ifdef WITH_CUDA +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); +#endif + +// Interface for Python +inline at::Tensor ROIAlignRotated_forward( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + if (input.is_cuda()) { +#ifdef WITH_CUDA + return ROIAlignRotated_forward_cuda( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlignRotated_forward_cpu( + input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); +} + +inline at::Tensor ROIAlignRotated_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + if (grad.is_cuda()) { +#ifdef WITH_CUDA + return 
ROIAlignRotated_backward_cuda( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlignRotated_backward_cpu( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e5e1ffdccd0e2ced15fa34b4906388d371bffe2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp @@ -0,0 +1,522 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include "ROIAlignRotated.h" + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + T roi_center_h, + T roi_center_w, + T cos_theta, + T sin_theta, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + // In image space, (y, x) is the order for Right Handed System, + // and this is essentially multiplying the point by a rotation matrix + // to rotate it counterclockwise through angle theta. + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y < 0) { + y = 0; + } + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indices + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +} // namespace + +template +void ROIAlignRotatedForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + roi_center_h, + roi_center_w, + cos_theta, + sin_theta, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void ROIAlignRotatedBackward( + const int nthreads, + // may not be contiguous. should index using n_stride, etc + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? 
sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) { + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedForward( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + return output; +} + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedBackward<scalar_t>( + grad.numel(), + grad.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..9c376fc6973b75b34967faf870a9f85a3ee430be --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu @@ -0,0 +1,443 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <ATen/cuda/CUDAApplyUtils.cuh> + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { + +template <typename T> +__device__ T bilinear_interpolate( + const T* input, + const int height, + const int width, + T y, + T x) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +} // namespace + +template +__global__ void RoIAlignRotatedForward( + const int nthreads, + const T* input, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (inte gral) pooling inside a bin + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T val = bilinear_interpolate(offset_input, height, width, y, x); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__global__ void RoIAlignRotatedBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(output_size), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + RoIAlignRotatedForward<<>>( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(grad.numel()), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + grad.scalar_type(), "ROIAlignRotated_backward", [&] { + RoIAlignRotatedBackwardFeature<<>>( + grad.numel(), + grad_.data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h new file mode 100644 index 0000000000000000000000000000000000000000..7c389c6cbdbefdfb623296b0918c27c634d621bb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h @@ -0,0 +1,35 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor box_iou_rotated_cpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +#ifdef WITH_CUDA +at::Tensor box_iou_rotated_cuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor box_iou_rotated( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); + if (boxes1.device().is_cuda()) { +#ifdef WITH_CUDA + return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + + return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f2b02d171077d96fcaf29b585fa6a678af1f2842 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#include "box_iou_rotated.h" +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +template <typename T> +void box_iou_rotated_cpu_kernel( + const at::Tensor& boxes1, + const at::Tensor& boxes2, + at::Tensor& ious) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + for (int i = 0; i < num_boxes1; i++) { + for (int j = 0; j < num_boxes2; j++) { + ious[i * num_boxes2 + j] = single_box_iou_rotated<T>( + boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>()); + } + } +} + +at::Tensor box_iou_rotated_cpu( + // input must be contiguous: + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious); + + // reshape from 1d array to 2d array + auto shape = std::vector<int64_t>{num_boxes1, num_boxes2}; + return ious.reshape(shape); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..e3403c11796cb313771b8b6350c793b9fbdfbcaa --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu @@ -0,0 +1,130 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <ATen/cuda/CUDAApplyUtils.cuh> +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +// 2D block with 32 * 16 = 512 threads per block +const int BLOCK_DIM_X = 32; +const int BLOCK_DIM_Y = 16; + +template <typename T> +__global__ void box_iou_rotated_cuda_kernel( + const int n_boxes1, + const int n_boxes2, + const T* dev_boxes1, + const T* dev_boxes2, + T* dev_ious) { + const int row_start = blockIdx.x * blockDim.x; + const int col_start = blockIdx.y * blockDim.y; + + const int row_size = min(n_boxes1 - row_start, blockDim.x); + const int col_size = min(n_boxes2 - col_start, blockDim.y); + + __shared__ float block_boxes1[BLOCK_DIM_X * 5]; + __shared__ float block_boxes2[BLOCK_DIM_Y * 5]; + + // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y + if (threadIdx.x < row_size && threadIdx.y == 0) { + block_boxes1[threadIdx.x * 5 + 0] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 0]; + block_boxes1[threadIdx.x * 5 + 1] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 1]; + block_boxes1[threadIdx.x * 5 + 2] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 2]; + block_boxes1[threadIdx.x * 5 + 3] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 3]; + block_boxes1[threadIdx.x * 5 + 4] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 4]; + } + + if (threadIdx.x < col_size && threadIdx.y == 0) { + block_boxes2[threadIdx.x * 5 + 0] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 0]; + block_boxes2[threadIdx.x * 5 + 1] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 1]; + block_boxes2[threadIdx.x * 5 + 2] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 2]; + block_boxes2[threadIdx.x * 5 + 3] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 3]; + block_boxes2[threadIdx.x * 5 + 4] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size && threadIdx.y < col_size) { + int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y; + dev_ious[offset] = single_box_iou_rotated<T>( + block_boxes1 + threadIdx.x * 5, block_boxes2 + 
threadIdx.y * 5); + } +} + +at::Tensor box_iou_rotated_cuda( + // input must be contiguous + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + using scalar_t = float; + AT_ASSERTM( + boxes1.scalar_type() == at::kFloat, "boxes1 must be a float tensor"); + AT_ASSERTM( + boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor"); + AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor"); + AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(boxes1.device()); + + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + bool transpose = false; + if (num_boxes1 > 0 && num_boxes2 > 0) { + scalar_t *data1 = boxes1.data_ptr(), + *data2 = boxes2.data_ptr(); + + if (num_boxes2 > 65535 * BLOCK_DIM_Y) { + AT_ASSERTM( + num_boxes1 <= 65535 * BLOCK_DIM_Y, + "Too many boxes for box_iou_rotated_cuda!"); + // x dim is allowed to be large, but y dim cannot, + // so we transpose the two to avoid "invalid configuration argument" + // error. We assume one of them is small. Otherwise the result is hard to + // fit in memory anyway. + std::swap(num_boxes1, num_boxes2); + std::swap(data1, data2); + transpose = true; + } + + const int blocks_x = + at::cuda::ATenCeilDiv(static_cast(num_boxes1), BLOCK_DIM_X); + const int blocks_y = + at::cuda::ATenCeilDiv(static_cast(num_boxes2), BLOCK_DIM_Y); + + dim3 blocks(blocks_x, blocks_y); + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + box_iou_rotated_cuda_kernel<<>>( + num_boxes1, + num_boxes2, + data1, + data2, + (scalar_t*)ious.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + } + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + if (transpose) { + return ious.view(shape).t(); + } else { + return ious.view(shape); + } +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..d8757ec376e8703e1edc5f76bf5ef214620bd69f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h @@ -0,0 +1,363 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#pragma once + +#include +#include + +#ifdef __CUDACC__ +// Designates functions callable from the host (CPU) and the device (GPU) +#define HOST_DEVICE __host__ __device__ +#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ +#else +#include +#define HOST_DEVICE +#define HOST_DEVICE_INLINE HOST_DEVICE inline +#endif + +namespace detectron2 { + +namespace { + +template +struct RotatedBox { + T x_ctr, y_ctr, w, h, a; +}; + +template +struct Point { + T x, y; + HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} + HOST_DEVICE_INLINE Point operator+(const Point& p) const { + return Point(x + p.x, y + p.y); + } + HOST_DEVICE_INLINE Point& operator+=(const Point& p) { + x += p.x; + y += p.y; + return *this; + } + HOST_DEVICE_INLINE Point operator-(const Point& p) const { + return Point(x - p.x, y - p.y); + } + HOST_DEVICE_INLINE Point operator*(const T coeff) const { + return Point(x * coeff, y * coeff); + } +}; + +template +HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { + return A.x * B.x + A.y * B.y; +} + +// R: result type. can be different from input type +template +HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) { + return static_cast(A.x) * static_cast(B.y) - + static_cast(B.x) * static_cast(A.y); +} + +template +HOST_DEVICE_INLINE void get_rotated_vertices( + const RotatedBox& box, + Point (&pts)[4]) { + // M_PI / 180. == 0.01745329251 + double theta = box.a * 0.01745329251; + T cosTheta2 = (T)cos(theta) * 0.5f; + T sinTheta2 = (T)sin(theta) * 0.5f; + + // y: top --> down; x: left --> right + pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w; + pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; + pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w; + pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; + pts[2].x = 2 * box.x_ctr - pts[0].x; + pts[2].y = 2 * box.y_ctr - pts[0].y; + pts[3].x = 2 * box.x_ctr - pts[1].x; + pts[3].y = 2 * box.y_ctr - pts[1].y; +} + +template +HOST_DEVICE_INLINE int get_intersection_points( + const Point (&pts1)[4], + const Point (&pts2)[4], + Point (&intersections)[24]) { + // Line vector + // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] + Point vec1[4], vec2[4]; + for (int i = 0; i < 4; i++) { + vec1[i] = pts1[(i + 1) % 4] - pts1[i]; + vec2[i] = pts2[(i + 1) % 4] - pts2[i]; + } + + // Line test - test all line combos for intersection + int num = 0; // number of intersections + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + // Solve for 2x2 Ax=b + T det = cross_2d(vec2[j], vec1[i]); + + // This takes care of parallel lines + if (fabs(det) <= 1e-14) { + continue; + } + + auto vec12 = pts2[j] - pts1[i]; + + T t1 = cross_2d(vec2[j], vec12) / det; + T t2 = cross_2d(vec1[i], vec12) / det; + + if (t1 >= 0.0f && t1 <= 1.0f && t2 >= 0.0f && t2 <= 1.0f) { + intersections[num++] = pts1[i] + vec1[i] * t1; + } + } + } + + // Check for vertices of rect1 inside rect2 + { + const auto& AB = vec2[0]; + const auto& DA = vec2[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + // assume ABCD is the rectangle, and P is the point to be judged + // P is inside ABCD iff. 
P's projection on AB lies within AB + // and P's projection on AD lies within AD + + auto AP = pts1[i] - pts2[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && + (APdotAD <= ADdotAD)) { + intersections[num++] = pts1[i]; + } + } + } + + // Reverse the check - check for vertices of rect2 inside rect1 + { + const auto& AB = vec1[0]; + const auto& DA = vec1[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + auto AP = pts2[i] - pts1[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && + (APdotAD <= ADdotAD)) { + intersections[num++] = pts2[i]; + } + } + } + + return num; +} + +template +HOST_DEVICE_INLINE int convex_hull_graham( + const Point (&p)[24], + const int& num_in, + Point (&q)[24], + bool shift_to_zero = false) { + assert(num_in >= 2); + + // Step 1: + // Find point with minimum y + // if more than 1 points have the same minimum y, + // pick the one with the minimum x. + int t = 0; + for (int i = 1; i < num_in; i++) { + if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { + t = i; + } + } + auto& start = p[t]; // starting point + + // Step 2: + // Subtract starting point from every points (for sorting in the next step) + for (int i = 0; i < num_in; i++) { + q[i] = p[i] - start; + } + + // Swap the starting point to position 0 + auto tmp = q[0]; + q[0] = q[t]; + q[t] = tmp; + + // Step 3: + // Sort point 1 ~ num_in according to their relative cross-product values + // (essentially sorting according to angles) + // If the angles are the same, sort according to their distance to origin + T dist[24]; +#ifdef __CUDACC__ + // compute distance to origin before sort, and sort them together with the + // points + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + + // CUDA version + // In the future, we can potentially use thrust + // for sorting here to improve speed (though not guaranteed) + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } +#else + // CPU version + std::sort( + q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { + T temp = cross_2d(A, B); + if (fabs(temp) < 1e-6) { + return dot_2d(A, A) < dot_2d(B, B); + } else { + return temp > 0; + } + }); + // compute distance to origin after sort, since the points are now different. + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } +#endif + + // Step 4: + // Make sure there are at least 2 points (that don't overlap with each other) + // in the stack + int k; // index of the non-overlapped second point + for (k = 1; k < num_in; k++) { + if (dist[k] > 1e-8) { + break; + } + } + if (k == num_in) { + // We reach the end, which means the convex hull is just one point + q[0] = p[t]; + return 1; + } + q[1] = q[k]; + int m = 2; // 2 points in the stack + // Step 5: + // Finally we can start the scanning process. 
+ // When a non-convex relationship between the 3 points is found + // (either concave shape or duplicated points), + // we pop the previous point from the stack + // until the 3-point relationship is convex again, or + // until the stack only contains two points + for (int i = k + 1; i < num_in; i++) { + while (m > 1) { + auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2]; + // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) - + // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we + // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means + // round to nearest floating point). + if (q1.x * q2.y >= q2.x * q1.y) + m--; + else + break; + } + // Using double also helps, but float can solve the issue for now. + // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) + // >= 0) { + // m--; + // } + q[m++] = q[i]; + } + + // Step 6 (Optional): + // In general sense we need the original coordinates, so we + // need to shift the points back (reverting Step 2) + // But if we're only interested in getting the area/perimeter of the shape + // We can simply return. + if (!shift_to_zero) { + for (int i = 0; i < m; i++) { + q[i] += start; + } + } + + return m; +} + +template +HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { + if (m <= 2) { + return 0; + } + + T area = 0; + for (int i = 1; i < m - 1; i++) { + area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); + } + + return area / 2.0; +} + +template +HOST_DEVICE_INLINE T rotated_boxes_intersection( + const RotatedBox& box1, + const RotatedBox& box2) { + // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned + // from rotated_rect_intersection_pts + Point intersectPts[24], orderedPts[24]; + + Point pts1[4]; + Point pts2[4]; + get_rotated_vertices(box1, pts1); + get_rotated_vertices(box2, pts2); + + int num = get_intersection_points(pts1, pts2, intersectPts); + + if (num <= 2) { + return 0.0; + } + + // Convex Hull to order the intersection points in clockwise order and find + // the contour area. 
+ int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); + return polygon_area(orderedPts, num_convex); +} + +} // namespace + +template +HOST_DEVICE_INLINE T +single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { + // shift center to the middle point to achieve higher precision in result + RotatedBox box1, box2; + auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; + auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; + box1.x_ctr = box1_raw[0] - center_shift_x; + box1.y_ctr = box1_raw[1] - center_shift_y; + box1.w = box1_raw[2]; + box1.h = box1_raw[3]; + box1.a = box1_raw[4]; + box2.x_ctr = box2_raw[0] - center_shift_x; + box2.y_ctr = box2_raw[1] - center_shift_y; + box2.w = box2_raw[2]; + box2.h = box2_raw[3]; + box2.a = box2_raw[4]; + + T area1 = box1.w * box1.h; + T area2 = box2.w * box2.h; + if (area1 < 1e-14 || area2 < 1e-14) { + return 0.f; + } + + T intersection = rotated_boxes_intersection(box1, box2); + T iou = intersection / (area1 + area2 - intersection); + return iou; +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/cuda_version.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/cuda_version.cu new file mode 100644 index 0000000000000000000000000000000000000000..af088e7572f6f27b9d653b4d7178f4e03de6befc --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/cuda_version.cu @@ -0,0 +1,9 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +#include + +namespace detectron2 { +int get_cudart_version() { + return CUDART_VERSION; +} +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv.h b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..49ccd868ace8fd79f6fcbde6fe41f2b95873c414 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv.h @@ -0,0 +1,377 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +#ifdef WITH_CUDA +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step); + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias); + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias); + +#endif + +inline int deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (input.is_cuda()) { +#ifdef WITH_CUDA + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_forward_cuda( + input, + weight, + offset, + output, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline int deform_conv_backward_input( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (gradOutput.is_cuda()) { +#ifdef WITH_CUDA + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_input_cuda( + input, + 
offset, + gradOutput, + gradInput, + gradOffset, + weight, + columns, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline int deform_conv_backward_filter( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + if (gradOutput.is_cuda()) { +#ifdef WITH_CUDA + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_parameters_cuda( + input, + offset, + gradOutput, + gradWeight, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + scale, + im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline void modulated_deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + if (input.is_cuda()) { +#ifdef WITH_CUDA + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_forward( + input, + weight, + bias, + ones, + offset, + mask, + output, + columns, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline void modulated_deform_conv_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + if (grad_output.is_cuda()) { +#ifdef WITH_CUDA + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_backward( + input, + weight, + bias, + ones, + offset, + mask, + columns, + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +} // namespace detectron2 diff --git 
a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..5376db0cc4d93e245cfc9fea0f3b5715a1f88db2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda.cu @@ -0,0 +1,1131 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp +// Original license: Apache 2.0 + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c +// Original license: Apache 2.0 + +#include + +#include "deform_conv.h" + +#include +#include + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col); + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + 
const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check( + at::Tensor input, + at::Tensor offset, + at::Tensor* gradOutput, + at::Tensor weight, + int kH, + int kW, + int dH, + int dW, + int padH, + int padW, + int dilationH, + int dilationW, + int group, + int deformable_group) { + TORCH_CHECK( + weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + TORCH_CHECK( + kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", + kH, + kW); + + TORCH_CHECK( + (weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", + kH, + kW, + weight.size(2), + weight.size(3)); + + TORCH_CHECK( + dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", + dH, + dW); + + TORCH_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, + dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + TORCH_CHECK( + ndim == 3 || ndim == 4, + "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + TORCH_CHECK( + nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, + inputHeight, + inputWidth, + nOutputPlane, + outputHeight, + outputWidth); + + TORCH_CHECK( + input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, + input.size(1)); + + TORCH_CHECK( + (inputHeight >= kH && inputWidth >= kW), + "input image is smaller than kernel"); + + TORCH_CHECK( + (offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, + outputWidth, + offset.size(2), + offset.size(3)); + + TORCH_CHECK( + (offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + TORCH_CHECK( + gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, + gradOutput->size(dimf)); + + TORCH_CHECK( + (gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, + outputWidth, + gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check( + input, + offset, + NULL, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view({batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + at::Tensor output_buffer = at::zeros( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}, + output.options()); + + output_buffer = output_buffer.view({output_buffer.size(0), + group, + output_buffer.size(1) / group, + 
output_buffer.size(2), + output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = + output_buffer.view({output_buffer.size(0), + output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), + output_buffer.size(4)}); + + output_buffer = output_buffer.view({batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + shape_check( + input, + offset, + &gradOutput, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view({batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + input = input.view({batchSize / im2col_step, + im2col_step, + nInputPlane, 
+ inputHeight, + inputWidth}); + gradOffset = gradOffset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + offset = offset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + gradOutput = gradOutput.view({gradOutput.size(0), + group, + gradOutput.size(1) / group, + gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view({gradOutput.size(0), + gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4), + gradOutput.size(5)}); + + deformable_col2im_coord( + columns, + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradOffset[elt]); + + deformable_col2im( + columns, + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check( + input, + offset, + &gradOutput, + gradWeight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long 
nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view({batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = gradOutputBuffer.view({batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + gradOutputBuffer.copy_(gradOutput); + // gradOutput is not contiguous, so we do reshape (instead of view) next + gradOutputBuffer = gradOutputBuffer.reshape({batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view({gradOutputBuffer.size(0), + group, + gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), + gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = gradWeight.view({group, + gradWeight.size(0) / group, + gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_( + gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), + 1.0, + scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), + gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + 
const bool with_bias) { + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = at::zeros( + {channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view({output.size(0), + group, + output.size(1) / group, + output.size(2), + output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + // divide into group + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view({weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view({output.size(0), + output.size(1) * output.size(2), + output.size(3), + output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = 
weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros( + {channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = grad_output.view({grad_output.size(0), + group, + grad_output.size(1) / group, + grad_output.size(2), + grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view({weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_input[b]); + + // gradient w.r.t. 
weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view({group, + grad_weight.size(0) / group, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), + grad_output.size(2), + grad_output.size(3), + grad_output.size(4)}); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..841f3166c902e7f1c17fe58137d42a58e4f66d69 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu @@ -0,0 +1,1288 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu +// Original license: Apache 2.0 +// clang-format off + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. + * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +#include +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + + +namespace { + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) { + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +} + +template +__device__ scalar_t deformable_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = 
argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const scalar_t* data_im_ptr = data_im + 
((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const scalar_t map_h = i * dilation_h + offset_h; + // const scalar_t map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = deformable_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + 
abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + + +template +__global__ void deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int 
dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im) { + // todo: make sure parallel_imgs is passed in correctly + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + 
at::Tensor grad_offset) { + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * + deformable_group * parallel_imgs; + int channel_per_deformable_group = + channels * ksize_h * ksize_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + + deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + 2 * ksize_h * ksize_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_); + })); +} + +} // namespace detectron2 + + +template +__device__ scalar_t dmcn_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = 
floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const scalar_t* data_mask_ptr = data_mask + + (b_col * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_col) * 
width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const float map_h = i * dilation_h + offset_h; + // const float map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = dmcn_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + // data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) 
* height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void modulated_deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset, + scalar_t* grad_mask) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } else { + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear( + data_im_ptr + cnt * height * width, + width, + height, + width, + inv_h, + inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * 
height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask + [(((b * deformable_group + deformable_group_index) * kernel_h * + kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + + +namespace detectron2 { + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + data_mask_, + height_im, + width_im, + kernel_h, + kenerl_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_im2col_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + data_mask_, + 
channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + scalar_t* grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + 2 * kernel_h * kernel_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_, + grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_coord_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h new file mode 100644 index 0000000000000000000000000000000000000000..9c86c8d55cd24fb5322657b9d2f676fc3e1373ba --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated.h @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor nms_rotated_cpu( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold); + +#ifdef WITH_CUDA +at::Tensor nms_rotated_cuda( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor nms_rotated( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold) { + assert(dets.device().is_cuda() == scores.device().is_cuda()); + if (dets.device().is_cuda()) { +#ifdef WITH_CUDA + return nms_rotated_cuda( + dets.contiguous(), scores.contiguous(), iou_threshold); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + + return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0658e388df005748c358dcbf3a1ad2a59da6cac8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp @@ -0,0 +1,75 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include "../box_iou_rotated/box_iou_rotated_utils.h" +#include "nms_rotated.h" + +namespace detectron2 { + +template +at::Tensor nms_rotated_cpu_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold) { + // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel, + // however, the code in this function is much shorter because + // we delegate the IoU computation for rotated boxes to + // the single_box_iou_rotated function in box_iou_rotated_utils.h + AT_ASSERTM(dets.device().is_cpu(), "dets must be a CPU tensor"); + AT_ASSERTM(scores.device().is_cpu(), "scores must be a CPU tensor"); + AT_ASSERTM( + dets.scalar_type() == scores.scalar_type(), + "dets should have the same type as scores"); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong)); + } + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); + at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); + + auto suppressed = suppressed_t.data_ptr(); + auto keep = keep_t.data_ptr(); + auto order = order_t.data_ptr(); + + int64_t num_to_keep = 0; + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) { + continue; + } + + keep[num_to_keep++] = i; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) { + continue; + } + + auto ovr = single_box_iou_rotated( + dets[i].data_ptr(), dets[j].data_ptr()); + if (ovr >= iou_threshold) { + suppressed[j] = 1; + } + } + } + return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); +} + +at::Tensor nms_rotated_cpu( + // input must be contiguous + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold) { + auto result = at::empty({0}, dets.options()); + + AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] { + result = nms_rotated_cpu_kernel(dets, scores, iou_threshold); + }); + return result; +} + +} // 
namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..40977a0da1761fe807205fbcf8029d56bf75786c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu @@ -0,0 +1,139 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include +#include "../box_iou_rotated/box_iou_rotated_utils.h" + +using namespace detectron2; + +namespace { +int const threadsPerBlock = sizeof(unsigned long long) * 8; +} + +template +__global__ void nms_rotated_cuda_kernel( + const int n_boxes, + const float iou_threshold, + const T* dev_boxes, + unsigned long long* dev_mask) { + // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + // Compared to nms_cuda_kernel, where each box is represented with 4 values + // (x1, y1, x2, y2), each rotated box is represented with 5 values + // (x_center, y_center, width, height, angle_degrees) here. + __shared__ T block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + // Instead of devIoU used by original horizontal nms, here + // we use the single_box_iou_rotated function from box_iou_rotated_utils.h + if (single_box_iou_rotated(cur_box, block_boxes + i * 5) > + iou_threshold) { + t |= 1ULL << i; + } + } + const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +namespace detectron2 { + +at::Tensor nms_rotated_cuda( + // input must be contiguous + const at::Tensor& dets, + const at::Tensor& scores, + float iou_threshold) { + // using scalar_t = float; + AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); + AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(dets.device()); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto dets_sorted = dets.index_select(0, order_t); + + auto dets_num = dets.size(0); + + const int col_blocks = + at::cuda::ATenCeilDiv(static_cast(dets_num), threadsPerBlock); + + at::Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, col_blocks); + dim3 
diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/vision.cpp b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/vision.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fa7942e881af704d33a79e8b2ecd1ac5b6f3a7ef
--- /dev/null
+++ b/preprocess/mhp_extension/detectron2/detectron2/layers/csrc/vision.cpp
@@ -0,0 +1,102 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+#include <torch/extension.h>
+#include "ROIAlign/ROIAlign.h"
+#include "ROIAlignRotated/ROIAlignRotated.h"
+#include "box_iou_rotated/box_iou_rotated.h"
+#include "deformable/deform_conv.h"
+#include "nms_rotated/nms_rotated.h"
+
+namespace detectron2 {
+
+#ifdef WITH_CUDA
+extern int get_cudart_version();
+#endif
+
+std::string get_cuda_version() {
+#ifdef WITH_CUDA
+  std::ostringstream oss;
+
+  // copied from
+  // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231
+  auto printCudaStyleVersion = [&](int v) {
+    oss << (v / 1000) << "." << (v / 10 % 100);
+    if (v % 10 != 0) {
+      oss << "." << (v % 10);
+    }
+  };
+  printCudaStyleVersion(get_cudart_version());
+  return oss.str();
+#else
+  return std::string("not available");
+#endif
+}
+
+// similar to
+// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp
+std::string get_compiler_version() {
+  std::ostringstream ss;
+#if defined(__GNUC__)
+#ifndef __clang__
+
+#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8))
+#error "GCC >= 4.9 is required!"
+#endif
+
+  { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; }
+#endif
+#endif
+
+#if defined(__clang_major__)
+  {
+    ss << "clang " << __clang_major__ << "." << __clang_minor__ << "."
+ << __clang_patchlevel__; + } +#endif + +#if defined(_MSC_VER) + { ss << "MSVC " << _MSC_FULL_VER; } +#endif + return ss.str(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); + m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); + + m.def("box_iou_rotated", &box_iou_rotated, "IoU for rotated boxes"); + + m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); + m.def( + "deform_conv_backward_input", + &deform_conv_backward_input, + "deform_conv_backward_input"); + m.def( + "deform_conv_backward_filter", + &deform_conv_backward_filter, + "deform_conv_backward_filter"); + m.def( + "modulated_deform_conv_forward", + &modulated_deform_conv_forward, + "modulated_deform_conv_forward"); + m.def( + "modulated_deform_conv_backward", + &modulated_deform_conv_backward, + "modulated_deform_conv_backward"); + + m.def("nms_rotated", &nms_rotated, "NMS for rotated boxes"); + + m.def("roi_align_forward", &ROIAlign_forward, "ROIAlign_forward"); + m.def("roi_align_backward", &ROIAlign_backward, "ROIAlign_backward"); + + m.def( + "roi_align_rotated_forward", + &ROIAlignRotated_forward, + "Forward pass for Rotated ROI-Align Operator"); + m.def( + "roi_align_rotated_backward", + &ROIAlignRotated_backward, + "Backward pass for Rotated ROI-Align Operator"); +} + +} // namespace detectron2 diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/deform_conv.py b/preprocess/mhp_extension/detectron2/detectron2/layers/deform_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..ba8c6498ffdfffa281e1f02037d40cbbb6e66164 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/deform_conv.py @@ -0,0 +1,494 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import math +from functools import lru_cache +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from detectron2 import _C + +from .wrappers import _NewEmptyTensorOp + + +class _DeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64, + ): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim()) + ) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride) + ) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + _C.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + _C.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + _C.deform_conv_backward_filter( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + 1, + cur_im2col_step, + ) + + return grad_input, grad_offset, grad_weight, None, None, None, None, None, None + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + "x".join(map(str, output_size)) + ) + ) + return 
output_size + + @staticmethod + @lru_cache(maxsize=128) + def _cal_im2col_step(input_size, default_size): + """ + Calculate proper im2col step size, which should be divisible by input_size and not larger + than prefer_size. Meanwhile the step size should be as large as possible to be more + efficient. So we choose the largest one among all divisors of input_size which are smaller + than prefer_size. + :param input_size: input batch size . + :param default_size: default preferred im2col step size. + :return: the largest proper step size. + """ + if input_size <= default_size: + return input_size + best_step = 1 + for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)): + if input_size % step == 0: + if input_size // step <= default_size: + return input_size // step + best_step = step + + return best_step + + +class _ModulatedDeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + ): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError + if ( + weight.requires_grad + or mask.requires_grad + or offset.requires_grad + or input.requires_grad + ): + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + _C.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + _C.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + if not ctx.with_bias: + grad_bias = None + + return ( + grad_input, + grad_offset, + grad_mask, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + ) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = ( + height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1) + ) // ctx.stride + 1 + width_out = ( + width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1) + ) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = _DeformConv.apply +modulated_deform_conv = _ModulatedDeformConv.apply + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + 
padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False, + norm=None, + activation=None, + ): + """ + Deformable convolution from :paper:`deformconv`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(DeformConv, self).__init__() + + assert not bias + assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( + in_channels, groups + ) + assert ( + out_channels % groups == 0 + ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) + ) + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + + def forward(self, x, offset): + if x.numel() == 0: + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = deform_conv( + x, + offset, + self.weight, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=False" + return tmpstr + + +class ModulatedDeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True, + norm=None, + activation=None, + ): + """ + Modulated deformable convolution from :paper:`deformconv2`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. 
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, x, offset, mask): + if x.numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = modulated_deform_conv( + x, + offset, + mask, + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/mask_ops.py b/preprocess/mhp_extension/detectron2/detectron2/layers/mask_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe115dbbe15c354575c67d7d10f055eab0bdf91 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/mask_ops.py @@ -0,0 +1,248 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import torch +from PIL import Image +from torch.nn import functional as F + +__all__ = ["paste_masks_in_image"] + + +BYTES_PER_FLOAT = 4 +# TODO: This memory limit may be too much or too little. It would be better to +# determine it based on available resources. +GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit + + +def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): + """ + Args: + masks: N, 1, H, W + boxes: N, 4 + img_h, img_w (int): + skip_empty (bool): only paste masks within the region that + tightly bound all boxes, and returns the results this region only. + An important optimization for CPU. + + Returns: + if skip_empty == False, a mask of shape (N, img_h, img_w) + if skip_empty == True, a mask of shape (N, h', w'), and the slice + object for the corresponding region. + """ + # On GPU, paste all masks together (up to chunk size) + # by using the entire image to sample the masks + # Compared to pasting them one by one, + # this has more operations but is faster on COCO-scale dataset. 
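+    # The boxes are mapped below into grid_sample's normalized [-1, 1] coordinate
+    # space, so a single F.grid_sample call resamples every mask in the chunk into
+    # its target region at once.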
+ device = masks.device + if skip_empty: + x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( + dtype=torch.int32 + ) + x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) + y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) + else: + x0_int, y0_int = 0, 0 + x1_int, y1_int = img_w, img_h + x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 + + N = masks.shape[0] + + img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 + img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 + img_y = (img_y - y0) / (y1 - y0) * 2 - 1 + img_x = (img_x - x0) / (x1 - x0) * 2 - 1 + # img_x, img_y have shapes (N, w), (N, h) + + gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) + gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) + grid = torch.stack([gx, gy], dim=3) + + img_masks = F.grid_sample(masks.to(dtype=torch.float32), grid, align_corners=False) + + if skip_empty: + return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) + else: + return img_masks[:, 0], () + + +def paste_masks_in_image(masks, boxes, image_shape, threshold=0.5): + """ + Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Note: + This is a complicated but more accurate implementation. In actual deployment, it is + often enough to use a faster but less accurate implementation. + See :func:`paste_mask_in_image_old` in this file for an alternative implementation. + + Args: + masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of + detected object instances in the image and Hmask, Wmask are the mask width and mask + height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. + boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). + boxes[i] and masks[i] correspond to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. img_masks[i] is a binary mask for object instance i. + """ + + assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" + N = len(masks) + if N == 0: + return masks.new_empty((0,) + image_shape, dtype=torch.uint8) + if not isinstance(boxes, torch.Tensor): + boxes = boxes.tensor + device = boxes.device + assert len(boxes) == N, boxes.shape + + img_h, img_w = image_shape + + # The actual implementation split the input into chunks, + # and paste them chunk by chunk. + if device.type == "cpu": + # CPU is most efficient when they are pasted one by one with skip_empty=True + # so that it performs minimal number of operations. 
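+        # i.e. one chunk per instance, so every mask is pasted only inside the tight
+        # region returned by _do_paste_mask(..., skip_empty=True).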
+ num_chunks = N + else: + # GPU benefits from parallelism for larger chunks, but may have memory issue + # int(img_h) because shape may be tensors in tracing + num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) + assert ( + num_chunks <= N + ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" + chunks = torch.chunk(torch.arange(N, device=device), num_chunks) + + img_masks = torch.zeros( + N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 + ) + for inds in chunks: + masks_chunk, spatial_inds = _do_paste_mask( + masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" + ) + + if threshold >= 0: + masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) + else: + # for visualization and debugging + masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) + + img_masks[(inds,) + spatial_inds] = masks_chunk + return img_masks + + +# The below are the original paste function (from Detectron1) which has +# larger quantization error. +# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. + + +def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): + """ + Paste a single mask in an image. + This is a per-box implementation of :func:`paste_masks_in_image`. + This function has larger quantization error due to incorrect pixel + modeling and is not used any more. + + Args: + mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single + object instance. Values are in [0, 1]. + box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners + of the object instance. + img_h, img_w (int): Image height and width. + threshold (float): Mask binarization threshold in [0, 1]. + + Returns: + im_mask (Tensor): + The resized and binarized object mask pasted into the original + image plane (a tensor of shape (img_h, img_w)). + """ + # Conversion from continuous box coordinates to discrete pixel coordinates + # via truncation (cast to int32). This determines which pixels to paste the + # mask onto. + box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion + # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to + # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 + # pixels (not x1 - x0 pixels). + samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width + samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height + + # Resample the mask from it's original grid to the new samples_w x samples_h grid + mask = Image.fromarray(mask.cpu().numpy()) + mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) + mask = np.array(mask, copy=False) + + if threshold >= 0: + mask = np.array(mask > threshold, dtype=np.uint8) + mask = torch.from_numpy(mask) + else: + # for visualization and debugging, we also + # allow it to return an unmodified mask + mask = torch.from_numpy(mask * 255).to(torch.uint8) + + im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, img_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, img_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) + ] + return im_mask + + +# Our pixel modeling requires extrapolation for any continuous +# coordinate < 0.5 or > length - 0.5. 
When sampling pixels on the masks, +# we would like this extrapolation to be an interpolation between boundary values and zero, +# instead of using absolute zero or boundary values. +# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: +# masks, scale = pad_masks(masks[:, 0, :, :], 1) +# boxes = scale_boxes(boxes.tensor, scale) + + +def pad_masks(masks, padding): + """ + Args: + masks (tensor): A tensor of shape (B, M, M) representing B masks. + padding (int): Number of cells to pad on all sides. + + Returns: + The padded masks and the scale factor of the padding size / original size. + """ + B = masks.shape[0] + M = masks.shape[-1] + pad2 = 2 * padding + scale = float(M + pad2) / M + padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) + padded_masks[:, padding:-padding, padding:-padding] = masks + return padded_masks, scale + + +def scale_boxes(boxes, scale): + """ + Args: + boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 + coords representing the corners x0, y0, x1, y1, + scale (float): The box scaling factor. + + Returns: + Scaled boxes. + """ + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half *= scale + h_half *= scale + + scaled_boxes = torch.zeros_like(boxes) + scaled_boxes[:, 0] = x_c - w_half + scaled_boxes[:, 2] = x_c + w_half + scaled_boxes[:, 1] = y_c - h_half + scaled_boxes[:, 3] = y_c + h_half + return scaled_boxes diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/nms.py b/preprocess/mhp_extension/detectron2/detectron2/layers/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..aafe29b3aa551caeeda769dd17b8834b08c7f11c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/nms.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import torch +from torchvision.ops import boxes as box_ops +from torchvision.ops import nms # BC-compat + + +def batched_nms(boxes, scores, idxs, iou_threshold): + """ + Same as torchvision.ops.boxes.batched_nms, but safer. + """ + assert boxes.shape[-1] == 4 + # TODO may need better strategy. + # Investigate after having a fully-cuda NMS op. + if len(boxes) < 40000: + return box_ops.batched_nms(boxes, scores, idxs, iou_threshold) + + result_mask = scores.new_zeros(scores.size(), dtype=torch.bool) + for id in torch.unique(idxs).cpu().tolist(): + mask = (idxs == id).nonzero().view(-1) + keep = nms(boxes[mask], scores[mask], iou_threshold) + result_mask[mask[keep]] = True + keep = result_mask.nonzero().view(-1) + keep = keep[scores[keep].argsort(descending=True)] + return keep + + +# Note: this function (nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def nms_rotated(boxes, scores, iou_threshold): + """ + Performs non-maximum suppression (NMS) on the rotated boxes according + to their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as + RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they + can be representing completely different objects in certain tasks, e.g., OCR. 
+ + As for the question of whether rotated-NMS should treat them as faraway boxes + even though their IOU is 1, it depends on the application and/or ground truth annotation. + + As an extreme example, consider a single character v and the square box around it. + + If the angle is 0 degree, the object (text) would be read as 'v'; + + If the angle is 90 degrees, the object (text) would become '>'; + + If the angle is 180 degrees, the object (text) would become '^'; + + If the angle is 270/-90 degrees, the object (text) would become '<' + + All of these cases have IoU of 1 to each other, and rotated NMS that only + uses IoU as criterion would only keep one of them with the highest score - + which, practically, still makes sense in most cases because typically + only one of theses orientations is the correct one. Also, it does not matter + as much if the box is only used to classify the object (instead of transcribing + them with a sequential OCR recognition model) later. + + On the other hand, when we use IoU to filter proposals that are close to the + ground truth during training, we should definitely take the angle into account if + we know the ground truth is labeled with the strictly correct orientation (as in, + upside-down words are annotated with -180 degrees even though they can be covered + with a 0/90/-90 degree box, etc.) + + The way the original dataset is annotated also matters. For example, if the dataset + is a 4-point polygon dataset that does not enforce ordering of vertices/orientation, + we can estimate a minimum rotated bounding box to this polygon, but there's no way + we can tell the correct angle with 100% confidence (as shown above, there could be 4 different + rotated boxes, with angles differed by 90 degrees to each other, covering the exactly + same region). In that case we have to just use IoU to determine the box + proximity (as many detection benchmarks (even for text) do) unless there're other + assumptions we can make (like width is always larger than height, or the object is not + rotated by more than 90 degrees CCW/CW, etc.) + + In summary, not considering angles in rotated NMS seems to be a good option for now, + but we should be aware of its implications. + + Args: + boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in + (x_center, y_center, width, height, angle_degrees) format. + scores (Tensor[N]): Scores for each one of the rotated boxes + iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold + + Returns: + keep (Tensor): int64 tensor with the indices of the elements that have been kept + by Rotated NMS, sorted in decreasing order of scores + """ + from detectron2 import _C + + return _C.nms_rotated(boxes, scores, iou_threshold) + + +# Note: this function (batched_nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def batched_nms_rotated(boxes, scores, idxs, iou_threshold): + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Args: + boxes (Tensor[N, 5]): + boxes where NMS will be performed. They + are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format + scores (Tensor[N]): + scores for each one of the boxes + idxs (Tensor[N]): + indices of the categories for each one of the boxes. 
+ iou_threshold (float): + discards all overlapping boxes + with IoU < iou_threshold + + Returns: + Tensor: + int64 tensor with the indices of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + assert boxes.shape[-1] == 5 + + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + # Strategy: in order to perform NMS independently per class, + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + + # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate, + # which won't handle negative coordinates correctly. + # Here by using min_coordinate we can make sure the negative coordinates are + # correctly handled. + max_coordinate = ( + torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).max() + min_coordinate = ( + torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).min() + offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1) + boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes + boxes_for_nms[:, :2] += offsets[:, None] + keep = nms_rotated(boxes_for_nms, scores, iou_threshold) + return keep diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/roi_align.py b/preprocess/mhp_extension/detectron2/detectron2/layers/roi_align.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c4ce1d747ec77329fab34436f5efa0e958ef32 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/roi_align.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from detectron2 import _C + + +class _ROIAlign(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio, aligned): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + ctx.aligned = aligned + output = _C.roi_align_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + (rois,) = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = _C.roi_align_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ctx.aligned, + ) + return grad_input, None, None, None, None, None + + +roi_align = _ROIAlign.apply + + +class ROIAlign(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + aligned (bool): if False, use the legacy implementation in + Detectron. If True, align the results more perfectly. + + Note: + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). 
For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). But the original + roi_align (aligned=False) does not subtract the 0.5 when computing neighboring + pixel indices and therefore it uses pixels with a slightly incorrect alignment + (relative to our pixel model) when performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; see + detectron2/tests/test_roi_align.py for verification. + + The difference does not make a difference to the model's performance if + ROIAlign is used together with conv layers. + """ + super(ROIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. + """ + assert rois.dim() == 2 and rois.size(1) == 5 + return roi_align( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ", aligned=" + str(self.aligned) + tmpstr += ")" + return tmpstr diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/roi_align_rotated.py b/preprocess/mhp_extension/detectron2/detectron2/layers/roi_align_rotated.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed87e69d5e738f8dbaa7c73c5c8de65343de0fd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/roi_align_rotated.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from detectron2 import _C + + +class _ROIAlignRotated(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + output = _C.roi_align_rotated_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + (rois,) = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = _C.roi_align_rotated_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ) + return grad_input, None, None, None, None, None + + +roi_align_rotated = _ROIAlignRotated.apply + + +class ROIAlignRotated(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. 
+ + Note: + ROIAlignRotated supports continuous coordinate by default: + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). + """ + super(ROIAlignRotated, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx6 boxes. First column is the index into N. + The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees). + """ + assert rois.dim() == 2 and rois.size(1) == 6 + return roi_align_rotated( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ")" + return tmpstr diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/rotated_boxes.py b/preprocess/mhp_extension/detectron2/detectron2/layers/rotated_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..ea9b08583da79aae871b500bcffc19f8a352da6e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/rotated_boxes.py @@ -0,0 +1,22 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from __future__ import absolute_import, division, print_function, unicode_literals + +from detectron2 import _C + + +def pairwise_iou_rotated(boxes1, boxes2): + """ + Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in + (x_center, y_center, width, height, angle) format. + + Arguments: + boxes1 (Tensor[N, 5]) + boxes2 (Tensor[M, 5]) + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + return _C.box_iou_rotated(boxes1, boxes2) diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/shape_spec.py b/preprocess/mhp_extension/detectron2/detectron2/layers/shape_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..ed7f0d08268a2342cfb8246cc032686f2343ef8f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/shape_spec.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from collections import namedtuple + + +class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): + """ + A simple structure that contains basic shape specification about a tensor. + It is often used as the auxiliary inputs/outputs of models, + to obtain the shape inference ability among pytorch modules. + + Attributes: + channels: + height: + width: + stride: + """ + + def __new__(cls, *, channels=None, height=None, width=None, stride=None): + return super().__new__(cls, channels, height, width, stride) diff --git a/preprocess/mhp_extension/detectron2/detectron2/layers/wrappers.py b/preprocess/mhp_extension/detectron2/detectron2/layers/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..7e3935e90c61f02e000568af79ed458dd491fed7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/layers/wrappers.py @@ -0,0 +1,215 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. All Rights Reserved +""" +Wrappers around on some nn functions, mainly to support empty tensors. + +Ideally, add support directly in PyTorch to empty tensors in those functions. + +These can be removed once https://github.com/pytorch/pytorch/issues/12013 +is implemented +""" + +import math +import torch +from torch.nn.modules.utils import _ntuple + +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) + + +def cat(tensors, dim=0): + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +class _NewEmptyTensorOp(torch.autograd.Function): + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return _NewEmptyTensorOp.apply(grad, shape), None + + +class Conv2d(torch.nn.Conv2d): + """ + A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. + """ + + def __init__(self, *args, **kwargs): + """ + Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: + + Args: + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + + It assumes that norm layer is used before activation. + """ + norm = kwargs.pop("norm", None) + activation = kwargs.pop("activation", None) + super().__init__(*args, **kwargs) + + self.norm = norm + self.activation = activation + + def forward(self, x): + if x.numel() == 0 and self.training: + # https://github.com/pytorch/pytorch/issues/12013 + assert not isinstance( + self.norm, torch.nn.SyncBatchNorm + ), "SyncBatchNorm does not support empty inputs!" + + if x.numel() == 0 and TORCH_VERSION <= (1, 4): + assert not isinstance( + self.norm, torch.nn.GroupNorm + ), "GroupNorm does not support empty inputs in PyTorch <=1.4!" + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + empty = _NewEmptyTensorOp.apply(x, output_shape) + if self.training: + # This is to make DDP happy. + # DDP expects all workers to have gradient w.r.t the same set of parameters. + _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + _dummy + else: + return empty + + x = super().forward(x) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + +if TORCH_VERSION > (1, 4): + ConvTranspose2d = torch.nn.ConvTranspose2d +else: + + class ConvTranspose2d(torch.nn.ConvTranspose2d): + """ + A wrapper around :class:`torch.nn.ConvTranspose2d` to support zero-size tensor. + """ + + def forward(self, x): + if x.numel() > 0: + return super(ConvTranspose2d, self).forward(x) + # get output shape + + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. 
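+            # Per output dimension this evaluates the transposed-conv size formula:
+            #   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding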
+ # This computes the height and width of the output tensor + output_shape = [ + (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op + for i, p, di, k, d, op in zip( + x.shape[-2:], + self.padding, + self.dilation, + self.kernel_size, + self.stride, + self.output_padding, + ) + ] + output_shape = [x.shape[0], self.out_channels] + output_shape + # This is to make DDP happy. + # DDP expects all workers to have gradient w.r.t the same set of parameters. + _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return _NewEmptyTensorOp.apply(x, output_shape) + _dummy + + +if TORCH_VERSION > (1, 4): + BatchNorm2d = torch.nn.BatchNorm2d +else: + + class BatchNorm2d(torch.nn.BatchNorm2d): + """ + A wrapper around :class:`torch.nn.BatchNorm2d` to support zero-size tensor. + """ + + def forward(self, x): + if x.numel() > 0: + return super(BatchNorm2d, self).forward(x) + # get output shape + output_shape = x.shape + return _NewEmptyTensorOp.apply(x, output_shape) + + +if TORCH_VERSION > (1, 5): + Linear = torch.nn.Linear +else: + + class Linear(torch.nn.Linear): + """ + A wrapper around :class:`torch.nn.Linear` to support empty inputs and more features. + Because of https://github.com/pytorch/pytorch/issues/34202 + """ + + def forward(self, x): + if x.numel() == 0: + output_shape = [x.shape[0], self.weight.shape[0]] + + empty = _NewEmptyTensorOp.apply(x, output_shape) + if self.training: + # This is to make DDP happy. + # DDP expects all workers to have gradient w.r.t the same set of parameters. + _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + _dummy + else: + return empty + + x = super().forward(x) + return x + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + """ + A wrapper around :func:`torch.nn.functional.interpolate` to support zero-size tensor. + """ + if TORCH_VERSION > (1, 4) or input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners=align_corners + ) + + def _check_size_scale_factor(dim): + if size is None and scale_factor is None: + raise ValueError("either size or scale_factor should be defined") + if size is not None and scale_factor is not None: + raise ValueError("only one of size or scale_factor should be defined") + if ( + scale_factor is not None + and isinstance(scale_factor, tuple) + and len(scale_factor) != dim + ): + raise ValueError( + "scale_factor shape must match input shape. " + "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor)) + ) + + def _output_size(dim): + _check_size_scale_factor(dim) + if size is not None: + return size + scale_factors = _ntuple(dim)(scale_factor) + # math.floor might return float in py2.7 + return [int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)] + + output_shape = tuple(_output_size(2)) + output_shape = input.shape[:-2] + output_shape + return _NewEmptyTensorOp.apply(input, output_shape) diff --git a/preprocess/mhp_extension/detectron2/detectron2/model_zoo/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/model_zoo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..886616f8e11ef31ea85d7a7ba9a75308befceedf --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/model_zoo/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +""" +Model Zoo API for Detectron2: a collection of functions to create common model architectures and +optionally load pre-trained weights as released in +`MODEL_ZOO.md `_. +""" +from .model_zoo import get, get_config_file, get_checkpoint_url + +__all__ = ["get_checkpoint_url", "get", "get_config_file"] diff --git a/preprocess/mhp_extension/detectron2/detectron2/model_zoo/model_zoo.py b/preprocess/mhp_extension/detectron2/detectron2/model_zoo/model_zoo.py new file mode 100644 index 0000000000000000000000000000000000000000..68d0ce5dc442864474bb1086bf04d6e40708c190 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/model_zoo/model_zoo.py @@ -0,0 +1,150 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import os +import pkg_resources +import torch + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.modeling import build_model + + +class _ModelZooUrls(object): + """ + Mapping from names to officially released Detectron2 pre-trained models. + """ + + S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl + CONFIG_PATH_TO_URL_SUFFIX = { + # COCO Detection with Faster R-CNN + "COCO-Detection/faster_rcnn_R_50_C4_1x.yaml": "137257644/model_final_721ade.pkl", + "COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml": "137847829/model_final_51d356.pkl", + "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml": "137257794/model_final_b275ba.pkl", + "COCO-Detection/faster_rcnn_R_50_C4_3x.yaml": "137849393/model_final_f97cb7.pkl", + "COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml": "137849425/model_final_68d202.pkl", + "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml": "137849458/model_final_280758.pkl", + "COCO-Detection/faster_rcnn_R_101_C4_3x.yaml": "138204752/model_final_298dad.pkl", + "COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml": "138204841/model_final_3e0943.pkl", + "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml": "137851257/model_final_f6e8b1.pkl", + "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml": "139173657/model_final_68b088.pkl", + # COCO Detection with RetinaNet + "COCO-Detection/retinanet_R_50_FPN_1x.yaml": "137593951/model_final_b796dc.pkl", + "COCO-Detection/retinanet_R_50_FPN_3x.yaml": "137849486/model_final_4cafe0.pkl", + "COCO-Detection/retinanet_R_101_FPN_3x.yaml": "138363263/model_final_59f53c.pkl", + # COCO Detection with RPN and Fast R-CNN + "COCO-Detection/rpn_R_50_C4_1x.yaml": "137258005/model_final_450694.pkl", + "COCO-Detection/rpn_R_50_FPN_1x.yaml": "137258492/model_final_02ce48.pkl", + "COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml": "137635226/model_final_e5f7ce.pkl", + # COCO Instance Segmentation Baselines with Mask R-CNN + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml": "137259246/model_final_9243eb.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml": "137260150/model_final_4f86c3.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml": "137260431/model_final_a54504.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml": "137849525/model_final_4ce675.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml": "137849551/model_final_84107b.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml": "137849600/model_final_f10217.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml": "138363239/model_final_a2914c.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml": "138363294/model_final_0464b7.pkl", + 
"COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml": "138205316/model_final_a3ec72.pkl", + "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml": "139653917/model_final_2d9806.pkl", # noqa + # COCO Person Keypoint Detection Baselines with Keypoint R-CNN + "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml": "137261548/model_final_04e291.pkl", + "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml": "137849621/model_final_a6e10b.pkl", + "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml": "138363331/model_final_997cc7.pkl", + "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml": "139686956/model_final_5ad38f.pkl", + # COCO Panoptic Segmentation Baselines with Panoptic FPN + "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml": "139514544/model_final_dbfeb4.pkl", + "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml": "139514569/model_final_c10459.pkl", + "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml": "139514519/model_final_cafdb1.pkl", + # LVIS Instance Segmentation Baselines with Mask R-CNN + "LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml": "144219072/model_final_571f7c.pkl", + "LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml": "144219035/model_final_824ab5.pkl", + "LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml": "144219108/model_final_5e3439.pkl", # noqa + # Cityscapes & Pascal VOC Baselines + "Cityscapes/mask_rcnn_R_50_FPN.yaml": "142423278/model_final_af9cf5.pkl", + "PascalVOC-Detection/faster_rcnn_R_50_C4.yaml": "142202221/model_final_b1acc2.pkl", + # Other Settings + "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml": "138602867/model_final_65c703.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml": "144998336/model_final_821d0b.pkl", + "Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml": "138602847/model_final_e9d89b.pkl", + "Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml": "144998488/model_final_480dd8.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml": "169527823/model_final_3b3c51.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_gn.yaml": "138602888/model_final_dc5d9e.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml": "138602908/model_final_01ca85.pkl", + "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml": "139797668/model_final_be35db.pkl", + "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml": "18131413/model_0039999_e76410.pkl", # noqa + # D1 Comparisons + "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml": "137781054/model_final_7ab50c.pkl", # noqa + "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml": "137781281/model_final_62ca52.pkl", # noqa + "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml": "137781195/model_final_cce136.pkl", + } + + +def get_checkpoint_url(config_path): + """ + Returns the URL to the model trained using the given config + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + Returns: + str: a URL to the model + """ + name = config_path.replace(".yaml", "") + if config_path in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: + suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path] + return _ModelZooUrls.S3_PREFIX + name + "/" + suffix + raise RuntimeError("{} not available in Model Zoo!".format(name)) + + +def get_config_file(config_path): + """ + Returns path to a builtin config file. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + Returns: + str: the real path to the config file. 
+ """ + cfg_file = pkg_resources.resource_filename( + "detectron2.model_zoo", os.path.join("configs", config_path) + ) + if not os.path.exists(cfg_file): + raise RuntimeError("{} not available in Model Zoo!".format(config_path)) + return cfg_file + + +def get(config_path, trained: bool = False): + """ + Get a model specified by relative path under Detectron2's official ``configs/`` directory. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + trained (bool): If True, will initialize the model with the trained model zoo weights. + If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used + instead; this will typically (though not always) initialize a subset of weights using + an ImageNet pre-trained model, while randomly initializing the other weights. + + Example: + + .. code-block:: python + + from detectron2 import model_zoo + model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) + """ + cfg_file = get_config_file(config_path) + + cfg = get_cfg() + cfg.merge_from_file(cfg_file) + if trained: + cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + return model diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9e23fe4a7037c8ece8f4c553b4cfda1631b79c9c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/__init__.py @@ -0,0 +1,56 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +from detectron2.layers import ShapeSpec + +from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY +from .backbone import ( + BACKBONE_REGISTRY, + FPN, + Backbone, + ResNet, + ResNetBlockBase, + build_backbone, + build_resnet_backbone, + make_stage, +) +from .meta_arch import ( + META_ARCH_REGISTRY, + SEM_SEG_HEADS_REGISTRY, + GeneralizedRCNN, + PanopticFPN, + ProposalNetwork, + RetinaNet, + SemanticSegmentor, + build_model, + build_sem_seg_head, +) +from .postprocessing import detector_postprocess +from .proposal_generator import ( + PROPOSAL_GENERATOR_REGISTRY, + build_proposal_generator, + RPN_HEAD_REGISTRY, + build_rpn_head, +) +from .roi_heads import ( + ROI_BOX_HEAD_REGISTRY, + ROI_HEADS_REGISTRY, + ROI_KEYPOINT_HEAD_REGISTRY, + ROI_MASK_HEAD_REGISTRY, + ROIHeads, + StandardROIHeads, + BaseMaskRCNNHead, + BaseKeypointRCNNHead, + build_box_head, + build_keypoint_head, + build_mask_head, + build_roi_heads, +) +from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA + +_EXCLUDE = {"torch", "ShapeSpec"} +__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] + +assert ( + torch.Tensor([1]) == torch.Tensor([2]) +).dtype == torch.bool, "Your Pytorch is too old. 
Please update to contain https://github.com/pytorch/pytorch/pull/21113" diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/anchor_generator.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..93927bc1c16106710bc1ca1da4d186f7710e1606 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/anchor_generator.py @@ -0,0 +1,382 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +from typing import List +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, RotatedBoxes +from detectron2.utils.registry import Registry + +ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR") +ANCHOR_GENERATOR_REGISTRY.__doc__ = """ +Registry for modules that creates object detection anchors for feature maps. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +class BufferList(nn.Module): + """ + Similar to nn.ParameterList, but for buffers + """ + + def __init__(self, buffers=None): + super(BufferList, self).__init__() + if buffers is not None: + self.extend(buffers) + + def extend(self, buffers): + offset = len(self) + for i, buffer in enumerate(buffers): + self.register_buffer(str(offset + i), buffer) + return self + + def __len__(self): + return len(self._buffers) + + def __iter__(self): + return iter(self._buffers.values()) + + +def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device): + grid_height, grid_width = size + shifts_x = torch.arange( + offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device + ) + shifts_y = torch.arange( + offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device + ) + + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + return shift_x, shift_y + + +def _broadcast_params(params, num_features, name): + """ + If one size (or aspect ratio) is specified and there are multiple feature + maps, we "broadcast" anchors of that single size (or aspect ratio) + over all feature maps. + + If params is list[float], or list[list[float]] with len(params) == 1, repeat + it num_features time. + + Returns: + list[list[float]]: param for each feature + """ + assert isinstance( + params, (list, tuple) + ), f"{name} in anchor generator has to be a list! Got {params}." + assert len(params), f"{name} in anchor generator cannot be empty!" + if not isinstance(params[0], (list, tuple)): # list[float] + return [params] * num_features + if len(params) == 1: + return list(params) * num_features + assert len(params) == num_features, ( + f"Got {name} of length {len(params)} in anchor generator, " + f"but the number of input features is {num_features}!" + ) + return params + + +@ANCHOR_GENERATOR_REGISTRY.register() +class DefaultAnchorGenerator(nn.Module): + """ + Compute anchors in the standard ways described in + "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks". + """ + + box_dim: int = 4 + """ + the dimension of each anchor box. + """ + + @configurable + def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5): + """ + This interface is experimental. + + Args: + sizes (list[list[float]] or list[float]): + If sizes is list[list[float]], sizes[i] is the list of anchor sizes + (i.e. 
sqrt of anchor area) to use for the i-th feature map. + If sizes is list[float], the sizes are used for all feature maps. + Anchor sizes are given in absolute lengths in units of + the input image; they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]] or list[float]): list of aspect ratios + (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. + strides (list[int]): stride of each input feature. + offset (float): Relative offset between the center of the first anchor and the top-left + corner of the image. Value has to be in [0, 1). + Recommend to use 0.5, which means half stride. + """ + super().__init__() + + self.strides = strides + self.num_features = len(self.strides) + sizes = _broadcast_params(sizes, self.num_features, "sizes") + aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios) + + self.offset = offset + assert 0.0 <= self.offset < 1.0, self.offset + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + return { + "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, + "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, + "strides": [x.stride for x in input_shape], + "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, + } + + def _calculate_anchors(self, sizes, aspect_ratios): + cell_anchors = [ + self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios) + ] + return BufferList(cell_anchors) + + @property + def num_cell_anchors(self): + """ + Alias of `num_anchors`. + """ + return self.num_anchors + + @property + def num_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios and 5 sizes, the number of anchors is 15. + (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config) + + In standard RPN models, `num_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def _grid_anchors(self, grid_sizes: List[List[int]]): + """ + Returns: + list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4 + """ + anchors = [] + for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): + shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) + + return anchors + + def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): + """ + Generate a tensor storing canonical anchor boxes, which are all anchor + boxes of different sizes and aspect_ratios centered at (0, 0). + We can later build the set of anchors for a full feature map by + shifting and tiling these tensors (see `meth:_grid_anchors`). + + Args: + sizes (tuple[float]): + aspect_ratios (tuple[float]]): + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes + in XYXY format. + """ + + # This is different from the anchor generator defined in the original Faster R-CNN + # code or Detectron. They yield the same AP, however the old version defines cell + # anchors in a less natural way with a shift relative to the feature grid and + # quantization that results in slightly different sizes for different aspect ratios. 
+ # See also https://github.com/facebookresearch/Detectron/issues/227 + + anchors = [] + for size in sizes: + area = size ** 2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... + # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 + anchors.append([x0, y0, x1, y1]) + return torch.tensor(anchors) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[Boxes]: a list of Boxes containing all the anchors for each feature map + (i.e. the cell anchors repeated over all locations in the feature map). + The number of anchors of each feature map is Hi x Wi x num_cell_anchors, + where Hi, Wi are resolution of the feature map divided by anchor stride. + """ + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) + return [Boxes(x) for x in anchors_over_all_feature_maps] + + +@ANCHOR_GENERATOR_REGISTRY.register() +class RotatedAnchorGenerator(nn.Module): + """ + Compute rotated anchors used by Rotated RPN (RRPN), described in + "Arbitrary-Oriented Scene Text Detection via Rotation Proposals". + """ + + box_dim: int = 5 + """ + the dimension of each anchor box. + """ + + @configurable + def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5): + """ + This interface is experimental. + + Args: + sizes (list[list[float]] or list[float]): + If sizes is list[list[float]], sizes[i] is the list of anchor sizes + (i.e. sqrt of anchor area) to use for the i-th feature map. + If sizes is list[float], the sizes are used for all feature maps. + Anchor sizes are given in absolute lengths in units of + the input image; they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]] or list[float]): list of aspect ratios + (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. + strides (list[int]): stride of each input feature. + angles (list[list[float]] or list[float]): list of angles (in degrees CCW) + to use for anchors. Same "broadcast" rule for `sizes` applies. + offset (float): Relative offset between the center of the first anchor and the top-left + corner of the image. Value has to be in [0, 1). + Recommend to use 0.5, which means half stride. 
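# Worked example (illustrative, not part of the upstream file): for one size s and
# aspect ratio a = h / w, generate_cell_anchors above solves w * h = s * s and h = a * w,
# i.e. w = sqrt(s*s / a) and h = a * w, and centres the box at (0, 0) in XYXY form.
import math
import torch

def toy_cell_anchors(sizes=(32.0, 64.0), aspect_ratios=(0.5, 1.0, 2.0)):
    anchors = []
    for size in sizes:
        area = size ** 2.0
        for aspect_ratio in aspect_ratios:
            w = math.sqrt(area / aspect_ratio)
            h = aspect_ratio * w
            anchors.append([-w / 2.0, -h / 2.0, w / 2.0, h / 2.0])
    return torch.tensor(anchors)

# size 32 with aspect ratio 0.5 gives w ~ 45.25, h ~ 22.63 (same area as a 32 x 32 box);
# the result has shape (len(sizes) * len(aspect_ratios), 4), here (6, 4).
print(toy_cell_anchors().shape)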
+ """ + super().__init__() + + self.strides = strides + self.num_features = len(self.strides) + sizes = _broadcast_params(sizes, self.num_features, "sizes") + aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") + angles = _broadcast_params(angles, self.num_features, "angles") + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles) + + self.offset = offset + assert 0.0 <= self.offset < 1.0, self.offset + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + return { + "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, + "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, + "strides": [x.stride for x in input_shape], + "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, + "angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES, + } + + def _calculate_anchors(self, sizes, aspect_ratios, angles): + cell_anchors = [ + self.generate_cell_anchors(size, aspect_ratio, angle).float() + for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles) + ] + return BufferList(cell_anchors) + + @property + def num_cell_anchors(self): + """ + Alias of `num_anchors`. + """ + return self.num_anchors + + @property + def num_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios, 2 sizes and 5 angles, the number of anchors is 30. + (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS + and ANCHOR_GENERATOR.ANGLES in config) + + In standard RRPN models, `num_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def _grid_anchors(self, grid_sizes): + anchors = [] + for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): + shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) + zeros = torch.zeros_like(shift_x) + shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1) + + anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5)) + + return anchors + + def generate_cell_anchors( + self, + sizes=(32, 64, 128, 256, 512), + aspect_ratios=(0.5, 1, 2), + angles=(-90, -60, -30, 0, 30, 60, 90), + ): + """ + Generate a tensor storing canonical anchor boxes, which are all anchor + boxes of different sizes, aspect_ratios, angles centered at (0, 0). + We can later build the set of anchors for a full feature map by + shifting and tiling these tensors (see `meth:_grid_anchors`). + + Args: + sizes (tuple[float]): + aspect_ratios (tuple[float]]): + angles (tuple[float]]): + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5) + storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format. + """ + anchors = [] + for size in sizes: + area = size ** 2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... + # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + anchors.extend([0, 0, w, h, a] for a in angles) + + return torch.tensor(anchors) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map + (i.e. the cell anchors repeated over all locations in the feature map). 
+ The number of anchors of each feature map is Hi x Wi x num_cell_anchors, + where Hi, Wi are resolution of the feature map divided by anchor stride. + """ + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) + return [RotatedBoxes(x) for x in anchors_over_all_feature_maps] + + +def build_anchor_generator(cfg, input_shape): + """ + Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. + """ + anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME + return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d477fb1e596f77b4c24f2b2c66b528bf2f83b00e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip + +from .backbone import Backbone +from .fpn import FPN +from .resnet import ResNet, ResNetBlockBase, build_resnet_backbone, make_stage + +__all__ = [k for k in globals().keys() if not k.startswith("_")] +# TODO can expose more resnet blocks after careful consideration diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/backbone.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..66dee4a6565e6c45ed17d0880fcc37eac8f75c3a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/backbone.py @@ -0,0 +1,53 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from abc import ABCMeta, abstractmethod +import torch.nn as nn + +from detectron2.layers import ShapeSpec + +__all__ = ["Backbone"] + + +class Backbone(nn.Module, metaclass=ABCMeta): + """ + Abstract base class for network backbones. + """ + + def __init__(self): + """ + The `__init__` method of any subclass can specify its own set of arguments. + """ + super().__init__() + + @abstractmethod + def forward(self): + """ + Subclasses must override this method, but adhere to the same return type. + + Returns: + dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor + """ + pass + + @property + def size_divisibility(self): + """ + Some backbones require the input height and width to be divisible by a + specific integer. This is typically true for encoder / decoder type networks + with lateral connection (e.g., FPN) for which feature maps need to match + dimension in the "bottom up" and "top down" paths. Set to 0 if no specific + input size divisibility is required. 
+ """ + return 0 + + def output_shape(self): + """ + Returns: + dict[str->ShapeSpec] + """ + # this is a backward-compatible default + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/build.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/build.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2ecae783257418708b572e298a23e167dabb26 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/build.py @@ -0,0 +1,33 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from detectron2.layers import ShapeSpec +from detectron2.utils.registry import Registry + +from .backbone import Backbone + +BACKBONE_REGISTRY = Registry("BACKBONE") +BACKBONE_REGISTRY.__doc__ = """ +Registry for backbones, which extract feature maps from images + +The registered object must be a callable that accepts two arguments: + +1. A :class:`detectron2.config.CfgNode` +2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. + +It must returns an instance of :class:`Backbone`. +""" + + +def build_backbone(cfg, input_shape=None): + """ + Build a backbone from `cfg.MODEL.BACKBONE.NAME`. + + Returns: + an instance of :class:`Backbone` + """ + if input_shape is None: + input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) + + backbone_name = cfg.MODEL.BACKBONE.NAME + backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) + assert isinstance(backbone, Backbone) + return backbone diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/fpn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..338b5f5286ce233f17aa41f50a5a0a8fb819b8d3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/fpn.py @@ -0,0 +1,245 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import fvcore.nn.weight_init as weight_init +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import Conv2d, ShapeSpec, get_norm + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY +from .resnet import build_resnet_backbone + +__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"] + + +class FPN(Backbone): + """ + This module implements :paper:`FPN`. + It creates pyramid features built on top of some input feature maps. + """ + + def __init__( + self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum" + ): + """ + Args: + bottom_up (Backbone): module representing the bottom up subnetwork. + Must be a subclass of :class:`Backbone`. The multi-scale feature + maps generated by the bottom up network, and listed in `in_features`, + are used to generate FPN levels. + in_features (list[str]): names of the input feature maps coming + from the backbone to which FPN is attached. For example, if the + backbone produces ["res2", "res3", "res4"], any *contiguous* sublist + of these may be used; order must be from high to low resolution. + out_channels (int): number of channels in the output feature maps. + norm (str): the normalization to use. 
+ top_block (nn.Module or None): if provided, an extra operation will + be performed on the output of the last (smallest resolution) + FPN output, and the result will extend the result list. The top_block + further downsamples the feature map. It must have an attribute + "num_levels", meaning the number of extra FPN levels added by + this block, and "in_feature", which is a string representing + its input feature (e.g., p5). + fuse_type (str): types for fusing the top down features and the lateral + ones. It can be "sum" (default), which sums up element-wise; or "avg", + which takes the element-wise mean of the two. + """ + super(FPN, self).__init__() + assert isinstance(bottom_up, Backbone) + + # Feature map strides and channels from the bottom up network (e.g. ResNet) + input_shapes = bottom_up.output_shape() + in_strides = [input_shapes[f].stride for f in in_features] + in_channels = [input_shapes[f].channels for f in in_features] + + _assert_strides_are_log2_contiguous(in_strides) + lateral_convs = [] + output_convs = [] + + use_bias = norm == "" + for idx, in_channels in enumerate(in_channels): + lateral_norm = get_norm(norm, out_channels) + output_norm = get_norm(norm, out_channels) + + lateral_conv = Conv2d( + in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm + ) + output_conv = Conv2d( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + ) + weight_init.c2_xavier_fill(lateral_conv) + weight_init.c2_xavier_fill(output_conv) + stage = int(math.log2(in_strides[idx])) + self.add_module("fpn_lateral{}".format(stage), lateral_conv) + self.add_module("fpn_output{}".format(stage), output_conv) + + lateral_convs.append(lateral_conv) + output_convs.append(output_conv) + # Place convs into top-down order (from low to high resolution) + # to make the top-down computation in forward clearer. + self.lateral_convs = lateral_convs[::-1] + self.output_convs = output_convs[::-1] + self.top_block = top_block + self.in_features = in_features + self.bottom_up = bottom_up + # Return feature names are "p", like ["p2", "p3", ..., "p6"] + self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in in_strides} + # top block output feature maps. + if self.top_block is not None: + for s in range(stage, stage + self.top_block.num_levels): + self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) + + self._out_features = list(self._out_feature_strides.keys()) + self._out_feature_channels = {k: out_channels for k in self._out_features} + self._size_divisibility = in_strides[-1] + assert fuse_type in {"avg", "sum"} + self._fuse_type = fuse_type + + @property + def size_divisibility(self): + return self._size_divisibility + + def forward(self, x): + """ + Args: + input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to + feature map tensor for each feature level in high to low resolution order. + + Returns: + dict[str->Tensor]: + mapping from feature map name to FPN feature map tensor + in high to low resolution order. Returned feature names follow the FPN + paper convention: "p", where stage has stride = 2 ** stage e.g., + ["p2", "p3", ..., "p6"]. 
+ """ + # Reverse feature maps into top-down order (from low to high resolution) + bottom_up_features = self.bottom_up(x) + x = [bottom_up_features[f] for f in self.in_features[::-1]] + results = [] + prev_features = self.lateral_convs[0](x[0]) + results.append(self.output_convs[0](prev_features)) + for features, lateral_conv, output_conv in zip( + x[1:], self.lateral_convs[1:], self.output_convs[1:] + ): + top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest") + lateral_features = lateral_conv(features) + prev_features = lateral_features + top_down_features + if self._fuse_type == "avg": + prev_features /= 2 + results.insert(0, output_conv(prev_features)) + + if self.top_block is not None: + top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None) + if top_block_in_feature is None: + top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] + results.extend(self.top_block(top_block_in_feature)) + assert len(self._out_features) == len(results) + return dict(zip(self._out_features, results)) + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + +def _assert_strides_are_log2_contiguous(strides): + """ + Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2". + """ + for i, stride in enumerate(strides[1:], 1): + assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format( + stride, strides[i - 1] + ) + + +class LastLevelMaxPool(nn.Module): + """ + This module is used in the original FPN to generate a downsampled + P6 feature from P5. + """ + + def __init__(self): + super().__init__() + self.num_levels = 1 + self.in_feature = "p5" + + def forward(self, x): + return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] + + +class LastLevelP6P7(nn.Module): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7 from + C5 feature. + """ + + def __init__(self, in_channels, out_channels, in_feature="res5"): + super().__init__() + self.num_levels = 2 + self.in_feature = in_feature + self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) + self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) + for module in [self.p6, self.p7]: + weight_init.c2_xavier_fill(module) + + def forward(self, c5): + p6 = self.p6(c5) + p7 = self.p7(F.relu(p6)) + return [p6, p7] + + +@BACKBONE_REGISTRY.register() +def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. + """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelMaxPool(), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone + + +@BACKBONE_REGISTRY.register() +def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
+ """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + in_channels_p6p7 = bottom_up.output_shape()["res5"].channels + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelP6P7(in_channels_p6p7, out_channels), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/resnet.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f1faae012f346166a311902826fb9e4b61e24e54 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/backbone/resnet.py @@ -0,0 +1,591 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import ( + CNNBlockBase, + Conv2d, + DeformConv, + ModulatedDeformConv, + ShapeSpec, + get_norm, +) + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY + +__all__ = [ + "ResNetBlockBase", + "BasicBlock", + "BottleneckBlock", + "DeformBottleneckBlock", + "BasicStem", + "ResNet", + "make_stage", + "build_resnet_backbone", +] + + +ResNetBlockBase = CNNBlockBase +""" +Alias for backward compatibiltiy. +""" + + +class BasicBlock(CNNBlockBase): + """ + The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`, + with two 3x3 conv layers and a projection shortcut if needed. + """ + + def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"): + """ + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + stride (int): Stride for the first conv. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + self.conv2 = Conv2d( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + out = self.conv2(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class BottleneckBlock(CNNBlockBase): + """ + The standard bottleneck residual block used by ResNet-50, 101 and 152 + defined in :paper:`ResNet`. It contains 3 conv layers with kernels + 1x1, 3x3, 1x1, and a projection shortcut if needed. + """ + + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + ): + """ + Args: + bottleneck_channels (int): number of output channels for the 3x3 + "bottleneck" conv layers. 
+ num_groups (int): number of groups for the 3x3 conv layer. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + stride_in_1x1 (bool): when stride>1, whether to put stride in the + first 1x1 convolution or the bottleneck 3x3 convolution. + dilation (int): the dilation rate of the 3x3 conv layer. + """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + # The original MSRA ResNet models have stride in the first 1x1 conv + # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have + # stride in the 3x3 conv + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + # Zero-initialize the last normalization in each residual branch, + # so that at the beginning, the residual branch starts with zeros, + # and each residual block behaves like an identity. + # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "For BN layers, the learnable scaling coefficient ฮณ is initialized + # to be 1, except for each residual block's last BN + # where ฮณ is initialized to be 0." + + # nn.init.constant_(self.conv3.norm.weight, 0) + # TODO this somehow hurts performance when training GN models from scratch. + # Add it as an option when we need to use this code to train a backbone. + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + out = self.conv2(out) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class DeformBottleneckBlock(ResNetBlockBase): + """ + Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv ` + in the 3x3 convolution. 
+ """ + + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + deform_modulated=False, + deform_num_groups=1, + ): + super().__init__(in_channels, out_channels, stride) + self.deform_modulated = deform_modulated + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + if deform_modulated: + deform_conv_op = ModulatedDeformConv + # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size + offset_channels = 27 + else: + deform_conv_op = DeformConv + offset_channels = 18 + + self.conv2_offset = Conv2d( + bottleneck_channels, + offset_channels * deform_num_groups, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + dilation=dilation, + ) + self.conv2 = deform_conv_op( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + deformable_groups=deform_num_groups, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + nn.init.constant_(self.conv2_offset.weight, 0) + nn.init.constant_(self.conv2_offset.bias, 0) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + if self.deform_modulated: + offset_mask = self.conv2_offset(out) + offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) + offset = torch.cat((offset_x, offset_y), dim=1) + mask = mask.sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +def make_stage(block_class, num_blocks, first_stride, *, in_channels, out_channels, **kwargs): + """ + Create a list of blocks just like those in a ResNet stage. + + Args: + block_class (type): a subclass of ResNetBlockBase + num_blocks (int): + first_stride (int): the stride of the first block. The other blocks will have stride=1. + in_channels (int): input channels of the entire stage. + out_channels (int): output channels of **every block** in the stage. + kwargs: other arguments passed to the constructor of every block. + + Returns: + list[nn.Module]: a list of block module. + """ + assert "stride" not in kwargs, "Stride of blocks in make_stage cannot be changed." + blocks = [] + for i in range(num_blocks): + blocks.append( + block_class( + in_channels=in_channels, + out_channels=out_channels, + stride=first_stride if i == 0 else 1, + **kwargs, + ) + ) + in_channels = out_channels + return blocks + + +class BasicStem(CNNBlockBase): + """ + The standard ResNet stem (layers before the first residual block). 
+ """ + + def __init__(self, in_channels=3, out_channels=64, norm="BN"): + """ + Args: + norm (str or callable): norm after the first conv layer. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, 4) + self.in_channels = in_channels + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False, + norm=get_norm(norm, out_channels), + ) + weight_init.c2_msra_fill(self.conv1) + + def forward(self, x): + x = self.conv1(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x + + +class ResNet(Backbone): + """ + Implement :paper:`ResNet`. + """ + + def __init__(self, stem, stages, num_classes=None, out_features=None): + """ + Args: + stem (nn.Module): a stem module + stages (list[list[CNNBlockBase]]): several (typically 4) stages, + each contains multiple :class:`CNNBlockBase`. + num_classes (None or int): if None, will not perform classification. + Otherwise, will create a linear layer. + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "linear", or "res2" ... + If None, will return the output of the last layer. + """ + super(ResNet, self).__init__() + self.stem = stem + self.num_classes = num_classes + + current_stride = self.stem.stride + self._out_feature_strides = {"stem": current_stride} + self._out_feature_channels = {"stem": self.stem.out_channels} + + self.stages_and_names = [] + for i, blocks in enumerate(stages): + assert len(blocks) > 0, len(blocks) + for block in blocks: + assert isinstance(block, CNNBlockBase), block + + name = "res" + str(i + 2) + stage = nn.Sequential(*blocks) + + self.add_module(name, stage) + self.stages_and_names.append((stage, name)) + + self._out_feature_strides[name] = current_stride = int( + current_stride * np.prod([k.stride for k in blocks]) + ) + self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels + + if num_classes is not None: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.linear = nn.Linear(curr_channels, num_classes) + + # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "The 1000-way fully-connected layer is initialized by + # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." + nn.init.normal_(self.linear.weight, std=0.01) + name = "linear" + + if out_features is None: + out_features = [name] + self._out_features = out_features + assert len(self._out_features) + children = [x[0] for x in self.named_children()] + for out_feature in self._out_features: + assert out_feature in children, "Available children: {}".format(", ".join(children)) + + def forward(self, x): + outputs = {} + x = self.stem(x) + if "stem" in self._out_features: + outputs["stem"] = x + for stage, name in self.stages_and_names: + x = stage(x) + if name in self._out_features: + outputs[name] = x + if self.num_classes is not None: + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.linear(x) + if "linear" in self._out_features: + outputs["linear"] = x + return outputs + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + def freeze(self, freeze_at=0): + """ + Freeze the first several stages of the ResNet. Commonly used in + fine-tuning. + + Layers that produce the same feature map spatial size are defined as one + "stage" by :paper:`FPN`. 
+ + Args: + freeze_at (int): number of stages to freeze. + `1` means freezing the stem. `2` means freezing the stem and + one residual stage, etc. + + Returns: + nn.Module: this ResNet itself + """ + if freeze_at >= 1: + self.stem.freeze() + for idx, (stage, _) in enumerate(self.stages_and_names, start=2): + if freeze_at >= idx: + for block in stage.children(): + block.freeze() + return self + + +@BACKBONE_REGISTRY.register() +def build_resnet_backbone(cfg, input_shape): + """ + Create a ResNet instance from config. + + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? + norm = cfg.MODEL.RESNETS.NORM + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + + # fmt: off + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + # fmt: on + assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) + + num_blocks_per_stage = { + 18: [2, 2, 2, 2], + 34: [3, 4, 6, 3], + 50: [3, 4, 6, 3], + 101: [3, 4, 23, 3], + 152: [3, 8, 36, 3], + }[depth] + + if depth in [18, 34]: + assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" + assert not any( + deform_on_per_stage + ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" + assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" + assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" + + stages = [] + + # Avoid creating variables without gradients + # It consumes extra memory and may cause allreduce to fail + out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + dilation = res5_dilation if stage_idx == 5 else 1 + first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "first_stride": first_stride, + "in_channels": in_channels, + "out_channels": out_channels, + "norm": norm, + } + # Use BasicBlock for R18 and R34. 
+ if depth in [18, 34]: + stage_kargs["block_class"] = BasicBlock + else: + stage_kargs["bottleneck_channels"] = bottleneck_channels + stage_kargs["stride_in_1x1"] = stride_in_1x1 + stage_kargs["dilation"] = dilation + stage_kargs["num_groups"] = num_groups + if deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = make_stage(**stage_kargs) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features).freeze(freeze_at) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/box_regression.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/box_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..88426fddf36812f33def8fb434bebce53db3a4b4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/box_regression.py @@ -0,0 +1,247 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +from typing import Tuple +import torch + +# Value for clamping large dw and dh predictions. The heuristic is that we clamp +# such that dw and dh are no larger than what would transform a 16px box into a +# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). +_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16) + + +__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated"] + + +def apply_deltas_broadcast(box2box_transform, deltas, boxes): + """ + Apply transform deltas to boxes. Similar to `box2box_transform.apply_deltas`, + but allow broadcasting boxes when the second dimension of deltas is a multiple + of box dimension. + + Args: + box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): the transform to apply + deltas (Tensor): tensor of shape (N,B) or (N,KxB) + boxes (Tensor): tensor of shape (N,B) + + Returns: + Tensor: same shape as deltas. + """ + assert deltas.dim() == boxes.dim() == 2, f"{deltas.shape}, {boxes.shape}" + N, B = boxes.shape + assert ( + deltas.shape[1] % B == 0 + ), f"Second dim of deltas should be a multiple of {B}. Got {deltas.shape}" + K = deltas.shape[1] // B + ret = box2box_transform.apply_deltas( + deltas.view(N * K, B), boxes.unsqueeze(1).expand(N, K, B).reshape(N * K, B) + ) + return ret.view(N, K * B) + + +@torch.jit.script +class Box2BoxTransform(object): + """ + The box-to-box transform defined in R-CNN. The transformation is parameterized + by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height + by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). + """ + + def __init__( + self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP + ): + """ + Args: + weights (4-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set + such that the deltas have unit variance; now they are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. 
+ """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): source boxes, e.g., object proposals + target_boxes (Tensor): target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_widths = src_boxes[:, 2] - src_boxes[:, 0] + src_heights = src_boxes[:, 3] - src_boxes[:, 1] + src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths + src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights + + target_widths = target_boxes[:, 2] - target_boxes[:, 0] + target_heights = target_boxes[:, 3] - target_boxes[:, 1] + target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths + target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights + + wx, wy, ww, wh = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + + deltas = torch.stack((dx, dy, dw, dh), dim=1) + assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. + deltas[i] represents k potentially different class-specific + box transformations for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 4) + """ + boxes = boxes.to(deltas.dtype) + + widths = boxes[:, 2] - boxes[:, 0] + heights = boxes[:, 3] - boxes[:, 1] + ctr_x = boxes[:, 0] + 0.5 * widths + ctr_y = boxes[:, 1] + 0.5 * heights + + wx, wy, ww, wh = self.weights + dx = deltas[:, 0::4] / wx + dy = deltas[:, 1::4] / wy + dw = deltas[:, 2::4] / ww + dh = deltas[:, 3::4] / wh + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] + pred_w = torch.exp(dw) * widths[:, None] + pred_h = torch.exp(dh) * heights[:, None] + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 + pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 + pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 + pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 + return pred_boxes + + +@torch.jit.script +class Box2BoxTransformRotated(object): + """ + The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized + by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height + by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height), + and rotate a box's angle by da (radians). + Note: angles of deltas are in radians while angles of boxes are in degrees. + """ + + def __init__( + self, + weights: Tuple[float, float, float, float, float], + scale_clamp: float = _DEFAULT_SCALE_CLAMP, + ): + """ + Args: + weights (5-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh, da) deltas. 
These are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): Nx5 source boxes, e.g., object proposals + target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1) + + target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind( + target_boxes, dim=1 + ) + + wx, wy, ww, wh, wa = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + # Angles of deltas are in radians while angles of boxes are in degrees. + # the conversion to radians serve as a way to normalize the values + da = target_angles - src_angles + da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180) + da *= wa * math.pi / 180.0 + + deltas = torch.stack((dx, dy, dw, dh, da), dim=1) + assert ( + (src_widths > 0).all().item() + ), "Input boxes to Box2BoxTransformRotated are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, 5). + deltas[i] represents box transformation for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 5) + """ + assert deltas.shape[1] == 5 and boxes.shape[1] == 5 + + boxes = boxes.to(deltas.dtype) + + ctr_x = boxes[:, 0] + ctr_y = boxes[:, 1] + widths = boxes[:, 2] + heights = boxes[:, 3] + angles = boxes[:, 4] + + wx, wy, ww, wh, wa = self.weights + + dx = deltas[:, 0] / wx + dy = deltas[:, 1] / wy + dw = deltas[:, 2] / ww + dh = deltas[:, 3] / wh + da = deltas[:, 4] / wa + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0] = dx * widths + ctr_x # x_ctr + pred_boxes[:, 1] = dy * heights + ctr_y # y_ctr + pred_boxes[:, 2] = torch.exp(dw) * widths # width + pred_boxes[:, 3] = torch.exp(dh) * heights # height + + # Following original RRPN implementation, + # angles of deltas are in radians while angles of boxes are in degrees. 
+ pred_angle = da * 180.0 / math.pi + angles + pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180) + + pred_boxes[:, 4] = pred_angle + + return pred_boxes diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/matcher.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..2911f8c1937749dec4dbe64aa3e8491a631e03f2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/matcher.py @@ -0,0 +1,123 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import List +import torch + + +class Matcher(object): + """ + This class assigns to each predicted "element" (e.g., a box) a ground-truth + element. Each predicted element will have exactly zero or one matches; each + ground-truth element may be matched to zero or more predicted elements. + + The matching is determined by the MxN match_quality_matrix, that characterizes + how well each (ground-truth, prediction)-pair match each other. For example, + if the elements are boxes, this matrix may contain box intersection-over-union + overlap values. + + The matcher returns (a) a vector of length N containing the index of the + ground-truth element m in [0, M) that matches to prediction n in [0, N). + (b) a vector of length N containing the labels for each prediction. + """ + + def __init__( + self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False + ): + """ + Args: + thresholds (list): a list of thresholds used to stratify predictions + into levels. + labels (list): a list of values to label predictions belonging at + each level. A label can be one of {-1, 0, 1} signifying + {ignore, negative class, positive class}, respectively. + allow_low_quality_matches (bool): if True, produce additional matches + for predictions with maximum match quality lower than high_threshold. + See set_low_quality_matches_ for more details. + + For example, + thresholds = [0.3, 0.5] + labels = [0, -1, 1] + All predictions with iou < 0.3 will be marked with 0 and + thus will be considered as false positives while training. + All predictions with 0.3 <= iou < 0.5 will be marked with -1 and + thus will be ignored. + All predictions with 0.5 <= iou will be marked with 1 and + thus will be considered as true positives. + """ + # Add -inf and +inf to first and last position in thresholds + thresholds = thresholds[:] + assert thresholds[0] > 0 + thresholds.insert(0, -float("inf")) + thresholds.append(float("inf")) + assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) + assert all(l in [-1, 0, 1] for l in labels) + assert len(labels) == len(thresholds) - 1 + self.thresholds = thresholds + self.labels = labels + self.allow_low_quality_matches = allow_low_quality_matches + + def __call__(self, match_quality_matrix): + """ + Args: + match_quality_matrix (Tensor[float]): an MxN tensor, containing the + pairwise quality between M ground-truth elements and N predicted + elements. All elements must be >= 0 (due to the us of `torch.nonzero` + for selecting indices in :meth:`set_low_quality_matches_`). 
+ + Returns: + matches (Tensor[int64]): a vector of length N, where matches[i] is a matched + ground-truth index in [0, M) + match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates + whether a prediction is a true or false positive or ignored + """ + assert match_quality_matrix.dim() == 2 + if match_quality_matrix.numel() == 0: + default_matches = match_quality_matrix.new_full( + (match_quality_matrix.size(1),), 0, dtype=torch.int64 + ) + # When no gt boxes exist, we define IOU = 0 and therefore set labels + # to `self.labels[0]`, which usually defaults to background class 0 + # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds + default_match_labels = match_quality_matrix.new_full( + (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 + ) + return default_matches, default_match_labels + + assert torch.all(match_quality_matrix >= 0) + + # match_quality_matrix is M (gt) x N (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = match_quality_matrix.max(dim=0) + + match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) + + for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): + low_high = (matched_vals >= low) & (matched_vals < high) + match_labels[low_high] = l + + if self.allow_low_quality_matches: + self.set_low_quality_matches_(match_labels, match_quality_matrix) + + return matches, match_labels + + def set_low_quality_matches_(self, match_labels, match_quality_matrix): + """ + Produce additional matches for predictions that have only low-quality matches. + Specifically, for each ground-truth G find the set of predictions that have + maximum overlap with it (including ties); for each prediction in that set, if + it is unmatched, then match it to the ground-truth G. + + This function implements the RPN assignment case (i) in Sec. 3.1.2 of + :paper:`Faster R-CNN`. + """ + # For each gt, find the prediction with which it has highest quality + highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) + # Find the highest quality match available, even if it is low, including ties. + # Note that the matches qualities must be positive due to the use of + # `torch.nonzero`. + _, pred_inds_with_highest_quality = torch.nonzero( + match_quality_matrix == highest_quality_foreach_gt[:, None], as_tuple=True + ) + # If an anchor was labeled positive only due to a low-quality match + # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B. + # This follows the implementation in Detectron, and is found to have no significant impact. + match_labels[pred_inds_with_highest_quality] = 1 diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..96ef9b582c2ed38525102ebb589a750cf6b9fa54 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
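# Usage sketch (illustrative, not part of the upstream file): with thresholds=[0.3, 0.5]
# and labels=[0, -1, 1] (the docstring's own example), IoU < 0.3 is a negative (0),
# 0.3 <= IoU < 0.5 is ignored (-1), and IoU >= 0.5 is a positive (1).
import torch

from detectron2.modeling.matcher import Matcher

matcher = Matcher(thresholds=[0.3, 0.5], labels=[0, -1, 1], allow_low_quality_matches=False)
iou = torch.tensor([
    [0.10, 0.40, 0.70, 0.05],   # gt 0 vs predictions 0..3
    [0.20, 0.10, 0.20, 0.60],   # gt 1 vs predictions 0..3
])
matches, match_labels = matcher(iou)
print(matches)       # tensor([1, 0, 0, 1]) -- best-matching gt index per prediction
print(match_labels)  # tensor([ 0, -1,  1,  1], dtype=torch.int8)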
All Rights Reserved + +from .build import META_ARCH_REGISTRY, build_model # isort:skip + +from .panoptic_fpn import PanopticFPN + +# import all the meta_arch, so they will be registered +from .rcnn import GeneralizedRCNN, ProposalNetwork +from .retinanet import RetinaNet +from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/build.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/build.py new file mode 100644 index 0000000000000000000000000000000000000000..630389dfca822f295447abd5e8424186d02e0465 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/build.py @@ -0,0 +1,23 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +from detectron2.utils.registry import Registry + +META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip +META_ARCH_REGISTRY.__doc__ = """ +Registry for meta-architectures, i.e. the whole model. + +The registered object will be called with `obj(cfg)` +and expected to return a `nn.Module` object. +""" + + +def build_model(cfg): + """ + Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. + Note that it does not load any weights from ``cfg``. + """ + meta_arch = cfg.MODEL.META_ARCHITECTURE + model = META_ARCH_REGISTRY.get(meta_arch)(cfg) + model.to(torch.device(cfg.MODEL.DEVICE)) + return model diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f92f701f2da3aff6602ad2388307874102fc5c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/panoptic_fpn.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import torch +from torch import nn + +from detectron2.structures import ImageList + +from ..backbone import build_backbone +from ..postprocessing import detector_postprocess, sem_seg_postprocess +from ..proposal_generator import build_proposal_generator +from ..roi_heads import build_roi_heads +from .build import META_ARCH_REGISTRY +from .semantic_seg import build_sem_seg_head + +__all__ = ["PanopticFPN"] + + +@META_ARCH_REGISTRY.register() +class PanopticFPN(nn.Module): + """ + Implement the paper :paper:`PanopticFPN`. 
+ """ + + def __init__(self, cfg): + super().__init__() + + self.instance_loss_weight = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT + + # options when combining instance & semantic outputs + self.combine_on = cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED + self.combine_overlap_threshold = cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH + self.combine_stuff_area_limit = cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT + self.combine_instances_confidence_threshold = ( + cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH + ) + + self.backbone = build_backbone(cfg) + self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape()) + self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape()) + self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape()) + + self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + + * "image": Tensor, image in (C, H, W) format. + * "instances": Instances + * "sem_seg": semantic segmentation ground truth. + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + each dict is the results for one image. The dict contains the following keys: + + * "instances": see :meth:`GeneralizedRCNN.forward` for its format. + * "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. + * "panoptic_seg": available when `PANOPTIC_FPN.COMBINE.ENABLED`. + See the return value of + :func:`combine_semantic_and_instance_outputs` for its format. 
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + features = self.backbone(images.tensor) + + if "proposals" in batched_inputs[0]: + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + proposal_losses = {} + + if "sem_seg" in batched_inputs[0]: + gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] + gt_sem_seg = ImageList.from_tensors( + gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value + ).tensor + else: + gt_sem_seg = None + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + if self.proposal_generator: + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + detector_results, detector_losses = self.roi_heads( + images, features, proposals, gt_instances + ) + + if self.training: + losses = {} + losses.update(sem_seg_losses) + losses.update({k: v * self.instance_loss_weight for k, v in detector_losses.items()}) + losses.update(proposal_losses) + return losses + + processed_results = [] + for sem_seg_result, detector_result, input_per_image, image_size in zip( + sem_seg_results, detector_results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) + detector_r = detector_postprocess(detector_result, height, width) + + processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) + + if self.combine_on: + panoptic_r = combine_semantic_and_instance_outputs( + detector_r, + sem_seg_r.argmax(dim=0), + self.combine_overlap_threshold, + self.combine_stuff_area_limit, + self.combine_instances_confidence_threshold, + ) + processed_results[-1]["panoptic_seg"] = panoptic_r + return processed_results + + +def combine_semantic_and_instance_outputs( + instance_results, + semantic_results, + overlap_threshold, + stuff_area_limit, + instances_confidence_threshold, +): + """ + Implement a simple combining logic following + "combine_semantic_and_instance_predictions.py" in panopticapi + to produce panoptic segmentation outputs. + + Args: + instance_results: output of :func:`detector_postprocess`. + semantic_results: an (H, W) tensor, each is the contiguous semantic + category id + + Returns: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". 
+ """ + panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) + + # sort instance outputs by scores + sorted_inds = torch.argsort(-instance_results.scores) + + current_segment_id = 0 + segments_info = [] + + instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) + + # Add instances one-by-one, check for overlaps with existing ones + for inst_id in sorted_inds: + score = instance_results.scores[inst_id].item() + if score < instances_confidence_threshold: + break + mask = instance_masks[inst_id] # H,W + mask_area = mask.sum().item() + + if mask_area == 0: + continue + + intersect = (mask > 0) & (panoptic_seg > 0) + intersect_area = intersect.sum().item() + + if intersect_area * 1.0 / mask_area > overlap_threshold: + continue + + if intersect_area > 0: + mask = mask & (panoptic_seg == 0) + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": True, + "score": score, + "category_id": instance_results.pred_classes[inst_id].item(), + "instance_id": inst_id.item(), + } + ) + + # Add semantic results to remaining empty areas + semantic_labels = torch.unique(semantic_results).cpu().tolist() + for semantic_label in semantic_labels: + if semantic_label == 0: # 0 is a special "thing" class + continue + mask = (semantic_results == semantic_label) & (panoptic_seg == 0) + mask_area = mask.sum().item() + if mask_area < stuff_area_limit: + continue + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": False, + "category_id": semantic_label, + "area": mask_area, + } + ) + + return panoptic_seg, segments_info diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/rcnn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b15ea8a38e5ddfbb4049c89917f055295e396b4f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/rcnn.py @@ -0,0 +1,263 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +import torch +from torch import nn + +from detectron2.structures import ImageList +from detectron2.utils.events import get_event_storage +from detectron2.utils.logger import log_first_n + +from ..backbone import build_backbone +from ..postprocessing import detector_postprocess +from ..proposal_generator import build_proposal_generator +from ..roi_heads import build_roi_heads +from .build import META_ARCH_REGISTRY + +__all__ = ["GeneralizedRCNN", "ProposalNetwork"] + + +@META_ARCH_REGISTRY.register() +class GeneralizedRCNN(nn.Module): + """ + Generalized R-CNN. Any models that contains the following three components: + 1. Per-image feature extraction (aka backbone) + 2. Region proposal generation + 3. 
Per-region feature extraction and prediction + """ + + def __init__(self, cfg): + super().__init__() + + self.backbone = build_backbone(cfg) + self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape()) + self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape()) + self.vis_period = cfg.VIS_PERIOD + self.input_format = cfg.INPUT.FORMAT + + assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD) + self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + @property + def device(self): + return self.pixel_mean.device + + def visualize_training(self, batched_inputs, proposals): + """ + A function used to visualize images and proposals. It shows ground truth + bounding boxes on the original image and up to 20 predicted object + proposals on the original image. Users can implement different + visualization functions for different models. + + Args: + batched_inputs (list): a list that contains input to the model. + proposals (list): a list that contains predicted proposals. Both + batched_inputs and proposals should have the same length. + """ + from detectron2.utils.visualizer import Visualizer + + storage = get_event_storage() + max_vis_prop = 20 + + for input, prop in zip(batched_inputs, proposals): + img = input["image"].cpu().numpy() + assert img.shape[0] == 3, "Images should have 3 channels." + if self.input_format == "BGR": + img = img[::-1, :, :] + img = img.transpose(1, 2, 0) + v_gt = Visualizer(img, None) + v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes) + anno_img = v_gt.get_image() + box_size = min(len(prop.proposal_boxes), max_vis_prop) + v_pred = Visualizer(img, None) + v_pred = v_pred.overlay_instances( + boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy() + ) + prop_img = v_pred.get_image() + vis_img = np.concatenate((anno_img, prop_img), axis=1) + vis_img = vis_img.transpose(2, 0, 1) + vis_name = "Left: GT bounding boxes; Right: Predicted proposals" + storage.put_image(vis_name, vis_img) + break # only visualize one image in a batch + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances (optional): groundtruth :class:`Instances` + * proposals (optional): :class:`Instances`, precomputed proposals. + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "instances" whose value is a :class:`Instances`. 
+ The :class:`Instances` object has the following keys: + "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" + """ + if not self.training: + return self.inference(batched_inputs) + + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + + if self.proposal_generator: + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + proposal_losses = {} + + _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) + if self.vis_period > 0: + storage = get_event_storage() + if storage.iter % self.vis_period == 0: + self.visualize_training(batched_inputs, proposals) + + losses = {} + losses.update(detector_losses) + losses.update(proposal_losses) + return losses + + def inference(self, batched_inputs, detected_instances=None, do_postprocess=True): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + detected_instances (None or list[Instances]): if not None, it + contains an `Instances` object per image. The `Instances` + object contains "pred_boxes" and "pred_classes" which are + known boxes in the image. + The inference will then skip the detection of bounding boxes, + and only predict other per-ROI outputs. + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + same as in :meth:`forward`. + """ + assert not self.training + + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + + if detected_instances is None: + if self.proposal_generator: + proposals, _ = self.proposal_generator(images, features, None) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + + results, _ = self.roi_heads(images, features, proposals, None) + else: + detected_instances = [x.to(self.device) for x in detected_instances] + results = self.roi_heads.forward_with_given_boxes(features, detected_instances) + + if do_postprocess: + return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) + else: + return results + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + return images + + @staticmethod + def _postprocess(instances, batched_inputs, image_sizes): + """ + Rescale the output instances to the target size. 
+ """ + # note: private function; subject to changes + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + instances, batched_inputs, image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + +@META_ARCH_REGISTRY.register() +class ProposalNetwork(nn.Module): + """ + A meta architecture that only predicts object proposals. + """ + + def __init__(self, cfg): + super().__init__() + self.backbone = build_backbone(cfg) + self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape()) + + self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + Same as in :class:`GeneralizedRCNN.forward` + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "proposals" whose value is a + :class:`Instances` with keys "proposal_boxes" and "objectness_logits". + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + features = self.backbone(images.tensor) + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + # In training, the proposals are not useful at all but we generate them anyway. + # This makes RPN-only models about 5% slower. + if self.training: + return proposal_losses + + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + proposals, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"proposals": r}) + return processed_results diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/retinanet.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/retinanet.py new file mode 100644 index 0000000000000000000000000000000000000000..35c42cc25e93bf2841c5e1fcff389f317ed0883a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/retinanet.py @@ -0,0 +1,489 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import math +import numpy as np +from typing import List +import torch +from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss +from torch import nn + +from detectron2.layers import ShapeSpec, batched_nms, cat +from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage +from detectron2.utils.logger import log_first_n + +from ..anchor_generator import build_anchor_generator +from ..backbone import build_backbone +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from ..postprocessing import detector_postprocess +from .build import META_ARCH_REGISTRY + +__all__ = ["RetinaNet"] + + +def permute_to_N_HWA_K(tensor, K): + """ + Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K) + """ + assert tensor.dim() == 4, tensor.shape + N, _, H, W = tensor.shape + tensor = tensor.view(N, -1, K, H, W) + tensor = tensor.permute(0, 3, 4, 1, 2) + tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) + return tensor + + +def permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, num_classes=80): + """ + Rearrange the tensor layout from the network output, i.e.: + list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi) + to per-image predictions, i.e.: + Tensor: of shape (N x sum(Hi x Wi x A), K) + """ + # for each feature level, permute the outputs to make them be in the + # same format as the labels. Note that the labels are computed for + # all feature levels concatenated, so we keep the same representation + # for the objectness and the box_delta + box_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in box_cls] + box_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in box_delta] + # concatenate on the first dimension (representing the feature levels), to + # take into account the way the labels were generated (with all feature maps + # being concatenated as well) + box_cls = cat(box_cls_flattened, dim=1).view(-1, num_classes) + box_delta = cat(box_delta_flattened, dim=1).view(-1, 4) + return box_cls, box_delta + + +@META_ARCH_REGISTRY.register() +class RetinaNet(nn.Module): + """ + Implement RetinaNet in :paper:`RetinaNet`. 
+ """ + + def __init__(self, cfg): + super().__init__() + + # fmt: off + self.num_classes = cfg.MODEL.RETINANET.NUM_CLASSES + self.in_features = cfg.MODEL.RETINANET.IN_FEATURES + # Loss parameters: + self.focal_loss_alpha = cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA + self.focal_loss_gamma = cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA + self.smooth_l1_loss_beta = cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA + # Inference parameters: + self.score_threshold = cfg.MODEL.RETINANET.SCORE_THRESH_TEST + self.topk_candidates = cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST + self.nms_threshold = cfg.MODEL.RETINANET.NMS_THRESH_TEST + self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE + # Vis parameters + self.vis_period = cfg.VIS_PERIOD + self.input_format = cfg.INPUT.FORMAT + # fmt: on + + self.backbone = build_backbone(cfg) + + backbone_shape = self.backbone.output_shape() + feature_shapes = [backbone_shape[f] for f in self.in_features] + self.head = RetinaNetHead(cfg, feature_shapes) + self.anchor_generator = build_anchor_generator(cfg, feature_shapes) + + # Matching and loss + self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + self.matcher = Matcher( + cfg.MODEL.RETINANET.IOU_THRESHOLDS, + cfg.MODEL.RETINANET.IOU_LABELS, + allow_low_quality_matches=True, + ) + + self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + """ + In Detectron1, loss is normalized by number of foreground samples in the batch. + When batch size is 1 per GPU, #foreground has a large variance and + using it lead to lower performance. Here we maintain an EMA of #foreground to + stabilize the normalizer. + """ + self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small + self.loss_normalizer_momentum = 0.9 + + @property + def device(self): + return self.pixel_mean.device + + def visualize_training(self, batched_inputs, results): + """ + A function used to visualize ground truth images and final network predictions. + It shows ground truth bounding boxes on the original image and up to 20 + predicted object bounding boxes on the original image. + + Args: + batched_inputs (list): a list that contains input to the model. + results (List[Instances]): a list of #images elements. + """ + from detectron2.utils.visualizer import Visualizer + + assert len(batched_inputs) == len( + results + ), "Cannot visualize inputs and results of different sizes" + storage = get_event_storage() + max_boxes = 20 + + image_index = 0 # only visualize a single image + img = batched_inputs[image_index]["image"].cpu().numpy() + assert img.shape[0] == 3, "Images should have 3 channels." 
+ if self.input_format == "BGR": + img = img[::-1, :, :] + img = img.transpose(1, 2, 0) + v_gt = Visualizer(img, None) + v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes) + anno_img = v_gt.get_image() + processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1]) + predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy() + + v_pred = Visualizer(img, None) + v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes]) + prop_img = v_pred.get_image() + vis_img = np.vstack((anno_img, prop_img)) + vis_img = vis_img.transpose(2, 0, 1) + vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results" + storage.put_image(vis_name, vis_img) + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances: Instances + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + Returns: + dict[str: Tensor]: + mapping from a named loss to a tensor storing the loss. Used during training only. + """ + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + features = [features[f] for f in self.in_features] + box_cls, box_delta = self.head(features) + anchors = self.anchor_generator(features) + + if self.training: + gt_classes, gt_anchors_reg_deltas = self.get_ground_truth(anchors, gt_instances) + losses = self.losses(gt_classes, gt_anchors_reg_deltas, box_cls, box_delta) + + if self.vis_period > 0: + storage = get_event_storage() + if storage.iter % self.vis_period == 0: + results = self.inference(box_cls, box_delta, anchors, images.image_sizes) + self.visualize_training(batched_inputs, results) + + return losses + else: + results = self.inference(box_cls, box_delta, anchors, images.image_sizes) + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + def losses(self, gt_classes, gt_anchors_deltas, pred_class_logits, pred_anchor_deltas): + """ + Args: + For `gt_classes` and `gt_anchors_deltas` parameters, see + :meth:`RetinaNet.get_ground_truth`. + Their shapes are (N, R) and (N, R, 4), respectively, where R is + the total number of anchors across levels, i.e. sum(Hi x Wi x A) + For `pred_class_logits` and `pred_anchor_deltas`, see + :meth:`RetinaNetHead.forward`. + + Returns: + dict[str, Tensor]: + mapping from a named loss to a scalar tensor + storing the loss. Used during training only. 
The dict keys are: + "loss_cls" and "loss_box_reg" + """ + pred_class_logits, pred_anchor_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat( + pred_class_logits, pred_anchor_deltas, self.num_classes + ) # Shapes: (N x R, K) and (N x R, 4), respectively. + + gt_classes = gt_classes.flatten() + gt_anchors_deltas = gt_anchors_deltas.view(-1, 4) + + valid_idxs = gt_classes >= 0 + foreground_idxs = (gt_classes >= 0) & (gt_classes != self.num_classes) + num_foreground = foreground_idxs.sum().item() + get_event_storage().put_scalar("num_foreground", num_foreground) + self.loss_normalizer = ( + self.loss_normalizer_momentum * self.loss_normalizer + + (1 - self.loss_normalizer_momentum) * num_foreground + ) + + gt_classes_target = torch.zeros_like(pred_class_logits) + gt_classes_target[foreground_idxs, gt_classes[foreground_idxs]] = 1 + + # logits loss + loss_cls = sigmoid_focal_loss_jit( + pred_class_logits[valid_idxs], + gt_classes_target[valid_idxs], + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) / max(1, self.loss_normalizer) + + # regression loss + loss_box_reg = smooth_l1_loss( + pred_anchor_deltas[foreground_idxs], + gt_anchors_deltas[foreground_idxs], + beta=self.smooth_l1_loss_beta, + reduction="sum", + ) / max(1, self.loss_normalizer) + + return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg} + + @torch.no_grad() + def get_ground_truth(self, anchors, targets): + """ + Args: + anchors (list[Boxes]): A list of #feature level Boxes. + The Boxes contains anchors of this image on the specific feature level. + targets (list[Instances]): a list of N `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + + Returns: + gt_classes (Tensor): + An integer tensor of shape (N, R) storing ground-truth labels for each anchor. + R is the total number of anchors, i.e. the sum of Hi x Wi x A for all levels. + Anchors with an IoU with some target higher than the foreground threshold + are assigned their corresponding label in the [0, K-1] range. + Anchors whose IoU are below the background threshold are assigned + the label "K". Anchors whose IoU are between the foreground and background + thresholds are assigned a label "-1", i.e. ignore. + gt_anchors_deltas (Tensor): + Shape (N, R, 4). + The last dimension represents ground-truth box2box transform + targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box. + The values in the tensor are meaningful only when the corresponding + anchor is labeled as foreground. + """ + gt_classes = [] + gt_anchors_deltas = [] + anchors = Boxes.cat(anchors) # Rx4 + + for targets_per_image in targets: + match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, anchors) + gt_matched_idxs, anchor_labels = self.matcher(match_quality_matrix) + + has_gt = len(targets_per_image) > 0 + if has_gt: + # ground truth box regression + matched_gt_boxes = targets_per_image.gt_boxes[gt_matched_idxs] + gt_anchors_reg_deltas_i = self.box2box_transform.get_deltas( + anchors.tensor, matched_gt_boxes.tensor + ) + + gt_classes_i = targets_per_image.gt_classes[gt_matched_idxs] + # Anchors with label 0 are treated as background. + gt_classes_i[anchor_labels == 0] = self.num_classes + # Anchors with label -1 are ignored. 
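+                # Net effect of these two assignments: foreground anchors keep their
+                # class index in [0, K-1], background anchors become K (num_classes),
+                # and anchors between the IoU thresholds become -1 so the loss skips them.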
+ gt_classes_i[anchor_labels == -1] = -1 + else: + gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes + gt_anchors_reg_deltas_i = torch.zeros_like(anchors.tensor) + + gt_classes.append(gt_classes_i) + gt_anchors_deltas.append(gt_anchors_reg_deltas_i) + + return torch.stack(gt_classes), torch.stack(gt_anchors_deltas) + + def inference(self, box_cls, box_delta, anchors, image_sizes): + """ + Arguments: + box_cls, box_delta: Same as the output of :meth:`RetinaNetHead.forward` + anchors (list[Boxes]): A list of #feature level Boxes. + The Boxes contain anchors of this image on the specific feature level. + image_sizes (List[torch.Size]): the input image sizes + + Returns: + results (List[Instances]): a list of #images elements. + """ + results = [] + + box_cls = [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls] + box_delta = [permute_to_N_HWA_K(x, 4) for x in box_delta] + # list[Tensor], one per level, each has shape (N, Hi x Wi x A, K or 4) + + for img_idx, image_size in enumerate(image_sizes): + box_cls_per_image = [box_cls_per_level[img_idx] for box_cls_per_level in box_cls] + box_reg_per_image = [box_reg_per_level[img_idx] for box_reg_per_level in box_delta] + results_per_image = self.inference_single_image( + box_cls_per_image, box_reg_per_image, anchors, tuple(image_size) + ) + results.append(results_per_image) + return results + + def inference_single_image(self, box_cls, box_delta, anchors, image_size): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Arguments: + box_cls (list[Tensor]): list of #feature levels. Each entry contains + tensor of size (H x W x A, K) + box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. + anchors (list[Boxes]): list of #feature levels. Each entry contains + a Boxes object, which contains all the anchors for that + image in that feature level. + image_size (tuple(H, W)): a tuple of the image height and width. + + Returns: + Same as `inference`, but for only one image. + """ + boxes_all = [] + scores_all = [] + class_idxs_all = [] + + # Iterate over every feature level + for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors): + # (HxWxAxK,) + box_cls_i = box_cls_i.flatten().sigmoid_() + + # Keep top k top scoring indices only. 
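+            # The scores were flattened over (Hi*Wi*A) anchors x K classes, so every kept
+            # index jointly encodes an anchor (idx // num_classes) and a class
+            # (idx % num_classes); both are decoded below after score thresholding.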
+ num_topk = min(self.topk_candidates, box_reg_i.size(0)) + # torch.sort is actually faster than .topk (at least on GPUs) + predicted_prob, topk_idxs = box_cls_i.sort(descending=True) + predicted_prob = predicted_prob[:num_topk] + topk_idxs = topk_idxs[:num_topk] + + # filter out the proposals with low confidence score + keep_idxs = predicted_prob > self.score_threshold + predicted_prob = predicted_prob[keep_idxs] + topk_idxs = topk_idxs[keep_idxs] + + anchor_idxs = topk_idxs // self.num_classes + classes_idxs = topk_idxs % self.num_classes + + box_reg_i = box_reg_i[anchor_idxs] + anchors_i = anchors_i[anchor_idxs] + # predict boxes + predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor) + + boxes_all.append(predicted_boxes) + scores_all.append(predicted_prob) + class_idxs_all.append(classes_idxs) + + boxes_all, scores_all, class_idxs_all = [ + cat(x) for x in [boxes_all, scores_all, class_idxs_all] + ] + keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold) + keep = keep[: self.max_detections_per_image] + + result = Instances(image_size) + result.pred_boxes = Boxes(boxes_all[keep]) + result.scores = scores_all[keep] + result.pred_classes = class_idxs_all[keep] + return result + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + return images + + +class RetinaNetHead(nn.Module): + """ + The head used in RetinaNet for object classification and box regression. + It has two subnets for the two tasks, with a common structure but separate parameters. + """ + + def __init__(self, cfg, input_shape: List[ShapeSpec]): + super().__init__() + # fmt: off + in_channels = input_shape[0].channels + num_classes = cfg.MODEL.RETINANET.NUM_CLASSES + num_convs = cfg.MODEL.RETINANET.NUM_CONVS + prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB + num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors + # fmt: on + assert ( + len(set(num_anchors)) == 1 + ), "Using different number of anchors between levels is not currently supported!" + num_anchors = num_anchors[0] + + cls_subnet = [] + bbox_subnet = [] + for _ in range(num_convs): + cls_subnet.append( + nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + ) + cls_subnet.append(nn.ReLU()) + bbox_subnet.append( + nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + ) + bbox_subnet.append(nn.ReLU()) + + self.cls_subnet = nn.Sequential(*cls_subnet) + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.cls_score = nn.Conv2d( + in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1 + ) + self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1) + + # Initialization + for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]: + for layer in modules.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + # Use prior in model initialization to improve stability + bias_value = -(math.log((1 - prior_prob) / prior_prob)) + torch.nn.init.constant_(self.cls_score.bias, bias_value) + + def forward(self, features): + """ + Arguments: + features (list[Tensor]): FPN feature map tensors in high to low resolution. 
+ Each tensor in the list correspond to different feature levels. + + Returns: + logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). + The tensor predicts the classification probability + at each spatial position for each of the A anchors and K object + classes. + bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). + The tensor predicts 4-vector (dx,dy,dw,dh) box + regression values for every anchor. These values are the + relative offset between the anchor and the ground truth box. + """ + logits = [] + bbox_reg = [] + for feature in features: + logits.append(self.cls_score(self.cls_subnet(feature))) + bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature))) + return logits, bbox_reg diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/semantic_seg.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/semantic_seg.py new file mode 100644 index 0000000000000000000000000000000000000000..2c41a7235cb9c578e2c6de5835854bdff7493616 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/meta_arch/semantic_seg.py @@ -0,0 +1,186 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +from typing import Dict +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ShapeSpec +from detectron2.structures import ImageList +from detectron2.utils.registry import Registry + +from ..backbone import build_backbone +from ..postprocessing import sem_seg_postprocess +from .build import META_ARCH_REGISTRY + +__all__ = ["SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead", "build_sem_seg_head"] + + +SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") +SEM_SEG_HEADS_REGISTRY.__doc__ = """ +Registry for semantic segmentation heads, which make semantic segmentation predictions +from feature maps. +""" + + +@META_ARCH_REGISTRY.register() +class SemanticSegmentor(nn.Module): + """ + Main class for semantic segmentation architectures. + """ + + def __init__(self, cfg): + super().__init__() + self.backbone = build_backbone(cfg) + self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape()) + self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + + * "image": Tensor, image in (C, H, W) format. + * "sem_seg": semantic segmentation ground truth + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "sem_seg" whose value is a + Tensor that represents the + per-pixel segmentation prediced by the head. + The prediction has shape KxHxW that represents the logits of + each class for each pixel. 
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + + features = self.backbone(images.tensor) + + if "sem_seg" in batched_inputs[0]: + targets = [x["sem_seg"].to(self.device) for x in batched_inputs] + targets = ImageList.from_tensors( + targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value + ).tensor + else: + targets = None + results, losses = self.sem_seg_head(features, targets) + + if self.training: + return losses + + processed_results = [] + for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): + height = input_per_image.get("height") + width = input_per_image.get("width") + r = sem_seg_postprocess(result, image_size, height, width) + processed_results.append({"sem_seg": r}) + return processed_results + + +def build_sem_seg_head(cfg, input_shape): + """ + Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. + """ + name = cfg.MODEL.SEM_SEG_HEAD.NAME + return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +@SEM_SEG_HEADS_REGISTRY.register() +class SemSegFPNHead(nn.Module): + """ + A semantic segmentation head described in :paper:`PanopticFPN`. + It takes FPN features as input and merges information from all + levels of the FPN into single output. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + # fmt: off + self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + feature_strides = {k: v.stride for k, v in input_shape.items()} + feature_channels = {k: v.channels for k, v in input_shape.items()} + self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE + num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES + conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE + norm = cfg.MODEL.SEM_SEG_HEAD.NORM + self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT + # fmt: on + + self.scale_heads = [] + for in_feature in self.in_features: + head_ops = [] + head_length = max( + 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)) + ) + for k in range(head_length): + norm_module = nn.GroupNorm(32, conv_dims) if norm == "GN" else None + conv = Conv2d( + feature_channels[in_feature] if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not norm, + norm=norm_module, + activation=F.relu, + ) + weight_init.c2_msra_fill(conv) + head_ops.append(conv) + if feature_strides[in_feature] != self.common_stride: + head_ops.append( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) + ) + self.scale_heads.append(nn.Sequential(*head_ops)) + self.add_module(in_feature, self.scale_heads[-1]) + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + weight_init.c2_msra_fill(self.predictor) + + def forward(self, features, targets=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + x = self.layers(features) + if self.training: + return None, self.losses(x, targets) + else: + x = F.interpolate( + x, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return x, {} + + def layers(self, features): + for i, f in enumerate(self.in_features): + if i == 0: + x = self.scale_heads[i](features[f]) + else: + x = x + self.scale_heads[i](features[f]) + x = self.predictor(x) + return x + + def losses(self, 
predictions, targets): + predictions = F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + loss = F.cross_entropy( + predictions, targets, reduction="mean", ignore_index=self.ignore_value + ) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/poolers.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/poolers.py new file mode 100644 index 0000000000000000000000000000000000000000..678f5afc5680e6bdc9931f0449e2ab334a3a5369 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/poolers.py @@ -0,0 +1,231 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +import math +import sys +import torch +from torch import nn +from torchvision.ops import RoIPool + +from detectron2.layers import ROIAlign, ROIAlignRotated, cat + +__all__ = ["ROIPooler"] + + +def assign_boxes_to_levels(box_lists, min_level, max_level, canonical_box_size, canonical_level): + """ + Map each box in `box_lists` to a feature map level index and return the assignment + vector. + + Args: + box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, + where N is the number of images in the batch. + min_level (int): Smallest feature map level index. The input is considered index 0, + the output of stage 1 is index 1, and so. + max_level (int): Largest feature map level index. + canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). + canonical_level (int): The feature map level index on which a canonically-sized box + should be placed. + + Returns: + A tensor of length M, where M is the total number of boxes aggregated over all + N batch images. The memory layout corresponds to the concatenation of boxes + from all images. Each element is the feature map index, as an offset from + `self.min_level`, for the corresponding box (so value i means the box is at + `self.min_level + i`). + """ + eps = sys.float_info.epsilon + box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists])) + # Eqn.(1) in FPN paper + level_assignments = torch.floor( + canonical_level + torch.log2(box_sizes / canonical_box_size + eps) + ) + # clamp level to (min, max), in case the box size is too large or too small + # for the available feature maps + level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) + return level_assignments.to(torch.int64) - min_level + + +def convert_boxes_to_pooler_format(box_lists): + """ + Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops + (see description under Returns). + + Args: + box_lists (list[Boxes] | list[RotatedBoxes]): + A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. + + Returns: + When input is list[Boxes]: + A tensor of shape (M, 5), where M is the total number of boxes aggregated over all + N batch images. + The 5 columns are (batch index, x0, y0, x1, y1), where batch index + is the index in [0, N) identifying which batch image the box with corners at + (x0, y0, x1, y1) comes from. + When input is list[RotatedBoxes]: + A tensor of shape (M, 6), where M is the total number of boxes aggregated over all + N batch images. + The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees), + where batch index is the index in [0, N) identifying which batch image the + rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from. 
+ """ + + def fmt_box_list(box_tensor, batch_index): + repeated_index = torch.full( + (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device + ) + return cat((repeated_index, box_tensor), dim=1) + + pooler_fmt_boxes = cat( + [fmt_box_list(box_list.tensor, i) for i, box_list in enumerate(box_lists)], dim=0 + ) + + return pooler_fmt_boxes + + +class ROIPooler(nn.Module): + """ + Region of interest feature map pooler that supports pooling from one or more + feature maps. + """ + + def __init__( + self, + output_size, + scales, + sampling_ratio, + pooler_type, + canonical_box_size=224, + canonical_level=4, + ): + """ + Args: + output_size (int, tuple[int] or list[int]): output size of the pooled region, + e.g., 14 x 14. If tuple or list is given, the length must be 2. + scales (list[float]): The scale for each low-level pooling op relative to + the input image. For a feature map with stride s relative to the input + image, scale is defined as a 1 / s. The stride must be power of 2. + When there are multiple scales, they must form a pyramid, i.e. they must be + a monotically decreasing geometric sequence with a factor of 1/2. + sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. + pooler_type (string): Name of the type of pooling operation that should be applied. + For instance, "ROIPool" or "ROIAlignV2". + canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default + is heuristically defined as 224 pixels in the FPN paper (based on ImageNet + pre-training). + canonical_level (int): The feature map level index from which a canonically-sized box + should be placed. The default is defined as level 4 (stride=16) in the FPN paper, + i.e., a box of size 224x224 will be placed on the feature with stride=16. + The box placement for all boxes will be determined from their sizes w.r.t + canonical_box_size. For example, a box whose area is 4x that of a canonical box + should be used to pool features from feature level ``canonical_level+1``. + + Note that the actual input feature maps given to this module may not have + sufficiently many levels for the input boxes. If the boxes are too large or too + small for the input feature maps, the closest level will be used. + """ + super().__init__() + + if isinstance(output_size, int): + output_size = (output_size, output_size) + assert len(output_size) == 2 + assert isinstance(output_size[0], int) and isinstance(output_size[1], int) + self.output_size = output_size + + if pooler_type == "ROIAlign": + self.level_poolers = nn.ModuleList( + ROIAlign( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False + ) + for scale in scales + ) + elif pooler_type == "ROIAlignV2": + self.level_poolers = nn.ModuleList( + ROIAlign( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True + ) + for scale in scales + ) + elif pooler_type == "ROIPool": + self.level_poolers = nn.ModuleList( + RoIPool(output_size, spatial_scale=scale) for scale in scales + ) + elif pooler_type == "ROIAlignRotated": + self.level_poolers = nn.ModuleList( + ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) + for scale in scales + ) + else: + raise ValueError("Unknown pooler type: {}".format(pooler_type)) + + # Map scale (defined as 1 / stride) to its feature map level under the + # assumption that stride is a power of 2. 
+ min_level = -(math.log2(scales[0])) + max_level = -(math.log2(scales[-1])) + assert math.isclose(min_level, int(min_level)) and math.isclose( + max_level, int(max_level) + ), "Featuremap stride is not power of 2!" + self.min_level = int(min_level) + self.max_level = int(max_level) + assert ( + len(scales) == self.max_level - self.min_level + 1 + ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!" + assert 0 < self.min_level and self.min_level <= self.max_level + self.canonical_level = canonical_level + assert canonical_box_size > 0 + self.canonical_box_size = canonical_box_size + + def forward(self, x, box_lists): + """ + Args: + x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those + used to construct this module. + box_lists (list[Boxes] | list[RotatedBoxes]): + A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. + The box coordinates are defined on the original image and + will be scaled by the `scales` argument of :class:`ROIPooler`. + + Returns: + Tensor: + A tensor of shape (M, C, output_size, output_size) where M is the total number of + boxes aggregated over all N batch images and C is the number of channels in `x`. + """ + num_level_assignments = len(self.level_poolers) + + assert isinstance(x, list) and isinstance( + box_lists, list + ), "Arguments to pooler must be lists" + assert ( + len(x) == num_level_assignments + ), "unequal value, num_level_assignments={}, but x is list of {} Tensors".format( + num_level_assignments, len(x) + ) + + assert len(box_lists) == x[0].size( + 0 + ), "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format( + x[0].size(0), len(box_lists) + ) + + pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists) + + if num_level_assignments == 1: + return self.level_poolers[0](x[0], pooler_fmt_boxes) + + level_assignments = assign_boxes_to_levels( + box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level + ) + + num_boxes = len(pooler_fmt_boxes) + num_channels = x[0].shape[1] + output_size = self.output_size[0] + + dtype, device = x[0].dtype, x[0].device + output = torch.zeros( + (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device + ) + + for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)): + inds = torch.nonzero(level_assignments == level, as_tuple=True)[0] + pooler_fmt_boxes_level = pooler_fmt_boxes[inds] + output[inds] = pooler(x_level, pooler_fmt_boxes_level) + + return output diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/postprocessing.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/postprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..e85541ff2e25568cdb9c73702f6c9e68a23f6e4c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/postprocessing.py @@ -0,0 +1,79 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from torch.nn import functional as F + +from detectron2.layers import paste_masks_in_image +from detectron2.structures import Instances +from detectron2.utils.memory import retry_if_cuda_oom + + +def detector_postprocess(results, output_height, output_width, mask_threshold=0.5): + """ + Resize the output instances. + The input images are often resized when entering an object detector. + As a result, we often need the outputs of the detector in a different + resolution from its inputs. 
+ + This function will resize the raw outputs of an R-CNN detector + to produce outputs according to the desired output resolution. + + Args: + results (Instances): the raw outputs from the detector. + `results.image_size` contains the input image resolution the detector sees. + This object might be modified in-place. + output_height, output_width: the desired output resolution. + + Returns: + Instances: the resized output from the model, based on the output resolution + """ + scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0]) + results = Instances((output_height, output_width), **results.get_fields()) + + if results.has("pred_boxes"): + output_boxes = results.pred_boxes + elif results.has("proposal_boxes"): + output_boxes = results.proposal_boxes + + output_boxes.scale(scale_x, scale_y) + output_boxes.clip(results.image_size) + + results = results[output_boxes.nonempty()] + + if results.has("pred_masks"): + results.pred_masks = retry_if_cuda_oom(paste_masks_in_image)( + results.pred_masks[:, 0, :, :], # N, 1, M, M + results.pred_boxes, + results.image_size, + threshold=mask_threshold, + ) + + if results.has("pred_keypoints"): + results.pred_keypoints[:, :, 0] *= scale_x + results.pred_keypoints[:, :, 1] *= scale_y + + return results + + +def sem_seg_postprocess(result, img_size, output_height, output_width): + """ + Return semantic segmentation predictions in the original resolution. + + The input images are often resized when entering semantic segmentor. Moreover, in same + cases, they also padded inside segmentor to be divisible by maximum network stride. + As a result, we often need the predictions of the segmentor in a different + resolution from its inputs. + + Args: + result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W), + where C is the number of classes, and H, W are the height and width of the prediction. + img_size (tuple): image size that segmentor is taking as input. + output_height, output_width: the desired output resolution. + + Returns: + semantic segmentation prediction (Tensor): A tensor of the shape + (C, output_height, output_width) that contains per-pixel soft predictions. + """ + result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) + result = F.interpolate( + result, size=(output_height, output_width), mode="bilinear", align_corners=False + )[0] + return result diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..64fb6d46359c05ed3d7aa1ec91fdd6e15b14c932 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator +from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/build.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/build.py new file mode 100644 index 0000000000000000000000000000000000000000..7f252bcb982032cd09270c44741772a34ef32277 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/build.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from detectron2.utils.registry import Registry + +PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") +PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ +Registry for proposal generator, which produces object proposals from feature maps. + +The registered object will be called with `obj(cfg, input_shape)`. +The call should return a `nn.Module` object. +""" + +from . import rpn, rrpn # noqa F401 isort:skip + + +def build_proposal_generator(cfg, input_shape): + """ + Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. + The name can be "PrecomputedProposals" to use no proposal generator. + """ + name = cfg.MODEL.PROPOSAL_GENERATOR.NAME + if name == "PrecomputedProposals": + return None + + return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/proposal_utils.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/proposal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d4af90525ba07eb8d313460ee2c3f468fe367cff --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/proposal_utils.py @@ -0,0 +1,57 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import torch + +from detectron2.structures import Instances + + +def add_ground_truth_to_proposals(gt_boxes, proposals): + """ + Call `add_ground_truth_to_proposals_single_image` for all images. + + Args: + gt_boxes(list[Boxes]): list of N elements. Element i is a Boxes + representing the gound-truth for image i. + proposals (list[Instances]): list of N elements. Element i is a Instances + representing the proposals for image i. + + Returns: + list[Instances]: list of N Instances. Each is the proposals for the image, + with field "proposal_boxes" and "objectness_logits". + """ + assert gt_boxes is not None + + assert len(proposals) == len(gt_boxes) + if len(proposals) == 0: + return proposals + + return [ + add_ground_truth_to_proposals_single_image(gt_boxes_i, proposals_i) + for gt_boxes_i, proposals_i in zip(gt_boxes, proposals) + ] + + +def add_ground_truth_to_proposals_single_image(gt_boxes, proposals): + """ + Augment `proposals` with ground-truth boxes from `gt_boxes`. + + Args: + Same as `add_ground_truth_to_proposals`, but with gt_boxes and proposals + per image. + + Returns: + Same as `add_ground_truth_to_proposals`, but for only one image. + """ + device = proposals.objectness_logits.device + # Concatenating gt_boxes with proposals requires them to have the same fields + # Assign all ground-truth boxes an objectness logit corresponding to P(object) \approx 1. + gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10))) + + gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device) + gt_proposal = Instances(proposals.image_size) + + gt_proposal.proposal_boxes = gt_boxes + gt_proposal.objectness_logits = gt_logits + new_proposals = Instances.cat([proposals, gt_proposal]) + + return new_proposals diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rpn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rpn.py new file mode 100644 index 0000000000000000000000000000000000000000..8eb93b8e6ecf9f14d5b8de5a7e1d2b1560bcacfd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rpn.py @@ -0,0 +1,285 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from typing import Dict, List +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, Instances, pairwise_iou +from detectron2.utils.memory import retry_if_cuda_oom +from detectron2.utils.registry import Registry + +from ..anchor_generator import build_anchor_generator +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from ..sampling import subsample_labels +from .build import PROPOSAL_GENERATOR_REGISTRY +from .rpn_outputs import RPNOutputs, find_top_rpn_proposals + +RPN_HEAD_REGISTRY = Registry("RPN_HEAD") +RPN_HEAD_REGISTRY.__doc__ = """ +Registry for RPN heads, which take feature maps and perform +objectness classification and bounding box regression for anchors. + +The registered object will be called with `obj(cfg, input_shape)`. +The call should return a `nn.Module` object. +""" + + +def build_rpn_head(cfg, input_shape): + """ + Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`. + """ + name = cfg.MODEL.RPN.HEAD_NAME + return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +@RPN_HEAD_REGISTRY.register() +class StandardRPNHead(nn.Module): + """ + Standard RPN classification and regression heads described in :paper:`Faster R-CNN`. + Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts + objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas + specifying how to deform each anchor into an object proposal. + """ + + @configurable + def __init__(self, *, in_channels: int, num_anchors: int, box_dim: int = 4): + """ + NOTE: this interface is experimental. + + Args: + in_channels (int): number of input feature channels. When using multiple + input features, they must have the same number of channels. + num_anchors (int): number of anchors to predict for *each spatial position* + on the feature map. The total number of anchors for each + feature map will be `num_anchors * H * W`. + box_dim (int): dimension of a box, which is also the number of box regression + predictions to make for each anchor. An axis aligned box has + box_dim=4, while a rotated box has box_dim=5. + """ + super().__init__() + # 3x3 conv for the hidden representation + self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + # 1x1 conv for predicting objectness logits + self.objectness_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1) + # 1x1 conv for predicting box2box transform deltas + self.anchor_deltas = nn.Conv2d(in_channels, num_anchors * box_dim, kernel_size=1, stride=1) + + for l in [self.conv, self.objectness_logits, self.anchor_deltas]: + nn.init.normal_(l.weight, std=0.01) + nn.init.constant_(l.bias, 0) + + @classmethod + def from_config(cls, cfg, input_shape): + # Standard RPN is shared across levels: + in_channels = [s.channels for s in input_shape] + assert len(set(in_channels)) == 1, "Each level must have the same channel!" + in_channels = in_channels[0] + + # RPNHead should take the same input as anchor generator + # NOTE: it assumes that creating an anchor generator does not have unwanted side effect. 
+ anchor_generator = build_anchor_generator(cfg, input_shape) + num_anchors = anchor_generator.num_anchors + box_dim = anchor_generator.box_dim + assert ( + len(set(num_anchors)) == 1 + ), "Each level must have the same number of anchors per spatial position" + return {"in_channels": in_channels, "num_anchors": num_anchors[0], "box_dim": box_dim} + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of feature maps + + Returns: + list[Tensor]: A list of L elements. + Element i is a tensor of shape (N, A, Hi, Wi) representing + the predicted objectness logits for all anchors. A is the number of cell anchors. + list[Tensor]: A list of L elements. Element i is a tensor of shape + (N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors + to proposals. + """ + pred_objectness_logits = [] + pred_anchor_deltas = [] + for x in features: + t = F.relu(self.conv(x)) + pred_objectness_logits.append(self.objectness_logits(t)) + pred_anchor_deltas.append(self.anchor_deltas(t)) + return pred_objectness_logits, pred_anchor_deltas + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RPN(nn.Module): + """ + Region Proposal Network, introduced by :paper:`Faster R-CNN`. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + # fmt: off + self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE + self.in_features = cfg.MODEL.RPN.IN_FEATURES + self.nms_thresh = cfg.MODEL.RPN.NMS_THRESH + self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE + self.positive_fraction = cfg.MODEL.RPN.POSITIVE_FRACTION + self.smooth_l1_beta = cfg.MODEL.RPN.SMOOTH_L1_BETA + self.loss_weight = cfg.MODEL.RPN.LOSS_WEIGHT + # fmt: on + + # Map from self.training state to train/test settings + self.pre_nms_topk = { + True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, + False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST, + } + self.post_nms_topk = { + True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, + False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST, + } + self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH + + self.anchor_generator = build_anchor_generator( + cfg, [input_shape[f] for f in self.in_features] + ) + self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + self.anchor_matcher = Matcher( + cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True + ) + self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features]) + + def _subsample_labels(self, label): + """ + Randomly sample a subset of positive and negative examples, and overwrite + the label vector to the ignore value (-1) for all elements that are not + included in the sample. + + Args: + labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. + """ + pos_idx, neg_idx = subsample_labels( + label, self.batch_size_per_image, self.positive_fraction, 0 + ) + # Fill with the ignore label (-1), then set positive and negative labels + label.fill_(-1) + label.scatter_(0, pos_idx, 1) + label.scatter_(0, neg_idx, 0) + return label + + @torch.no_grad() + def label_and_sample_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]): + """ + Args: + anchors (list[Boxes]): anchors for each feature map. + gt_instances: the ground-truth instances for each image. + + Returns: + list[Tensor]: + List of #demo tensors. i-th element is a vector of labels whose length is + the total number of anchors across feature maps. Label values are in {-1, 0, 1}, + with meanings: -1 = ignore; 0 = negative class; 1 = positive class. 
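# --- Illustrative, standalone sketch (hypothetical name, dummy values, plain torch)
# of the subsampling that _subsample_labels above performs on the {-1, 0, 1} label
# vector: keep at most num_samples anchors per image, at most positive_fraction of
# them positive, and mark every other anchor as ignored (-1).
import torch

def subsample_labels_sketch(labels, num_samples=256, positive_fraction=0.5):
    positive = torch.nonzero(labels == 1, as_tuple=True)[0]
    negative = torch.nonzero(labels == 0, as_tuple=True)[0]

    num_pos = min(positive.numel(), int(num_samples * positive_fraction))
    num_neg = min(negative.numel(), num_samples - num_pos)

    pos_idx = positive[torch.randperm(positive.numel())[:num_pos]]
    neg_idx = negative[torch.randperm(negative.numel())[:num_neg]]

    out = torch.full_like(labels, -1)  # start from "ignore everything"
    out[pos_idx] = 1
    out[neg_idx] = 0
    return out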
+ list[Tensor]: + i-th element is a Nx4 tensor, where N is the total number of anchors across + feature maps. The values are the matched gt boxes for each anchor. + Values are undefined for those anchors not labeled as 1. + """ + anchors = Boxes.cat(anchors) + + gt_boxes = [x.gt_boxes for x in gt_instances] + image_sizes = [x.image_size for x in gt_instances] + del gt_instances + + gt_labels = [] + matched_gt_boxes = [] + for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes): + """ + image_size_i: (h, w) for the i-th image + gt_boxes_i: ground-truth boxes for i-th image + """ + + match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors) + matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) + # Matching is memory-expensive and may result in CPU tensors. But the result is small + gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) + del match_quality_matrix + + if self.boundary_threshold >= 0: + # Discard anchors that go out of the boundaries of the image + # NOTE: This is legacy functionality that is turned off by default in Detectron2 + anchors_inside_image = anchors.inside_box(image_size_i, self.boundary_threshold) + gt_labels_i[~anchors_inside_image] = -1 + + # A vector of labels (-1, 0, 1) for each anchor + gt_labels_i = self._subsample_labels(gt_labels_i) + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + else: + # TODO wasted indexing computation for ignored boxes + matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor + + gt_labels.append(gt_labels_i) # N,AHW + matched_gt_boxes.append(matched_gt_boxes_i) + return gt_labels, matched_gt_boxes + + def forward(self, images, features, gt_instances=None): + """ + Args: + images (ImageList): input images of length `N` + features (dict[str: Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + gt_instances (list[Instances], optional): a length `N` list of `Instances`s. + Each `Instances` stores ground-truth instances for the corresponding image. + + Returns: + proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits" + loss: dict[Tensor] or None + """ + features = [features[f] for f in self.in_features] + pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) + anchors = self.anchor_generator(features) + + if self.training: + gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances) + else: + gt_labels, gt_boxes = None, None + + outputs = RPNOutputs( + self.box2box_transform, + self.batch_size_per_image, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + gt_labels, + gt_boxes, + self.smooth_l1_beta, + ) + + if self.training: + losses = {k: v * self.loss_weight for k, v in outputs.losses().items()} + else: + losses = {} + + with torch.no_grad(): + # Find the top proposals by applying NMS and removing boxes that + # are too small. The proposals are treated as fixed for approximate + # joint training with roi heads. This approach ignores the derivative + # w.r.t. the proposal boxesโ€™ coordinates that are also network + # responses, so is approximate. 
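# --- Illustrative, standalone sketch (hypothetical name; torch + torchvision
# assumed; single image, single level) of the selection that find_top_rpn_proposals
# performs below: take the pre-NMS top-k by objectness, clip boxes to the image,
# drop tiny boxes, run NMS, and keep the post-NMS top-k.
import torch
from torchvision.ops import nms

def select_top_proposals_sketch(boxes, logits, image_hw, pre_nms_topk=1000,
                                post_nms_topk=100, nms_thresh=0.7, min_size=0.0):
    # boxes: (R, 4) XYXY proposals, logits: (R,) objectness scores for one image
    logits, idx = logits.sort(descending=True)
    k = min(pre_nms_topk, boxes.shape[0])
    boxes, logits = boxes[idx[:k]], logits[:k]

    h, w = image_hw
    boxes[:, 0::2] = boxes[:, 0::2].clamp(min=0, max=w)  # clip x coordinates
    boxes[:, 1::2] = boxes[:, 1::2].clamp(min=0, max=h)  # clip y coordinates

    nonempty = ((boxes[:, 2] - boxes[:, 0]) > min_size) & ((boxes[:, 3] - boxes[:, 1]) > min_size)
    boxes, logits = boxes[nonempty], logits[nonempty]

    keep = nms(boxes, logits, nms_thresh)[:post_nms_topk]
    return boxes[keep], logits[keep]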
+ proposals = find_top_rpn_proposals( + outputs.predict_proposals(), + outputs.predict_objectness_logits(), + images, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_side_len, + self.training, + ) + + return proposals, losses diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rpn_outputs.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rpn_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..44f846f18b30d846d1d87faf7f2aa3b10c2333b8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rpn_outputs.py @@ -0,0 +1,323 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import itertools +import logging +import torch +import torch.nn.functional as F +from fvcore.nn import smooth_l1_loss + +from detectron2.layers import batched_nms, cat +from detectron2.structures import Boxes, Instances +from detectron2.utils.events import get_event_storage + +logger = logging.getLogger(__name__) + +# TODO: comments for future refactoring of this module +# +# From @rbg: +# This code involves a significant amount of tensor reshaping and permuting. Look for +# ways to simplify this. + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + L: number of feature maps per image on which RPN is run + A: number of cell anchors (must be the same for all feature maps) + Hi, Wi: height and width of the i-th feature map + 4: size of the box parameterization + +Naming convention: + + objectness: refers to the binary classification of an anchor as object vs. not + object. + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`). + + pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use + sigmoid(pred_objectness_logits) to estimate P(object). + + gt_labels: ground-truth binary classification labels for objectness + + pred_anchor_deltas: predicted box2box transform deltas + + gt_anchor_deltas: ground-truth box2box transform deltas +""" + + +def find_top_rpn_proposals( + proposals, + pred_objectness_logits, + images, + nms_thresh, + pre_nms_topk, + post_nms_topk, + min_box_side_len, + training, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` + highest scoring proposals among all the feature maps if `training` is True, + otherwise, returns the highest `post_nms_topk` scoring proposals for each + feature map. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + images (ImageList): Input images as an :class:`ImageList`. + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_side_len (float): minimum proposal box side length in pixels (absolute units + wrt input images). 
+ training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i, sorted by their + objectness score in descending order. + """ + image_sizes = images.image_sizes # in (h, w) order + num_images = len(image_sizes) + device = proposals[0].device + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = torch.arange(num_images, device=device) + for level_id, proposals_i, logits_i in zip( + itertools.count(), proposals, pred_objectness_logits + ): + Hi_Wi_A = logits_i.shape[1] + num_proposals_i = min(pre_nms_topk, Hi_Wi_A) + + # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) + # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + logits_i, idx = logits_i.sort(descending=True, dim=1) + topk_scores_i = logits_i[batch_idx, :num_proposals_i] + topk_idx = idx[batch_idx, :num_proposals_i] + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. + results = [] + for n, image_size in enumerate(image_sizes): + boxes = Boxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + lvl = level_ids + + valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) + if not valid_mask.all(): + if training: + raise FloatingPointError( + "Predicted boxes or scores contain Inf/NaN. Training has diverged." + ) + boxes = boxes[valid_mask] + scores_per_img = scores_per_img[valid_mask] + lvl = lvl[valid_mask] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_side_len) + if keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep] + + keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. + # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. + keep = keep[:post_nms_topk] # keep is already sorted + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +def rpn_losses( + gt_labels, gt_anchor_deltas, pred_objectness_logits, pred_anchor_deltas, smooth_l1_beta +): + """ + Args: + gt_labels (Tensor): shape (N,), each element in {-1, 0, 1} representing + ground-truth objectness labels with: -1 = ignore; 0 = not object; 1 = object. 
+ gt_anchor_deltas (Tensor): shape (N, box_dim), row i represents ground-truth + box2box transform targets (dx, dy, dw, dh) or (dx, dy, dw, dh, da) that map anchor i to + its matched ground-truth box. + pred_objectness_logits (Tensor): shape (N,), each element is a predicted objectness + logit. + pred_anchor_deltas (Tensor): shape (N, box_dim), each row is a predicted box2box + transform (dx, dy, dw, dh) or (dx, dy, dw, dh, da) + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + + Returns: + objectness_loss, localization_loss, both unnormalized (summed over samples). + """ + pos_masks = gt_labels == 1 + localization_loss = smooth_l1_loss( + pred_anchor_deltas[pos_masks], gt_anchor_deltas[pos_masks], smooth_l1_beta, reduction="sum" + ) + + valid_masks = gt_labels >= 0 + objectness_loss = F.binary_cross_entropy_with_logits( + pred_objectness_logits[valid_masks], + gt_labels[valid_masks].to(torch.float32), + reduction="sum", + ) + return objectness_loss, localization_loss + + +class RPNOutputs(object): + def __init__( + self, + box2box_transform, + batch_size_per_image, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + gt_labels=None, + gt_boxes=None, + smooth_l1_beta=0.0, + ): + """ + Args: + box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for + anchor-proposal transformations. + images (ImageList): :class:`ImageList` instance representing N input images + batch_size_per_image (int): number of proposals to sample when training + pred_objectness_logits (list[Tensor]): A list of L elements. + Element i is a tensor of shape (N, A, Hi, Wi) representing + the predicted objectness logits for anchors. + pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape + (N, A*4 or 5, Hi, Wi) representing the predicted "deltas" used to transform anchors + to proposals. + anchors (list[Boxes or RotatedBoxes]): A list of Boxes/RotatedBoxes storing the all + the anchors for each feature map. See :meth:`AnchorGenerator.forward`. + gt_labels (list[Tensor]): Available on in training. + See :meth:`RPN.label_and_sample_anchors`. + gt_boxes (list[Boxes or RotatedBoxes]): Available on in training. + See :meth:`RPN.label_and_sample_anchors`. + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + """ + self.box2box_transform = box2box_transform + self.batch_size_per_image = batch_size_per_image + + B = anchors[0].tensor.size(1) # box dimension (4 or 5) + self.pred_objectness_logits = [ + # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) + score.permute(0, 2, 3, 1).flatten(1) + for score in pred_objectness_logits + ] + + self.pred_anchor_deltas = [ + # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) + # -> (N, Hi*Wi*A, B) + x.view(x.shape[0], -1, B, x.shape[-2], x.shape[-1]) + .permute(0, 3, 4, 1, 2) + .flatten(1, -2) + for x in pred_anchor_deltas + ] + + self.anchors = anchors + + self.gt_boxes = gt_boxes + self.gt_labels = gt_labels + + self.num_images = len(images) + self.smooth_l1_beta = smooth_l1_beta + + def losses(self): + """ + Return the losses from a set of RPN predictions and their associated ground-truth. + + Returns: + dict[loss name -> loss value]: A dict mapping from loss name to loss value. 
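# --- Standalone numeric sketch (dummy values, plain torch) of the two terms that
# rpn_losses above computes; RPNOutputs.losses below divides both by
# batch_size_per_image * num_images.
import torch
import torch.nn.functional as F

gt_labels = torch.tensor([1.0, 0.0, -1.0, 1.0])   # -1 anchors are ignored entirely
pred_logits = torch.tensor([2.0, -1.0, 0.3, 0.5])
gt_deltas = torch.zeros(4, 4)
pred_deltas = torch.full((4, 4), 0.1)

pos, valid = gt_labels == 1, gt_labels >= 0
loss_loc = F.smooth_l1_loss(pred_deltas[pos], gt_deltas[pos], beta=0.0, reduction="sum")
loss_cls = F.binary_cross_entropy_with_logits(pred_logits[valid], gt_labels[valid], reduction="sum")

normalizer = 256 * 1  # batch_size_per_image * num_images
print(loss_cls / normalizer, loss_loc / normalizer)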
+ Loss names are: `loss_rpn_cls` for objectness classification and + `loss_rpn_loc` for proposal localization. + """ + gt_labels = torch.stack(self.gt_labels) + anchors = self.anchors[0].cat(self.anchors).tensor # Ax(4 or 5) + gt_anchor_deltas = [self.box2box_transform.get_deltas(anchors, k) for k in self.gt_boxes] + gt_anchor_deltas = torch.stack(gt_anchor_deltas) + + # Log the number of positive/negative anchors per-image that's used in training + num_pos_anchors = (gt_labels == 1).sum().item() + num_neg_anchors = (gt_labels == 0).sum().item() + storage = get_event_storage() + storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / self.num_images) + storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / self.num_images) + + objectness_loss, localization_loss = rpn_losses( + gt_labels, + gt_anchor_deltas, + # concat on the Hi*Wi*A dimension + cat(self.pred_objectness_logits, dim=1), + cat(self.pred_anchor_deltas, dim=1), + self.smooth_l1_beta, + ) + normalizer = self.batch_size_per_image * self.num_images + return { + "loss_rpn_cls": objectness_loss / normalizer, + "loss_rpn_loc": localization_loss / normalizer, + } + + def predict_proposals(self): + """ + Transform anchors into proposals by applying the predicted anchor deltas. + + Returns: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape + (N, Hi*Wi*A, B), where B is box dimension (4 or 5). + """ + proposals = [] + # For each feature map + for anchors_i, pred_anchor_deltas_i in zip(self.anchors, self.pred_anchor_deltas): + B = anchors_i.tensor.size(1) + N = self.num_images + pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B) + # Expand anchors to shape (N*Hi*Wi*A, B) + anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B) + proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) + # Append feature map proposals with shape (N, Hi*Wi*A, B) + proposals.append(proposals_i.view(N, -1, B)) + return proposals + + def predict_objectness_logits(self): + """ + Return objectness logits in the same format as the proposals returned by + :meth:`predict_proposals`. + + Returns: + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape + (N, Hi*Wi*A). + """ + return self.pred_objectness_logits diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rrpn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rrpn.py new file mode 100644 index 0000000000000000000000000000000000000000..8c2ac366face34a12af63c9f13e6dbb14f59bf04 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/proposal_generator/rrpn.py @@ -0,0 +1,233 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import itertools +import logging +from typing import Dict, List +import torch + +from detectron2.layers import ShapeSpec, batched_nms_rotated, cat +from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated +from detectron2.utils.memory import retry_if_cuda_oom + +from ..box_regression import Box2BoxTransformRotated +from .build import PROPOSAL_GENERATOR_REGISTRY +from .rpn import RPN +from .rpn_outputs import RPNOutputs + +logger = logging.getLogger(__name__) + + +def find_top_rrpn_proposals( + proposals, + pred_objectness_logits, + images, + nms_thresh, + pre_nms_topk, + post_nms_topk, + min_box_side_len, + training, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. 
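# --- Standalone sketch (hypothetical function, plain torch, unit regression weights)
# of the box2box transform that predict_proposals above applies: the predicted
# deltas (dx, dy, dw, dh) shift and rescale an XYXY anchor into a proposal, as in
# Faster R-CNN.
import torch

def apply_deltas_sketch(deltas, anchors):
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    ctr_x = anchors[:, 0] + 0.5 * widths
    ctr_y = anchors[:, 1] + 0.5 * heights

    dx, dy, dw, dh = deltas.unbind(dim=1)
    pred_ctr_x = dx * widths + ctr_x
    pred_ctr_y = dy * heights + ctr_y
    pred_w = torch.exp(dw) * widths
    pred_h = torch.exp(dh) * heights

    return torch.stack([pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h,
                        pred_ctr_x + 0.5 * pred_w, pred_ctr_y + 0.5 * pred_h], dim=1)

# a dx of 0.1 moves a 10px-wide anchor one pixel to the right:
# apply_deltas_sketch(torch.tensor([[0.1, 0.0, 0.0, 0.0]]),
#                     torch.tensor([[0.0, 0.0, 10.0, 10.0]])) -> [[1., 0., 11., 10.]]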
Return the `post_nms_topk` + highest scoring proposals among all the feature maps if `training` is True, + otherwise, returns the highest `post_nms_topk` scoring proposals for each + feature map. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + images (ImageList): Input images as an :class:`ImageList`. + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_side_len (float): minimum proposal box side length in pixels (absolute units + wrt input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i. + """ + image_sizes = images.image_sizes # in (h, w) order + num_images = len(image_sizes) + device = proposals[0].device + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = torch.arange(num_images, device=device) + for level_id, proposals_i, logits_i in zip( + itertools.count(), proposals, pred_objectness_logits + ): + Hi_Wi_A = logits_i.shape[1] + num_proposals_i = min(pre_nms_topk, Hi_Wi_A) + + # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) + # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + logits_i, idx = logits_i.sort(descending=True, dim=1) + topk_scores_i = logits_i[batch_idx, :num_proposals_i] + topk_idx = idx[batch_idx, :num_proposals_i] + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. + results = [] + for n, image_size in enumerate(image_sizes): + boxes = RotatedBoxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) + if not valid_mask.all(): + boxes = boxes[valid_mask] + scores_per_img = scores_per_img[valid_mask] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_side_len) + lvl = level_ids + if keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep]) + + keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. 
+ # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. + keep = keep[:post_nms_topk] + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RRPN(RPN): + """ + Rotated Region Proposal Network described in :paper:`RRPN`. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__(cfg, input_shape) + self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + if self.boundary_threshold >= 0: + raise NotImplementedError( + "boundary_threshold is a legacy option not implemented for RRPN." + ) + + @torch.no_grad() + def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]): + """ + Args: + anchors (list[RotatedBoxes]): anchors for each feature map. + gt_instances: the ground-truth instances for each image. + + Returns: + list[Tensor]: + List of #demo tensors. i-th element is a vector of labels whose length is + the total number of anchors across feature maps. Label values are in {-1, 0, 1}, + with meanings: -1 = ignore; 0 = negative class; 1 = positive class. + list[Tensor]: + i-th element is a Nx5 tensor, where N is the total number of anchors across + feature maps. The values are the matched gt boxes for each anchor. + Values are undefined for those anchors not labeled as 1. + """ + anchors = RotatedBoxes.cat(anchors) + + gt_boxes = [x.gt_boxes for x in gt_instances] + del gt_instances + + gt_labels = [] + matched_gt_boxes = [] + for gt_boxes_i in gt_boxes: + """ + gt_boxes_i: ground-truth boxes for i-th image + """ + match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors) + matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) + # Matching is memory-expensive and may result in CPU tensors. 
But the result is small + gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) + + # A vector of labels (-1, 0, 1) for each anchor + gt_labels_i = self._subsample_labels(gt_labels_i) + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + else: + # TODO wasted indexing computation for ignored boxes + matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor + + gt_labels.append(gt_labels_i) # N,AHW + matched_gt_boxes.append(matched_gt_boxes_i) + return gt_labels, matched_gt_boxes + + def forward(self, images, features, gt_instances=None): + # same signature as RPN.forward + features = [features[f] for f in self.in_features] + pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) + anchors = self.anchor_generator(features) + + if self.training: + gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances) + else: + gt_labels, gt_boxes = None, None + + outputs = RPNOutputs( + self.box2box_transform, + self.batch_size_per_image, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + gt_labels, + gt_boxes, + self.smooth_l1_beta, + ) + + if self.training: + losses = {k: v * self.loss_weight for k, v in outputs.losses().items()} + else: + losses = {} + + with torch.no_grad(): + # Find the top proposals by applying NMS and removing boxes that + # are too small. The proposals are treated as fixed for approximate + # joint training with roi heads. This approach ignores the derivative + # w.r.t. the proposal boxesโ€™ coordinates that are also network + # responses, so is approximate. + + # Note: this line is the only difference v.s. RPN.forward + proposals = find_top_rrpn_proposals( + outputs.predict_proposals(), + outputs.predict_objectness_logits(), + images, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_side_len, + self.training, + ) + + return proposals, losses diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a49099aa5cfa58b55c66fe8fa85092eb26d15535 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head +from .keypoint_head import ROI_KEYPOINT_HEAD_REGISTRY, build_keypoint_head, BaseKeypointRCNNHead +from .mask_head import ROI_MASK_HEAD_REGISTRY, build_mask_head, BaseMaskRCNNHead +from .roi_heads import ( + ROI_HEADS_REGISTRY, + ROIHeads, + Res5ROIHeads, + StandardROIHeads, + build_roi_heads, + select_foreground_proposals, +) +from .rotated_fast_rcnn import RROIHeads +from .fast_rcnn import FastRCNNOutputLayers + +from . import cascade_rcnn # isort:skip diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/box_head.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/box_head.py new file mode 100644 index 0000000000000000000000000000000000000000..de62d47acfd0ac634daf7db228b43f035cc721f3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/box_head.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import numpy as np +from typing import List +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, Linear, ShapeSpec, get_norm +from detectron2.utils.registry import Registry + +ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") +ROI_BOX_HEAD_REGISTRY.__doc__ = """ +Registry for box heads, which make box predictions from per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +@ROI_BOX_HEAD_REGISTRY.register() +class FastRCNNConvFCHead(nn.Module): + """ + A head with several 3x3 conv layers (each followed by norm & relu) and then + several fc layers (each followed by relu). + """ + + @configurable + def __init__( + self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm="" + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature. + conv_dims (list[int]): the output dimensions of the conv layers + fc_dims (list[int]): the output dimensions of the fc layers + conv_norm (str or callable): normalization for the conv layers. + See :func:`detectron2.layers.get_norm` for supported types. + """ + super().__init__() + assert len(conv_dims) + len(fc_dims) > 0 + + self._output_size = (input_shape.channels, input_shape.height, input_shape.width) + + self.conv_norm_relus = [] + for k, conv_dim in enumerate(conv_dims): + conv = Conv2d( + self._output_size[0], + conv_dim, + kernel_size=3, + padding=1, + bias=not conv_norm, + norm=get_norm(conv_norm, conv_dim), + activation=F.relu, + ) + self.add_module("conv{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + self._output_size = (conv_dim, self._output_size[1], self._output_size[2]) + + self.fcs = [] + for k, fc_dim in enumerate(fc_dims): + fc = Linear(np.prod(self._output_size), fc_dim) + self.add_module("fc{}".format(k + 1), fc) + self.fcs.append(fc) + self._output_size = fc_dim + + for layer in self.conv_norm_relus: + weight_init.c2_msra_fill(layer) + for layer in self.fcs: + weight_init.c2_xavier_fill(layer) + + @classmethod + def from_config(cls, cfg, input_shape): + num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV + conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM + num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC + fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM + return { + "input_shape": input_shape, + "conv_dims": [conv_dim] * num_conv, + "fc_dims": [fc_dim] * num_fc, + "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM, + } + + def forward(self, x): + for layer in self.conv_norm_relus: + x = layer(x) + if len(self.fcs): + if x.dim() > 2: + x = torch.flatten(x, start_dim=1) + for layer in self.fcs: + x = F.relu(layer(x)) + return x + + @property + def output_shape(self): + """ + Returns: + ShapeSpec: the output feature shape + """ + o = self._output_size + if isinstance(o, int): + return ShapeSpec(channels=o) + else: + return ShapeSpec(channels=o[0], height=o[1], width=o[2]) + + +def build_box_head(cfg, input_shape): + """ + Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. 
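# --- Illustrative, standalone shape check (plain torch.nn, dummy sizes; a
# re-implementation sketch, not the class above) of the conv-then-fc structure of
# FastRCNNConvFCHead: the 3x3 convs keep the pooled 7x7 resolution, the fc layers
# act on the flattened features.
import torch
from torch import nn

convs = nn.Sequential(nn.Conv2d(256, 256, 3, padding=1), nn.ReLU())
fcs = nn.Sequential(nn.Flatten(), nn.Linear(256 * 7 * 7, 1024), nn.ReLU(),
                    nn.Linear(1024, 1024), nn.ReLU())
x = torch.randn(8, 256, 7, 7)    # 8 pooled regions from ROIPooler
print(fcs(convs(x)).shape)       # torch.Size([8, 1024])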
+ """ + name = cfg.MODEL.ROI_BOX_HEAD.NAME + return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/cascade_rcnn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/cascade_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b3efdcf70c3b71b935676e103be288484c66f4e2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/cascade_rcnn.py @@ -0,0 +1,298 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import List +import torch +from torch import nn +from torch.autograd.function import Function + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage + +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from ..poolers import ROIPooler +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference +from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads + + +class _ScaleGradient(Function): + @staticmethod + def forward(ctx, input, scale): + ctx.scale = scale + return input + + @staticmethod + def backward(ctx, grad_output): + return grad_output * ctx.scale, None + + +@ROI_HEADS_REGISTRY.register() +class CascadeROIHeads(StandardROIHeads): + """ + Implement :paper:`Cascade R-CNN`. + """ + + @configurable + def __init__( + self, + *, + box_in_features: List[str], + box_pooler: ROIPooler, + box_heads: List[nn.Module], + box_predictors: List[nn.Module], + proposal_matchers: List[Matcher], + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + box_pooler (ROIPooler): pooler that extracts region features from given boxes + box_heads (list[nn.Module]): box head for each cascade stage + box_predictors (list[nn.Module]): box predictor for each cascade stage + proposal_matchers (list[Matcher]): matcher with different IoU thresholds to + match boxes with ground truth for each stage. The first matcher matches + RPN proposals with ground truth, the other matchers use boxes predicted + by the previous stage as proposals and match them with ground truth. + """ + assert "proposal_matcher" not in kwargs, ( + "CascadeROIHeads takes 'proposal_matchers=' for each stage instead " + "of one 'proposal_matcher='." + ) + # The first matcher matches RPN proposals with ground truth, done in the base class + kwargs["proposal_matcher"] = proposal_matchers[0] + num_stages = self.num_cascade_stages = len(box_heads) + box_heads = nn.ModuleList(box_heads) + box_predictors = nn.ModuleList(box_predictors) + assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!" + assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!" 
+ super().__init__( + box_in_features=box_in_features, + box_pooler=box_pooler, + box_head=box_heads, + box_predictor=box_predictors, + **kwargs, + ) + self.proposal_matchers = proposal_matchers + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret.pop("proposal_matcher") + return ret + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS + cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS + assert len(cascade_bbox_reg_weights) == len(cascade_ious) + assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \ + "CascadeROIHeads only support class-agnostic regression now!" + assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0] + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + pooled_shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + + box_heads, box_predictors, proposal_matchers = [], [], [] + for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights): + box_head = build_box_head(cfg, pooled_shape) + box_heads.append(box_head) + box_predictors.append( + FastRCNNOutputLayers( + cfg, + box_head.output_shape, + box2box_transform=Box2BoxTransform(weights=bbox_reg_weights), + ) + ) + proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False)) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_heads": box_heads, + "box_predictors": box_predictors, + "proposal_matchers": proposal_matchers, + } + + def forward(self, images, features, proposals, targets=None): + del images + if self.training: + proposals = self.label_and_sample_proposals(proposals, targets) + + if self.training: + # Need targets to box head + losses = self._forward_box(features, proposals, targets) + losses.update(self._forward_mask(features, proposals)) + losses.update(self._forward_keypoint(features, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def _forward_box(self, features, proposals, targets=None): + """ + Args: + features, targets: the same as in + Same as in :meth:`ROIHeads.forward`. + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + """ + features = [features[f] for f in self.box_in_features] + head_outputs = [] # (predictor, predictions, proposals) + prev_pred_boxes = None + image_sizes = [x.image_size for x in proposals] + for k in range(self.num_cascade_stages): + if k > 0: + # The output boxes of the previous stage are used to create the input + # proposals of the next stage. 
+ proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes) + if self.training: + proposals = self._match_and_label_boxes(proposals, k, targets) + predictions = self._run_stage(features, proposals, k) + prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals) + head_outputs.append((self.box_predictor[k], predictions, proposals)) + + if self.training: + losses = {} + storage = get_event_storage() + for stage, (predictor, predictions, proposals) in enumerate(head_outputs): + with storage.name_scope("stage{}".format(stage)): + stage_losses = predictor.losses(predictions, proposals) + losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) + return losses + else: + # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1) + scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs] + + # Average the scores across heads + scores = [ + sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) + for scores_per_image in zip(*scores_per_stage) + ] + # Use the boxes of the last head + predictor, predictions, proposals = head_outputs[-1] + boxes = predictor.predict_boxes(predictions, proposals) + pred_instances, _ = fast_rcnn_inference( + boxes, + scores, + image_sizes, + predictor.test_score_thresh, + predictor.test_nms_thresh, + predictor.test_topk_per_image, + ) + return pred_instances + + @torch.no_grad() + def _match_and_label_boxes(self, proposals, stage, targets): + """ + Match proposals with groundtruth using the matcher at the given stage. + Label the proposals as foreground or background based on the match. + + Args: + proposals (list[Instances]): One Instances for each image, with + the field "proposal_boxes". + stage (int): the current stage + targets (list[Instances]): the ground truth instances + + Returns: + list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes" + """ + num_fg_samples, num_bg_samples = [], [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + # proposal_labels are 0 or 1 + matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix) + if len(targets_per_image) > 0: + gt_classes = targets_per_image.gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[proposal_labels == 0] = self.num_classes + gt_boxes = targets_per_image.gt_boxes[matched_idxs] + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + gt_boxes = Boxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)) + ) + proposals_per_image.gt_classes = gt_classes + proposals_per_image.gt_boxes = gt_boxes + + num_fg_samples.append((proposal_labels == 1).sum().item()) + num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1]) + + # Log the number of fg/bg samples in each stage + storage = get_event_storage() + storage.put_scalar( + "stage{}/roi_head/num_fg_samples".format(stage), + sum(num_fg_samples) / len(num_fg_samples), + ) + storage.put_scalar( + "stage{}/roi_head/num_bg_samples".format(stage), + sum(num_bg_samples) / len(num_bg_samples), + ) + return proposals + + def _run_stage(self, features, proposals, stage): + """ + Args: + features (list[Tensor]): #lvl input features to ROIHeads + proposals (list[Instances]): #image Instances, with the field "proposal_boxes" + stage (int): the current stage + + Returns: + Same 
output as `FastRCNNOutputLayers.forward()`. + """ + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + # The original implementation averages the losses among heads, + # but scale up the parameter gradients of the heads. + # This is equivalent to adding the losses among heads, + # but scale down the gradients on features. + box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages) + box_features = self.box_head[stage](box_features) + return self.box_predictor[stage](box_features) + + def _create_proposals_from_boxes(self, boxes, image_sizes): + """ + Args: + boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4 + image_sizes (list[tuple]): list of image shapes in (h, w) + + Returns: + list[Instances]: per-image proposals with the given boxes. + """ + # Just like RPN, the proposals should not have gradients + boxes = [Boxes(b.detach()) for b in boxes] + proposals = [] + for boxes_per_image, image_size in zip(boxes, image_sizes): + boxes_per_image.clip(image_size) + if self.training: + # do not filter empty boxes at inference time, + # because the scores from each stage need to be aligned and added later + boxes_per_image = boxes_per_image[boxes_per_image.nonempty()] + prop = Instances(image_size) + prop.proposal_boxes = boxes_per_image + proposals.append(prop) + return proposals diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..ca796ace55509efb8a898f580203076bada387f2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/fast_rcnn.py @@ -0,0 +1,510 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import torch +from fvcore.nn import smooth_l1_loss +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Linear, ShapeSpec, batched_nms, cat +from detectron2.modeling.box_regression import Box2BoxTransform, apply_deltas_broadcast +from detectron2.structures import Boxes, Instances +from detectron2.utils.events import get_event_storage + +__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"] + + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + R: number of ROIs, combined over all images, in the minibatch + Ri: number of ROIs in image i + K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. + +Naming convention: + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`). + + pred_class_logits: predicted class scores in [-inf, +inf]; use + softmax(pred_class_logits) to estimate P(class). + + gt_classes: ground-truth classification labels in [0, K], where [0, K) represent + foreground object classes and K represents the background class. + + pred_proposal_deltas: predicted box2box transform deltas for transforming proposals + to detection box predictions. + + gt_proposal_deltas: ground-truth box2box transform deltas +""" + + +def fast_rcnn_inference(boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image): + """ + Call `fast_rcnn_inference_single_image` for all images. + + Args: + boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic + boxes for each image. 
Element i has shape (Ri, K * 4) if doing + class-specific regression, or (Ri, 4) if doing class-agnostic + regression, where Ri is the number of predicted objects for image i. + This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. + scores (list[Tensor]): A list of Tensors of predicted class scores for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. + image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. + score_thresh (float): Only return detections with a confidence score exceeding this + threshold. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + instances: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections. + kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates + the corresponding boxes/scores index in [0, Ri) from the input, for image i. + """ + result_per_image = [ + fast_rcnn_inference_single_image( + boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image + ) + for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) + ] + return [x[0] for x in result_per_image], [x[1] for x in result_per_image] + + +def fast_rcnn_inference_single_image( + boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image +): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Args: + Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes + per image. + + Returns: + Same as `fast_rcnn_inference`, but for only one image. + """ + valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) + if not valid_mask.all(): + boxes = boxes[valid_mask] + scores = scores[valid_mask] + + scores = scores[:, :-1] + num_bbox_reg_classes = boxes.shape[1] // 4 + # Convert to Boxes to use the `clip` function ... + boxes = Boxes(boxes.reshape(-1, 4)) + boxes.clip(image_shape) + boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 + + # Filter results based on detection scores + filter_mask = scores > score_thresh # R x K + # R' x 2. First column contains indices of the R predictions; + # Second column contains indices of classes. + filter_inds = filter_mask.nonzero() + if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] + scores = scores[filter_mask] + + # Apply per-class NMS + keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) + if topk_per_image >= 0: + keep = keep[:topk_per_image] + boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] + + result = Instances(image_shape) + result.pred_boxes = Boxes(boxes) + result.scores = scores + result.pred_classes = filter_inds[:, 1] + return result, filter_inds[:, 0] + + +class FastRCNNOutputs(object): + """ + A class that stores information about outputs of a Fast R-CNN head. + It provides methods that are used to decode the outputs of a Fast R-CNN head. 
+ """ + + def __init__( + self, + box2box_transform, + pred_class_logits, + pred_proposal_deltas, + proposals, + smooth_l1_beta=0, + ): + """ + Args: + box2box_transform (Box2BoxTransform/Box2BoxTransformRotated): + box2box transform instance for proposal-to-detection transformations. + pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class + logits for all R predicted object instances. + Each row corresponds to a predicted object instance. + pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for + class-specific or class-agnostic regression. It stores the predicted deltas that + transform proposals into final box detections. + B is the box dimension (4 or 5). + When B is 4, each row is [dx, dy, dw, dh (, ....)]. + When B is 5, each row is [dx, dy, dw, dh, da (, ....)]. + proposals (list[Instances]): A list of N Instances, where Instances i stores the + proposals for image i, in the field "proposal_boxes". + When training, each Instances must have ground-truth labels + stored in the field "gt_classes" and "gt_boxes". + The total number of all instances must be equal to R. + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + """ + self.box2box_transform = box2box_transform + self.num_preds_per_image = [len(p) for p in proposals] + self.pred_class_logits = pred_class_logits + self.pred_proposal_deltas = pred_proposal_deltas + self.smooth_l1_beta = smooth_l1_beta + self.image_shapes = [x.image_size for x in proposals] + + if len(proposals): + box_type = type(proposals[0].proposal_boxes) + # cat(..., dim=0) concatenates over all images in the batch + self.proposals = box_type.cat([p.proposal_boxes for p in proposals]) + assert ( + not self.proposals.tensor.requires_grad + ), "Proposals should not require gradients!" + + # The following fields should exist only when training. + if proposals[0].has("gt_boxes"): + self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals]) + assert proposals[0].has("gt_classes") + self.gt_classes = cat([p.gt_classes for p in proposals], dim=0) + else: + self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device)) + self._no_instances = len(proposals) == 0 # no instances found + + def _log_accuracy(self): + """ + Log the accuracy metrics to EventStorage. + """ + num_instances = self.gt_classes.numel() + pred_classes = self.pred_class_logits.argmax(dim=1) + bg_class_ind = self.pred_class_logits.shape[1] - 1 + + fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind) + num_fg = fg_inds.nonzero().numel() + fg_gt_classes = self.gt_classes[fg_inds] + fg_pred_classes = pred_classes[fg_inds] + + num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() + num_accurate = (pred_classes == self.gt_classes).nonzero().numel() + fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() + + storage = get_event_storage() + if num_instances > 0: + storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances) + if num_fg > 0: + storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg) + storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg) + + def softmax_cross_entropy_loss(self): + """ + Compute the softmax cross entropy loss for box classification. 
+ + Returns: + scalar Tensor + """ + if self._no_instances: + return 0.0 * self.pred_class_logits.sum() + else: + self._log_accuracy() + return F.cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean") + + def smooth_l1_loss(self): + """ + Compute the smooth L1 loss for box regression. + + Returns: + scalar Tensor + """ + if self._no_instances: + return 0.0 * self.pred_proposal_deltas.sum() + gt_proposal_deltas = self.box2box_transform.get_deltas( + self.proposals.tensor, self.gt_boxes.tensor + ) + box_dim = gt_proposal_deltas.size(1) # 4 or 5 + cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim + device = self.pred_proposal_deltas.device + + bg_class_ind = self.pred_class_logits.shape[1] - 1 + + # Box delta loss is only computed between the prediction for the gt class k + # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions + # for non-gt classes and background. + # Empty fg_inds produces a valid loss of zero as long as the size_average + # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally + # and would produce a nan loss). + fg_inds = torch.nonzero( + (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind), as_tuple=True + )[0] + if cls_agnostic_bbox_reg: + # pred_proposal_deltas only corresponds to foreground class for agnostic + gt_class_cols = torch.arange(box_dim, device=device) + else: + fg_gt_classes = self.gt_classes[fg_inds] + # pred_proposal_deltas for class k are located in columns [b * k : b * k + b], + # where b is the dimension of box representation (4 or 5) + # Note that compared to Detectron1, + # we do not perform bounding box regression for background classes. + gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device) + + loss_box_reg = smooth_l1_loss( + self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols], + gt_proposal_deltas[fg_inds], + self.smooth_l1_beta, + reduction="sum", + ) + # The loss is normalized using the total number of regions (R), not the number + # of foreground regions even though the box regression loss is only defined on + # foreground regions. Why? Because doing so gives equal training influence to + # each foreground example. To see how, consider two different minibatches: + # (1) Contains a single foreground region + # (2) Contains 100 foreground regions + # If we normalize by the number of foreground regions, the single example in + # minibatch (1) will be given 100 times as much influence as each foreground + # example in minibatch (2). Normalizing by the total number of regions, R, + # means that the single example in minibatch (1) and each of the 100 examples + # in minibatch (2) are given equal influence. + loss_box_reg = loss_box_reg / self.gt_classes.numel() + return loss_box_reg + + def _predict_boxes(self): + """ + Returns: + Tensor: A Tensors of predicted class-specific or class-agnostic boxes + for all images in a batch. Element i has shape (Ri, K * B) or (Ri, B), where Ri is + the number of predicted objects for image i and B is the box dimension (4 or 5) + """ + return apply_deltas_broadcast( + self.box2box_transform, self.pred_proposal_deltas, self.proposals.tensor + ) + + """ + A subclass is expected to have the following methods because + they are used to query information about the head predictions. + """ + + def losses(self): + """ + Compute the default losses for box head in Fast(er) R-CNN, + with softmax cross entropy loss and smooth L1 loss. 
+ + Returns: + A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg". + """ + return { + "loss_cls": self.softmax_cross_entropy_loss(), + "loss_box_reg": self.smooth_l1_loss(), + } + + def predict_boxes(self): + """ + Deprecated + """ + return self._predict_boxes().split(self.num_preds_per_image, dim=0) + + def predict_probs(self): + """ + Deprecated + """ + probs = F.softmax(self.pred_class_logits, dim=-1) + return probs.split(self.num_preds_per_image, dim=0) + + def inference(self, score_thresh, nms_thresh, topk_per_image): + """ + Deprecated + """ + boxes = self.predict_boxes() + scores = self.predict_probs() + image_shapes = self.image_shapes + return fast_rcnn_inference( + boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image + ) + + +class FastRCNNOutputLayers(nn.Module): + """ + Two linear layers for predicting Fast R-CNN outputs: + (1) proposal-to-detection box regression deltas + (2) classification scores + """ + + @configurable + def __init__( + self, + input_shape, + *, + box2box_transform, + num_classes, + cls_agnostic_bbox_reg=False, + smooth_l1_beta=0.0, + test_score_thresh=0.0, + test_nms_thresh=0.5, + test_topk_per_image=100, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature to this module + box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): + num_classes (int): number of foreground classes + cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression + smooth_l1_beta (float): transition point from L1 to L2 loss. + test_score_thresh (float): threshold to filter predictions results. + test_nms_thresh (float): NMS threshold for prediction results. + test_topk_per_image (int): number of top predictions to produce per image. + """ + super().__init__() + if isinstance(input_shape, int): # some backward compatibility + input_shape = ShapeSpec(channels=input_shape) + input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) + # The prediction layer for num_classes foreground classes and one background class + # (hence + 1) + self.cls_score = Linear(input_size, num_classes + 1) + num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes + box_dim = len(box2box_transform.weights) + self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim) + + nn.init.normal_(self.cls_score.weight, std=0.01) + nn.init.normal_(self.bbox_pred.weight, std=0.001) + for l in [self.cls_score, self.bbox_pred]: + nn.init.constant_(l.bias, 0) + + self.box2box_transform = box2box_transform + self.smooth_l1_beta = smooth_l1_beta + self.test_score_thresh = test_score_thresh + self.test_nms_thresh = test_nms_thresh + self.test_topk_per_image = test_topk_per_image + + @classmethod + def from_config(cls, cfg, input_shape): + return { + "input_shape": input_shape, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS), + # fmt: off + "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES, + "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, + "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA, + "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST, + "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE + # fmt: on + } + + def forward(self, x): + """ + Returns: + Tensor: Nx(K+1) scores for each box + Tensor: Nx4 or Nx(Kx4) bounding box regression deltas. 
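+
+        As an illustrative example (names made up): with the 80 COCO foreground classes and
+        class-specific regression, calling this module on a batch ``box_features`` of N pooled
+        region features gives::
+
+            scores, proposal_deltas = output_layers(box_features)
+            # scores.shape          == (N, 81)   -- 80 foreground classes + background
+            # proposal_deltas.shape == (N, 320)  -- 4 box deltas per foreground class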
+ """ + if x.dim() > 2: + x = torch.flatten(x, start_dim=1) + scores = self.cls_score(x) + proposal_deltas = self.bbox_pred(x) + return scores, proposal_deltas + + # TODO: move the implementation to this class. + def losses(self, predictions, proposals): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features + that were used to compute predictions. + """ + scores, proposal_deltas = predictions + return FastRCNNOutputs( + self.box2box_transform, scores, proposal_deltas, proposals, self.smooth_l1_beta + ).losses() + + def inference(self, predictions, proposals): + """ + Returns: + list[Instances]: same as `fast_rcnn_inference`. + list[Tensor]: same as `fast_rcnn_inference`. + """ + boxes = self.predict_boxes(predictions, proposals) + scores = self.predict_probs(predictions, proposals) + image_shapes = [x.image_size for x in proposals] + return fast_rcnn_inference( + boxes, + scores, + image_shapes, + self.test_score_thresh, + self.test_nms_thresh, + self.test_topk_per_image, + ) + + def predict_boxes_for_gt_classes(self, predictions, proposals): + """ + Returns: + list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of + class-specific box head. Element i of the list has shape (Ri, B), where Ri is + the number of predicted objects for image i and B is the box dimension (4 or 5) + """ + if not len(proposals): + return [] + scores, proposal_deltas = predictions + proposal_boxes = [p.proposal_boxes for p in proposals] + proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor + N, B = proposal_boxes.shape + predict_boxes = apply_deltas_broadcast( + self.box2box_transform, proposal_deltas, proposal_boxes + ) # Nx(KxB) + + K = predict_boxes.shape[1] // B + if K > 1: + gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0) + # Some proposals are ignored or have a background class. Their gt_classes + # cannot be used as index. + gt_classes = gt_classes.clamp_(0, K - 1) + + predict_boxes = predict_boxes.view(N, K, B)[ + torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes + ] + num_prop_per_image = [len(p) for p in proposals] + return predict_boxes.split(num_prop_per_image) + + def predict_boxes(self, predictions, proposals): + """ + Returns: + list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes + for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is + the number of predicted objects for image i and B is the box dimension (4 or 5) + """ + if not len(proposals): + return [] + _, proposal_deltas = predictions + num_prop_per_image = [len(p) for p in proposals] + proposal_boxes = [p.proposal_boxes for p in proposals] + proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor + predict_boxes = apply_deltas_broadcast( + self.box2box_transform, proposal_deltas, proposal_boxes + ) # Nx(KxB) + return predict_boxes.split(num_prop_per_image) + + def predict_probs(self, predictions, proposals): + """ + Returns: + list[Tensor]: A list of Tensors of predicted class probabilities for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. 
+ """ + scores, _ = predictions + num_inst_per_image = [len(p) for p in proposals] + probs = F.softmax(scores, dim=-1) + return probs.split(num_inst_per_image, dim=0) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/keypoint_head.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/keypoint_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c7990c8fd90c70c98d6b2e3f94935f571b957a79 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/keypoint_head.py @@ -0,0 +1,253 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import List +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate +from detectron2.structures import Instances, heatmaps_to_keypoints +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +_TOTAL_SKIPPED = 0 + +ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD") +ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """ +Registry for keypoint heads, which make keypoint predictions from per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +def build_keypoint_head(cfg, input_shape): + """ + Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME + return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer): + """ + Arguments: + pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number + of instances in the batch, K is the number of keypoints, and S is the side length + of the keypoint heatmap. The values are spatial logits. + instances (list[Instances]): A list of M Instances, where M is the batch size. + These instances are predictions from the model + that are in 1:1 correspondence with pred_keypoint_logits. + Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` + instance. + normalizer (float): Normalize the loss by this amount. + If not specified, we normalize by the number of visible keypoints in the minibatch. + + Returns a scalar tensor containing the loss. 
+ """ + heatmaps = [] + valid = [] + + keypoint_side_len = pred_keypoint_logits.shape[2] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + keypoints = instances_per_image.gt_keypoints + heatmaps_per_image, valid_per_image = keypoints.to_heatmap( + instances_per_image.proposal_boxes.tensor, keypoint_side_len + ) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + if len(heatmaps): + keypoint_targets = cat(heatmaps, dim=0) + valid = cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.nonzero(valid).squeeze(1) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if len(heatmaps) == 0 or valid.numel() == 0: + global _TOTAL_SKIPPED + _TOTAL_SKIPPED += 1 + storage = get_event_storage() + storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False) + return pred_keypoint_logits.sum() * 0 + + N, K, H, W = pred_keypoint_logits.shape + pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy( + pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum" + ) + + # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch + if normalizer is None: + normalizer = valid.numel() + keypoint_loss /= normalizer + + return keypoint_loss + + +def keypoint_rcnn_inference(pred_keypoint_logits, pred_instances): + """ + Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score) + and add it to the `pred_instances` as a `pred_keypoints` field. + + Args: + pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number + of instances in the batch, K is the number of keypoints, and S is the side length of + the keypoint heatmap. The values are spatial logits. + pred_instances (list[Instances]): A list of N Instances, where N is the number of images. + + Returns: + None. Each element in pred_instances will contain an extra "pred_keypoints" field. + The field is a tensor of shape (#instance, K, 3) where the last + dimension corresponds to (x, y, score). + The scores are larger than 0. + """ + # flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor) + bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0) + + keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits.detach(), bboxes_flat.detach()) + num_instances_per_image = [len(i) for i in pred_instances] + keypoint_results = keypoint_results[:, :, [0, 1, 3]].split(num_instances_per_image, dim=0) + + for keypoint_results_per_image, instances_per_image in zip(keypoint_results, pred_instances): + # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score) + instances_per_image.pred_keypoints = keypoint_results_per_image + + +class BaseKeypointRCNNHead(nn.Module): + """ + Implement the basic Keypoint R-CNN losses and inference logic described in :paper:`Mask R-CNN`. + """ + + @configurable + def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0): + """ + NOTE: this interface is experimental. + + Args: + num_keypoints (int): number of keypoints to predict + loss_weight (float): weight to multiple on the keypoint loss + loss_normalizer (float or str): + If float, divide the loss by `loss_normalizer * #images`. + If 'visible', the loss is normalized by the total number of + visible keypoints across images. 
+ """ + super().__init__() + self.num_keypoints = num_keypoints + self.loss_weight = loss_weight + assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer + self.loss_normalizer = loss_normalizer + + @classmethod + def from_config(cls, cfg, input_shape): + ret = { + "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT, + "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS, + } + normalize_by_visible = ( + cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS + ) # noqa + if not normalize_by_visible: + batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE + positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION + ret["loss_normalizer"] = ( + ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction + ) + else: + ret["loss_normalizer"] = "visible" + return ret + + def forward(self, x, instances: List[Instances]): + """ + Args: + x: input region feature(s) provided by :class:`ROIHeads`. + instances (list[Instances]): contains the boxes & labels corresponding + to the input features. + Exact format is up to its caller to decide. + Typically, this is the foreground instances in training, with + "proposal_boxes" field and other gt annotations. + In inference, it contains boxes that are already predicted. + + Returns: + A dict of losses if in training. The predicted "instances" if in inference. + """ + x = self.layers(x) + if self.training: + num_images = len(instances) + normalizer = ( + None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer + ) + return { + "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer) + * self.loss_weight + } + else: + keypoint_rcnn_inference(x, instances) + return instances + + def layers(self, x): + """ + Neural network layers that makes predictions from regional input features. + """ + raise NotImplementedError + + +@ROI_KEYPOINT_HEAD_REGISTRY.register() +class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead): + """ + A standard keypoint head containing a series of 3x3 convs, followed by + a transpose convolution and bilinear interpolation for upsampling. + """ + + @configurable + def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + conv_dims: an iterable of output channel counts for each conv in the head + e.g. (512, 512, 512) for three convs outputting 512 channels. 
+ """ + super().__init__(num_keypoints=num_keypoints, **kwargs) + + # default up_scale to 2 (this can be made an option) + up_scale = 2 + in_channels = input_shape.channels + + self.blocks = [] + for idx, layer_channels in enumerate(conv_dims, 1): + module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1) + self.add_module("conv_fcn{}".format(idx), module) + self.blocks.append(module) + in_channels = layer_channels + + deconv_kernel = 4 + self.score_lowres = ConvTranspose2d( + in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1 + ) + self.up_scale = up_scale + + for name, param in self.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + # Caffe2 implementation uses MSRAFill, which in fact + # corresponds to kaiming_normal_ in PyTorch + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret["input_shape"] = input_shape + ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS + return ret + + def layers(self, x): + for layer in self.blocks: + x = F.relu(layer(x)) + x = self.score_lowres(x) + x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False) + return x diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/mask_head.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..5209722fb96b5e430bb5f30b3fce2b94b91f2b2e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/mask_head.py @@ -0,0 +1,277 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import List +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import configurable +from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm +from detectron2.structures import Instances +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") +ROI_MASK_HEAD_REGISTRY.__doc__ = """ +Registry for mask heads, which predicts instance masks given +per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +def mask_rcnn_loss(pred_mask_logits, instances, vis_period=0): + """ + Compute the mask prediction loss defined in the Mask R-CNN paper. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 + correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, + ...) associated with each instance are stored in fields. + vis_period (int): the period (in steps) to dump visualization. + + Returns: + mask_loss (Tensor): A scalar tensor containing the loss. 
+ """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + total_num_masks = pred_mask_logits.size(0) + mask_side_len = pred_mask_logits.size(2) + assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" + + gt_classes = [] + gt_masks = [] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + if not cls_agnostic_mask: + gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) + gt_classes.append(gt_classes_per_image) + + gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( + instances_per_image.proposal_boxes.tensor, mask_side_len + ).to(device=pred_mask_logits.device) + # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len + gt_masks.append(gt_masks_per_image) + + if len(gt_masks) == 0: + return pred_mask_logits.sum() * 0 + + gt_masks = cat(gt_masks, dim=0) + + if cls_agnostic_mask: + pred_mask_logits = pred_mask_logits[:, 0] + else: + indices = torch.arange(total_num_masks) + gt_classes = cat(gt_classes, dim=0) + pred_mask_logits = pred_mask_logits[indices, gt_classes] + + if gt_masks.dtype == torch.bool: + gt_masks_bool = gt_masks + else: + # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) + gt_masks_bool = gt_masks > 0.5 + gt_masks = gt_masks.to(dtype=torch.float32) + + # Log the training accuracy (using gt classes and 0.5 threshold) + mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool + mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) + num_positive = gt_masks_bool.sum().item() + false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( + gt_masks_bool.numel() - num_positive, 1.0 + ) + false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) + + storage = get_event_storage() + storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) + storage.put_scalar("mask_rcnn/false_positive", false_positive) + storage.put_scalar("mask_rcnn/false_negative", false_negative) + if vis_period > 0 and storage.iter % vis_period == 0: + pred_masks = pred_mask_logits.sigmoid() + vis_masks = torch.cat([pred_masks, gt_masks], axis=2) + name = "Left: mask prediction; Right: mask GT" + for idx, vis_mask in enumerate(vis_masks): + vis_mask = torch.stack([vis_mask] * 3, axis=0) + storage.put_image(name + f" ({idx})", vis_mask) + + mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean") + return mask_loss + + +def mask_rcnn_inference(pred_mask_logits, pred_instances): + """ + Convert pred_mask_logits to estimated foreground probability masks while also + extracting only the masks for the predicted classes in pred_instances. For each + predicted box, the mask of the same class is attached to the instance by adding a + new "pred_masks" field to pred_instances. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + pred_instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. Each Instances must have field "pred_classes". + + Returns: + None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, + Wmask) for predicted class. 
Note that the masks are returned as a soft (non-quantized) + masks the resolution predicted by the network; post-processing steps, such as resizing + the predicted masks to the original image resolution and/or binarizing them, is left + to the caller. + """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + + if cls_agnostic_mask: + mask_probs_pred = pred_mask_logits.sigmoid() + else: + # Select masks corresponding to the predicted classes + num_masks = pred_mask_logits.shape[0] + class_pred = cat([i.pred_classes for i in pred_instances]) + indices = torch.arange(num_masks, device=class_pred.device) + mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() + # mask_probs_pred.shape: (B, 1, Hmask, Wmask) + + num_boxes_per_image = [len(i) for i in pred_instances] + mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0) + + for prob, instances in zip(mask_probs_pred, pred_instances): + instances.pred_masks = prob # (1, Hmask, Wmask) + + +class BaseMaskRCNNHead(nn.Module): + """ + Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN` + """ + + @configurable + def __init__(self, *, vis_period=0): + """ + NOTE: this interface is experimental. + + Args: + vis_period (int): visualization period + """ + super().__init__() + self.vis_period = vis_period + + @classmethod + def from_config(cls, cfg, input_shape): + return {"vis_period": cfg.VIS_PERIOD} + + def forward(self, x, instances: List[Instances]): + """ + Args: + x: input region feature(s) provided by :class:`ROIHeads`. + instances (list[Instances]): contains the boxes & labels corresponding + to the input features. + Exact format is up to its caller to decide. + Typically, this is the foreground instances in training, with + "proposal_boxes" field and other gt annotations. + In inference, it contains boxes that are already predicted. + + Returns: + A dict of losses in training. The predicted "instances" in inference. + """ + x = self.layers(x) + if self.training: + return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period)} + else: + mask_rcnn_inference(x, instances) + return instances + + def layers(self, x): + """ + Neural network layers that makes predictions from input features. + """ + raise NotImplementedError + + +@ROI_MASK_HEAD_REGISTRY.register() +class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead): + """ + A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`). + Predictions are made with a final 1x1 conv layer. + """ + + @configurable + def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + num_classes (int): the number of classes. 1 if using class agnostic prediction. + conv_dims (list[int]): a list of N>0 integers representing the output dimensions + of N-1 conv layers and the last upsample layer. + conv_norm (str or callable): normalization for the conv layers. + See :func:`detectron2.layers.get_norm` for supported types. + """ + super().__init__(**kwargs) + assert len(conv_dims) >= 1, "conv_dims have to be non-empty!" 
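+        # Illustrative note: from_config below builds conv_dims as [CONV_DIM] * (NUM_CONV + 1),
+        # e.g. [256, 256, 256, 256, 256] for NUM_CONV=4 and CONV_DIM=256 -- four 3x3 convs
+        # followed by one 2x2 stride-2 ConvTranspose2d.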
+ + self.conv_norm_relus = [] + + cur_channels = input_shape.channels + for k, conv_dim in enumerate(conv_dims[:-1]): + conv = Conv2d( + cur_channels, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=not conv_norm, + norm=get_norm(conv_norm, conv_dim), + activation=F.relu, + ) + self.add_module("mask_fcn{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + cur_channels = conv_dim + + self.deconv = ConvTranspose2d( + cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0 + ) + cur_channels = conv_dims[-1] + + self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0) + + for layer in self.conv_norm_relus + [self.deconv]: + weight_init.c2_msra_fill(layer) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.predictor.weight, std=0.001) + if self.predictor.bias is not None: + nn.init.constant_(self.predictor.bias, 0) + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM + num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV + ret.update( + conv_dims=[conv_dim] * (num_conv + 1), # +1 for ConvTranspose + conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM, + input_shape=input_shape, + ) + if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK: + ret["num_classes"] = 1 + else: + ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES + return ret + + def layers(self, x): + for layer in self.conv_norm_relus: + x = layer(x) + x = F.relu(self.deconv(x)) + return self.predictor(x) + + +def build_mask_head(cfg, input_shape): + """ + Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_MASK_HEAD.NAME + return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/roi_heads.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..f35588e474a1c3d938e5a3b2b8a8ae5e88006215 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/roi_heads.py @@ -0,0 +1,812 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import inspect +import logging +import numpy as np +from typing import Dict, List, Optional, Tuple, Union +import torch +from torch import nn + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +from ..backbone.resnet import BottleneckBlock, make_stage +from ..matcher import Matcher +from ..poolers import ROIPooler +from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals +from ..sampling import subsample_labels +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers +from .keypoint_head import build_keypoint_head +from .mask_head import build_mask_head + +ROI_HEADS_REGISTRY = Registry("ROI_HEADS") +ROI_HEADS_REGISTRY.__doc__ = """ +Registry for ROI heads in a generalized R-CNN model. +ROIHeads take feature maps and region proposals, and +perform per-region computation. + +The registered object will be called with `obj(cfg, input_shape)`. +The call is expected to return an :class:`ROIHeads`. 
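+
+An illustrative sketch of registering a custom head (the class name is made up)::
+
+    @ROI_HEADS_REGISTRY.register()
+    class MyROIHeads(StandardROIHeads):
+        ...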
+""" + +logger = logging.getLogger(__name__) + + +def build_roi_heads(cfg, input_shape): + """ + Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. + """ + name = cfg.MODEL.ROI_HEADS.NAME + return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +def select_foreground_proposals( + proposals: List[Instances], bg_label: int +) -> Tuple[List[Instances], List[torch.Tensor]]: + """ + Given a list of N Instances (for N images), each containing a `gt_classes` field, + return a list of Instances that contain only instances with `gt_classes != -1 && + gt_classes != bg_label`. + + Args: + proposals (list[Instances]): A list of N Instances, where N is the number of + images in the batch. + bg_label: label index of background class. + + Returns: + list[Instances]: N Instances, each contains only the selected foreground instances. + list[Tensor]: N boolean vector, correspond to the selection mask of + each Instances object. True for selected instances. + """ + assert isinstance(proposals, (list, tuple)) + assert isinstance(proposals[0], Instances) + assert proposals[0].has("gt_classes") + fg_proposals = [] + fg_selection_masks = [] + for proposals_per_image in proposals: + gt_classes = proposals_per_image.gt_classes + fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) + fg_idxs = fg_selection_mask.nonzero().squeeze(1) + fg_proposals.append(proposals_per_image[fg_idxs]) + fg_selection_masks.append(fg_selection_mask) + return fg_proposals, fg_selection_masks + + +def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: + """ + Args: + proposals (list[Instances]): a list of N Instances, where N is the + number of images. + + Returns: + proposals: only contains proposals with at least one visible keypoint. + + Note that this is still slightly different from Detectron. + In Detectron, proposals for training keypoint head are re-sampled from + all the proposals with IOU>threshold & >=1 visible keypoint. + + Here, the proposals are first sampled from all proposals with + IOU>threshold, then proposals with no visible keypoint are filtered out. + This strategy seems to make no difference on Detectron and is easier to implement. + """ + ret = [] + all_num_fg = [] + for proposals_per_image in proposals: + # If empty/unannotated image (hard negatives), skip filtering for train + if len(proposals_per_image) == 0: + ret.append(proposals_per_image) + continue + gt_keypoints = proposals_per_image.gt_keypoints.tensor + # #fg x K x 3 + vis_mask = gt_keypoints[:, :, 2] >= 1 + xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] + proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 + kp_in_box = ( + (xs >= proposal_boxes[:, :, 0]) + & (xs <= proposal_boxes[:, :, 2]) + & (ys >= proposal_boxes[:, :, 1]) + & (ys <= proposal_boxes[:, :, 3]) + ) + selection = (kp_in_box & vis_mask).any(dim=1) + selection_idxs = torch.nonzero(selection, as_tuple=True)[0] + all_num_fg.append(selection_idxs.numel()) + ret.append(proposals_per_image[selection_idxs]) + + storage = get_event_storage() + storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) + return ret + + +class ROIHeads(torch.nn.Module): + """ + ROIHeads perform all per-region computation in an R-CNN. + + It typically contains logic to + 1. (in training only) match proposals with ground truth and sample them + 2. crop the regions and extract per-region features using proposals + 3. 
make per-region predictions with different heads + + It can have many variants, implemented as subclasses of this class. + This base class contains the logic to match/sample proposals. + But it is not necessary to inherit this class if the sampling logic is not needed. + """ + + @configurable + def __init__( + self, + *, + num_classes, + batch_size_per_image, + positive_sample_fraction, + proposal_matcher, + proposal_append_gt=True + ): + """ + NOTE: this interface is experimental. + + Args: + num_classes (int): number of classes. Used to label background proposals. + batch_size_per_image (int): number of proposals to use for training + positive_sample_fraction (float): fraction of positive (foreground) proposals + to use for training. + proposal_matcher (Matcher): matcher that matches proposals and ground truth + proposal_append_gt (bool): whether to include ground truth as proposals as well + """ + super().__init__() + self.batch_size_per_image = batch_size_per_image + self.positive_sample_fraction = positive_sample_fraction + self.num_classes = num_classes + self.proposal_matcher = proposal_matcher + self.proposal_append_gt = proposal_append_gt + + @classmethod + def from_config(cls, cfg): + return { + "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, + "positive_sample_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, + "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, + "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, + # Matcher to assign box proposals to gt boxes + "proposal_matcher": Matcher( + cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS, + cfg.MODEL.ROI_HEADS.IOU_LABELS, + allow_low_quality_matches=False, + ), + } + + def _sample_proposals( + self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Based on the matching between N proposals and M groundtruth, + sample the proposals and set their classification labels. + + Args: + matched_idxs (Tensor): a vector of length N, each is the best-matched + gt index in [0, M) for each proposal. + matched_labels (Tensor): a vector of length N, the matcher's label + (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. + gt_classes (Tensor): a vector of length M. + + Returns: + Tensor: a vector of indices of sampled proposals. Each is in [0, N). + Tensor: a vector of the same length, the classification label for + each sampled proposal. Each sample is labeled as either a category in + [0, num_classes) or the background (num_classes). + """ + has_gt = gt_classes.numel() > 0 + # Get the corresponding GT for each proposal + if has_gt: + gt_classes = gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[matched_labels == 0] = self.num_classes + # Label ignore proposals (-1 label) + gt_classes[matched_labels == -1] = -1 + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + + sampled_fg_idxs, sampled_bg_idxs = subsample_labels( + gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes + ) + + sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) + return sampled_idxs, gt_classes[sampled_idxs] + + @torch.no_grad() + def label_and_sample_proposals( + self, proposals: List[Instances], targets: List[Instances] + ) -> List[Instances]: + """ + Prepare some proposals to be used to train the ROI heads. + It performs box matching between `proposals` and `targets`, and assigns + training labels to the proposals. 
+ It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth + boxes, with a fraction of positives that is no larger than + ``self.positive_sample_fraction``. + + Args: + See :meth:`ROIHeads.forward` + + Returns: + list[Instances]: + length `N` list of `Instances`s containing the proposals + sampled for training. Each `Instances` has the following fields: + + - proposal_boxes: the proposal boxes + - gt_boxes: the ground-truth box that the proposal is assigned to + (this is only meaningful if the proposal has a label > 0; if label = 0 + then the ground-truth box is random) + + Other fields such as "gt_classes", "gt_masks", that's included in `targets`. + """ + gt_boxes = [x.gt_boxes for x in targets] + # Augment proposals with ground-truth boxes. + # In the case of learned proposals (e.g., RPN), when training starts + # the proposals will be low quality due to random initialization. + # It's possible that none of these initial + # proposals have high enough overlap with the gt objects to be used + # as positive examples for the second stage components (box head, + # cls head, mask head). Adding the gt boxes to the set of proposals + # ensures that the second stage components will have some positive + # examples from the start of training. For RPN, this augmentation improves + # convergence and empirically improves box AP on COCO by about 0.5 + # points (under one tested configuration). + if self.proposal_append_gt: + proposals = add_ground_truth_to_proposals(gt_boxes, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) + sampled_idxs, gt_classes = self._sample_proposals( + matched_idxs, matched_labels, targets_per_image.gt_classes + ) + + # Set target attributes of the sampled proposals: + proposals_per_image = proposals_per_image[sampled_idxs] + proposals_per_image.gt_classes = gt_classes + + # We index all the attributes of targets that start with "gt_" + # and have not been added to proposals yet (="gt_classes"). + if has_gt: + sampled_targets = matched_idxs[sampled_idxs] + # NOTE: here the indexing waste some compute, because heads + # like masks, keypoints, etc, will filter the proposals again, + # (by foreground/background, or number of keypoints in the image, etc) + # so we essentially index the data twice. 
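+                # Fields copied here typically include "gt_boxes", "gt_masks" and
+                # "gt_keypoints", depending on which tasks are enabled.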
+ for (trg_name, trg_value) in targets_per_image.get_fields().items(): + if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): + proposals_per_image.set(trg_name, trg_value[sampled_targets]) + else: + gt_boxes = Boxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4)) + ) + proposals_per_image.gt_boxes = gt_boxes + + num_bg_samples.append((gt_classes == self.num_classes).sum().item()) + num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) + proposals_with_gt.append(proposals_per_image) + + # Log the number of fg/bg samples that are selected for training ROI heads + storage = get_event_storage() + storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) + storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) + + return proposals_with_gt + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: + """ + Args: + images (ImageList): + features (dict[str,Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + proposals (list[Instances]): length `N` list of `Instances`. The i-th + `Instances` contains object proposals for the i-th input image, + with fields "proposal_boxes" and "objectness_logits". + targets (list[Instances], optional): length `N` list of `Instances`. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + It may have the following fields: + + - gt_boxes: the bounding box of each instance. + - gt_classes: the label for each instance with a category ranging in [0, #class]. + - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance. + - gt_keypoints: NxKx3, the groud-truth keypoints for each instance. + + Returns: + list[Instances]: length `N` list of `Instances` containing the + detected instances. Returned during inference only; may be [] during training. + + dict[str->Tensor]: + mapping from a named loss to a tensor storing the loss. Used during training only. + """ + raise NotImplementedError() + + +@ROI_HEADS_REGISTRY.register() +class Res5ROIHeads(ROIHeads): + """ + The ROIHeads in a typical "C4" R-CNN model, where + the box and mask head share the cropping and + the per-region feature computation by a Res5 block. 
+ """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg) + + # fmt: off + self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + pooler_scales = (1.0 / input_shape[self.in_features[0]].stride, ) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + self.mask_on = cfg.MODEL.MASK_ON + # fmt: on + assert not cfg.MODEL.KEYPOINT_ON + assert len(self.in_features) == 1 + + self.pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + + self.res5, out_channels = self._build_res5_block(cfg) + self.box_predictor = FastRCNNOutputLayers( + cfg, ShapeSpec(channels=out_channels, height=1, width=1) + ) + + if self.mask_on: + self.mask_head = build_mask_head( + cfg, + ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution), + ) + + def _build_res5_block(self, cfg): + # fmt: off + stage_channel_factor = 2 ** 3 # res5 is 8x res2 + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group * stage_channel_factor + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + norm = cfg.MODEL.RESNETS.NORM + assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \ + "Deformable conv is not yet supported in res5 head." + # fmt: on + + blocks = make_stage( + BottleneckBlock, + 3, + first_stride=2, + in_channels=out_channels // 2, + bottleneck_channels=bottleneck_channels, + out_channels=out_channels, + num_groups=num_groups, + norm=norm, + stride_in_1x1=stride_in_1x1, + ) + return nn.Sequential(*blocks), out_channels + + def _shared_roi_transform(self, features, boxes): + x = self.pooler(features, boxes) + return self.res5(x) + + def forward(self, images, features, proposals, targets=None): + """ + See :meth:`ROIHeads.forward`. + """ + del images + + if self.training: + assert targets + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + proposal_boxes = [x.proposal_boxes for x in proposals] + box_features = self._shared_roi_transform( + [features[f] for f in self.in_features], proposal_boxes + ) + predictions = self.box_predictor(box_features.mean(dim=[2, 3])) + + if self.training: + del features + losses = self.box_predictor.losses(predictions, proposals) + if self.mask_on: + proposals, fg_selection_masks = select_foreground_proposals( + proposals, self.num_classes + ) + # Since the ROI feature transform is shared between boxes and masks, + # we don't need to recompute features. The mask loss is only defined + # on foreground proposals, so we need to select out the foreground + # features. + mask_features = box_features[torch.cat(fg_selection_masks, dim=0)] + del box_features + losses.update(self.mask_head(mask_features, proposals)) + return [], losses + else: + pred_instances, _ = self.box_predictor.inference(predictions, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes(self, features, instances): + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. 
+ + Returns: + instances (Instances): + the same `Instances` object, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + if self.mask_on: + features = [features[f] for f in self.in_features] + x = self._shared_roi_transform(features, [x.pred_boxes for x in instances]) + return self.mask_head(x, instances) + else: + return instances + + +@ROI_HEADS_REGISTRY.register() +class StandardROIHeads(ROIHeads): + """ + It's "standard" in a sense that there is no ROI transform sharing + or feature sharing between tasks. + Each head independently processes the input features by each head's + own pooler and head. + + This class is used by most models, such as FPN and C5. + To implement more models, you can subclass it and implement a different + :meth:`forward()` or a head. + """ + + @configurable + def __init__( + self, + *, + box_in_features: List[str], + box_pooler: ROIPooler, + box_head: nn.Module, + box_predictor: nn.Module, + mask_in_features: Optional[List[str]] = None, + mask_pooler: Optional[ROIPooler] = None, + mask_head: Optional[nn.Module] = None, + keypoint_in_features: Optional[List[str]] = None, + keypoint_pooler: Optional[ROIPooler] = None, + keypoint_head: Optional[nn.Module] = None, + train_on_pred_boxes: bool = False, + **kwargs + ): + """ + NOTE: this interface is experimental. + + Args: + box_in_features (list[str]): list of feature names to use for the box head. + box_pooler (ROIPooler): pooler to extra region features for box head + box_head (nn.Module): transform features to make box predictions + box_predictor (nn.Module): make box predictions from the feature. + Should have the same interface as :class:`FastRCNNOutputLayers`. + mask_in_features (list[str]): list of feature names to use for the mask head. + None if not using mask head. + mask_pooler (ROIPooler): pooler to extra region features for mask head + mask_head (nn.Module): transform features to make mask predictions + keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask*``. + train_on_pred_boxes (bool): whether to use proposal boxes or + predicted boxes from the box head to train other heads. + """ + super().__init__(**kwargs) + # keep self.in_features for backward compatibility + self.in_features = self.box_in_features = box_in_features + self.box_pooler = box_pooler + self.box_head = box_head + self.box_predictor = box_predictor + + self.mask_on = mask_in_features is not None + if self.mask_on: + self.mask_in_features = mask_in_features + self.mask_pooler = mask_pooler + self.mask_head = mask_head + self.keypoint_on = keypoint_in_features is not None + if self.keypoint_on: + self.keypoint_in_features = keypoint_in_features + self.keypoint_pooler = keypoint_pooler + self.keypoint_head = keypoint_head + + self.train_on_pred_boxes = train_on_pred_boxes + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg) + ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES + # Subclasses that have not been updated to use from_config style construction + # may have overridden _init_*_head methods. In this case, those overridden methods + # will not be classmethods and we need to avoid trying to call them here. + # We test for this with ismethod which only returns True for bound methods of cls. + # Such subclasses will need to handle calling their overridden _init_*_head methods. 
+ if inspect.ismethod(cls._init_box_head): + ret.update(cls._init_box_head(cfg, input_shape)) + if inspect.ismethod(cls._init_mask_head): + ret.update(cls._init_mask_head(cfg, input_shape)) + if inspect.ismethod(cls._init_keypoint_head): + ret.update(cls._init_keypoint_head(cfg, input_shape)) + return ret + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + + # If StandardROIHeads is applied on multiple feature maps (as in FPN), + # then we share the same predictors and therefore the channel counts must be the same + in_channels = [input_shape[f].channels for f in in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + # Here we split "box head" and "box predictor", which is mainly due to historical reasons. + # They are used together so the "box predictor" layers should be part of the "box head". + # New subclasses of ROIHeads do not need "box predictor"s. + box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_head": box_head, + "box_predictor": box_predictor, + } + + @classmethod + def _init_mask_head(cls, cfg, input_shape): + if not cfg.MODEL.MASK_ON: + return {} + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features][0] + + ret = {"mask_in_features": in_features} + ret["mask_pooler"] = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + ret["mask_head"] = build_mask_head( + cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution) + ) + return ret + + @classmethod + def _init_keypoint_head(cls, cfg, input_shape): + if not cfg.MODEL.KEYPOINT_ON: + return {} + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa + sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features][0] + + ret = {"keypoint_in_features": in_features} + ret["keypoint_pooler"] = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + ret["keypoint_head"] = build_keypoint_head( + cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution) + ) + return ret + + def forward( + self, + images: ImageList, + features: Dict[str, 
torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: + """ + See :class:`ROIHeads.forward`. + """ + del images + if self.training: + assert targets + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + if self.training: + losses = self._forward_box(features, proposals) + # Usually the original proposals used by the box head are used by the mask, keypoint + # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes + # predicted by the box head. + losses.update(self._forward_mask(features, proposals)) + losses.update(self._forward_keypoint(features, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features, proposals) + # During inference cascaded prediction is used: the mask and keypoints heads are only + # applied to the top scoring box detections. + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> List[Instances]: + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + This is useful for downstream tasks where a box is known, but need to obtain + other attributes (outputs of other heads). + Test-time augmentation also uses this. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (list[Instances]): + the same `Instances` objects, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + instances = self._forward_mask(features, instances) + instances = self._forward_keypoint(features, instances) + return instances + + def _forward_box( + self, features: Dict[str, torch.Tensor], proposals: List[Instances] + ) -> Union[Dict[str, torch.Tensor], List[Instances]]: + """ + Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`, + the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + + Returns: + In training, a dict of losses. + In inference, a list of `Instances`, the predicted instances. + """ + features = [features[f] for f in self.box_in_features] + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + box_features = self.box_head(box_features) + predictions = self.box_predictor(box_features) + del box_features + + if self.training: + losses = self.box_predictor.losses(predictions, proposals) + # proposals is modified in-place below, so losses must be computed first. 
+ if self.train_on_pred_boxes: + with torch.no_grad(): + pred_boxes = self.box_predictor.predict_boxes_for_gt_classes( + predictions, proposals + ) + for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes): + proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image) + return losses + else: + pred_instances, _ = self.box_predictor.inference(predictions, proposals) + return pred_instances + + def _forward_mask( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> Union[Dict[str, torch.Tensor], List[Instances]]: + """ + Forward logic of the mask prediction branch. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + instances (list[Instances]): the per-image instances to train/predict masks. + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_masks" and return it. + """ + if not self.mask_on: + return {} if self.training else instances + + features = [features[f] for f in self.mask_in_features] + + if self.training: + # The loss is only defined on positive proposals. + proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposal_boxes = [x.proposal_boxes for x in proposals] + mask_features = self.mask_pooler(features, proposal_boxes) + return self.mask_head(mask_features, proposals) + else: + pred_boxes = [x.pred_boxes for x in instances] + mask_features = self.mask_pooler(features, pred_boxes) + return self.mask_head(mask_features, instances) + + def _forward_keypoint( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> Union[Dict[str, torch.Tensor], List[Instances]]: + """ + Forward logic of the keypoint prediction branch. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + instances (list[Instances]): the per-image instances to train/predict keypoints. + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_keypoints" and return it. + """ + if not self.keypoint_on: + return {} if self.training else instances + + features = [features[f] for f in self.keypoint_in_features] + + if self.training: + # The loss is defined on positive proposals with >=1 visible keypoints. + proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposals = select_proposals_with_visible_keypoints(proposals) + proposal_boxes = [x.proposal_boxes for x in proposals] + + keypoint_features = self.keypoint_pooler(features, proposal_boxes) + return self.keypoint_head(keypoint_features, proposals) + else: + pred_boxes = [x.pred_boxes for x in instances] + keypoint_features = self.keypoint_pooler(features, pred_boxes) + return self.keypoint_head(keypoint_features, instances) diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..3d7362d93f9be8d3838c477406540603e81ee0be --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py @@ -0,0 +1,276 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import numpy as np +import torch + +from detectron2.config import configurable +from detectron2.layers import ShapeSpec, batched_nms_rotated +from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated +from detectron2.utils.events import get_event_storage + +from ..box_regression import Box2BoxTransformRotated +from ..poolers import ROIPooler +from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers +from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + R: number of ROIs, combined over all images, in the minibatch + Ri: number of ROIs in image i + K: number of foreground classes. E.g., there are 80 foreground classes in COCO. + +Naming convention: + + deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransformRotated`). + + pred_class_logits: predicted class scores in [-inf, +inf]; use + softmax(pred_class_logits) to estimate P(class). + + gt_classes: ground-truth classification labels in [0, K], where [0, K) represent + foreground object classes and K represents the background class. + + pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals + to detection box predictions. + + gt_proposal_deltas: ground-truth rotated box2box transform deltas +""" + + +def fast_rcnn_inference_rotated( + boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image +): + """ + Call `fast_rcnn_inference_single_image_rotated` for all images. + + Args: + boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic + boxes for each image. Element i has shape (Ri, K * 5) if doing + class-specific regression, or (Ri, 5) if doing class-agnostic + regression, where Ri is the number of predicted objects for image i. + This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`. + scores (list[Tensor]): A list of Tensors of predicted class scores for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`. + image_shapes (list[tuple]): A list of (height, width) tuples for each image in the batch. + score_thresh (float): Only return detections with a confidence score exceeding this + threshold. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + instances: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confident detections. + kept_indices: (list[Tensor]): A list of 1D tensors of length N; each element indicates + the corresponding boxes/scores index in [0, Ri) from the input, for image i.
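+        For example (illustrative): with K = 80 foreground classes and class-specific
+        regression, element i of `boxes` has shape (Ri, 400) and element i of `scores`
+        has shape (Ri, 81); `kept_indices[i]` then indexes into those Ri input rows.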
+ """ + result_per_image = [ + fast_rcnn_inference_single_image_rotated( + boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image + ) + for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) + ] + return [x[0] for x in result_per_image], [x[1] for x in result_per_image] + + +def fast_rcnn_inference_single_image_rotated( + boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image +): + """ + Single-image inference. Return rotated bounding-box detection results by thresholding + on scores and applying rotated non-maximum suppression (Rotated NMS). + + Args: + Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes + per image. + + Returns: + Same as `fast_rcnn_inference_rotated`, but for only one image. + """ + valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) + if not valid_mask.all(): + boxes = boxes[valid_mask] + scores = scores[valid_mask] + + B = 5 # box dimension + scores = scores[:, :-1] + num_bbox_reg_classes = boxes.shape[1] // B + # Convert to Boxes to use the `clip` function ... + boxes = RotatedBoxes(boxes.reshape(-1, B)) + boxes.clip(image_shape) + boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B + # Filter results based on detection scores + filter_mask = scores > score_thresh # R x K + # R' x 2. First column contains indices of the R predictions; + # Second column contains indices of classes. + filter_inds = filter_mask.nonzero() + if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] + scores = scores[filter_mask] + + # Apply per-class Rotated NMS + keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh) + if topk_per_image >= 0: + keep = keep[:topk_per_image] + boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] + + result = Instances(image_shape) + result.pred_boxes = RotatedBoxes(boxes) + result.scores = scores + result.pred_classes = filter_inds[:, 1] + + return result, filter_inds[:, 0] + + +class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers): + """ + Two linear layers for predicting Rotated Fast R-CNN outputs. + """ + + @classmethod + def from_config(cls, cfg, input_shape): + args = super().from_config(cfg, input_shape) + args["box2box_transform"] = Box2BoxTransformRotated( + weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS + ) + return args + + def inference(self, predictions, proposals): + """ + Returns: + list[Instances]: same as `fast_rcnn_inference_rotated`. + list[Tensor]: same as `fast_rcnn_inference_rotated`. + """ + boxes = self.predict_boxes(predictions, proposals) + scores = self.predict_probs(predictions, proposals) + image_shapes = [x.image_size for x in proposals] + + return fast_rcnn_inference_rotated( + boxes, + scores, + image_shapes, + self.test_score_thresh, + self.test_nms_thresh, + self.test_topk_per_image, + ) + + +@ROI_HEADS_REGISTRY.register() +class RROIHeads(StandardROIHeads): + """ + This class is used by Rotated Fast R-CNN to detect rotated boxes. + For now, it only supports box predictions but not mask or keypoints. + """ + + @configurable + def __init__(self, **kwargs): + """ + NOTE: this interface is experimental. + """ + super().__init__(**kwargs) + assert ( + not self.mask_on and not self.keypoint_on + ), "Mask/Keypoints not supported in Rotated ROIHeads." + assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!" 
+ + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + assert pooler_type in ["ROIAlignRotated"], pooler_type + # assume all channel counts are equal + in_channels = [input_shape[f].channels for f in in_features][0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + # This line is the only difference v.s. StandardROIHeads + box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_head": box_head, + "box_predictor": box_predictor, + } + + @torch.no_grad() + def label_and_sample_proposals(self, proposals, targets): + """ + Prepare some proposals to be used to train the RROI heads. + It performs box matching between `proposals` and `targets`, and assigns + training labels to the proposals. + It returns `self.batch_size_per_image` random samples from proposals and ground-truth boxes, + with a fraction of positives that is no larger than `self.positive_sample_fraction`. + + Args: + See :meth:`StandardROIHeads.forward` + + Returns: + list[Instances]: length `N` list of `Instances`s containing the proposals + sampled for training. Each `Instances` has the following fields: + - proposal_boxes: the rotated proposal boxes + - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to + (this is only meaningful if the proposal has a label > 0; if label = 0 + then the ground-truth box is random) + - gt_classes: the ground-truth classification label for each proposal + """ + gt_boxes = [x.gt_boxes for x in targets] + if self.proposal_append_gt: + proposals = add_ground_truth_to_proposals(gt_boxes, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou_rotated( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) + sampled_idxs, gt_classes = self._sample_proposals( + matched_idxs, matched_labels, targets_per_image.gt_classes + ) + + proposals_per_image = proposals_per_image[sampled_idxs] + proposals_per_image.gt_classes = gt_classes + + if has_gt: + sampled_targets = matched_idxs[sampled_idxs] + proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets] + else: + gt_boxes = RotatedBoxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5)) + ) + proposals_per_image.gt_boxes = gt_boxes + + num_bg_samples.append((gt_classes == self.num_classes).sum().item()) + num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) + proposals_with_gt.append(proposals_per_image) + + # Log the number of fg/bg samples that are selected for training ROI heads + storage = get_event_storage() + storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) + storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) + + return proposals_with_gt diff
--git a/preprocess/mhp_extension/detectron2/detectron2/modeling/sampling.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..ecf251a2fa301d9e31eee7d3ba5dc6eaab1732f8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/sampling.py @@ -0,0 +1,50 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +__all__ = ["subsample_labels"] + + +def subsample_labels(labels, num_samples, positive_fraction, bg_label): + """ + Return `num_samples` (or fewer, if not enough found) + random samples from `labels` which is a mixture of positives & negatives. + It will try to return as many positives as possible without + exceeding `positive_fraction * num_samples`, and then try to + fill the remaining slots with negatives. + + Args: + labels (Tensor): (N, ) label vector with values: + * -1: ignore + * bg_label: background ("negative") class + * otherwise: one or more foreground ("positive") classes + num_samples (int): The total number of labels with value >= 0 to return. + Values that are not sampled will be filled with -1 (ignore). + positive_fraction (float): The number of subsampled labels with values > 0 + is `min(num_positives, int(positive_fraction * num_samples))`. The number + of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. + In order words, if there are not enough positives, the sample is filled with + negatives. If there are also not enough negatives, then as many elements are + sampled as is possible. + bg_label (int): label index of background ("negative") class. + + Returns: + pos_idx, neg_idx (Tensor): + 1D vector of indices. The total length of both is `num_samples` or fewer. + """ + positive = torch.nonzero((labels != -1) & (labels != bg_label), as_tuple=True)[0] + negative = torch.nonzero(labels == bg_label, as_tuple=True)[0] + + num_pos = int(num_samples * positive_fraction) + # protect against not enough positive examples + num_pos = min(positive.numel(), num_pos) + num_neg = num_samples - num_pos + # protect against not enough negative examples + num_neg = min(negative.numel(), num_neg) + + # randomly select positive and negative examples + perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] + perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] + + pos_idx = positive[perm1] + neg_idx = negative[perm2] + return pos_idx, neg_idx diff --git a/preprocess/mhp_extension/detectron2/detectron2/modeling/test_time_augmentation.py b/preprocess/mhp_extension/detectron2/detectron2/modeling/test_time_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..1e5bcf02f655956f76eb78fb7de36d691de6a53c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/modeling/test_time_augmentation.py @@ -0,0 +1,285 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import copy +import numpy as np +from contextlib import contextmanager +from itertools import count +import torch +from torch import nn +from torch.nn.parallel import DistributedDataParallel + +from detectron2.data.detection_utils import read_image +from detectron2.data.transforms import ResizeShortestEdge +from detectron2.structures import Instances + +from .meta_arch import GeneralizedRCNN +from .postprocessing import detector_postprocess +from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image + +__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"] + + +class DatasetMapperTTA: + """ + Implement test-time augmentation for detection data. + It is a callable which takes a dataset dict from a detection dataset, + and returns a list of dataset dicts where the images + are augmented from the input image by the transformations defined in the config. + This is used for test-time augmentation. + """ + + def __init__(self, cfg): + self.min_sizes = cfg.TEST.AUG.MIN_SIZES + self.max_size = cfg.TEST.AUG.MAX_SIZE + self.flip = cfg.TEST.AUG.FLIP + self.image_format = cfg.INPUT.FORMAT + + def __call__(self, dataset_dict): + """ + Args: + dict: a detection dataset dict + + Returns: + list[dict]: + a list of dataset dicts, which contain augmented version of the input image. + The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``. + """ + ret = [] + if "image" not in dataset_dict: + numpy_image = read_image(dataset_dict["file_name"], self.image_format) + else: + numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy().astype("uint8") + for min_size in self.min_sizes: + image = np.copy(numpy_image) + tfm = ResizeShortestEdge(min_size, self.max_size).get_transform(image) + resized = tfm.apply_image(image) + resized = torch.as_tensor(resized.transpose(2, 0, 1).astype("float32")) + + dic = copy.deepcopy(dataset_dict) + dic["horiz_flip"] = False + dic["image"] = resized + ret.append(dic) + + if self.flip: + dic = copy.deepcopy(dataset_dict) + dic["horiz_flip"] = True + dic["image"] = torch.flip(resized, dims=[2]) + ret.append(dic) + return ret + + +class GeneralizedRCNNWithTTA(nn.Module): + """ + A GeneralizedRCNN with test-time augmentation enabled. + Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`. + """ + + def __init__(self, cfg, model, tta_mapper=None, batch_size=3): + """ + Args: + cfg (CfgNode): + model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. + tta_mapper (callable): takes a dataset dict and returns a list of + augmented versions of the dataset dict. Defaults to + `DatasetMapperTTA(cfg)`. + batch_size (int): batch the augmented images into this batch size for inference. + """ + super().__init__() + if isinstance(model, DistributedDataParallel): + model = model.module + assert isinstance( + model, GeneralizedRCNN + ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model)) + self.cfg = cfg.clone() + assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet" + assert ( + not self.cfg.MODEL.LOAD_PROPOSALS + ), "TTA for pre-computed proposals is not supported yet" + + self.model = model + + if tta_mapper is None: + tta_mapper = DatasetMapperTTA(cfg) + self.tta_mapper = tta_mapper + self.batch_size = batch_size + + @contextmanager + def _turn_off_roi_heads(self, attrs): + """ + Open a context where some heads in `model.roi_heads` are temporarily turned off. 
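+        For example, ``with self._turn_off_roi_heads(["mask_on", "keypoint_on"]): ...``
+        runs box-only inference inside the block and restores the original attribute
+        values on exit (this is how it is used in `_inference_one_image` below).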
+ Args: + attr (list[str]): the attribute in `model.roi_heads` which can be used + to turn off a specific head, e.g., "mask_on", "keypoint_on". + """ + roi_heads = self.model.roi_heads + old = {} + for attr in attrs: + try: + old[attr] = getattr(roi_heads, attr) + except AttributeError: + # The head may not be implemented in certain ROIHeads + pass + + if len(old.keys()) == 0: + yield + else: + for attr in old.keys(): + setattr(roi_heads, attr, False) + yield + for attr in old.keys(): + setattr(roi_heads, attr, old[attr]) + + def _batch_inference(self, batched_inputs, detected_instances=None, do_postprocess=True): + """ + Execute inference on a list of inputs, + using batch size = self.batch_size, instead of the length of the list. + + Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` + """ + if detected_instances is None: + detected_instances = [None] * len(batched_inputs) + + outputs = [] + inputs, instances = [], [] + for idx, input, instance in zip(count(), batched_inputs, detected_instances): + inputs.append(input) + instances.append(instance) + if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1: + outputs.extend( + self.model.inference( + inputs, + instances if instances[0] is not None else None, + do_postprocess=do_postprocess, + ) + ) + inputs, instances = [], [] + return outputs + + def __call__(self, batched_inputs): + """ + Same input/output format as :meth:`GeneralizedRCNN.forward` + """ + return [self._inference_one_image(x) for x in batched_inputs] + + def _detector_postprocess(self, outputs, aug_vars): + return detector_postprocess(outputs, aug_vars["height"], aug_vars["width"]) + + def _inference_one_image(self, input): + """ + Args: + input (dict): one dataset dict + + Returns: + dict: one output dict + """ + + augmented_inputs, aug_vars = self._get_augmented_inputs(input) + # Detect boxes from all augmented versions + with self._turn_off_roi_heads(["mask_on", "keypoint_on"]): + # temporarily disable roi heads + all_boxes, all_scores, all_classes = self._get_augmented_boxes( + augmented_inputs, aug_vars + ) + merged_instances = self._merge_detections( + all_boxes, all_scores, all_classes, (aug_vars["height"], aug_vars["width"]) + ) + + if self.cfg.MODEL.MASK_ON: + # Use the detected boxes to obtain new fields + augmented_instances = self._rescale_detected_boxes( + augmented_inputs, merged_instances, aug_vars + ) + # run forward on the detected boxes + outputs = self._batch_inference( + augmented_inputs, augmented_instances, do_postprocess=False + ) + # Delete now useless variables to avoid being out of memory + del augmented_inputs, augmented_instances, merged_instances + # average the predictions + outputs[0].pred_masks = self._reduce_pred_masks(outputs, aug_vars) + # postprocess + output = self._detector_postprocess(outputs[0], aug_vars) + return {"instances": output} + else: + return {"instances": merged_instances} + + def _get_augmented_inputs(self, input): + augmented_inputs = self.tta_mapper(input) + + do_hflip = [k.pop("horiz_flip", False) for k in augmented_inputs] + heights = [k["height"] for k in augmented_inputs] + widths = [k["width"] for k in augmented_inputs] + assert ( + len(set(heights)) == 1 and len(set(widths)) == 1 + ), "Augmented version of the inputs should have the same original resolution!" 
+ height = heights[0] + width = widths[0] + aug_vars = {"height": height, "width": width, "do_hflip": do_hflip} + + return augmented_inputs, aug_vars + + def _get_augmented_boxes(self, augmented_inputs, aug_vars): + # 1: forward with all augmented images + outputs = self._batch_inference(augmented_inputs, do_postprocess=False) + # 2: union the results + all_boxes = [] + all_scores = [] + all_classes = [] + for idx, output in enumerate(outputs): + rescaled_output = self._detector_postprocess(output, aug_vars) + pred_boxes = rescaled_output.pred_boxes.tensor + if aug_vars["do_hflip"][idx]: + pred_boxes[:, [0, 2]] = aug_vars["width"] - pred_boxes[:, [2, 0]] + all_boxes.append(pred_boxes) + all_scores.extend(rescaled_output.scores) + all_classes.extend(rescaled_output.pred_classes) + all_boxes = torch.cat(all_boxes, dim=0).cpu() + return all_boxes, all_scores, all_classes + + def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw): + # select from the union of all results + num_boxes = len(all_boxes) + num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES + # +1 because fast_rcnn_inference expects background scores as well + all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device) + for idx, cls, score in zip(count(), all_classes, all_scores): + all_scores_2d[idx, cls] = score + + merged_instances, _ = fast_rcnn_inference_single_image( + all_boxes, + all_scores_2d, + shape_hw, + 1e-8, + self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + self.cfg.TEST.DETECTIONS_PER_IMAGE, + ) + + return merged_instances + + def _rescale_detected_boxes(self, augmented_inputs, merged_instances, aug_vars): + augmented_instances = [] + for idx, input in enumerate(augmented_inputs): + actual_height, actual_width = input["image"].shape[1:3] + scale_x = actual_width * 1.0 / aug_vars["width"] + scale_y = actual_height * 1.0 / aug_vars["height"] + pred_boxes = merged_instances.pred_boxes.clone() + pred_boxes.tensor[:, 0::2] *= scale_x + pred_boxes.tensor[:, 1::2] *= scale_y + if aug_vars["do_hflip"][idx]: + pred_boxes.tensor[:, [0, 2]] = actual_width - pred_boxes.tensor[:, [2, 0]] + + aug_instances = Instances( + image_size=(actual_height, actual_width), + pred_boxes=pred_boxes, + pred_classes=merged_instances.pred_classes, + scores=merged_instances.scores, + ) + augmented_instances.append(aug_instances) + return augmented_instances + + def _reduce_pred_masks(self, outputs, aug_vars): + for idx, output in enumerate(outputs): + if aug_vars["do_hflip"][idx]: + output.pred_masks = output.pred_masks.flip(dims=[3]) + all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0) + avg_pred_masks = torch.mean(all_pred_masks, dim=0) + return avg_pred_masks diff --git a/preprocess/mhp_extension/detectron2/detectron2/solver/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/solver/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..10f84e12d029a07d5c7d3ac29e18b572a92ef03c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/solver/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from .build import build_lr_scheduler, build_optimizer +from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/preprocess/mhp_extension/detectron2/detectron2/solver/build.py b/preprocess/mhp_extension/detectron2/detectron2/solver/build.py new file mode 100644 index 0000000000000000000000000000000000000000..6d9d0ee5df1a6135c1a3df0151dfe0e36aa9971a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/solver/build.py @@ -0,0 +1,165 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from enum import Enum +from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union +import torch + +from detectron2.config import CfgNode + +from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR + +_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] +_GradientClipper = Callable[[_GradientClipperInput], None] + + +class GradientClipType(Enum): + VALUE = "value" + NORM = "norm" + + +def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: + """ + Creates gradient clipping closure to clip by value or by norm, + according to the provided config. + """ + cfg = cfg.clone() + + def clip_grad_norm(p: _GradientClipperInput): + torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) + + def clip_grad_value(p: _GradientClipperInput): + torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) + + _GRADIENT_CLIP_TYPE_TO_CLIPPER = { + GradientClipType.VALUE: clip_grad_value, + GradientClipType.NORM: clip_grad_norm, + } + return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] + + +def _generate_optimizer_class_with_gradient_clipping( + optimizer_type: Type[torch.optim.Optimizer], gradient_clipper: _GradientClipper +) -> Type[torch.optim.Optimizer]: + """ + Dynamically creates a new type that inherits the type of a given instance + and overrides the `step` method to add gradient clipping + """ + + def optimizer_wgc_step(self, closure=None): + for group in self.param_groups: + for p in group["params"]: + gradient_clipper(p) + super(type(self), self).step(closure) + + OptimizerWithGradientClip = type( + optimizer_type.__name__ + "WithGradientClip", + (optimizer_type,), + {"step": optimizer_wgc_step}, + ) + return OptimizerWithGradientClip + + +def maybe_add_gradient_clipping( + cfg: CfgNode, optimizer: torch.optim.Optimizer +) -> torch.optim.Optimizer: + """ + If gradient clipping is enabled through config options, wraps the existing + optimizer instance of some type OptimizerType to become an instance + of the new dynamically created class OptimizerTypeWithGradientClip + that inherits OptimizerType and overrides the `step` method to + include gradient clipping. 
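+    For example, with ``SOLVER.CLIP_GRADIENTS.ENABLED = True`` and ``CLIP_TYPE = "norm"``,
+    every ``step()`` of the returned optimizer first clips each parameter's gradient
+    norm to ``CLIP_VALUE`` (computed with ``NORM_TYPE``) before updating the weights.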
+ + Args: + cfg: CfgNode + configuration options + optimizer: torch.optim.Optimizer + existing optimizer instance + + Return: + optimizer: torch.optim.Optimizer + either the unmodified optimizer instance (if gradient clipping is + disabled), or the same instance with adjusted __class__ to override + the `step` method and include gradient clipping + """ + if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: + return optimizer + grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) + OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( + type(optimizer), grad_clipper + ) + optimizer.__class__ = OptimizerWithGradientClip + return optimizer + + +def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: + """ + Build an optimizer from config. + """ + norm_module_types = ( + torch.nn.BatchNorm1d, + torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + torch.nn.SyncBatchNorm, + # NaiveSyncBatchNorm inherits from BatchNorm2d + torch.nn.GroupNorm, + torch.nn.InstanceNorm1d, + torch.nn.InstanceNorm2d, + torch.nn.InstanceNorm3d, + torch.nn.LayerNorm, + torch.nn.LocalResponseNorm, + ) + params: List[Dict[str, Any]] = [] + memo: Set[torch.nn.parameter.Parameter] = set() + for module in model.modules(): + for key, value in module.named_parameters(recurse=False): + if not value.requires_grad: + continue + # Avoid duplicating parameters + if value in memo: + continue + memo.add(value) + lr = cfg.SOLVER.BASE_LR + weight_decay = cfg.SOLVER.WEIGHT_DECAY + if isinstance(module, norm_module_types): + weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM + elif key == "bias": + # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0 + # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer + # hyperparameters are by default exactly the same as for regular + # weights. + lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR + weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS + params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] + + optimizer = torch.optim.SGD( + params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV + ) + optimizer = maybe_add_gradient_clipping(cfg, optimizer) + return optimizer + + +def build_lr_scheduler( + cfg: CfgNode, optimizer: torch.optim.Optimizer +) -> torch.optim.lr_scheduler._LRScheduler: + """ + Build a LR scheduler from config. + """ + name = cfg.SOLVER.LR_SCHEDULER_NAME + if name == "WarmupMultiStepLR": + return WarmupMultiStepLR( + optimizer, + cfg.SOLVER.STEPS, + cfg.SOLVER.GAMMA, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + ) + elif name == "WarmupCosineLR": + return WarmupCosineLR( + optimizer, + cfg.SOLVER.MAX_ITER, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + ) + else: + raise ValueError("Unknown LR scheduler: {}".format(name)) diff --git a/preprocess/mhp_extension/detectron2/detectron2/solver/lr_scheduler.py b/preprocess/mhp_extension/detectron2/detectron2/solver/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..6148d86785dae03ed2611792fb28da387d1103b8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/solver/lr_scheduler.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import math +from bisect import bisect_right +from typing import List +import torch + +# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes +# only on epoch boundaries. We typically use iteration based schedules instead. +# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean +# "iteration" instead. + +# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating +# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. + + +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + milestones: List[int], + gamma: float = 0.1, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. Got {}", milestones + ) + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + return [ + base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + # Different definitions of half-cosine with warmup are possible. For + # simplicity we multiply the standard half-cosine schedule by the warmup + # factor. An alternative is to start the period of the cosine at warmup_iters + # instead of at 0. In the case that warmup_iters << max_iters the two are + # very close to each other. + return [ + base_lr + * warmup_factor + * 0.5 + * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +def _get_warmup_factor_at_iter( + method: str, iter: int, warmup_iters: int, warmup_factor: float +) -> float: + """ + Return the learning rate warmup factor at a specific iteration. + See :paper:`in1k1h` for more details. + + Args: + method (str): warmup method; either "constant" or "linear". + iter (int): iteration at which to calculate the warmup factor. + warmup_iters (int): the number of warmup iterations. + warmup_factor (float): the base warmup factor (the meaning changes according + to the method used). + + Returns: + float: the effective warmup factor at the given iteration. 
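+    For example (linear warmup): with warmup_factor=0.001 and warmup_iters=1000,
+    iteration 500 yields 0.001 * (1 - 0.5) + 0.5 = 0.5005, and any iteration >= 1000
+    yields 1.0.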
+ """ + if iter >= warmup_iters: + return 1.0 + + if method == "constant": + return warmup_factor + elif method == "linear": + alpha = iter / warmup_iters + return warmup_factor * (1 - alpha) + alpha + else: + raise ValueError("Unknown warmup method: {}".format(method)) diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/structures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..618f526753b5813b86645023271b67b421ea4cb5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .boxes import Boxes, BoxMode, pairwise_iou +from .image_list import ImageList + +from .instances import Instances +from .keypoints import Keypoints, heatmaps_to_keypoints +from .masks import BitMasks, PolygonMasks, rasterize_polygons_within_box, polygons_to_bitmask +from .rotated_boxes import RotatedBoxes +from .rotated_boxes import pairwise_iou as pairwise_iou_rotated + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/boxes.py b/preprocess/mhp_extension/detectron2/detectron2/structures/boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..e625803e23ec6c0f71ada847ba7bef8e15c8fa40 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/boxes.py @@ -0,0 +1,367 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import numpy as np +from enum import IntEnum, unique +from typing import Iterator, List, Tuple, Union +import torch + +_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] + + +@unique +class BoxMode(IntEnum): + """ + Enum of different ways to represent a box. + """ + + XYXY_ABS = 0 + """ + (x0, y0, x1, y1) in absolute floating points coordinates. + The coordinates in range [0, width or height]. + """ + XYWH_ABS = 1 + """ + (x0, y0, w, h) in absolute floating points coordinates. + """ + XYXY_REL = 2 + """ + Not yet supported! + (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. + """ + XYWH_REL = 3 + """ + Not yet supported! + (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. + """ + XYWHA_ABS = 4 + """ + (xc, yc, w, h, a) in absolute floating points coordinates. + (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. + """ + + @staticmethod + def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: + """ + Args: + box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 + from_mode, to_mode (BoxMode) + + Returns: + The converted box of the same type. + """ + if from_mode == to_mode: + return box + + original_type = type(box) + is_numpy = isinstance(box, np.ndarray) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) == 4 or len(box) == 5, ( + "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," + " where k == 4 or 5" + ) + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + else: + arr = box.clone() + + assert to_mode.value not in [ + BoxMode.XYXY_REL, + BoxMode.XYWH_REL, + ] and from_mode.value not in [ + BoxMode.XYXY_REL, + BoxMode.XYWH_REL, + ], "Relative mode not yet supported!" 
+ + if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: + assert ( + arr.shape[-1] == 5 + ), "The last dimension of input shape must be 5 for XYWHA format" + original_dtype = arr.dtype + arr = arr.double() + + w = arr[:, 2] + h = arr[:, 3] + a = arr[:, 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + new_w = c * w + s * h + new_h = c * h + s * w + + # convert center to top-left corner + arr[:, 0] -= new_w / 2.0 + arr[:, 1] -= new_h / 2.0 + # bottom-right corner + arr[:, 2] = arr[:, 0] + new_w + arr[:, 3] = arr[:, 1] + new_h + + arr = arr[:, :4].to(dtype=original_dtype) + elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: + original_dtype = arr.dtype + arr = arr.double() + arr[:, 0] += arr[:, 2] / 2.0 + arr[:, 1] += arr[:, 3] / 2.0 + angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) + arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) + else: + if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: + arr[:, 2] += arr[:, 0] + arr[:, 3] += arr[:, 1] + elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: + arr[:, 2] -= arr[:, 0] + arr[:, 3] -= arr[:, 1] + else: + raise NotImplementedError( + "Conversion from BoxMode {} to {} is not supported yet".format( + from_mode, to_mode + ) + ) + + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + else: + return arr + + +class Boxes: + """ + This structure stores a list of boxes as a Nx4 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + + Attributes: + tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). + """ + + BoxSizeType = Union[List[int], Tuple[int, int]] + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, 4)).to(dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() + + self.tensor = tensor + + def clone(self) -> "Boxes": + """ + Clone the Boxes. + + Returns: + Boxes + """ + return Boxes(self.tensor.clone()) + + def to(self, device: str) -> "Boxes": + return Boxes(self.tensor.to(device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) + return area + + def clip(self, box_size: BoxSizeType) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + Args: + box_size (height, width): The clipping box's size. + """ + assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" 
+ h, w = box_size + self.tensor[:, 0].clamp_(min=0, max=w) + self.tensor[:, 1].clamp_(min=0, max=h) + self.tensor[:, 2].clamp_(min=0, max=w) + self.tensor[:, 3].clamp_(min=0, max=h) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty if either of its sides is no larger than the threshold. + + Returns: + Tensor: + a binary vector which represents whether each box is empty + (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] - box[:, 0] + heights = box[:, 3] - box[:, 1] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Boxes": + """ + Returns: + Boxes: Create a new :class:`Boxes` by indexing. + + The following usages are allowed: + + 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned Boxes might share storage with this Boxes, + subject to PyTorch's indexing semantics. + """ + if isinstance(item, int): + return Boxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) + return Boxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "Boxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: BoxSizeType, boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box. + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + Returns: + a binary vector, indicating whether each box is inside the reference box. + """ + height, width = box_size + inds_inside = ( + (self.tensor[..., 0] >= -boundary_threshold) + & (self.tensor[..., 1] >= -boundary_threshold) + & (self.tensor[..., 2] < width + boundary_threshold) + & (self.tensor[..., 3] < height + boundary_threshold) + ) + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the box with horizontal and vertical scaling factors. + """ + self.tensor[:, 0::2] *= scale_x + self.tensor[:, 1::2] *= scale_y + + @classmethod + def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": + """ + Concatenates a list of Boxes into a single Boxes. + + Arguments: + boxes_list (list[Boxes]) + + Returns: + Boxes: the concatenated Boxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all(isinstance(box, Boxes) for box in boxes_list) + + # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> torch.device: + return self.tensor.device + + def __iter__(self) -> Iterator[torch.Tensor]: + """ + Yield a box as a Tensor of shape (4,) at a time.
+ """ + yield from self.tensor + + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, + compute the IoU (intersection over union) + between __all__ N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + area1 = boxes1.area() + area2 = boxes2.area() + + boxes1, boxes2 = boxes1.tensor, boxes2.tensor + + width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( + boxes1[:, None, :2], boxes2[:, :2] + ) # [N,M,2] + + width_height.clamp_(min=0) # [N,M,2] + inter = width_height.prod(dim=2) # [N,M] + del width_height + + # handle empty boxes + iou = torch.where( + inter > 0, + inter / (area1[:, None] + area2 - inter), + torch.zeros(1, dtype=inter.dtype, device=inter.device), + ) + return iou + + +def matched_boxlist_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Compute pairwise intersection over union (IOU) of two sets of matched + boxes. The box order must be (xmin, ymin, xmax, ymax). + Similar to boxlist_iou, but computes only diagonal elements of the matrix + Arguments: + boxes1: (Boxes) bounding boxes, sized [N,4]. + boxes2: (Boxes) bounding boxes, sized [N,4]. + Returns: + (tensor) iou, sized [N]. + """ + assert len(boxes1) == len( + boxes2 + ), "boxlists should have the same" "number of entries, got {}, {}".format( + len(boxes1), len(boxes2) + ) + area1 = boxes1.area() # [N] + area2 = boxes2.area() # [N] + box1, box2 = boxes1.tensor, boxes2.tensor + lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] + rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] + wh = (rb - lt).clamp(min=0) # [N,2] + inter = wh[:, 0] * wh[:, 1] # [N] + iou = inter / (area1 + area2 - inter) # [N] + return iou diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/image_list.py b/preprocess/mhp_extension/detectron2/detectron2/structures/image_list.py new file mode 100644 index 0000000000000000000000000000000000000000..2d89224b64402badf7f0b113188b5f653df912ac --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/image_list.py @@ -0,0 +1,113 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +from __future__ import division +from typing import Any, List, Sequence, Tuple, Union +import torch +from torch.nn import functional as F + + +class ImageList(object): + """ + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. + This works by padding the images to the same size, + and storing in a field the original sizes of each image + + Attributes: + image_sizes (list[tuple[int, int]]): each tuple is (h, w) + """ + + def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): + """ + Arguments: + tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 + image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can + be smaller than (H, W) due to padding. + """ + self.tensor = tensor + self.image_sizes = image_sizes + + def __len__(self) -> int: + return len(self.image_sizes) + + def __getitem__(self, idx: Union[int, slice]) -> torch.Tensor: + """ + Access the individual image in its original size. 
+ + Returns: + Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 + """ + size = self.image_sizes[idx] + return self.tensor[idx, ..., : size[0], : size[1]] # type: ignore + + def to(self, *args: Any, **kwargs: Any) -> "ImageList": + cast_tensor = self.tensor.to(*args, **kwargs) + return ImageList(cast_tensor, self.image_sizes) + + @property + def device(self) -> torch.device: + return self.tensor.device + + @staticmethod + def from_tensors( + tensors: Sequence[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 + ) -> "ImageList": + """ + Args: + tensors: a tuple or list of `torch.Tensors`, each of shape (Hi, Wi) or + (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded + to the same shape with `pad_value`. + size_divisibility (int): If `size_divisibility > 0`, add padding to ensure + the common height and width is divisible by `size_divisibility`. + This depends on the model and many models need a divisibility of 32. + pad_value (float): value to pad + + Returns: + an `ImageList`. + """ + assert len(tensors) > 0 + assert isinstance(tensors, (tuple, list)) + for t in tensors: + assert isinstance(t, torch.Tensor), type(t) + assert t.shape[1:-2] == tensors[0].shape[1:-2], t.shape + # per dimension maximum (H, W) or (C_1, ..., C_K, H, W) where K >= 1 among all tensors + max_size = ( + # In tracing mode, x.shape[i] is Tensor, and should not be converted + # to int: this will cause the traced graph to have hard-coded shapes. + # Instead we should make max_size a Tensor that depends on these tensors. + # Using torch.stack twice seems to be the best way to convert + # list[list[ScalarTensor]] to a Tensor + torch.stack( + [ + torch.stack([torch.as_tensor(dim) for dim in size]) + for size in [tuple(img.shape) for img in tensors] + ] + ) + .max(0) + .values + ) + + if size_divisibility > 0: + stride = size_divisibility + # the last two dims are H,W, both subject to divisibility requirement + max_size = torch.cat([max_size[:-2], (max_size[-2:] + (stride - 1)) // stride * stride]) + + image_sizes = [tuple(im.shape[-2:]) for im in tensors] + + if len(tensors) == 1: + # This seems slightly (2%) faster. + # TODO: check whether it's faster for multiple images as well + image_size = image_sizes[0] + padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] + if all(x == 0 for x in padding_size): # https://github.com/pytorch/pytorch/issues/31734 + batched_imgs = tensors[0].unsqueeze(0) + else: + padded = F.pad(tensors[0], padding_size, value=pad_value) + batched_imgs = padded.unsqueeze_(0) + else: + # max_size can be a tensor in tracing mode, therefore use tuple() + batch_shape = (len(tensors),) + tuple(max_size) + batched_imgs = tensors[0].new_full(batch_shape, pad_value) + for img, pad_img in zip(tensors, batched_imgs): + pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) + + return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/instances.py b/preprocess/mhp_extension/detectron2/detectron2/structures/instances.py new file mode 100644 index 0000000000000000000000000000000000000000..373de08c01517c0f78b14d94da7ff702daaf375d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/instances.py @@ -0,0 +1,185 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import itertools +from typing import Any, Dict, List, Tuple, Union +import torch + + +class Instances: + """ + This class represents a list of instances in an image. + It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". + All fields must have the same ``__len__`` which is the number of instances. + + All other (non-field) attributes of this class are considered private: + they must start with '_' and are not modifiable by a user. + + Some basic usage: + + 1. Set/Get a field: + + .. code-block:: python + + instances.gt_boxes = Boxes(...) + print(instances.pred_masks) # a tensor of shape (N, H, W) + print('gt_masks' in instances) + + 2. ``len(instances)`` returns the number of instances + 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields + and returns a new :class:`Instances`. + Typically, ``indices`` is a integer vector of indices, + or a binary mask of length ``num_instances``, + """ + + def __init__(self, image_size: Tuple[int, int], **kwargs: Any): + """ + Args: + image_size (height, width): the spatial size of the image. + kwargs: fields to add to this `Instances`. + """ + self._image_size = image_size + self._fields: Dict[str, Any] = {} + for k, v in kwargs.items(): + self.set(k, v) + + @property + def image_size(self) -> Tuple[int, int]: + """ + Returns: + tuple: height, width + """ + return self._image_size + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name: str) -> Any: + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self._fields[name] + + def set(self, name: str, value: Any) -> None: + """ + Set the field named `name` to `value`. + The length of `value` must be the number of instances, + and must agree with other existing fields in this object. + """ + data_len = len(value) + if len(self._fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self._fields[name] = value + + def has(self, name: str) -> bool: + """ + Returns: + bool: whether the field called `name` exists. + """ + return name in self._fields + + def remove(self, name: str) -> None: + """ + Remove the field called `name`. + """ + del self._fields[name] + + def get(self, name: str) -> Any: + """ + Returns the field called `name`. + """ + return self._fields[name] + + def get_fields(self) -> Dict[str, Any]: + """ + Returns: + dict: a dict which maps names (str) to data of the fields + + Modifying the returned dict will modify this instance. + """ + return self._fields + + # Tensor-like methods + def to(self, device: str) -> "Instances": + """ + Returns: + Instances: all fields are called with a `to(device)`, if the field has this method. + """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "to"): + v = v.to(device) + ret.set(k, v) + return ret + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": + """ + Args: + item: an index-like object and will be used to index all the fields. + + Returns: + If `item` is a string, return the data in the corresponding field. + Otherwise, returns an `Instances` where all fields are indexed by `item`. 
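+        For example, ``instances[instances.scores > 0.9]`` returns a new ``Instances``
+        containing only the detections whose score exceeds 0.9 (illustrative; assumes a
+        ``scores`` field has been set).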
+ """ + if type(item) == int: + if item >= len(self) or item < -len(self): + raise IndexError("Instances index out of range!") + else: + item = slice(item, None, len(self)) + + ret = Instances(self._image_size) + for k, v in self._fields.items(): + ret.set(k, v[item]) + return ret + + def __len__(self) -> int: + for v in self._fields.values(): + return len(v) + raise NotImplementedError("Empty Instances does not support __len__!") + + def __iter__(self): + raise NotImplementedError("`Instances` object is not iterable!") + + @staticmethod + def cat(instance_lists: List["Instances"]) -> "Instances": + """ + Args: + instance_lists (list[Instances]) + + Returns: + Instances + """ + assert all(isinstance(i, Instances) for i in instance_lists) + assert len(instance_lists) > 0 + if len(instance_lists) == 1: + return instance_lists[0] + + image_size = instance_lists[0].image_size + for i in instance_lists[1:]: + assert i.image_size == image_size + ret = Instances(image_size) + for k in instance_lists[0]._fields.keys(): + values = [i.get(k) for i in instance_lists] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = torch.cat(values, dim=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + elif hasattr(type(v0), "cat"): + values = type(v0).cat(values) + else: + raise ValueError("Unsupported type {} for concatenation".format(type(v0))) + ret.set(k, values) + return ret + + def __str__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) + return s + + __repr__ = __str__ diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/keypoints.py b/preprocess/mhp_extension/detectron2/detectron2/structures/keypoints.py new file mode 100644 index 0000000000000000000000000000000000000000..2242815f31dfe88aaabbf4b49f724c999a71912d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/keypoints.py @@ -0,0 +1,209 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +from typing import Any, List, Tuple, Union +import torch + +from detectron2.layers import interpolate + + +class Keypoints: + """ + Stores keypoint annotation data. GT Instances have a `gt_keypoints` property + containing the x,y location and visibility flag of each keypoint. This tensor has shape + (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. + + The visibility flag follows the COCO format and must be one of three integers: + * v=0: not labeled (in which case x=y=0) + * v=1: labeled but not visible + * v=2: labeled and visible + """ + + def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): + """ + Arguments: + keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. + The shape should be (N, K, 3) where N is the number of + instances, and K is the number of keypoints per instance. 
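A short sketch of how `Instances` is typically used (the field names and values are illustrative; the import assumes the vendored `detectron2` package is importable). Any per-instance field of matching length can be attached, and indexing or `cat` applies to every field at once:

```python
import torch
from detectron2.structures import Instances

inst = Instances(image_size=(480, 640))
inst.scores = torch.tensor([0.9, 0.3, 0.7])    # each field must have len() == number of instances
inst.pred_classes = torch.tensor([1, 0, 2])

print(len(inst))                # 3
print(inst.has("scores"))       # True

keep = inst.scores > 0.5        # a boolean mask indexes every field at once
filtered = inst[keep]
print(filtered.pred_classes)    # tensor([1, 2])

merged = Instances.cat([inst, filtered])
print(len(merged))              # 5
```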
+ """ + device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") + keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) + assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape + self.tensor = keypoints + + def __len__(self) -> int: + return self.tensor.size(0) + + def to(self, *args: Any, **kwargs: Any) -> "Keypoints": + return type(self)(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: + """ + Arguments: + boxes: Nx4 tensor, the boxes to draw the keypoints to + + Returns: + heatmaps: + A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: + A tensor of shape (N, K) containing whether each keypoint is in the roi or not. + """ + return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": + """ + Create a new `Keypoints` by indexing on this `Keypoints`. + + The following usage are allowed: + + 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. + 2. `new_kpts = kpts[2:10]`: return a slice of key points. + 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor + with `length = len(kpts)`. Nonzero elements in the vector will be selected. + + Note that the returned Keypoints might share storage with this Keypoints, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Keypoints([self.tensor[item]]) + return Keypoints(self.tensor[item]) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + +# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) +def _keypoints_to_heatmap( + keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. + + Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the + closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the + continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): + d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + + Arguments: + keypoints: tensor of keypoint locations in of shape (N, K, 3). + rois: Nx4 tensor of rois in xyxy format + heatmap_size: integer side length of square heatmap. + + Returns: + heatmaps: A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: A tensor of shape (N, K) containing whether each keypoint is in + the roi or not. 
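A usage sketch for `Keypoints` and its `to_heatmap` encoding (the coordinates, boxes, and `heatmap_size=56` are illustrative values; the import goes through the `detectron2.structures.keypoints` module added above):

```python
import torch
from detectron2.structures.keypoints import Keypoints

# 2 instances with 3 keypoints each, stored as (x, y, visibility) with v in {0, 1, 2}.
kpts = Keypoints(torch.tensor([
    [[10., 20., 2.], [15., 25., 2.], [0., 0., 0.]],
    [[40., 50., 1.], [45., 55., 2.], [48., 60., 2.]],
]))
print(len(kpts))   # 2

# Encode the keypoints into discrete heatmap targets for two RoIs in xyxy format.
boxes = torch.tensor([[0., 0., 32., 32.], [30., 40., 62., 72.]])
heatmaps, valid = kpts.to_heatmap(boxes, heatmap_size=56)
print(heatmaps.shape, valid.shape)   # torch.Size([2, 3]) torch.Size([2, 3])
```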
+ """ + + if rois.numel() == 0: + return rois.new().long(), rois.new().long() + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +@torch.no_grad() +def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: + """ + Extract predicted keypoint locations from heatmaps. + + Args: + maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for + each ROI and each keypoint. + rois (Tensor): (#ROIs, 4). The box of each ROI. + + Returns: + Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to + (x, y, logit, score) for each keypoint. + + When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate, + we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from + Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + """ + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) + heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_rois, num_keypoints = maps.shape[:2] + xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) + + width_corrections = widths / widths_ceil + height_corrections = heights / heights_ceil + + keypoints_idx = torch.arange(num_keypoints, device=maps.device) + + for i in range(num_rois): + outsize = (int(heights_ceil[i]), int(widths_ceil[i])) + roi_map = interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False).squeeze( + 0 + ) # #keypoints x H x W + + # softmax over the spatial region + max_score, _ = roi_map.view(num_keypoints, -1).max(1) + max_score = max_score.view(num_keypoints, 1, 1) + tmp_full_resolution = (roi_map - max_score).exp_() + tmp_pool_resolution = (maps[i] - max_score).exp_() + # Produce scores over the region H x W, but normalize with POOL_H x POOL_W, + # so that the scores of objects of different absolute sizes will be more comparable + roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True) + + w = roi_map.shape[2] + pos = roi_map.view(num_keypoints, -1).argmax(1) + + x_int = pos % w + y_int = (pos - x_int) // w + + assert ( + roi_map_scores[keypoints_idx, y_int, x_int] + == roi_map_scores.view(num_keypoints, -1).max(1)[0] + ).all() + + x = (x_int.float() + 0.5) * width_corrections[i] + y = (y_int.float() + 0.5) * height_corrections[i] + + xy_preds[i, :, 0] = x + offset_x[i] + xy_preds[i, :, 1] = y + offset_y[i] + xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] + xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int] + + return xy_preds diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/masks.py 
b/preprocess/mhp_extension/detectron2/detectron2/structures/masks.py new file mode 100644 index 0000000000000000000000000000000000000000..e363baf3d8cfc4694558fc12bbd2e9d65507b9d9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/masks.py @@ -0,0 +1,424 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import itertools +import numpy as np +from typing import Any, Iterator, List, Union +import pycocotools.mask as mask_utils +import torch + +from detectron2.layers.roi_align import ROIAlign + +from .boxes import Boxes + + +def polygon_area(x, y): + # Using the shoelace formula + # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: + """ + Args: + polygons (list[ndarray]): each array has shape (Nx2,) + height, width (int) + + Returns: + ndarray: a bool mask of shape (height, width) + """ + assert len(polygons) > 0, "COCOAPI does not support empty polygons" + rles = mask_utils.frPyObjects(polygons, height, width) + rle = mask_utils.merge(rles) + return mask_utils.decode(rle).astype(np.bool) + + +def rasterize_polygons_within_box( + polygons: List[np.ndarray], box: np.ndarray, mask_size: int +) -> torch.Tensor: + """ + Rasterize the polygons into a mask image and + crop the mask content in the given box. + The cropped mask is resized to (mask_size, mask_size). + + This function is used when generating training targets for mask head in Mask R-CNN. + Given original ground-truth masks for an image, new ground-truth mask + training targets in the size of `mask_size x mask_size` + must be provided for each predicted box. This function will be called to + produce such targets. + + Args: + polygons (list[ndarray[float]]): a list of polygons, which represents an instance. + box: 4-element numpy array + mask_size (int): + + Returns: + Tensor: BoolTensor of shape (mask_size, mask_size) + """ + # 1. Shift the polygons w.r.t the boxes + w, h = box[2] - box[0], box[3] - box[1] + + polygons = copy.deepcopy(polygons) + for p in polygons: + p[0::2] = p[0::2] - box[0] + p[1::2] = p[1::2] - box[1] + + # 2. Rescale the polygons to the new box size + # max() to avoid division by small number + ratio_h = mask_size / max(h, 0.1) + ratio_w = mask_size / max(w, 0.1) + + if ratio_h == ratio_w: + for p in polygons: + p *= ratio_h + else: + for p in polygons: + p[0::2] *= ratio_w + p[1::2] *= ratio_h + + # 3. Rasterize the polygons with coco api + mask = polygons_to_bitmask(polygons, mask_size, mask_size) + mask = torch.from_numpy(mask) + return mask + + +class BitMasks: + """ + This class stores the segmentation masks for all objects in one image, in + the form of bitmaps. + + Attributes: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + + def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): + """ + Args: + tensor: bool Tensor of N,H,W, representing N instances in the image. 
+ """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) + assert tensor.dim() == 3, tensor.size() + self.image_size = tensor.shape[1:] + self.tensor = tensor + + def to(self, device: str) -> "BitMasks": + return BitMasks(self.tensor.to(device)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": + """ + Returns: + BitMasks: Create a new :class:`BitMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. + 2. `new_masks = masks[2:10]`: return a slice of masks. + 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return BitMasks(self.tensor[item].view(1, -1)) + m = self.tensor[item] + assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( + item, m.shape + ) + return BitMasks(m) + + def __iter__(self) -> torch.Tensor: + yield from self.tensor + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + def __len__(self) -> int: + return self.tensor.shape[0] + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: a BoolTensor which represents + whether each mask is empty (False) or non-empty (True). + """ + return self.tensor.flatten(1).any(dim=1) + + @staticmethod + def from_polygon_masks( + polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int + ) -> "BitMasks": + """ + Args: + polygon_masks (list[list[ndarray]] or PolygonMasks) + height, width (int) + """ + if isinstance(polygon_masks, PolygonMasks): + polygon_masks = polygon_masks.polygons + masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] + return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each bitmask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + It has less reconstruction error compared to rasterization with polygons. + However we observe no difference in accuracy, + but BitMasks requires more memory to store all the masks. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: + A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. 
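A sketch of rasterizing polygon annotations into `BitMasks` via `from_polygon_masks` (the polygon coordinates and the 64x64 canvas are illustrative; it assumes `pycocotools` is installed and a NumPy version that still accepts the `np.bool` alias used by `polygons_to_bitmask` above, since newer NumPy releases removed it):

```python
import numpy as np
from detectron2.structures.masks import BitMasks

# One instance made of a single square polygon in [x0, y0, x1, y1, ...] format.
square = [np.array([10., 10., 50., 10., 50., 50., 10., 50.])]

# Rasterize all polygon instances into an N x H x W boolean tensor.
bit_masks = BitMasks.from_polygon_masks([square], height=64, width=64)
print(bit_masks.tensor.shape)   # torch.Size([1, 64, 64])
print(bit_masks.nonempty())     # tensor([True])
```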
+ """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + device = self.tensor.device + + batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] + rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 + + bit_masks = self.tensor.to(dtype=torch.float32) + rois = rois.to(device=device) + output = ( + ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) + .forward(bit_masks[:, None, :, :], rois) + .squeeze(1) + ) + output = output >= 0.5 + return output + + def get_bounding_boxes(self) -> None: + # not needed now + raise NotImplementedError + + @staticmethod + def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": + """ + Concatenates a list of BitMasks into a single BitMasks + + Arguments: + bitmasks_list (list[BitMasks]) + + Returns: + BitMasks: the concatenated BitMasks + """ + assert isinstance(bitmasks_list, (list, tuple)) + assert len(bitmasks_list) > 0 + assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) + + cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) + return cat_bitmasks + + +class PolygonMasks: + """ + This class stores the segmentation masks for all objects in one image, in the form of polygons. + + Attributes: + polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. + """ + + def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): + """ + Arguments: + polygons (list[list[np.ndarray]]): The first + level of the list correspond to individual instances, + the second level to all the polygons that compose the + instance, and the third level to the polygon coordinates. + The third level array should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + """ + assert isinstance(polygons, list), ( + "Cannot create PolygonMasks: Expect a list of list of polygons per image. " + "Got '{}' instead.".format(type(polygons)) + ) + + def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: + # Use float64 for higher precision, because why not? + # Always put polygons on CPU (self.to is a no-op) since they + # are supposed to be small tensors. + # May need to change this assumption if GPU placement becomes useful + if isinstance(t, torch.Tensor): + t = t.cpu().numpy() + return np.asarray(t).astype("float64") + + def process_polygons( + polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] + ) -> List[np.ndarray]: + assert isinstance(polygons_per_instance, list), ( + "Cannot create polygons: Expect a list of polygons per instance. " + "Got '{}' instead.".format(type(polygons_per_instance)) + ) + # transform the polygon to a tensor + polygons_per_instance = [_make_array(p) for p in polygons_per_instance] + for polygon in polygons_per_instance: + assert len(polygon) % 2 == 0 and len(polygon) >= 6 + return polygons_per_instance + + self.polygons: List[List[np.ndarray]] = [ + process_polygons(polygons_per_instance) for polygons_per_instance in polygons + ] + + def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": + return self + + @property + def device(self) -> torch.device: + return torch.device("cpu") + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around polygon masks. 
+ """ + boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) + for idx, polygons_per_instance in enumerate(self.polygons): + minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) + maxxy = torch.zeros(2, dtype=torch.float32) + for polygon in polygons_per_instance: + coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) + minxy = torch.min(minxy, torch.min(coords, dim=0).values) + maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) + boxes[idx, :2] = minxy + boxes[idx, 2:] = maxxy + return Boxes(boxes) + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: + a BoolTensor which represents whether each mask is empty (False) or not (True). + """ + keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] + return torch.from_numpy(np.asarray(keep, dtype=np.bool)) + + def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": + """ + Support indexing over the instances and return a `PolygonMasks` object. + `item` can be: + + 1. An integer. It will return an object with only one instance. + 2. A slice. It will return an object with the selected instances. + 3. A list[int]. It will return an object with the selected instances, + correpsonding to the indices in the list. + 4. A vector mask of type BoolTensor, whose length is num_instances. + It will return an object with the instances whose mask is nonzero. + """ + if isinstance(item, int): + selected_polygons = [self.polygons[item]] + elif isinstance(item, slice): + selected_polygons = self.polygons[item] + elif isinstance(item, list): + selected_polygons = [self.polygons[i] for i in item] + elif isinstance(item, torch.Tensor): + # Polygons is a list, so we have to move the indices back to CPU. + if item.dtype == torch.bool: + assert item.dim() == 1, item.shape + item = item.nonzero().squeeze(1).cpu().numpy().tolist() + elif item.dtype in [torch.int32, torch.int64]: + item = item.cpu().numpy().tolist() + else: + raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) + selected_polygons = [self.polygons[i] for i in item] + return PolygonMasks(selected_polygons) + + def __iter__(self) -> Iterator[List[np.ndarray]]: + """ + Yields: + list[ndarray]: the polygons for one instance. + Each Tensor is a float64 vector representing a polygon. + """ + return iter(self.polygons) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.polygons)) + return s + + def __len__(self) -> int: + return len(self.polygons) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each mask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. 
+ """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + + device = boxes.device + # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise + # (several small tensors for representing a single instance mask) + boxes = boxes.to(torch.device("cpu")) + + results = [ + rasterize_polygons_within_box(poly, box.numpy(), mask_size) + for poly, box in zip(self.polygons, boxes) + ] + """ + poly: list[list[float]], the polygons for one instance + box: a tensor of shape (4,) + """ + if len(results) == 0: + return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) + return torch.stack(results, dim=0).to(device=device) + + def area(self): + """ + Computes area of the mask. + Only works with Polygons, using the shoelace formula: + https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + + Returns: + Tensor: a vector, area for each instance + """ + + area = [] + for polygons_per_instance in self.polygons: + area_per_instance = 0 + for p in polygons_per_instance: + area_per_instance += polygon_area(p[0::2], p[1::2]) + area.append(area_per_instance) + + return torch.tensor(area) + + @staticmethod + def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": + """ + Concatenates a list of PolygonMasks into a single PolygonMasks + + Arguments: + polymasks_list (list[PolygonMasks]) + + Returns: + PolygonMasks: the concatenated PolygonMasks + """ + assert isinstance(polymasks_list, (list, tuple)) + assert len(polymasks_list) > 0 + assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) + + cat_polymasks = type(polymasks_list[0])( + list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) + ) + return cat_polymasks diff --git a/preprocess/mhp_extension/detectron2/detectron2/structures/rotated_boxes.py b/preprocess/mhp_extension/detectron2/detectron2/structures/rotated_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..823cfb62a13d0ff060099d1b930bc900a4ca009b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/structures/rotated_boxes.py @@ -0,0 +1,481 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +from typing import Iterator, Union +import torch + +from detectron2.layers.rotated_boxes import pairwise_iou_rotated + +from .boxes import Boxes + + +class RotatedBoxes(Boxes): + """ + This structure stores a list of rotated boxes as a Nx5 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx5 matrix. Each row is + (x_center, y_center, width, height, angle), + in which angle is represented in degrees. + While there's no strict range restriction for it, + the recommended principal range is between [-180, 180) degrees. + + Assume we have a horizontal box B = (x_center, y_center, width, height), + where width is along the x-axis and height is along the y-axis. + The rotated box B_rot (x_center, y_center, width, height, angle) + can be seen as: + + 1. When angle == 0: + B_rot == B + 2. When angle > 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW; + 3. When angle < 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW. 
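A usage sketch for `PolygonMasks`, showing tight boxes, shoelace areas, and the `crop_and_resize` path used to build Mask R-CNN training targets (the coordinates and `mask_size=28` are illustrative; the same `np.bool`/`pycocotools` caveat as above applies when the polygons are rasterized):

```python
import numpy as np
from detectron2.structures.masks import PolygonMasks

# Two instances, each made of a single polygon in [x0, y0, x1, y1, ...] format.
polys = [
    [np.array([10., 10., 50., 10., 50., 50., 10., 50.])],   # 40 x 40 square
    [np.array([60., 60., 80., 60., 80., 90., 60., 90.])],   # 20 x 30 rectangle
]
masks = PolygonMasks(polys)

print(masks.get_bounding_boxes().tensor)   # tight xyxy boxes, one row per instance
print(masks.area())                        # tensor([1600., 600.]) via the shoelace formula

# Rasterize each instance inside its box into a fixed-size boolean training target.
boxes = masks.get_bounding_boxes().tensor
targets = masks.crop_and_resize(boxes, mask_size=28)
print(targets.shape)                       # torch.Size([2, 28, 28])
```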
+ + Mathematically, since the right-handed coordinate system for image space + is (y, x), where y is top->down and x is left->right, the 4 vertices of the + rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from + the vertices of the horizontal rectangle (y_i, x_i) (i = 1, 2, 3, 4) + in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians, + (y_c, x_c) is the center of the rectangle): + + .. math:: + + yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c, + + xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c, + + which is the standard rigid-body rotation transformation. + + Intuitively, the angle is + (1) the rotation angle from y-axis in image space + to the height vector (top->down in the box's local coordinate system) + of the box in CCW, and + (2) the rotation angle from x-axis in image space + to the width vector (left->right in the box's local coordinate system) + of the box in CCW. + + More intuitively, consider the following horizontal box ABCD represented + in (x1, y1, x2, y2): (3, 2, 7, 4), + covering the [3, 7] x [2, 4] region of the continuous coordinate system + which looks like this: + + .. code:: none + + O--------> x + | + | A---B + | | | + | D---C + | + v y + + Note that each capital letter represents one 0-dimensional geometric point + instead of a 'square pixel' here. + + In the example above, using (x, y) to represent a point we have: + + .. math:: + + O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) + + We name vector AB = vector DC as the width vector in box's local coordinate system, and + vector AD = vector BC as the height vector in box's local coordinate system. Initially, + when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis + in the image space, respectively. + + For better illustration, we denote the center of the box as E, + + .. code:: none + + O--------> x + | + | A---B + | | E | + | D---C + | + v y + + where the center E = ((3+7)/2, (2+4)/2) = (5, 3). + + Also, + + .. math:: + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Therefore, the corresponding representation for the same shape in rotated box in + (x_center, y_center, width, height, angle) format is: + + (5, 3, 4, 2, 0), + + Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees + CCW (counter-clockwise) by definition. It looks like this: + + .. code:: none + + O--------> x + | B-C + | | | + | |E| + | | | + | A-D + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CCW with regard to E: + A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) + + Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to + vector AD or vector BC (the top->down height vector in box's local coordinate system), + or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right + width vector in box's local coordinate system). + + .. math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) + by definition? It looks like this: + + .. code:: none + + O--------> x + | D-A + | | | + | |E| + | | | + | C-B + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CW with regard to E: + A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) + + .. 
math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU + will be 1. However, these two will generate different RoI Pooling results and + should not be treated as an identical box. + + On the other hand, it's easy to see that (X, Y, W, H, A) is identical to + (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be + identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is + equivalent to rotating the same shape 90 degrees CW. + + We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): + + .. code:: none + + O--------> x + | + | C---D + | | E | + | B---A + | + v y + + .. math:: + + A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Finally, this is a very inaccurate (heavily quantized) illustration of + how (5, 3, 4, 2, 60) looks like in case anyone wonders: + + .. code:: none + + O--------> x + | B\ + | / C + | /E / + | A / + | `D + v y + + It's still a rectangle with center of (5, 3), width of 4 and height of 2, + but its angle (and thus orientation) is somewhere between + (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() + + self.tensor = tensor + + def clone(self) -> "RotatedBoxes": + """ + Clone the RotatedBoxes. + + Returns: + RotatedBoxes + """ + return RotatedBoxes(self.tensor.clone()) + + def to(self, device: str) -> "RotatedBoxes": + return RotatedBoxes(self.tensor.to(device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = box[:, 2] * box[:, 3] + return area + + def normalize_angles(self) -> None: + """ + Restrict angles to the range of [-180, 180) degrees + """ + self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0 + + def clip(self, box_size: Boxes.BoxSizeType, clip_angle_threshold: float = 1.0) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + For RRPN: + Only clip boxes that are almost horizontal with a tolerance of + clip_angle_threshold to maintain backward compatibility. + + Rotated boxes beyond this threshold are not clipped for two reasons: + + 1. There are potentially multiple ways to clip a rotated box to make it + fit within the image. + 2. It's tricky to make the entire rectangular box fit within the image + and still be able to not leave out pixels of interest. + + Therefore we rely on ops like RoIAlignRotated to safely handle this. + + Args: + box_size (height, width): The clipping box's size. + clip_angle_threshold: + Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), + we do the clipping as horizontal boxes. 
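A small sketch of the `(x_center, y_center, width, height, angle)` convention described in the docstring above, using the same 4x2 box centred at (5, 3) (the import goes through the `detectron2.structures.rotated_boxes` module):

```python
import torch
from detectron2.structures.rotated_boxes import RotatedBoxes

boxes = RotatedBoxes(torch.tensor([
    [5., 3., 4., 2.,   0.],   # axis-aligned
    [5., 3., 4., 2.,  90.],   # rotated 90 degrees CCW
    [5., 3., 4., 2., 270.],   # covers the same region as an angle of -90 degrees
]))

print(boxes.area())        # tensor([8., 8., 8.])

boxes.normalize_angles()   # map angles into [-180, 180)
print(boxes.tensor[:, 4])  # tensor([  0.,  90., -90.])
```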
+ """ + h, w = box_size + + # normalize angles to be within (-180, 180] degrees + self.normalize_angles() + + idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] + + # convert to (x1, y1, x2, y2) + x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 + y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 + x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 + y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 + + # clip + x1.clamp_(min=0, max=w) + y1.clamp_(min=0, max=h) + x2.clamp_(min=0, max=w) + y2.clamp_(min=0, max=h) + + # convert back to (xc, yc, w, h) + self.tensor[idx, 0] = (x1 + x2) / 2.0 + self.tensor[idx, 1] = (y1 + y2) / 2.0 + # make sure widths and heights do not increase due to numerical errors + self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) + self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: a binary vector which represents + whether each box is empty (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] + heights = box[:, 3] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "RotatedBoxes": + """ + Returns: + RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned RotatedBoxes might share storage with this RotatedBoxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return RotatedBoxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( + item + ) + return RotatedBoxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "RotatedBoxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Boxes.BoxSizeType, boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box covering + [0, width] x [0, height] + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + For RRPN, it might not be necessary to call this function since it's common + for rotated box to extend to outside of the image boundaries + (the clip function only clips the near-horizontal boxes) + + Returns: + a binary vector, indicating whether each box is inside the reference box. 
+ """ + height, width = box_size + + cnt_x = self.tensor[..., 0] + cnt_y = self.tensor[..., 1] + half_w = self.tensor[..., 2] / 2.0 + half_h = self.tensor[..., 3] / 2.0 + a = self.tensor[..., 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + max_rect_dx = c * half_w + s * half_h + max_rect_dy = c * half_h + s * half_w + + inds_inside = ( + (cnt_x - max_rect_dx >= -boundary_threshold) + & (cnt_y - max_rect_dy >= -boundary_threshold) + & (cnt_x + max_rect_dx < width + boundary_threshold) + & (cnt_y + max_rect_dy < height + boundary_threshold) + ) + + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return self.tensor[:, :2] + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the rotated box with horizontal and vertical scaling factors + Note: when scale_factor_x != scale_factor_y, + the rotated box does not preserve the rectangular shape when the angle + is not a multiple of 90 degrees under resize transformation. + Instead, the shape is a parallelogram (that has skew) + Here we make an approximation by fitting a rotated rectangle to the parallelogram. + """ + self.tensor[:, 0] *= scale_x + self.tensor[:, 1] *= scale_y + theta = self.tensor[:, 4] * math.pi / 180.0 + c = torch.cos(theta) + s = torch.sin(theta) + + # In image space, y is top->down and x is left->right + # Consider the local coordintate system for the rotated box, + # where the box center is located at (0, 0), and the four vertices ABCD are + # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) + # the midpoint of the left edge AD of the rotated box E is: + # E = (A+D)/2 = (-w / 2, 0) + # the midpoint of the top edge AB of the rotated box F is: + # F(0, -h / 2) + # To get the old coordinates in the global system, apply the rotation transformation + # (Note: the right-handed coordinate system for image space is yOx): + # (old_x, old_y) = (s * y + c * x, c * y - s * x) + # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) + # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) + # After applying the scaling factor (sfx, sfy): + # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) + # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) + # The new width after scaling tranformation becomes: + + # w(new) = |E(new) - O| * 2 + # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 + # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w + # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y + self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2) + + # h(new) = |F(new) - O| * 2 + # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 + # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h + # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x + self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2) + + # The angle is the rotation angle from y-axis in image space to the height + # vector (top->down in the box's local coordinate system) of the box in CCW. 
+ # + # angle(new) = angle_yOx(O - F(new)) + # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) + # = atan2(sfx * s * h / 2, sfy * c * h / 2) + # = atan2(sfx * s, sfy * c) + # + # For example, + # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) + self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi + + @property + def device(self) -> str: + return self.tensor.device + + def __iter__(self) -> Iterator[torch.Tensor]: + """ + Yield a box as a Tensor of shape (5,) at a time. + """ + yield from self.tensor + + +def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None: + """ + Given two lists of rotated boxes of size N and M, + compute the IoU (intersection over union) + between __all__ N x M pairs of boxes. + The box order must be (x_center, y_center, width, height, angle). + + Args: + boxes1, boxes2 (RotatedBoxes): + two `RotatedBoxes`. Contains N & M rotated boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + + return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/README.md b/preprocess/mhp_extension/detectron2/detectron2/utils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9765b24a730b77556104187ac3ef5439ab0859fd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/README.md @@ -0,0 +1,5 @@ +# Utility functions + +This folder contain utility functions that are not used in the +core library, but are useful for building models or training +code using the config system. diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/__init__.py b/preprocess/mhp_extension/detectron2/detectron2/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..168f9979a4623806934b0ff1102ac166704e7dec --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/analysis.py b/preprocess/mhp_extension/detectron2/detectron2/utils/analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..c48e376c242f57f480280538ae770520d14110f8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/analysis.py @@ -0,0 +1,164 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# -*- coding: utf-8 -*- + +import logging +import typing +import torch +from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table +from torch import nn + +from detectron2.structures import BitMasks, Boxes, ImageList, Instances + +from .logger import log_first_n + +__all__ = [ + "activation_count_operators", + "flop_count_operators", + "parameter_count_table", + "parameter_count", +] + +FLOPS_MODE = "flops" +ACTIVATIONS_MODE = "activations" + + +# some extra ops to ignore from counting. 
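The `RotatedBoxes` docstring above notes that (5, 3, 4, 2, 90) and (5, 3, 4, 2, -90) cover exactly the same region. A sketch checking that with the rotated-box `pairwise_iou` defined above; it assumes detectron2's compiled extension (which provides `pairwise_iou_rotated`) has been built:

```python
import torch
from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou

a = RotatedBoxes(torch.tensor([[5., 3., 4., 2.,  90.]]))
b = RotatedBoxes(torch.tensor([[5., 3., 4., 2., -90.]]))

# The two boxes occupy the same region, so their IoU is (numerically close to) 1.
print(pairwise_iou(a, b))   # tensor([[1.]])
```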
+_IGNORED_OPS = [ + "aten::add", + "aten::add_", + "aten::batch_norm", + "aten::constant_pad_nd", + "aten::div", + "aten::div_", + "aten::exp", + "aten::log2", + "aten::max_pool2d", + "aten::meshgrid", + "aten::mul", + "aten::mul_", + "aten::nonzero_numpy", + "aten::relu", + "aten::relu_", + "aten::rsub", + "aten::sigmoid", + "aten::sigmoid_", + "aten::softmax", + "aten::sort", + "aten::sqrt", + "aten::sub", + "aten::upsample_nearest2d", + "prim::PythonOp", + "torchvision::nms", +] + + +def flop_count_operators( + model: nn.Module, inputs: list, **kwargs +) -> typing.DefaultDict[str, float]: + """ + Implement operator-level flops counting using jit. + This is a wrapper of fvcore.nn.flop_count, that supports standard detection models + in detectron2. + + Note: + The function runs the input through the model to compute flops. + The flops of a detection model is often input-dependent, for example, + the flops of box & mask head depends on the number of proposals & + the number of detected objects. + Therefore, the flops counting using a single input may not accurately + reflect the computation cost of a model. + + Args: + model: a detectron2 model that takes `list[dict]` as input. + inputs (list[dict]): inputs to model, in detectron2's standard format. + """ + return _wrapper_count_operators(model=model, inputs=inputs, mode=FLOPS_MODE, **kwargs) + + +def activation_count_operators( + model: nn.Module, inputs: list, **kwargs +) -> typing.DefaultDict[str, float]: + """ + Implement operator-level activations counting using jit. + This is a wrapper of fvcore.nn.activation_count, that supports standard detection models + in detectron2. + + Note: + The function runs the input through the model to compute activations. + The activations of a detection model is often input-dependent, for example, + the activations of box & mask head depends on the number of proposals & + the number of detected objects. + + Args: + model: a detectron2 model that takes `list[dict]` as input. + inputs (list[dict]): inputs to model, in detectron2's standard format. 
+ """ + return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs) + + +def _flatten_to_tuple(outputs): + result = [] + if isinstance(outputs, torch.Tensor): + result.append(outputs) + elif isinstance(outputs, (list, tuple)): + for v in outputs: + result.extend(_flatten_to_tuple(v)) + elif isinstance(outputs, dict): + for _, v in outputs.items(): + result.extend(_flatten_to_tuple(v)) + elif isinstance(outputs, Instances): + result.extend(_flatten_to_tuple(outputs.get_fields())) + elif isinstance(outputs, (Boxes, BitMasks, ImageList)): + result.append(outputs.tensor) + else: + log_first_n( + logging.WARN, + f"Output of type {type(outputs)} not included in flops/activations count.", + n=10, + ) + return tuple(result) + + +def _wrapper_count_operators( + model: nn.Module, inputs: list, mode: str, **kwargs +) -> typing.DefaultDict[str, float]: + + # ignore some ops + supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS} + supported_ops.update(kwargs.pop("supported_ops", {})) + kwargs["supported_ops"] = supported_ops + + assert len(inputs) == 1, "Please use batch size=1" + tensor_input = inputs[0]["image"] + + class WrapModel(nn.Module): + def __init__(self, model): + super().__init__() + if isinstance( + model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel) + ): + self.model = model.module + else: + self.model = model + + def forward(self, image): + # jit requires the input/output to be Tensors + inputs = [{"image": image}] + outputs = self.model.forward(inputs) + # Only the subgraph that computes the returned tuple of tensor will be + # counted. So we flatten everything we found to tuple of tensors. + return _flatten_to_tuple(outputs) + + old_train = model.training + with torch.no_grad(): + if mode == FLOPS_MODE: + ret = flop_count(WrapModel(model).train(False), (tensor_input,), **kwargs) + elif mode == ACTIVATIONS_MODE: + ret = activation_count(WrapModel(model).train(False), (tensor_input,), **kwargs) + else: + raise NotImplementedError("Count for mode {} is not supported yet.".format(mode)) + # compatible with change in fvcore + if isinstance(ret, tuple): + ret = ret[0] + model.train(old_train) + return ret diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/collect_env.py b/preprocess/mhp_extension/detectron2/detectron2/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..c25b99cb0ab626cc4f4dabca5eb81f710011f2e3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/collect_env.py @@ -0,0 +1,160 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
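A sketch of how `flop_count_operators` is meant to be called (the 3 x 800 x 1333 input is illustrative, and `model` is assumed to be a detectron2-style detector that accepts the standard `list[dict]` inputs; note that the wrapper above asserts a batch size of exactly one):

```python
import torch
from detectron2.utils.analysis import flop_count_operators

def report_flops(model):
    """Print per-operator flop counts for a single synthetic input."""
    inputs = [{"image": torch.rand(3, 800, 1333)}]   # exactly one image, as the wrapper requires
    flops = flop_count_operators(model, inputs)      # defaultdict: operator name -> count (GFlops, per fvcore)
    for op, count in sorted(flops.items()):
        print(f"{op}: {count:.2f}")
```

Because the flops of the box and mask heads depend on how many proposals and detections the input produces, the numbers should be read as a per-input estimate rather than a fixed property of the model.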
All Rights Reserved +import importlib +import numpy as np +import os +import re +import subprocess +import sys +from collections import defaultdict +import PIL +import torch +import torchvision +from tabulate import tabulate + +__all__ = ["collect_env_info"] + + +def collect_torch_env(): + try: + import torch.__config__ + + return torch.__config__.show() + except ImportError: + # compatible with older versions of pytorch + from torch.utils.collect_env import get_pretty_env_info + + return get_pretty_env_info() + + +def get_env_module(): + var_name = "DETECTRON2_ENV_MODULE" + return var_name, os.environ.get(var_name, "") + + +def detect_compute_compatibility(CUDA_HOME, so_file): + try: + cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") + if os.path.isfile(cuobjdump): + output = subprocess.check_output( + "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True + ) + output = output.decode("utf-8").strip().split("\n") + sm = [] + for line in output: + line = re.findall(r"\.sm_[0-9]*\.", line)[0] + sm.append(line.strip(".")) + sm = sorted(set(sm)) + return ", ".join(sm) + else: + return so_file + "; cannot find cuobjdump" + except Exception: + # unhandled failure + return so_file + + +def collect_env_info(): + has_cuda = torch.cuda.is_available() + # NOTE: the use of CUDA_HOME requires the CUDA build deps, though in + # theory detectron2 should be made runnable with only the CUDA runtime + from torch.utils.cpp_extension import CUDA_HOME + + data = [] + data.append(("sys.platform", sys.platform)) + data.append(("Python", sys.version.replace("\n", ""))) + data.append(("numpy", np.__version__)) + + try: + import detectron2 # noqa + + data.append( + ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) + ) + except ImportError: + data.append(("detectron2", "failed to import")) + else: + try: + from detectron2 import _C + except ImportError: + data.append(("detectron2._C", "failed to import")) + else: + data.append(("detectron2 compiler", _C.get_compiler_version())) + data.append(("detectron2 CUDA compiler", _C.get_cuda_version())) + if has_cuda: + data.append( + ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) + ) + + data.append(get_env_module()) + data.append(("PyTorch", torch.__version__ + " @" + os.path.dirname(torch.__file__))) + data.append(("PyTorch debug build", torch.version.debug)) + + data.append(("CUDA available", has_cuda)) + if has_cuda: + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + devices[torch.cuda.get_device_name(k)].append(str(k)) + for name, devids in devices.items(): + data.append(("GPU " + ",".join(devids), name)) + + from torch.utils.cpp_extension import CUDA_HOME + + data.append(("CUDA_HOME", str(CUDA_HOME))) + + if CUDA_HOME is not None and os.path.isdir(CUDA_HOME): + try: + nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") + nvcc = subprocess.check_output("'{}' -V | tail -n1".format(nvcc), shell=True) + nvcc = nvcc.decode("utf-8").strip() + except subprocess.SubprocessError: + nvcc = "Not Available" + data.append(("NVCC", nvcc)) + + cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) + if cuda_arch_list: + data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) + data.append(("Pillow", PIL.__version__)) + + try: + data.append( + ( + "torchvision", + str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), + ) + ) + if has_cuda: + try: + torchvision_C = importlib.util.find_spec("torchvision._C").origin + msg = 
detect_compute_compatibility(CUDA_HOME, torchvision_C) + data.append(("torchvision arch flags", msg)) + except ImportError: + data.append(("torchvision._C", "failed to find")) + except AttributeError: + data.append(("torchvision", "unknown")) + + try: + import fvcore + + data.append(("fvcore", fvcore.__version__)) + except ImportError: + pass + + try: + import cv2 + + data.append(("cv2", cv2.__version__)) + except ImportError: + pass + env_str = tabulate(data) + "\n" + env_str += collect_torch_env() + return env_str + + +if __name__ == "__main__": + try: + import detectron2 # noqa + except ImportError: + print(collect_env_info()) + else: + from detectron2.utils.collect_env import collect_env_info + + print(collect_env_info()) diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/colormap.py b/preprocess/mhp_extension/detectron2/detectron2/utils/colormap.py new file mode 100644 index 0000000000000000000000000000000000000000..1bf1455e4ce9e077961143c8d734a7298d28476d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/colormap.py @@ -0,0 +1,140 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +""" +An awesome colormap for really neat visualizations. +Copied from Detectron, and removed gray colors. +""" + +import numpy as np + +__all__ = ["colormap", "random_color"] + +# fmt: off +# RGB: +_COLORS = np.array( + [ + 0.000, 0.447, 0.741, + 0.850, 0.325, 0.098, + 0.929, 0.694, 0.125, + 0.494, 0.184, 0.556, + 0.466, 0.674, 0.188, + 0.301, 0.745, 0.933, + 0.635, 0.078, 0.184, + 0.300, 0.300, 0.300, + 0.600, 0.600, 0.600, + 1.000, 0.000, 0.000, + 1.000, 0.500, 0.000, + 0.749, 0.749, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 1.000, + 0.667, 0.000, 1.000, + 0.333, 0.333, 0.000, + 0.333, 0.667, 0.000, + 0.333, 1.000, 0.000, + 0.667, 0.333, 0.000, + 0.667, 0.667, 0.000, + 0.667, 1.000, 0.000, + 1.000, 0.333, 0.000, + 1.000, 0.667, 0.000, + 1.000, 1.000, 0.000, + 0.000, 0.333, 0.500, + 0.000, 0.667, 0.500, + 0.000, 1.000, 0.500, + 0.333, 0.000, 0.500, + 0.333, 0.333, 0.500, + 0.333, 0.667, 0.500, + 0.333, 1.000, 0.500, + 0.667, 0.000, 0.500, + 0.667, 0.333, 0.500, + 0.667, 0.667, 0.500, + 0.667, 1.000, 0.500, + 1.000, 0.000, 0.500, + 1.000, 0.333, 0.500, + 1.000, 0.667, 0.500, + 1.000, 1.000, 0.500, + 0.000, 0.333, 1.000, + 0.000, 0.667, 1.000, + 0.000, 1.000, 1.000, + 0.333, 0.000, 1.000, + 0.333, 0.333, 1.000, + 0.333, 0.667, 1.000, + 0.333, 1.000, 1.000, + 0.667, 0.000, 1.000, + 0.667, 0.333, 1.000, + 0.667, 0.667, 1.000, + 0.667, 1.000, 1.000, + 1.000, 0.000, 1.000, + 1.000, 0.333, 1.000, + 1.000, 0.667, 1.000, + 0.333, 0.000, 0.000, + 0.500, 0.000, 0.000, + 0.667, 0.000, 0.000, + 0.833, 0.000, 0.000, + 1.000, 0.000, 0.000, + 0.000, 0.167, 0.000, + 0.000, 0.333, 0.000, + 0.000, 0.500, 0.000, + 0.000, 0.667, 0.000, + 0.000, 0.833, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 0.167, + 0.000, 0.000, 0.333, + 0.000, 0.000, 0.500, + 0.000, 0.000, 0.667, + 0.000, 0.000, 0.833, + 0.000, 0.000, 1.000, + 0.000, 0.000, 0.000, + 0.143, 0.143, 0.143, + 0.857, 0.857, 0.857, + 1.000, 1.000, 1.000 + ] +).astype(np.float32).reshape(-1, 3) +# fmt: on + + +def colormap(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. 
+ maximum (int): either 255 or 1 + + Returns: + ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] + """ + assert maximum in [255, 1], maximum + c = _COLORS * maximum + if not rgb: + c = c[:, ::-1] + return c + + +def random_color(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a vector of 3 numbers + """ + idx = np.random.randint(0, len(_COLORS)) + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + + +if __name__ == "__main__": + import cv2 + + size = 100 + H, W = 10, 10 + canvas = np.random.rand(H * size, W * size, 3).astype("float32") + for h in range(H): + for w in range(W): + idx = h * W + w + if idx >= len(_COLORS): + break + canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] + cv2.imshow("a", canvas) + cv2.waitKey(0) diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/comm.py b/preprocess/mhp_extension/detectron2/detectron2/utils/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..8cc7b3dac5a45db87fa91ac86fce50805ecf1bad --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/comm.py @@ -0,0 +1,263 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. +""" + +import functools +import logging +import numpy as np +import pickle +import torch +import torch.distributed as dist + +_LOCAL_PROCESS_GROUP = None +""" +A torch process group which only includes processes that on the same machine as the current process. +This variable is set when processes are spawned by `launch()` in "engine/launch.py". +""" + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. 
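A tiny sketch of the colormap helpers above (the argument values are illustrative):

```python
from detectron2.utils.colormap import colormap, random_color

palette = colormap(rgb=True, maximum=1)      # (num_colors, 3) float32 array of RGB values in [0, 1]
print(palette.shape, palette.dtype)

color = random_color(rgb=True, maximum=255)  # a single RGB color in [0, 255], e.g. for a new instance
print(color)
```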
+ """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def _serialize_to_tensor(data, group): + backend = dist.get_backend(group) + assert backend in ["gloo", "nccl"] + device = torch.device("cpu" if backend == "gloo" else "cuda") + + buffer = pickle.dumps(data) + if len(buffer) > 1024 ** 3: + logger = logging.getLogger(__name__) + logger.warning( + "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( + get_rank(), len(buffer) / (1024 ** 3), device + ) + ) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device=device) + return tensor + + +def _pad_to_largest_tensor(tensor, group): + """ + Returns: + list[int]: size of the tensor, on each rank + Tensor: padded tensor that has the max size + """ + world_size = dist.get_world_size(group=group) + assert ( + world_size >= 1 + ), "comm.gather/all_gather must be called from ranks within the given group!" + local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) + size_list = [ + torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size) + ] + dist.all_gather(size_list, local_size, group=group) + size_list = [int(size.item()) for size in size_list] + + max_size = max(size_list) + + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + if local_size != max_size: + padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device) + tensor = torch.cat((tensor, padding), dim=0) + return size_list, tensor + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return [data] + + tensor = _serialize_to_tensor(data, group) + + size_list, tensor = _pad_to_largest_tensor(tensor, group) + max_size = max(size_list) + + # receiving Tensor from all ranks + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.all_gather(tensor_list, tensor, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. 
+ """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group=group) == 1: + return [data] + rank = dist.get_rank(group=group) + + tensor = _serialize_to_tensor(data, group) + size_list, tensor = _pad_to_largest_tensor(tensor, group) + + # receiving Tensor from all ranks + if rank == dst: + max_size = max(size_list) + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.gather(tensor, tensor_list, dst=dst, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + return data_list + else: + dist.gather(tensor, [], dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. + + All workers must call this function, otherwise it will deadlock. + """ + ints = np.random.randint(2 ** 31) + all_ints = all_gather(ints) + return all_ints[0] + + +def reduce_dict(input_dict, average=True): + """ + Reduce the values in the dictionary from all processes so that process with rank + 0 has the reduced results. + + Args: + input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. + average (bool): whether to do average or sum + + Returns: + a dict with the same keys as input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/env.py b/preprocess/mhp_extension/detectron2/detectron2/utils/env.py new file mode 100644 index 0000000000000000000000000000000000000000..6769cae4cfb71ae05c605cb9e30eb12ee58c6ee7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/env.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import importlib +import importlib.util +import logging +import numpy as np +import os +import random +import sys +from datetime import datetime +import torch + +__all__ = ["seed_all_rng"] + + +def seed_all_rng(seed=None): + """ + Set the random seed for the RNG in torch, numpy and python. + + Args: + seed (int): if None, will use a strong random seed. 
+ """ + if seed is None: + seed = ( + os.getpid() + + int(datetime.now().strftime("%S%f")) + + int.from_bytes(os.urandom(2), "big") + ) + logger = logging.getLogger(__name__) + logger.info("Using a generated random seed {}".format(seed)) + np.random.seed(seed) + torch.set_rng_state(torch.manual_seed(seed).get_state()) + random.seed(seed) + + +# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path +def _import_file(module_name, file_path, make_importable=False): + spec = importlib.util.spec_from_file_location(module_name, file_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + if make_importable: + sys.modules[module_name] = module + return module + + +def _configure_libraries(): + """ + Configurations for some libraries. + """ + # An environment option to disable `import cv2` globally, + # in case it leads to negative performance impact + disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) + if disable_cv2: + sys.modules["cv2"] = None + else: + # Disable opencl in opencv since its interaction with cuda often has negative effects + # This envvar is supported after OpenCV 3.4.0 + os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" + try: + import cv2 + + if int(cv2.__version__.split(".")[0]) >= 3: + cv2.ocl.setUseOpenCL(False) + except ImportError: + pass + + def get_version(module, digit=2): + return tuple(map(int, module.__version__.split(".")[:digit])) + + # fmt: off + assert get_version(torch) >= (1, 4), "Requires torch>=1.4" + import fvcore + assert get_version(fvcore, 3) >= (0, 1, 1), "Requires fvcore>=0.1.1" + import yaml + assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" + # fmt: on + + +_ENV_SETUP_DONE = False + + +def setup_environment(): + """Perform environment setup work. The default setup is a no-op, but this + function allows the user to specify a Python source file or a module in + the $DETECTRON2_ENV_MODULE environment variable, that performs + custom setup work that may be necessary to their computing environment. + """ + global _ENV_SETUP_DONE + if _ENV_SETUP_DONE: + return + _ENV_SETUP_DONE = True + + _configure_libraries() + + custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE") + + if custom_module_path: + setup_custom_environment(custom_module_path) + else: + # The default setup is a no-op + pass + + +def setup_custom_environment(custom_module): + """ + Load custom environment setup by importing a Python source file or a + module, and run the setup function. + """ + if custom_module.endswith(".py"): + module = _import_file("detectron2.utils.env.custom_module", custom_module) + else: + module = importlib.import_module(custom_module) + assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( + "Custom environment module defined in {} does not have the " + "required callable attribute 'setup_environment'." + ).format(custom_module) + module.setup_environment() diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/events.py b/preprocess/mhp_extension/detectron2/detectron2/utils/events.py new file mode 100644 index 0000000000000000000000000000000000000000..a3c57edb05016d2df041d756f59e90dfabddd718 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/events.py @@ -0,0 +1,432 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import datetime +import json +import logging +import os +import time +from collections import defaultdict +from contextlib import contextmanager +import torch +from fvcore.common.file_io import PathManager +from fvcore.common.history_buffer import HistoryBuffer + +_CURRENT_STORAGE_STACK = [] + + +def get_event_storage(): + """ + Returns: + The :class:`EventStorage` object that's currently being used. + Throws an error if no :class:`EventStorage` is currently enabled. + """ + assert len( + _CURRENT_STORAGE_STACK + ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" + return _CURRENT_STORAGE_STACK[-1] + + +class EventWriter: + """ + Base class for writers that obtain events from :class:`EventStorage` and process them. + """ + + def write(self): + raise NotImplementedError + + def close(self): + pass + + +class JSONWriter(EventWriter): + """ + Write scalars to a json file. + + It saves scalars as one json per line (instead of a big json) for easy parsing. + + Examples parsing such a json file: + + .. code-block:: none + + $ cat metrics.json | jq -s '.[0:2]' + [ + { + "data_time": 0.008433341979980469, + "iteration": 20, + "loss": 1.9228371381759644, + "loss_box_reg": 0.050025828182697296, + "loss_classifier": 0.5316952466964722, + "loss_mask": 0.7236229181289673, + "loss_rpn_box": 0.0856662318110466, + "loss_rpn_cls": 0.48198649287223816, + "lr": 0.007173333333333333, + "time": 0.25401854515075684 + }, + { + "data_time": 0.007216215133666992, + "iteration": 40, + "loss": 1.282649278640747, + "loss_box_reg": 0.06222952902317047, + "loss_classifier": 0.30682939291000366, + "loss_mask": 0.6970193982124329, + "loss_rpn_box": 0.038663312792778015, + "loss_rpn_cls": 0.1471673548221588, + "lr": 0.007706666666666667, + "time": 0.2490077018737793 + } + ] + + $ cat metrics.json | jq '.loss_mask' + 0.7126231789588928 + 0.689423680305481 + 0.6776131987571716 + ... + + """ + + def __init__(self, json_file, window_size=20): + """ + Args: + json_file (str): path to the json file. New data will be appended if the file exists. + window_size (int): the window size of median smoothing for the scalars whose + `smoothing_hint` are True. + """ + self._file_handle = PathManager.open(json_file, "a") + self._window_size = window_size + + def write(self): + storage = get_event_storage() + to_save = {"iteration": storage.iter} + to_save.update(storage.latest_with_smoothing_hint(self._window_size)) + self._file_handle.write(json.dumps(to_save, sort_keys=True) + "\n") + self._file_handle.flush() + try: + os.fsync(self._file_handle.fileno()) + except AttributeError: + pass + + def close(self): + self._file_handle.close() + + +class TensorboardXWriter(EventWriter): + """ + Write all scalars to a tensorboard file. + """ + + def __init__(self, log_dir: str, window_size: int = 20, **kwargs): + """ + Args: + log_dir (str): the directory to save the output events + window_size (int): the scalars will be median-smoothed by this window size + + kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` + """ + self._window_size = window_size + from torch.utils.tensorboard import SummaryWriter + + self._writer = SummaryWriter(log_dir, **kwargs) + + def write(self): + storage = get_event_storage() + for k, v in storage.latest_with_smoothing_hint(self._window_size).items(): + self._writer.add_scalar(k, v, storage.iter) + + # storage.put_{image,histogram} is only meant to be used by + # tensorboard writer. So we access its internal fields directly from here. 
+ if len(storage._vis_data) >= 1: + for img_name, img, step_num in storage._vis_data: + self._writer.add_image(img_name, img, step_num) + # Storage stores all image data and rely on this writer to clear them. + # As a result it assumes only one writer will use its image data. + # An alternative design is to let storage store limited recent + # data (e.g. only the most recent image) that all writers can access. + # In that case a writer may not see all image data if its period is long. + storage.clear_images() + + if len(storage._histograms) >= 1: + for params in storage._histograms: + self._writer.add_histogram_raw(**params) + storage.clear_histograms() + + def close(self): + if hasattr(self, "_writer"): # doesn't exist when the code fails at import + self._writer.close() + + +class CommonMetricPrinter(EventWriter): + """ + Print **common** metrics to the terminal, including + iteration time, ETA, memory, all losses, and the learning rate. + + To print something different, please implement a similar printer by yourself. + """ + + def __init__(self, max_iter): + """ + Args: + max_iter (int): the maximum number of iterations to train. + Used to compute ETA. + """ + self.logger = logging.getLogger(__name__) + self._max_iter = max_iter + self._last_write = None + + def write(self): + storage = get_event_storage() + iteration = storage.iter + + try: + data_time = storage.history("data_time").avg(20) + except KeyError: + # they may not exist in the first few iterations (due to warmup) + # or when SimpleTrainer is not used + data_time = None + + eta_string = None + try: + iter_time = storage.history("time").global_avg() + eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration) + storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + except KeyError: + iter_time = None + # estimate eta on our own - more noisy + if self._last_write is not None: + estimate_iter_time = (time.perf_counter() - self._last_write[1]) / ( + iteration - self._last_write[0] + ) + eta_seconds = estimate_iter_time * (self._max_iter - iteration) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + self._last_write = (iteration, time.perf_counter()) + + try: + lr = "{:.6f}".format(storage.history("lr").latest()) + except KeyError: + lr = "N/A" + + if torch.cuda.is_available(): + max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 + else: + max_mem_mb = None + + # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" + self.logger.info( + " {eta}iter: {iter} {losses} {time}{data_time}lr: {lr} {memory}".format( + eta=f"eta: {eta_string} " if eta_string else "", + iter=iteration, + losses=" ".join( + [ + "{}: {:.3f}".format(k, v.median(20)) + for k, v in storage.histories().items() + if "loss" in k + ] + ), + time="time: {:.4f} ".format(iter_time) if iter_time is not None else "", + data_time="data_time: {:.4f} ".format(data_time) if data_time is not None else "", + lr=lr, + memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", + ) + ) + + +class EventStorage: + """ + The user-facing class that provides metric storage functionalities. + + In the future we may add support for storing / logging other types of data if needed. 
+ """ + + def __init__(self, start_iter=0): + """ + Args: + start_iter (int): the iteration number to start with + """ + self._history = defaultdict(HistoryBuffer) + self._smoothing_hints = {} + self._latest_scalars = {} + self._iter = start_iter + self._current_prefix = "" + self._vis_data = [] + self._histograms = [] + + def put_image(self, img_name, img_tensor): + """ + Add an `img_tensor` associated with `img_name`, to be shown on + tensorboard. + + Args: + img_name (str): The name of the image to put into tensorboard. + img_tensor (torch.Tensor or numpy.array): An `uint8` or `float` + Tensor of shape `[channel, height, width]` where `channel` is + 3. The image format should be RGB. The elements in img_tensor + can either have values in [0, 1] (float32) or [0, 255] (uint8). + The `img_tensor` will be visualized in tensorboard. + """ + self._vis_data.append((img_name, img_tensor, self._iter)) + + def put_scalar(self, name, value, smoothing_hint=True): + """ + Add a scalar `value` to the `HistoryBuffer` associated with `name`. + + Args: + smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be + smoothed when logged. The hint will be accessible through + :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint + and apply custom smoothing rule. + + It defaults to True because most scalars we save need to be smoothed to + provide any useful signal. + """ + name = self._current_prefix + name + history = self._history[name] + value = float(value) + history.update(value, self._iter) + self._latest_scalars[name] = value + + existing_hint = self._smoothing_hints.get(name) + if existing_hint is not None: + assert ( + existing_hint == smoothing_hint + ), "Scalar {} was put with a different smoothing_hint!".format(name) + else: + self._smoothing_hints[name] = smoothing_hint + + def put_scalars(self, *, smoothing_hint=True, **kwargs): + """ + Put multiple scalars from keyword arguments. + + Examples: + + storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) + """ + for k, v in kwargs.items(): + self.put_scalar(k, v, smoothing_hint=smoothing_hint) + + def put_histogram(self, hist_name, hist_tensor, bins=1000): + """ + Create a histogram from a tensor. + + Args: + hist_name (str): The name of the histogram to put into tensorboard. + hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted + into a histogram. + bins (int): Number of histogram bins. 
+ """ + ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item() + + # Create a histogram with PyTorch + hist_counts = torch.histc(hist_tensor, bins=bins) + hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32) + + # Parameter for the add_histogram_raw function of SummaryWriter + hist_params = dict( + tag=hist_name, + min=ht_min, + max=ht_max, + num=len(hist_tensor), + sum=float(hist_tensor.sum()), + sum_squares=float(torch.sum(hist_tensor ** 2)), + bucket_limits=hist_edges[1:].tolist(), + bucket_counts=hist_counts.tolist(), + global_step=self._iter, + ) + self._histograms.append(hist_params) + + def history(self, name): + """ + Returns: + HistoryBuffer: the scalar history for name + """ + ret = self._history.get(name, None) + if ret is None: + raise KeyError("No history metric available for {}!".format(name)) + return ret + + def histories(self): + """ + Returns: + dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars + """ + return self._history + + def latest(self): + """ + Returns: + dict[name -> number]: the scalars that's added in the current iteration. + """ + return self._latest_scalars + + def latest_with_smoothing_hint(self, window_size=20): + """ + Similar to :meth:`latest`, but the returned values + are either the un-smoothed original latest value, + or a median of the given window_size, + depend on whether the smoothing_hint is True. + + This provides a default behavior that other writers can use. + """ + result = {} + for k, v in self._latest_scalars.items(): + result[k] = self._history[k].median(window_size) if self._smoothing_hints[k] else v + return result + + def smoothing_hints(self): + """ + Returns: + dict[name -> bool]: the user-provided hint on whether the scalar + is noisy and needs smoothing. + """ + return self._smoothing_hints + + def step(self): + """ + User should call this function at the beginning of each iteration, to + notify the storage of the start of a new iteration. + The storage will then be able to associate the new data with the + correct iteration number. + """ + self._iter += 1 + self._latest_scalars = {} + + @property + def iter(self): + return self._iter + + @property + def iteration(self): + # for backward compatibility + return self._iter + + def __enter__(self): + _CURRENT_STORAGE_STACK.append(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert _CURRENT_STORAGE_STACK[-1] == self + _CURRENT_STORAGE_STACK.pop() + + @contextmanager + def name_scope(self, name): + """ + Yields: + A context within which all the events added to this storage + will be prefixed by the name scope. + """ + old_prefix = self._current_prefix + self._current_prefix = name.rstrip("/") + "/" + yield + self._current_prefix = old_prefix + + def clear_images(self): + """ + Delete all the stored images for visualization. This should be called + after images are written to tensorboard. + """ + self._vis_data = [] + + def clear_histograms(self): + """ + Delete all the stored histograms for visualization. + This should be called after histograms are written to tensorboard. + """ + self._histograms = [] diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/logger.py b/preprocess/mhp_extension/detectron2/detectron2/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..b6496d9d6096f557ffa684be80342ec220c6014c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/logger.py @@ -0,0 +1,221 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import functools +import logging +import os +import sys +import time +from collections import Counter +from fvcore.common.file_io import PathManager +from tabulate import tabulate +from termcolor import colored + + +class _ColorfulFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + self._root_name = kwargs.pop("root_name") + "." + self._abbrev_name = kwargs.pop("abbrev_name", "") + if len(self._abbrev_name): + self._abbrev_name = self._abbrev_name + "." + super(_ColorfulFormatter, self).__init__(*args, **kwargs) + + def formatMessage(self, record): + record.name = record.name.replace(self._root_name, self._abbrev_name) + log = super(_ColorfulFormatter, self).formatMessage(record) + if record.levelno == logging.WARNING: + prefix = colored("WARNING", "red", attrs=["blink"]) + elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: + prefix = colored("ERROR", "red", attrs=["blink", "underline"]) + else: + return log + return prefix + " " + log + + +@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers +def setup_logger( + output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None +): + """ + Initialize the detectron2 logger and set its verbosity level to "DEBUG". + + Args: + output (str): a file name or a directory to save log. If None, will not save log file. + If ends with ".txt" or ".log", assumed to be a file name. + Otherwise, logs will be saved to `output/log.txt`. + name (str): the root module name of this logger + abbrev_name (str): an abbreviation of the module, to avoid long names in logs. + Set to "" to not log the root module in logs. + By default, will abbreviate "detectron2" to "d2" and leave other + modules unchanged. + + Returns: + logging.Logger: a logger + """ + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + + if abbrev_name is None: + abbrev_name = "d2" if name == "detectron2" else name + + plain_formatter = logging.Formatter( + "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" + ) + # stdout logging: master only + if distributed_rank == 0: + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + if color: + formatter = _ColorfulFormatter( + colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", + datefmt="%m/%d %H:%M:%S", + root_name=name, + abbrev_name=str(abbrev_name), + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + + # file logging: all workers + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "log.txt") + if distributed_rank > 0: + filename = filename + ".rank{}".format(distributed_rank) + PathManager.mkdirs(os.path.dirname(filename)) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + return logger + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + return PathManager.open(filename, "a") + + +""" +Below are some other convenient logging methods. 
+They are mainly adopted from +https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py +""" + + +def _find_caller(): + """ + Returns: + str: module name of the caller + tuple: a hashable key to be used to identify different callers + """ + frame = sys._getframe(2) + while frame: + code = frame.f_code + if os.path.join("utils", "logger.") not in code.co_filename: + mod_name = frame.f_globals["__name__"] + if mod_name == "__main__": + mod_name = "detectron2" + return mod_name, (code.co_filename, frame.f_lineno, code.co_name) + frame = frame.f_back + + +_LOG_COUNTER = Counter() +_LOG_TIMER = {} + + +def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): + """ + Log only for the first n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + key (str or tuple[str]): the string(s) can be one of "caller" or + "message", which defines how to identify duplicated logs. + For example, if called with `n=1, key="caller"`, this function + will only log the first call from the same caller, regardless of + the message content. + If called with `n=1, key="message"`, this function will log the + same content only once, even if they are called from different places. + If called with `n=1, key=("caller", "message")`, this function + will not log only if the same caller has logged the same message before. + """ + if isinstance(key, str): + key = (key,) + assert len(key) > 0 + + caller_module, caller_key = _find_caller() + hash_key = () + if "caller" in key: + hash_key = hash_key + caller_key + if "message" in key: + hash_key = hash_key + (msg,) + + _LOG_COUNTER[hash_key] += 1 + if _LOG_COUNTER[hash_key] <= n: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n(lvl, msg, n=1, *, name=None): + """ + Log once per n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + _LOG_COUNTER[key] += 1 + if n == 1 or _LOG_COUNTER[key] % n == 1: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n_seconds(lvl, msg, n=1, *, name=None): + """ + Log no more than once per n seconds. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + last_logged = _LOG_TIMER.get(key, None) + current_time = time.time() + if last_logged is None or current_time - last_logged >= n: + logging.getLogger(name or caller_module).log(lvl, msg) + _LOG_TIMER[key] = current_time + + +def create_small_table(small_dict): + """ + Create a small table using the keys of small_dict as headers. This is only + suitable for small dictionaries. + + Args: + small_dict (dict): a result dictionary of only a few items. + + Returns: + str: the table as a string. 
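Usage sketch for the logging helpers added above (illustrative, not part of the patch); the output directory is an assumption for the example.

```python
# Usage sketch (not part of the patch): configuring the detectron2 logger and
# using the rate-limited logging helpers. The output directory is illustrative.
import logging
from detectron2.utils.logger import setup_logger, log_first_n, log_every_n_seconds

logger = setup_logger(output="./output", distributed_rank=0, name="detectron2")
logger.info("logger initialized; messages also go to ./output/log.txt")

for step in range(1000):
    # Emitted only once, no matter how many times this line runs.
    log_first_n(logging.WARNING, "dataset contains images without annotations", n=1)
    # Emitted at most once every 5 seconds.
    log_every_n_seconds(logging.INFO, f"processed step {step}", n=5)
```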
+ """ + keys, values = tuple(zip(*small_dict.items())) + table = tabulate( + [values], + headers=keys, + tablefmt="pipe", + floatfmt=".3f", + stralign="center", + numalign="center", + ) + return table diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/memory.py b/preprocess/mhp_extension/detectron2/detectron2/utils/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..d495a1681f460668c96f64454e31e7f2fca8137a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/memory.py @@ -0,0 +1,86 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import logging +from contextlib import contextmanager +from functools import wraps +import torch + +__all__ = ["retry_if_cuda_oom"] + + +@contextmanager +def _ignore_torch_cuda_oom(): + """ + A context which ignores CUDA OOM exception from pytorch. + """ + try: + yield + except RuntimeError as e: + # NOTE: the string may change? + if "CUDA out of memory. " in str(e): + pass + else: + raise + + +def retry_if_cuda_oom(func): + """ + Makes a function retry itself after encountering + pytorch's CUDA OOM error. + It will first retry after calling `torch.cuda.empty_cache()`. + + If that still fails, it will then retry by trying to convert inputs to CPUs. + In this case, it expects the function to dispatch to CPU implementation. + The return values may become CPU tensors as well and it's user's + responsibility to convert it back to CUDA tensor if needed. + + Args: + func: a stateless callable that takes tensor-like objects as arguments + + Returns: + a callable which retries `func` if OOM is encountered. + + Examples: + + .. code-block:: python + + output = retry_if_cuda_oom(some_torch_function)(input1, input2) + # output may be on CPU even if inputs are on GPU + + Note: + 1. When converting inputs to CPU, it will only look at each argument and check + if it has `.device` and `.to` for conversion. Nested structures of tensors + are not supported. + + 2. Since the function might be called more than once, it has to be + stateless. + """ + + def maybe_to_cpu(x): + try: + like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") + except AttributeError: + like_gpu_tensor = False + if like_gpu_tensor: + return x.to(device="cpu") + else: + return x + + @wraps(func) + def wrapped(*args, **kwargs): + with _ignore_torch_cuda_oom(): + return func(*args, **kwargs) + + # Clear cache and retry + torch.cuda.empty_cache() + with _ignore_torch_cuda_oom(): + return func(*args, **kwargs) + + # Try on CPU. This slows down the code significantly, therefore print a notice. + logger = logging.getLogger(__name__) + logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) + new_args = (maybe_to_cpu(x) for x in args) + new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} + return func(*new_args, **new_kwargs) + + return wrapped diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/registry.py b/preprocess/mhp_extension/detectron2/detectron2/utils/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..fea1de961f0dbdacc934e11b9af5647b2a008051 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/registry.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# Keep this module for backward compatibility. 
+from fvcore.common.registry import Registry # noqa + +__all__ = ["Registry"] diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/serialize.py b/preprocess/mhp_extension/detectron2/detectron2/utils/serialize.py new file mode 100644 index 0000000000000000000000000000000000000000..734a62c2c4ecfd520eb9e8b941857b6f7e17d4c8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/serialize.py @@ -0,0 +1,29 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import cloudpickle + + +class PicklableWrapper(object): + """ + Wrap an object to make it more picklable, note that it uses + heavy weight serialization libraries that are slower than pickle. + It's best to use it only on closures (which are usually not picklable). + + This is a simplified version of + https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py + """ + + def __init__(self, obj): + self._obj = obj + + def __reduce__(self): + s = cloudpickle.dumps(self._obj) + return cloudpickle.loads, (s,) + + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + def __getattr__(self, attr): + # Ensure that the wrapped object can be used seamlessly as the previous object. + if attr not in ["_obj"]: + return getattr(self._obj, attr) + return getattr(self, attr) diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/video_visualizer.py b/preprocess/mhp_extension/detectron2/detectron2/utils/video_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..0144b679d09bbb8049c30eb849099422355b492c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/detectron2/utils/video_visualizer.py @@ -0,0 +1,235 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import pycocotools.mask as mask_util + +from detectron2.utils.visualizer import ( + ColorMode, + Visualizer, + _create_text_labels, + _PanopticPrediction, +) + +from .colormap import random_color + + +class _DetectedInstance: + """ + Used to store data about detected objects in video frame, + in order to transfer color to objects in the future frames. + + Attributes: + label (int): + bbox (tuple[float]): + mask_rle (dict): + color (tuple[float]): RGB colors in range (0, 1) + ttl (int): time-to-live for the instance. For example, if ttl=2, + the instance color can be transferred to objects in the next two frames. + """ + + __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"] + + def __init__(self, label, bbox, mask_rle, color, ttl): + self.label = label + self.bbox = bbox + self.mask_rle = mask_rle + self.color = color + self.ttl = ttl + + +class VideoVisualizer: + def __init__(self, metadata, instance_mode=ColorMode.IMAGE): + """ + Args: + metadata (MetadataCatalog): image metadata. + """ + self.metadata = metadata + self._old_instances = [] + assert instance_mode in [ + ColorMode.IMAGE, + ColorMode.IMAGE_BW, + ], "Other mode not supported yet." + self._instance_mode = instance_mode + + def draw_instance_predictions(self, frame, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255]. + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. 
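Usage sketch for `PicklableWrapper` from serialize.py above (illustrative, not part of the patch): it lets a closure survive standard pickling, e.g. when handed to multiprocessing workers. The scale factor below is an assumption for the example.

```python
# Usage sketch (not part of the patch): making a closure picklable so it can be
# sent to worker processes. The scale factor is illustrative.
import pickle
from detectron2.utils.serialize import PicklableWrapper

scale = 2.5
mapper = PicklableWrapper(lambda x: x * scale)  # a plain lambda cannot be pickled

payload = pickle.dumps(mapper)   # works because cloudpickle serializes the closure
restored = pickle.loads(payload)
print(restored(4))               # 10.0
```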
+ """ + frame_visualizer = Visualizer(frame, self.metadata) + num_instances = len(predictions) + if num_instances == 0: + return frame_visualizer.output + + boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = predictions.pred_masks + # mask IOU is not yet enabled + # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F")) + # assert len(masks_rles) == num_instances + else: + masks = None + + detected = [ + _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=None, ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + + if self._instance_mode == ColorMode.IMAGE_BW: + # any() returns uint8 tensor + frame_visualizer.output.img = frame_visualizer._create_grayscale_image( + (masks.any(dim=0) > 0).numpy() if masks is not None else None + ) + alpha = 0.3 + else: + alpha = 0.5 + + frame_visualizer.overlay_instances( + boxes=None if masks is not None else boxes, # boxes are a bit distracting + masks=masks, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + + return frame_visualizer.output + + def draw_sem_seg(self, frame, sem_seg, area_threshold=None): + """ + Args: + sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W), + each value is the integer label. + area_threshold (Optional[int]): only draw segmentations larger than the threshold + """ + # don't need to do anything special + frame_visualizer = Visualizer(frame, self.metadata) + frame_visualizer.draw_sem_seg(sem_seg, area_threshold=None) + return frame_visualizer.output + + def draw_panoptic_seg_predictions( + self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5 + ): + frame_visualizer = Visualizer(frame, self.metadata) + pred = _PanopticPrediction(panoptic_seg, segments_info) + + if self._instance_mode == ColorMode.IMAGE_BW: + frame_visualizer.output.img = frame_visualizer._create_grayscale_image( + pred.non_empty_mask() + ) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + frame_visualizer.draw_binary_mask( + mask, + color=mask_color, + text=self.metadata.stuff_classes[category_idx], + alpha=alpha, + area_threshold=area_threshold, + ) + + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return frame_visualizer.output + # draw mask for all instances second + masks, sinfo = list(zip(*all_instances)) + num_instances = len(masks) + masks_rles = mask_util.encode( + np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F") + ) + assert len(masks_rles) == num_instances + + category_ids = [x["category_id"] for x in sinfo] + detected = [ + _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + labels = [self.metadata.thing_classes[k] for k in category_ids] + + frame_visualizer.overlay_instances( + boxes=None, + masks=masks, + labels=labels, + keypoints=None, + assigned_colors=colors, + alpha=alpha, + ) + return frame_visualizer.output + + def _assign_colors(self, instances): + """ + Naive tracking heuristics to assign same color to the same instance, + will update the internal state of tracked instances. + + Returns: + list[tuple[float]]: list of colors. + """ + + # Compute iou with either boxes or masks: + is_crowd = np.zeros((len(instances),), dtype=np.bool) + if instances[0].bbox is None: + assert instances[0].mask_rle is not None + # use mask iou only when box iou is None + # because box seems good enough + rles_old = [x.mask_rle for x in self._old_instances] + rles_new = [x.mask_rle for x in instances] + ious = mask_util.iou(rles_old, rles_new, is_crowd) + threshold = 0.5 + else: + boxes_old = [x.bbox for x in self._old_instances] + boxes_new = [x.bbox for x in instances] + ious = mask_util.iou(boxes_old, boxes_new, is_crowd) + threshold = 0.6 + if len(ious) == 0: + ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32") + + # Only allow matching instances of the same label: + for old_idx, old in enumerate(self._old_instances): + for new_idx, new in enumerate(instances): + if old.label != new.label: + ious[old_idx, new_idx] = 0 + + matched_new_per_old = np.asarray(ious).argmax(axis=1) + max_iou_per_old = np.asarray(ious).max(axis=1) + + # Try to find match for each old instance: + extra_instances = [] + for idx, inst in enumerate(self._old_instances): + if max_iou_per_old[idx] > threshold: + newidx = matched_new_per_old[idx] + if instances[newidx].color is None: + instances[newidx].color = inst.color + continue + # If an old instance does not match any new instances, + # keep it for the next frame in case it is just missed by the detector + inst.ttl -= 1 + if inst.ttl > 0: + extra_instances.append(inst) + + # Assign random color to newly-detected instances: + for inst in instances: + if inst.color is None: + inst.color = random_color(rgb=True, maximum=1) + self._old_instances = instances[:] + extra_instances + return [d.color for d in instances] diff --git a/preprocess/mhp_extension/detectron2/detectron2/utils/visualizer.py b/preprocess/mhp_extension/detectron2/detectron2/utils/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..3ffcbdbd19518bce877a776582a7caeddc18108e --- /dev/null +++ 
b/preprocess/mhp_extension/detectron2/detectron2/utils/visualizer.py @@ -0,0 +1,1143 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import colorsys +import logging +import math +import numpy as np +from enum import Enum, unique +import cv2 +import matplotlib as mpl +import matplotlib.colors as mplc +import matplotlib.figure as mplfigure +import pycocotools.mask as mask_util +import torch +from fvcore.common.file_io import PathManager +from matplotlib.backends.backend_agg import FigureCanvasAgg +from PIL import Image + +from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes + +from .colormap import random_color + +logger = logging.getLogger(__name__) + +__all__ = ["ColorMode", "VisImage", "Visualizer"] + + +_SMALL_OBJECT_AREA_THRESH = 1000 +_LARGE_MASK_AREA_THRESH = 120000 +_OFF_WHITE = (1.0, 1.0, 240.0 / 255) +_BLACK = (0, 0, 0) +_RED = (1.0, 0, 0) + +_KEYPOINT_THRESHOLD = 0.05 + + +@unique +class ColorMode(Enum): + """ + Enum of different color modes to use for instance visualizations. + """ + + IMAGE = 0 + """ + Picks a random color for every instance and overlay segmentations with low opacity. + """ + SEGMENTATION = 1 + """ + Let instances of the same category have similar colors + (from metadata.thing_colors), and overlay them with + high opacity. This provides more attention on the quality of segmentation. + """ + IMAGE_BW = 2 + """ + Same as IMAGE, but convert all areas without masks to gray-scale. + Only available for drawing per-instance mask predictions. + """ + + +class GenericMask: + """ + Attribute: + polygons (list[ndarray]): list[ndarray]: polygons for this mask. + Each ndarray has format [x, y, x, y, ...] + mask (ndarray): a binary mask + """ + + def __init__(self, mask_or_polygons, height, width): + self._mask = self._polygons = self._has_holes = None + self.height = height + self.width = width + + m = mask_or_polygons + if isinstance(m, dict): + # RLEs + assert "counts" in m and "size" in m + if isinstance(m["counts"], list): # uncompressed RLEs + h, w = m["size"] + assert h == height and w == width + m = mask_util.frPyObjects(m, h, w) + self._mask = mask_util.decode(m)[:, :] + return + + if isinstance(m, list): # list[ndarray] + self._polygons = [np.asarray(x).reshape(-1) for x in m] + return + + if isinstance(m, np.ndarray): # assumed to be a binary mask + assert m.shape[1] != 2, m.shape + assert m.shape == (height, width), m.shape + self._mask = m.astype("uint8") + return + + raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) + + @property + def mask(self): + if self._mask is None: + self._mask = self.polygons_to_mask(self._polygons) + return self._mask + + @property + def polygons(self): + if self._polygons is None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + return self._polygons + + @property + def has_holes(self): + if self._has_holes is None: + if self._mask is not None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + else: + self._has_holes = False # if original format is polygon, does not have holes + return self._has_holes + + def mask_to_polygons(self, mask): + # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level + # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. + # Internal contours (holes) are placed in hierarchy-2. + # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. 
+ mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr + res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + hierarchy = res[-1] + if hierarchy is None: # empty mask + return [], False + has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 + res = res[-2] + res = [x.flatten() for x in res] + res = [x for x in res if len(x) >= 6] + return res, has_holes + + def polygons_to_mask(self, polygons): + rle = mask_util.frPyObjects(polygons, self.height, self.width) + rle = mask_util.merge(rle) + return mask_util.decode(rle)[:, :] + + def area(self): + return self.mask.sum() + + def bbox(self): + p = mask_util.frPyObjects(self.polygons, self.height, self.width) + p = mask_util.merge(p) + bbox = mask_util.toBbox(p) + bbox[2] += bbox[0] + bbox[3] += bbox[1] + return bbox + + +class _PanopticPrediction: + def __init__(self, panoptic_seg, segments_info): + self._seg = panoptic_seg + + self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info + segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) + areas = areas.numpy() + sorted_idxs = np.argsort(-areas) + self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] + self._seg_ids = self._seg_ids.tolist() + for sid, area in zip(self._seg_ids, self._seg_areas): + if sid in self._sinfo: + self._sinfo[sid]["area"] = float(area) + + def non_empty_mask(self): + """ + Returns: + (H, W) array, a mask for all pixels that have a prediction + """ + empty_ids = [] + for id in self._seg_ids: + if id not in self._sinfo: + empty_ids.append(id) + if len(empty_ids) == 0: + return np.zeros(self._seg.shape, dtype=np.uint8) + assert ( + len(empty_ids) == 1 + ), ">1 ids corresponds to no labels. This is currently not supported" + return (self._seg != empty_ids[0]).numpy().astype(np.bool) + + def semantic_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or sinfo["isthing"]: + # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. + continue + yield (self._seg == sid).numpy().astype(np.bool), sinfo + + def instance_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or not sinfo["isthing"]: + continue + mask = (self._seg == sid).numpy().astype(np.bool) + if mask.sum() > 0: + yield mask, sinfo + + +def _create_text_labels(classes, scores, class_names): + """ + Args: + classes (list[int] or None): + scores (list[float] or None): + class_names (list[str] or None): + + Returns: + list[str] or None + """ + labels = None + if classes is not None and class_names is not None and len(class_names) > 1: + labels = [class_names[i] for i in classes] + if scores is not None: + if labels is None: + labels = ["{:.0f}%".format(s * 100) for s in scores] + else: + labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] + return labels + + +class VisImage: + def __init__(self, img, scale=1.0): + """ + Args: + img (ndarray): an RGB image of shape (H, W, 3). + scale (float): scale the input image + """ + self.img = img + self.scale = scale + self.width, self.height = img.shape[1], img.shape[0] + self._setup_figure(img) + + def _setup_figure(self, img): + """ + Args: + Same as in :meth:`__init__()`. + + Returns: + fig (matplotlib.pyplot.figure): top level container for all the image plot elements. + ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. 
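Usage sketch for `GenericMask` above (illustrative, not part of the patch): it normalizes between binary masks, polygon lists, and COCO RLEs. The toy 64x64 mask below is an assumption for the example; pycocotools and OpenCV are required.

```python
# Usage sketch (not part of the patch): converting a binary mask to polygons
# and back with GenericMask. The toy 64x64 mask is illustrative.
import numpy as np
from detectron2.utils.visualizer import GenericMask

h = w = 64
binary = np.zeros((h, w), dtype=np.uint8)
binary[16:48, 16:48] = 1                       # a filled square

gm = GenericMask(binary, h, w)
print(len(gm.polygons), "polygon(s)")          # contours extracted lazily via OpenCV
print("area:", gm.area(), "bbox:", gm.bbox())  # bbox is [x0, y0, x1, y1]

# Polygons can be fed back in the list[ndarray] format accepted by the class.
gm2 = GenericMask(list(gm.polygons), h, w)
assert gm2.mask.shape == (h, w)
```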
+ """ + fig = mplfigure.Figure(frameon=False) + self.dpi = fig.get_dpi() + # add a small 1e-2 to avoid precision lost due to matplotlib's truncation + # (https://github.com/matplotlib/matplotlib/issues/15363) + fig.set_size_inches( + (self.width * self.scale + 1e-2) / self.dpi, + (self.height * self.scale + 1e-2) / self.dpi, + ) + self.canvas = FigureCanvasAgg(fig) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) + ax.axis("off") + ax.set_xlim(0.0, self.width) + ax.set_ylim(self.height) + + self.fig = fig + self.ax = ax + + def save(self, filepath): + """ + Args: + filepath (str): a string that contains the absolute path, including the file name, where + the visualized image will be saved. + """ + if filepath.lower().endswith(".jpg") or filepath.lower().endswith(".png"): + # faster than matplotlib's imshow + cv2.imwrite(filepath, self.get_image()[:, :, ::-1]) + else: + # support general formats (e.g. pdf) + self.ax.imshow(self.img, interpolation="nearest") + self.fig.savefig(filepath) + + def get_image(self): + """ + Returns: + ndarray: + the visualized image of shape (H, W, 3) (RGB) in uint8 type. + The shape is scaled w.r.t the input image using the given `scale` argument. + """ + canvas = self.canvas + s, (width, height) = canvas.print_to_buffer() + if (self.width, self.height) != (width, height): + img = cv2.resize(self.img, (width, height)) + else: + img = self.img + + # buf = io.BytesIO() # works for cairo backend + # canvas.print_rgba(buf) + # width, height = self.width, self.height + # s = buf.getvalue() + + buffer = np.frombuffer(s, dtype="uint8") + + # imshow is slow. blend manually (still quite slow) + img_rgba = buffer.reshape(height, width, 4) + rgb, alpha = np.split(img_rgba, [3], axis=2) + + try: + import numexpr as ne # fuse them with numexpr + + visualized_image = ne.evaluate("demo * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") + except ImportError: + alpha = alpha.astype("float32") / 255.0 + visualized_image = img * (1 - alpha) + rgb * alpha + + visualized_image = visualized_image.astype("uint8") + + return visualized_image + + +class Visualizer: + def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE): + """ + Args: + img_rgb: a numpy array of shape (H, W, C), where H and W correspond to + the height and width of the image respectively. C is the number of + color channels. The image is required to be in RGB format since that + is a requirement of the Matplotlib library. The image is also expected + to be in the range [0, 255]. + metadata (MetadataCatalog): image metadata. + """ + self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) + self.metadata = metadata + self.output = VisImage(self.img, scale=scale) + self.cpu_device = torch.device("cpu") + + # too small texts are useless, therefore clamp to 9 + self._default_font_size = max( + np.sqrt(self.output.height * self.output.width) // 90, 10 // scale + ) + self._instance_mode = instance_mode + + def draw_instance_predictions(self, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes if predictions.has("pred_classes") else None + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = np.asarray(predictions.pred_masks) + masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] + else: + masks = None + + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes + ] + alpha = 0.8 + else: + colors = None + alpha = 0.5 + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.img = self._create_grayscale_image( + (predictions.pred_masks.any(dim=0) > 0).numpy() + ) + alpha = 0.3 + + self.overlay_instances( + masks=masks, + boxes=boxes, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + return self.output + + def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): + """ + Draw semantic segmentation predictions/labels. + + Args: + sem_seg (Tensor or ndarray): the segmentation of shape (H, W). + Each value is the integer label of the pixel. + area_threshold (int): segments with less than `area_threshold` are not drawn. + alpha (float): the larger it is, the more opaque the segmentations are. + + Returns: + output (VisImage): image object with visualizations. + """ + if isinstance(sem_seg, torch.Tensor): + sem_seg = sem_seg.numpy() + labels, areas = np.unique(sem_seg, return_counts=True) + sorted_idxs = np.argsort(-areas).tolist() + labels = labels[sorted_idxs] + for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] + except (AttributeError, IndexError): + mask_color = None + + binary_mask = (sem_seg == label).astype(np.uint8) + text = self.metadata.stuff_classes[label] + self.draw_binary_mask( + binary_mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + return self.output + + def draw_panoptic_seg_predictions( + self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7 + ): + """ + Draw panoptic prediction results on an image. + + Args: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each + segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". + area_threshold (int): stuff segments with less than `area_threshold` are not drawn. + + Returns: + output (VisImage): image object with visualizations. + """ + pred = _PanopticPrediction(panoptic_seg, segments_info) + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.img = self._create_grayscale_image(pred.non_empty_mask()) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + text = self.metadata.stuff_classes[category_idx] + self.draw_binary_mask( + mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + + # draw mask for all instances second + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return self.output + masks, sinfo = list(zip(*all_instances)) + category_ids = [x["category_id"] for x in sinfo] + + try: + scores = [x["score"] for x in sinfo] + except KeyError: + scores = None + labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes) + + try: + colors = [random_color(rgb=True, maximum=1) for k in category_ids] + except AttributeError: + colors = None + self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) + + return self.output + + def draw_dataset_dict(self, dic): + """ + Draw annotations/segmentaions in Detectron2 Dataset format. + + Args: + dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. + + Returns: + output (VisImage): image object with visualizations. + """ + annos = dic.get("annotations", None) + if annos: + if "segmentation" in annos[0]: + masks = [x["segmentation"] for x in annos] + else: + masks = None + if "keypoints" in annos[0]: + keypts = [x["keypoints"] for x in annos] + keypts = np.array(keypts).reshape(len(annos), -1, 3) + else: + keypts = None + + boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos] + + labels = [x["category_id"] for x in annos] + colors = None + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in labels + ] + names = self.metadata.get("thing_classes", None) + if names: + labels = [names[i] for i in labels] + labels = [ + "{}".format(i) + ("|crowd" if a.get("iscrowd", 0) else "") + for i, a in zip(labels, annos) + ] + self.overlay_instances( + labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors + ) + + sem_seg = dic.get("sem_seg", None) + if sem_seg is None and "sem_seg_file_name" in dic: + with PathManager.open(dic["sem_seg_file_name"], "rb") as f: + sem_seg = Image.open(f) + sem_seg = np.asarray(sem_seg, dtype="uint8") + if sem_seg is not None: + self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) + return self.output + + def overlay_instances( + self, + *, + boxes=None, + labels=None, + masks=None, + keypoints=None, + assigned_colors=None, + alpha=0.5 + ): + """ + Args: + boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, + or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, + or a :class:`RotatedBoxes`, + or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image, + labels (list[str]): the text to be displayed for each instance. + masks (masks-like object): Supported types are: + + * :class:`detectron2.structures.PolygonMasks`, + :class:`detectron2.structures.BitMasks`. + * list[list[ndarray]]: contains the segmentation masks for all objects in one image. + The first level of the list corresponds to individual instances. 
The second + level to all the polygon that compose the instance, and the third level + to the polygon coordinates. The third level should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + * list[ndarray]: each ndarray is a binary mask of shape (H, W). + * list[dict]: each dict is a COCO-style RLE. + keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), + where the N is the number of instances and K is the number of keypoints. + The last dimension corresponds to (x, y, visibility or score). + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = None + if boxes is not None: + boxes = self._convert_boxes(boxes) + num_instances = len(boxes) + if masks is not None: + masks = self._convert_masks(masks) + if num_instances: + assert len(masks) == num_instances + else: + num_instances = len(masks) + if keypoints is not None: + if num_instances: + assert len(keypoints) == num_instances + else: + num_instances = len(keypoints) + keypoints = self._convert_keypoints(keypoints) + if labels is not None: + assert len(labels) == num_instances + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + if boxes is not None and boxes.shape[1] == 5: + return self.overlay_rotated_instances( + boxes=boxes, labels=labels, assigned_colors=assigned_colors + ) + + # Display in largest to smallest order to reduce occlusion. + areas = None + if boxes is not None: + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + elif masks is not None: + areas = np.asarray([x.area() for x in masks]) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + keypoints = keypoints[sorted_idxs] if keypoints is not None else None + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if masks is not None: + for segment in masks[i].polygons: + self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. + horiz_align = "left" + elif masks is not None: + x0, y0, x1, y1 = masks[i].bbox() + + # draw text in the center (defined by median) when box is not drawn + # median is less sensitive to outliers. + text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] + horiz_align = "center" + else: + continue # drawing the box confidence for keypoints isn't very useful. 
+ # for small objects, draw text at the side to avoid occlusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + # draw keypoints + if keypoints is not None: + for keypoints_per_instance in keypoints: + self.draw_and_connect_keypoints(keypoints_per_instance) + + return self.output + + def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): + """ + Args: + boxes (ndarray): an Nx5 numpy array of + (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image. + labels (list[str]): the text to be displayed for each instance. + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + + Returns: + output (VisImage): image object with visualizations. + """ + + num_instances = len(boxes) + + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + if boxes is not None: + areas = boxes[:, 2] * boxes[:, 3] + + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + colors = [assigned_colors[idx] for idx in sorted_idxs] + + for i in range(num_instances): + self.draw_rotated_box_with_label( + boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None + ) + + return self.output + + def draw_and_connect_keypoints(self, keypoints): + """ + Draws keypoints of an instance and follows the rules for keypoint connections + to draw lines between appropriate keypoints. This follows color heuristics for + line color. + + Args: + keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints + and the last dimension corresponds to (x, y, probability). + + Returns: + output (VisImage): image object with visualizations. + """ + visible = {} + keypoint_names = self.metadata.get("keypoint_names") + for idx, keypoint in enumerate(keypoints): + # draw keypoint + x, y, prob = keypoint + if prob > _KEYPOINT_THRESHOLD: + self.draw_circle((x, y), color=_RED) + if keypoint_names: + keypoint_name = keypoint_names[idx] + visible[keypoint_name] = (x, y) + + if self.metadata.get("keypoint_connection_rules"): + for kp0, kp1, color in self.metadata.keypoint_connection_rules: + if kp0 in visible and kp1 in visible: + x0, y0 = visible[kp0] + x1, y1 = visible[kp1] + color = tuple(x / 255.0 for x in color) + self.draw_line([x0, x1], [y0, y1], color=color) + + # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip + # Note that this strategy is specific to person keypoints. 
+ # For other keypoints, it should just do nothing + try: + ls_x, ls_y = visible["left_shoulder"] + rs_x, rs_y = visible["right_shoulder"] + mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 + except KeyError: + pass + else: + # draw line from nose to mid-shoulder + nose_x, nose_y = visible.get("nose", (None, None)) + if nose_x is not None: + self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) + + try: + # draw line from mid-shoulder to mid-hip + lh_x, lh_y = visible["left_hip"] + rh_x, rh_y = visible["right_hip"] + except KeyError: + pass + else: + mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 + self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) + return self.output + + """ + Primitive drawing functions: + """ + + def draw_text( + self, + text, + position, + *, + font_size=None, + color="g", + horizontal_alignment="center", + rotation=0 + ): + """ + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. + color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + rotation: rotation angle in degrees CCW + + Returns: + output (VisImage): image object with text drawn. + """ + if not font_size: + font_size = self._default_font_size + + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.2) + color[np.argmax(color)] = max(0.8, np.max(color)) + + x, y = position + self.output.ax.text( + x, + y, + text, + size=font_size * self.output.scale, + family="sans-serif", + bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + rotation=rotation, + ) + return self.output + + def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): + """ + Args: + box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 + are the coordinates of the image's top left corner. x1 and y1 are the + coordinates of the image's bottom right corner. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + + Returns: + output (VisImage): image object with box drawn. + """ + x0, y0, x1, y1 = box_coord + width = x1 - x0 + height = y1 - y0 + + linewidth = max(self._default_font_size / 4, 1) + + self.output.ax.add_patch( + mpl.patches.Rectangle( + (x0, y0), + width, + height, + fill=False, + edgecolor=edge_color, + linewidth=linewidth * self.output.scale, + alpha=alpha, + linestyle=line_style, + ) + ) + return self.output + + def draw_rotated_box_with_label( + self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None + ): + """ + Args: + rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), + where cnt_x and cnt_y are the center coordinates of the box. + w and h are the width and height of the box. angle represents how + many degrees the box is rotated CCW with regard to the 0-degree box. + alpha (float): blending efficient. Smaller values lead to more transparent masks. 
+ edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + label (string): label for rotated box. It will not be rendered when set to None. + + Returns: + output (VisImage): image object with box drawn. + """ + cnt_x, cnt_y, w, h, angle = rotated_box + area = w * h + # use thinner lines when the box is small + linewidth = self._default_font_size / ( + 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 + ) + + theta = angle * math.pi / 180.0 + c = math.cos(theta) + s = math.sin(theta) + rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] + # x: left->right ; y: top->down + rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] + for k in range(4): + j = (k + 1) % 4 + self.draw_line( + [rotated_rect[k][0], rotated_rect[j][0]], + [rotated_rect[k][1], rotated_rect[j][1]], + color=edge_color, + linestyle="--" if k == 1 else line_style, + linewidth=linewidth, + ) + + if label is not None: + text_pos = rotated_rect[1] # topleft corner + + height_ratio = h / np.sqrt(self.output.height * self.output.width) + label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size + ) + self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) + + return self.output + + def draw_circle(self, circle_coord, color, radius=3): + """ + Args: + circle_coord (list(int) or tuple(int)): contains the x and y coordinates + of the center of the circle. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + radius (int): radius of the circle. + + Returns: + output (VisImage): image object with box drawn. + """ + x, y = circle_coord + self.output.ax.add_patch( + mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) + ) + return self.output + + def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): + """ + Args: + x_data (list[int]): a list containing x values of all the points being drawn. + Length of list should match the length of y_data. + y_data (list[int]): a list containing y values of all the points being drawn. + Length of list should match the length of x_data. + color: color of the line. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + linestyle: style of the line. Refer to `matplotlib.lines.Line2D` + for a full list of formats that are accepted. + linewidth (float or None): width of the line. When it's None, + a default value will be computed and used. + + Returns: + output (VisImage): image object with line drawn. + """ + if linewidth is None: + linewidth = self._default_font_size / 3 + linewidth = max(linewidth, 1) + self.output.ax.add_line( + mpl.lines.Line2D( + x_data, + y_data, + linewidth=linewidth * self.output.scale, + color=color, + linestyle=linestyle, + ) + ) + return self.output + + def draw_binary_mask( + self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096 + ): + """ + Args: + binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and + W is the image width. Each value in the array is either a 0 or 1 value of uint8 + type. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. 
+ edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. + text (str): if None, will be drawn in the object's center of mass. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + area_threshold (float): a connected component small than this will not be shown. + + Returns: + output (VisImage): image object with mask drawn. + """ + if color is None: + color = random_color(rgb=True, maximum=1) + if area_threshold is None: + area_threshold = 4096 + + has_valid_segment = False + binary_mask = binary_mask.astype("uint8") # opencv needs uint8 + mask = GenericMask(binary_mask, self.output.height, self.output.width) + shape2d = (binary_mask.shape[0], binary_mask.shape[1]) + + if not mask.has_holes: + # draw polygons for regular masks + for segment in mask.polygons: + area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) + if area < area_threshold: + continue + has_valid_segment = True + segment = segment.reshape(-1, 2) + self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) + else: + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha + has_valid_segment = True + self.output.ax.imshow(rgba) + + if text is not None and has_valid_segment: + # TODO sometimes drawn on wrong objects. the heuristics here can improve. + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # draw text on the largest component, as well as other very large components. + for cid in range(1, _num_cc): + if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # median is more stable than centroid + # center = centroids[largest_component_id] + center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + self.draw_text(text, center, color=lighter_color) + return self.output + + def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): + """ + Args: + segment: numpy array of shape Nx2, containing all the points in the polygon. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. If not provided, a darker shade + of the polygon color will be used instead. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with polygon drawn. + """ + if edge_color is None: + # make edge color darker than the polygon color + if alpha > 0.8: + edge_color = self._change_color_brightness(color, brightness_factor=-0.7) + else: + edge_color = color + edge_color = mplc.to_rgb(edge_color) + (1,) + + polygon = mpl.patches.Polygon( + segment, + fill=True, + facecolor=mplc.to_rgb(color) + (alpha,), + edgecolor=edge_color, + linewidth=max(self._default_font_size // 15 * self.output.scale, 1), + ) + self.output.ax.add_patch(polygon) + return self.output + + """ + Internal methods: + """ + + def _jitter(self, color): + """ + Randomly modifies given color to produce a slightly different color than the color given. + + Args: + color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color + picked. The values in the list are in the [0.0, 1.0] range. 
+ + Returns: + jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the + color after being jittered. The values in the list are in the [0.0, 1.0] range. + """ + color = mplc.to_rgb(color) + vec = np.random.rand(3) + # better to do it in another color space + vec = vec / np.linalg.norm(vec) * 0.5 + res = np.clip(vec + color, 0, 1) + return tuple(res) + + def _create_grayscale_image(self, mask=None): + """ + Create a grayscale version of the original image. + The colors in masked area, if given, will be kept. + """ + img_bw = self.img.astype("f4").mean(axis=2) + img_bw = np.stack([img_bw] * 3, axis=2) + if mask is not None: + img_bw[mask] = self.img[mask] + return img_bw + + def _change_color_brightness(self, color, brightness_factor): + """ + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a lighter color. + + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. + """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return modified_color + + def _convert_boxes(self, boxes): + """ + Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. + """ + if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): + return boxes.tensor.numpy() + else: + return np.asarray(boxes) + + def _convert_masks(self, masks_or_polygons): + """ + Convert different format of masks or polygons to a tuple of masks and polygons. + + Returns: + list[GenericMask]: + """ + + m = masks_or_polygons + if isinstance(m, PolygonMasks): + m = m.polygons + if isinstance(m, BitMasks): + m = m.tensor.numpy() + if isinstance(m, torch.Tensor): + m = m.numpy() + ret = [] + for x in m: + if isinstance(x, GenericMask): + ret.append(x) + else: + ret.append(GenericMask(x, self.output.height, self.output.width)) + return ret + + def _convert_keypoints(self, keypoints): + if isinstance(keypoints, Keypoints): + keypoints = keypoints.tensor + keypoints = np.asarray(keypoints) + return keypoints + + def get_output(self): + """ + Returns: + output (VisImage): the image output containing the visualizations added + to the image. 
+ """ + return self.output diff --git a/preprocess/mhp_extension/detectron2/dev/README.md b/preprocess/mhp_extension/detectron2/dev/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cc0d3297b2d436f279c3546c16c86f296402f6c5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/README.md @@ -0,0 +1,7 @@ + +## Some scripts for developers to use, include: + +- `linter.sh`: lint the codebase before commit +- `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. + Note that these tests require 2 GPUs. +- `parse_results.sh`: parse results from a log file. diff --git a/preprocess/mhp_extension/detectron2/dev/linter.sh b/preprocess/mhp_extension/detectron2/dev/linter.sh new file mode 100755 index 0000000000000000000000000000000000000000..fd7081dbc27b85e5323d25085fb79c7ee3b54e4a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/linter.sh @@ -0,0 +1,46 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# Run this script at project root by "./dev/linter.sh" before you commit + +vergte() { + [ "$2" = "$(echo -e "$1\\n$2" | sort -V | head -n1)" ] +} + +{ + black --version | grep -E "(19.3b0.*6733274)|(19.3b0\\+8)" > /dev/null +} || { + echo "Linter requires 'black @ git+https://github.com/psf/black@673327449f86fce558adde153bb6cbe54bfebad2' !" + exit 1 +} + +ISORT_TARGET_VERSION="4.3.21" +ISORT_VERSION=$(isort -v | grep VERSION | awk '{print $2}') +vergte "$ISORT_VERSION" "$ISORT_TARGET_VERSION" || { + echo "Linter requires isort>=${ISORT_TARGET_VERSION} !" + exit 1 +} + +set -v + +echo "Running isort ..." +isort -y -sp . --atomic + +echo "Running black ..." +black -l 100 . + +echo "Running flake8 ..." +if [ -x "$(command -v flake8-3)" ]; then + flake8-3 . +else + python3 -m flake8 . +fi + +# echo "Running mypy ..." +# Pytorch does not have enough type annotations +# mypy detectron2/solver detectron2/structures detectron2/config + +echo "Running clang-format ..." +find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i + +command -v arc > /dev/null && arc lint diff --git a/preprocess/mhp_extension/detectron2/dev/packaging/README.md b/preprocess/mhp_extension/detectron2/dev/packaging/README.md new file mode 100644 index 0000000000000000000000000000000000000000..095684fcc1c5593805158c81aa0168263eb57ced --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/packaging/README.md @@ -0,0 +1,17 @@ + +## To build a cu101 wheel for release: + +``` +$ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101 +# inside the container: +# git clone https://github.com/facebookresearch/detectron2/ +# cd detectron2 +# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.4 +# ./dev/packaging/build_wheel.sh +``` + +## To build all wheels for `CUDA {9.2,10.0,10.1}` x `Python {3.6,3.7,3.8}`: +``` +./dev/packaging/build_all_wheels.sh +./dev/packaging/gen_wheel_index.sh /path/to/wheels +``` diff --git a/preprocess/mhp_extension/detectron2/dev/packaging/build_all_wheels.sh b/preprocess/mhp_extension/detectron2/dev/packaging/build_all_wheels.sh new file mode 100755 index 0000000000000000000000000000000000000000..eb64dea70cda26f5d101c414af43645ef7e3a349 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/packaging/build_all_wheels.sh @@ -0,0 +1,57 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +PYTORCH_VERSION=1.5 + +build_for_one_cuda() { + cu=$1 + + case "$cu" in + cu*) + container_name=manylinux-cuda${cu/cu/} + ;; + cpu) + container_name=manylinux-cuda101 + ;; + *) + echo "Unrecognized cu=$cu" + exit 1 + ;; + esac + + echo "Launching container $container_name ..." + + for py in 3.6 3.7 3.8; do + docker run -itd \ + --name $container_name \ + --mount type=bind,source="$(pwd)",target=/detectron2 \ + pytorch/$container_name + + cat </dev/null 2>&1 && pwd )" +. "$script_dir/pkg_helpers.bash" + +echo "Build Settings:" +echo "CU_VERSION: $CU_VERSION" # e.g. cu101 +echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or "" +echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.6 +echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 1.4 + +setup_cuda +setup_wheel_python +yum install ninja-build -y && ln -sv /usr/bin/ninja-build /usr/bin/ninja + +export TORCH_VERSION_SUFFIX="+$CU_VERSION" +if [[ "$CU_VERSION" == "cu102" ]]; then + export TORCH_VERSION_SUFFIX="" +fi +pip_install pip numpy -U +pip_install "torch==$PYTORCH_VERSION$TORCH_VERSION_SUFFIX" \ + -f https://download.pytorch.org/whl/$CU_VERSION/torch_stable.html + +# use separate directories to allow parallel build +BASE_BUILD_DIR=build/$CU_VERSION/$PYTHON_VERSION +python setup.py \ + build -b $BASE_BUILD_DIR \ + bdist_wheel -b $BASE_BUILD_DIR/build_dist -d wheels/$CU_VERSION diff --git a/preprocess/mhp_extension/detectron2/dev/packaging/gen_wheel_index.sh b/preprocess/mhp_extension/detectron2/dev/packaging/gen_wheel_index.sh new file mode 100755 index 0000000000000000000000000000000000000000..44d6041cdf45afdd39a85d413f08373e8516999b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/packaging/gen_wheel_index.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + + +root=$1 +if [[ -z "$root" ]]; then + echo "Usage: ./gen_wheel_index.sh /path/to/wheels" + exit +fi + +index=$root/index.html + +cd "$root" +for cu in cpu cu92 cu100 cu101 cu102; do + cd $cu + echo "Creating $PWD/index.html ..." + for whl in *.whl; do + echo "$whl
" + done > index.html + cd "$root" +done + +echo "Creating $index ..." +for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort); do + echo "$whl
" +done > "$index" + diff --git a/preprocess/mhp_extension/detectron2/dev/packaging/pkg_helpers.bash b/preprocess/mhp_extension/detectron2/dev/packaging/pkg_helpers.bash new file mode 100755 index 0000000000000000000000000000000000000000..51e6185c7fba6ba0f7a325c467993196f1c9b4ef --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/packaging/pkg_helpers.bash @@ -0,0 +1,57 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# Function to retry functions that sometimes timeout or have flaky failures +retry () { + $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) +} +# Install with pip a bit more robustly than the default +pip_install() { + retry pip install --progress-bar off "$@" +} + + +setup_cuda() { + # Now work out the CUDA settings + # Like other torch domain libraries, we choose common GPU architectures only. + export FORCE_CUDA=1 + case "$CU_VERSION" in + cu102) + export CUDA_HOME=/usr/local/cuda-10.2/ + export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX" + ;; + cu101) + export CUDA_HOME=/usr/local/cuda-10.1/ + export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX" + ;; + cu100) + export CUDA_HOME=/usr/local/cuda-10.0/ + export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX" + ;; + cu92) + export CUDA_HOME=/usr/local/cuda-9.2/ + export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX" + ;; + cpu) + unset FORCE_CUDA + export CUDA_VISIBLE_DEVICES= + ;; + *) + echo "Unrecognized CU_VERSION=$CU_VERSION" + exit 1 + ;; + esac +} + +setup_wheel_python() { + case "$PYTHON_VERSION" in + 3.6) python_abi=cp36-cp36m ;; + 3.7) python_abi=cp37-cp37m ;; + 3.8) python_abi=cp38-cp38 ;; + *) + echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION" + exit 1 + ;; + esac + export PATH="/opt/python/$python_abi/bin:$PATH" +} diff --git a/preprocess/mhp_extension/detectron2/dev/parse_results.sh b/preprocess/mhp_extension/detectron2/dev/parse_results.sh new file mode 100755 index 0000000000000000000000000000000000000000..874b688889049e869854273c83182e5b019315b3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/parse_results.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# A shell script that parses metrics from the log file. +# Make it easier for developers to track performance of models. + +LOG="$1" + +if [[ -z "$LOG" ]]; then + echo "Usage: $0 /path/to/log/file" + exit 1 +fi + +# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it) +# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / demo per device, on 8 devices) +# [12/15 11:49:03] inference INFO: Total inference pure compute time: ..... 
+ +# training time +trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*') +echo "Training speed: $trainspeed s/it" + +# inference time: there could be multiple inference during training +inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1) +echo "Inference speed: $inferencespeed s/it" + +# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161 +memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*') +echo "Training memory: $memory MB" + +echo "Easy to copypaste:" +echo "$trainspeed","$inferencespeed","$memory" + +echo "------------------------------" + +# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox +# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl +# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011 +# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm +# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl +# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011 + +echo "COCO Results:" +num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l) +# each task has 3 lines +grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3)) diff --git a/preprocess/mhp_extension/detectron2/dev/run_inference_tests.sh b/preprocess/mhp_extension/detectron2/dev/run_inference_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..17e422d576e5fe9efcd85790954c569c962657d6 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/run_inference_tests.sh @@ -0,0 +1,44 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +BIN="python tools/train_net.py" +OUTPUT="inference_test_output" +NUM_GPUS=2 + +CFG_LIST=( "${@:1}" ) + +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN \ + --eval-only \ + --num-gpus $NUM_GPUS \ + --config-file "$cfg" \ + OUTPUT_DIR $OUTPUT + rm -rf $OUTPUT +done + + +echo "========================================================================" +echo "Running demo.py ..." 
+echo "========================================================================" +DEMO_BIN="python demo/demo.py" +COCO_DIR=datasets/coco/val2014 +mkdir -pv $OUTPUT + +set -v + +$DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ + --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT +rm -rf $OUTPUT diff --git a/preprocess/mhp_extension/detectron2/dev/run_instant_tests.sh b/preprocess/mhp_extension/detectron2/dev/run_instant_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..2c51de649262e7371fb173210c8edc377e8177e0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/dev/run_instant_tests.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +BIN="python tools/train_net.py" +OUTPUT="instant_test_output" +NUM_GPUS=2 + +CFG_LIST=( "${@:1}" ) +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ + SOLVER.IMS_PER_BATCH $(($NUM_GPUS * 2)) \ + OUTPUT_DIR "$OUTPUT" + rm -rf "$OUTPUT" +done + diff --git a/preprocess/mhp_extension/detectron2/docker/Dockerfile b/preprocess/mhp_extension/detectron2/docker/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..2a8603903e36eafb3a61fac0a086a919cc67fe38 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docker/Dockerfile @@ -0,0 +1,49 @@ +FROM nvidia/cuda:10.1-cudnn7-devel + +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y \ + python3-opencv ca-certificates python3-dev git wget sudo \ + cmake ninja-build protobuf-compiler libprotobuf-dev && \ + rm -rf /var/lib/apt/lists/* +RUN ln -sv /usr/bin/python3 /usr/bin/python + +# create a non-root user +ARG USER_ID=1000 +RUN useradd -m --no-log-init --system --uid ${USER_ID} appuser -g sudo +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers +USER appuser +WORKDIR /home/appuser + +ENV PATH="/home/appuser/.local/bin:${PATH}" +RUN wget https://bootstrap.pypa.io/get-pip.py && \ + python3 get-pip.py --user && \ + rm get-pip.py + +# install dependencies +# See https://pytorch.org/ for other options if you use a different version of CUDA +RUN pip install --user tensorboard cython +RUN pip install --user torch==1.5+cu101 torchvision==0.6+cu101 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip install --user 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' + +RUN pip install --user 'git+https://github.com/facebookresearch/fvcore' +# install detectron2 +RUN git clone https://github.com/facebookresearch/detectron2 detectron2_repo +# set FORCE_CUDA because during `docker build` cuda is not accessible +ENV FORCE_CUDA="1" +# This will by default build detectron2 for all common cuda architectures and take a lot more time, +# because inside `docker build`, there is no way to tell which architecture will be used. 
+ARG TORCH_CUDA_ARCH_LIST="Kepler;Kepler+Tesla;Maxwell;Maxwell+Tegra;Pascal;Volta;Turing" +ENV TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}" + +RUN pip install --user -e detectron2_repo + +# Set a fixed model cache directory. +ENV FVCORE_CACHE="/tmp" +WORKDIR /home/appuser/detectron2_repo + +# run detectron2 under user "appuser": +# wget http://images.cocodataset.org/val2017/000000439715.jpg -O input.jpg +# python3 demo/demo.py \ + #--config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + #--input input.jpg --output outputs/ \ + #--opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl diff --git a/preprocess/mhp_extension/detectron2/docker/Dockerfile-circleci b/preprocess/mhp_extension/detectron2/docker/Dockerfile-circleci new file mode 100644 index 0000000000000000000000000000000000000000..bc0be845adc247eb458d212ae5352c594cd80a72 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docker/Dockerfile-circleci @@ -0,0 +1,17 @@ +FROM nvidia/cuda:10.1-cudnn7-devel +# This dockerfile only aims to provide an environment for unittest on CircleCI + +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y \ + python3-opencv ca-certificates python3-dev git wget sudo ninja-build && \ + rm -rf /var/lib/apt/lists/* + +RUN wget -q https://bootstrap.pypa.io/get-pip.py && \ + python3 get-pip.py && \ + rm get-pip.py + +# install dependencies +# See https://pytorch.org/ for other options if you use a different version of CUDA +RUN pip install tensorboard cython +RUN pip install torch==1.5+cu101 torchvision==0.6+cu101 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' diff --git a/preprocess/mhp_extension/detectron2/docker/README.md b/preprocess/mhp_extension/detectron2/docker/README.md new file mode 100644 index 0000000000000000000000000000000000000000..760c4054d0e4fa56a67ab4b59c14979498e2f94a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docker/README.md @@ -0,0 +1,36 @@ + +## Use the container (with docker โ‰ฅ 19.03) + +``` +cd docker/ +# Build: +docker build --build-arg USER_ID=$UID -t detectron2:v0 . +# Run: +docker run --gpus all -it \ + --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \ + --name=detectron2 detectron2:v0 + +# Grant docker access to host X server to show images +xhost +local:`docker inspect --format='{{ .Config.Hostname }}' detectron2` +``` + +## Use the container (with docker < 19.03) + +Install docker-compose and nvidia-docker2, then run: +``` +cd docker && USER_ID=$UID docker-compose run detectron2 +``` + +#### Using a persistent cache directory + +You can prevent models from being re-downloaded on every run, +by storing them in a cache directory. + +To do this, add `--volume=$HOME/.torch/fvcore_cache:/tmp:rw` in the run command. + +## Install new dependencies +Add the following to `Dockerfile` to make persistent changes. +``` +RUN sudo apt-get update && sudo apt-get install -y vim +``` +Or run them in the container to make temporary changes. diff --git a/preprocess/mhp_extension/detectron2/docker/docker-compose.yml b/preprocess/mhp_extension/detectron2/docker/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..e660f44645a5cc164cd5a59f2cdcf7e1ded60c2e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docker/docker-compose.yml @@ -0,0 +1,18 @@ +version: "2.3" +services: + detectron2: + build: + context: . 
+ dockerfile: Dockerfile + args: + USER_ID: ${USER_ID:-1000} + runtime: nvidia # TODO: Exchange with "gpu: all" in the future (see https://github.com/facebookresearch/detectron2/pull/197/commits/00545e1f376918db4a8ce264d427a07c1e896c5a). + shm_size: "8gb" + ulimits: + memlock: -1 + stack: 67108864 + volumes: + - /tmp/.X11-unix:/tmp/.X11-unix:ro + environment: + - DISPLAY=$DISPLAY + - NVIDIA_VISIBLE_DEVICES=all diff --git a/preprocess/mhp_extension/detectron2/docs/.gitignore b/preprocess/mhp_extension/detectron2/docs/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e35d8850c9688b1ce82711694692cc574a799396 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/.gitignore @@ -0,0 +1 @@ +_build diff --git a/preprocess/mhp_extension/detectron2/docs/Makefile b/preprocess/mhp_extension/detectron2/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d537643dd411736a5f309383cfef52ea7d5e4599 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/preprocess/mhp_extension/detectron2/docs/README.md b/preprocess/mhp_extension/detectron2/docs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2c65c3676b488f3654b7e3231e1cfd06df48d4be --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/README.md @@ -0,0 +1,16 @@ +# Read the docs: + +The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). +Documents in this directory are not meant to be read on github. + +# Build the docs: + +1. Install detectron2 according to [INSTALL.md](INSTALL.md). +2. Install additional libraries required to build docs: + - docutils==0.16 + - Sphinx==3.0.0 + - recommonmark==0.6.0 + - sphinx_rtd_theme + - mock + +3. Run `make html` from this directory. diff --git a/preprocess/mhp_extension/detectron2/docs/conf.py b/preprocess/mhp_extension/detectron2/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..44e9f2b4db549a3a5ef1420b27d408915e86657c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/conf.py @@ -0,0 +1,335 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# flake8: noqa + +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +import os +import sys +import mock +from sphinx.domains import Domain +from typing import Dict, List, Tuple + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +import sphinx_rtd_theme + + +class GithubURLDomain(Domain): + """ + Resolve certain links in markdown files to github source. + """ + + name = "githuburl" + ROOT = "https://github.com/facebookresearch/detectron2/blob/master/" + LINKED_DOC = ["tutorials/install", "tutorials/getting_started"] + + def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode): + github_url = None + if not target.endswith("html") and target.startswith("../../"): + url = target.replace("../", "") + github_url = url + if fromdocname in self.LINKED_DOC: + # unresolved links in these docs are all github links + github_url = target + + if github_url is not None: + if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"): + # bug of recommonmark. + # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155 + github_url += ".md" + print("Ref {} resolved to github:{}".format(target, github_url)) + contnode["refuri"] = self.ROOT + github_url + return [("githuburl:any", contnode)] + else: + return [] + + +# to support markdown +from recommonmark.parser import CommonMarkParser + +sys.path.insert(0, os.path.abspath("../")) +os.environ["DOC_BUILDING"] = "True" +DEPLOY = os.environ.get("READTHEDOCS") == "True" + + +# -- Project information ----------------------------------------------------- + +# fmt: off +try: + import torch # noqa +except ImportError: + for m in [ + "torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd", + "torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx", + "torchvision", "torchvision.ops", + ]: + sys.modules[m] = mock.Mock(name=m) + sys.modules['torch'].__version__ = "1.5" # fake version + +for m in [ + "cv2", "scipy", "portalocker", "detectron2._C", + "pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval", + "google", "google.protobuf", "google.protobuf.internal", "onnx", + "caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend", +]: + sys.modules[m] = mock.Mock(name=m) +# fmt: on +sys.modules["cv2"].__version__ = "3.4" + +import detectron2 # isort: skip + + +project = "detectron2" +copyright = "2019-2020, detectron2 contributors" +author = "detectron2 contributors" + +# The short X.Y version +version = detectron2.__version__ +# The full version, including alpha/beta/rc tags +release = version + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +needs_sphinx = "3.0" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
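The `sys.modules[m] = mock.Mock(name=m)` assignments above work because Python checks `sys.modules` before looking for a real package, so the docs builder can `import detectron2` even when torch, cv2, or pycocotools are absent. A standalone sketch of the same trick, using the stdlib `unittest.mock` and a made-up module name:

```python
import sys
from unittest import mock

# Pre-seed the import cache; any later "import heavy_dep" returns this Mock.
# "heavy_dep" is a made-up name used only for illustration.
sys.modules["heavy_dep"] = mock.Mock(name="heavy_dep")
sys.modules["heavy_dep"].__version__ = "1.5"  # fake version, as done for torch above

import heavy_dep  # no ImportError, even though no such package exists

print(type(heavy_dep))        # <class 'unittest.mock.Mock'>
print(heavy_dep.__version__)  # 1.5
print(heavy_dep.nn.Module)    # attribute access keeps returning Mocks
```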
+extensions = [ + "recommonmark", + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", +] + +# -- Configurations for plugins ------------ +napoleon_google_docstring = True +napoleon_include_init_with_doc = True +napoleon_include_special_with_doc = True +napoleon_numpy_docstring = False +napoleon_use_rtype = False +autodoc_inherit_docstrings = False +autodoc_member_order = "bysource" + +if DEPLOY: + intersphinx_timeout = 10 +else: + # skip this when building locally + intersphinx_timeout = 0.1 +intersphinx_mapping = { + "python": ("https://docs.python.org/3.6", None), + "numpy": ("https://docs.scipy.org/doc/numpy/", None), + "torch": ("https://pytorch.org/docs/master/", None), +} +# ------------------------- + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +source_suffix = [".rst", ".md"] + +# The master toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + + +# -- Options for HTML output ------------------------------------------------- + +html_theme = "sphinx_rtd_theme" +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = "detectron2doc" + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + (master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual") +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "detectron2", + "detectron2 Documentation", + author, + "detectron2", + "One line description of project.", + "Miscellaneous", + ) +] + + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +_DEPRECATED_NAMES = set() + + +def autodoc_skip_member(app, what, name, obj, skip, options): + # we hide something deliberately + if getattr(obj, "__HIDE_SPHINX_DOC__", False): + return True + # Hide some names that are deprecated or not intended to be used + if name in _DEPRECATED_NAMES: + return True + return None + + +_PAPER_DATA = { + "resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"), + "fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"), + "mask r-cnn": ("1703.06870", "Mask R-CNN"), + "faster r-cnn": ( + "1506.01497", + "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks", + ), + "deformconv": ("1703.06211", "Deformable Convolutional Networks"), + "deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"), + "panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"), + "retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"), + "cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"), + "lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"), + "rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"), + "in1k1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"), +} + + +def paper_ref_role( + typ: str, + rawtext: str, + text: str, + lineno: int, + inliner, + options: Dict = {}, + content: List[str] = [], +): + """ + Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension. 
+ """ + from docutils import nodes, utils + from sphinx.util.nodes import split_explicit_title + + text = utils.unescape(text) + has_explicit_title, title, link = split_explicit_title(text) + link = link.lower() + if link not in _PAPER_DATA: + inliner.reporter.warning("Cannot find paper " + link) + paper_url, paper_title = "#", link + else: + paper_url, paper_title = _PAPER_DATA[link] + if "/" not in paper_url: + paper_url = "https://arxiv.org/abs/" + paper_url + if not has_explicit_title: + title = paper_title + pnode = nodes.reference(title, title, internal=False, refuri=paper_url) + return [pnode], [] + + +def setup(app): + from recommonmark.transform import AutoStructify + + app.add_domain(GithubURLDomain) + app.connect("autodoc-skip-member", autodoc_skip_member) + app.add_role("paper", paper_ref_role) + app.add_config_value( + "recommonmark_config", + {"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True}, + True, + ) + app.add_transform(AutoStructify) diff --git a/preprocess/mhp_extension/detectron2/docs/index.rst b/preprocess/mhp_extension/detectron2/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..8634b7b12ab906c10a78d6053428029799282ffd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/index.rst @@ -0,0 +1,14 @@ +.. detectron2 documentation master file, created by + sphinx-quickstart on Sat Sep 21 13:46:45 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to detectron2's documentation! +====================================== + +.. toctree:: + :maxdepth: 2 + + tutorials/index + notes/index + modules/index diff --git a/preprocess/mhp_extension/detectron2/docs/modules/checkpoint.rst b/preprocess/mhp_extension/detectron2/docs/modules/checkpoint.rst new file mode 100644 index 0000000000000000000000000000000000000000..616cb186c40212d7a0ca311d21691245b2fce996 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/checkpoint.rst @@ -0,0 +1,7 @@ +detectron2.checkpoint package +============================= + +.. automodule:: detectron2.checkpoint + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/config.rst b/preprocess/mhp_extension/detectron2/docs/modules/config.rst new file mode 100644 index 0000000000000000000000000000000000000000..034bd5f5e8a79d9eb2109f86b7aa12eea9c8b786 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/config.rst @@ -0,0 +1,17 @@ +detectron2.config package +========================= + +.. automodule:: detectron2.config + :members: + :undoc-members: + :show-inheritance: + :inherited-members: + + +Config References +----------------- + +.. literalinclude:: ../../detectron2/config/defaults.py + :language: python + :linenos: + :lines: 4- diff --git a/preprocess/mhp_extension/detectron2/docs/modules/data.rst b/preprocess/mhp_extension/detectron2/docs/modules/data.rst new file mode 100644 index 0000000000000000000000000000000000000000..3697f0e22f3351a68ee40e4cadbd3ee6d978af8d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/data.rst @@ -0,0 +1,40 @@ +detectron2.data package +======================= + +.. automodule:: detectron2.data + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.detection\_utils module +--------------------------------------- + +.. 
automodule:: detectron2.data.detection_utils + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.datasets module +--------------------------------------- + +.. automodule:: detectron2.data.datasets + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.samplers module +--------------------------------------- + +.. automodule:: detectron2.data.samplers + :members: + :undoc-members: + :show-inheritance: + + +detectron2.data.transforms module +--------------------------------------- + +.. automodule:: detectron2.data.transforms + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/engine.rst b/preprocess/mhp_extension/detectron2/docs/modules/engine.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb8b533aee225b1096fe4353b03533208f92732e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/engine.rst @@ -0,0 +1,25 @@ +detectron2.engine package +========================= + + +.. automodule:: detectron2.engine + :members: + :undoc-members: + :show-inheritance: + + +detectron2.engine.defaults module +--------------------------------- + +.. automodule:: detectron2.engine.defaults + :members: + :undoc-members: + :show-inheritance: + +detectron2.engine.hooks module +--------------------------------- + +.. automodule:: detectron2.engine.hooks + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/evaluation.rst b/preprocess/mhp_extension/detectron2/docs/modules/evaluation.rst new file mode 100644 index 0000000000000000000000000000000000000000..d9d34ff1a21c42b33ce2ad8b4415052af194397f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/evaluation.rst @@ -0,0 +1,7 @@ +detectron2.evaluation package +============================= + +.. automodule:: detectron2.evaluation + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/export.rst b/preprocess/mhp_extension/detectron2/docs/modules/export.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb7c3c9173cae323e67cb9330b292fefc40ec760 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/export.rst @@ -0,0 +1,7 @@ +detectron2.export package +========================= + +.. automodule:: detectron2.export + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/index.rst b/preprocess/mhp_extension/detectron2/docs/modules/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..1b246f570070b4f8ef47d00968498d49f0310a6e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/index.rst @@ -0,0 +1,17 @@ +API Documentation +================== + +.. toctree:: + + checkpoint + config + data + engine + evaluation + layers + model_zoo + modeling + solver + structures + utils + export diff --git a/preprocess/mhp_extension/detectron2/docs/modules/layers.rst b/preprocess/mhp_extension/detectron2/docs/modules/layers.rst new file mode 100644 index 0000000000000000000000000000000000000000..6aeb5213a4b27edeb7c0b2bdb816fd1af8d22ce4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/layers.rst @@ -0,0 +1,7 @@ +detectron2.layers package +========================= + +.. 
automodule:: detectron2.layers + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/model_zoo.rst b/preprocess/mhp_extension/detectron2/docs/modules/model_zoo.rst new file mode 100644 index 0000000000000000000000000000000000000000..8b1c7d598f509db2361928aac1be4f25854d9f93 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/model_zoo.rst @@ -0,0 +1,7 @@ +detectron2.model_zoo package +============================ + +.. automodule:: detectron2.model_zoo + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/modeling.rst b/preprocess/mhp_extension/detectron2/docs/modules/modeling.rst new file mode 100644 index 0000000000000000000000000000000000000000..58ccd2c591774f3766f71da00b6938a0f4f3f592 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/modeling.rst @@ -0,0 +1,58 @@ +detectron2.modeling package +=========================== + +.. automodule:: detectron2.modeling + :members: + :undoc-members: + :show-inheritance: + + +detectron2.modeling.poolers module +--------------------------------------- + +.. automodule:: detectron2.modeling.poolers + :members: + :undoc-members: + :show-inheritance: + + +detectron2.modeling.sampling module +------------------------------------ + +.. automodule:: detectron2.modeling.sampling + :members: + :undoc-members: + :show-inheritance: + + +detectron2.modeling.box_regression module +------------------------------------------ + +.. automodule:: detectron2.modeling.box_regression + :members: + :undoc-members: + :show-inheritance: + + +Model Registries +----------------- + +These are different registries provided in modeling. +Each registry provide you the ability to replace it with your customized component, +without having to modify detectron2's code. + +Note that it is impossible to allow users to customize any line of code directly. +Even just to add one line at some place, +you'll likely need to find out the smallest registry which contains that line, +and register your component to that registry. + + +.. autodata:: detectron2.modeling.META_ARCH_REGISTRY +.. autodata:: detectron2.modeling.BACKBONE_REGISTRY +.. autodata:: detectron2.modeling.PROPOSAL_GENERATOR_REGISTRY +.. autodata:: detectron2.modeling.RPN_HEAD_REGISTRY +.. autodata:: detectron2.modeling.ANCHOR_GENERATOR_REGISTRY +.. autodata:: detectron2.modeling.ROI_HEADS_REGISTRY +.. autodata:: detectron2.modeling.ROI_BOX_HEAD_REGISTRY +.. autodata:: detectron2.modeling.ROI_MASK_HEAD_REGISTRY +.. autodata:: detectron2.modeling.ROI_KEYPOINT_HEAD_REGISTRY diff --git a/preprocess/mhp_extension/detectron2/docs/modules/solver.rst b/preprocess/mhp_extension/detectron2/docs/modules/solver.rst new file mode 100644 index 0000000000000000000000000000000000000000..7f4a49f2ebaef2760b91eb7cecd32dcbff038efb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/solver.rst @@ -0,0 +1,7 @@ +detectron2.solver package +========================= + +.. automodule:: detectron2.solver + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/structures.rst b/preprocess/mhp_extension/detectron2/docs/modules/structures.rst new file mode 100644 index 0000000000000000000000000000000000000000..5701c61abf5f74f61807e131f708304a8c9bab82 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/structures.rst @@ -0,0 +1,7 @@ +detectron2.structures package +============================= + +.. 
automodule:: detectron2.structures + :members: + :undoc-members: + :show-inheritance: diff --git a/preprocess/mhp_extension/detectron2/docs/modules/utils.rst b/preprocess/mhp_extension/detectron2/docs/modules/utils.rst new file mode 100644 index 0000000000000000000000000000000000000000..8b57292ac0e655f40756b19c8eea259bddb62aab --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/modules/utils.rst @@ -0,0 +1,80 @@ +detectron2.utils package +======================== + +detectron2.utils.colormap module +-------------------------------- + +.. automodule:: detectron2.utils.colormap + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.comm module +---------------------------- + +.. automodule:: detectron2.utils.comm + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.events module +------------------------------ + +.. automodule:: detectron2.utils.events + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.logger module +------------------------------ + +.. automodule:: detectron2.utils.logger + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.registry module +-------------------------------- + +.. automodule:: detectron2.utils.registry + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.memory module +---------------------------------- + +.. automodule:: detectron2.utils.memory + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.analysis module +---------------------------------- + +.. automodule:: detectron2.utils.analysis + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.visualizer module +---------------------------------- + +.. automodule:: detectron2.utils.visualizer + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.video\_visualizer module +----------------------------------------- + +.. automodule:: detectron2.utils.video_visualizer + :members: + :undoc-members: + :show-inheritance: + diff --git a/preprocess/mhp_extension/detectron2/docs/notes/benchmarks.md b/preprocess/mhp_extension/detectron2/docs/notes/benchmarks.md new file mode 100644 index 0000000000000000000000000000000000000000..963f9210b39ce3ae248541644362631cb325d2b2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/notes/benchmarks.md @@ -0,0 +1,196 @@ + +# Benchmarks + +Here we benchmark the training speed of a Mask R-CNN in detectron2, +with some other popular open source Mask R-CNN implementations. + + +### Settings + +* Hardware: 8 NVIDIA V100s with NVLink. +* Software: Python 3.7, CUDA 10.1, cuDNN 7.6.5, PyTorch 1.5, + TensorFlow 1.15.0rc2, Keras 2.2.5, MxNet 1.6.0b20190820. +* Model: an end-to-end R-50-FPN Mask-RCNN model, using the same hyperparameter as the + [Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml) + (it does no have scale augmentation). +* Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time. + Note that for R-CNN-style models, the throughput of a model typically changes during training, because + it depends on the predictions of the model. Therefore this metric is not directly comparable with + "train speed" in model zoo, which is the average speed of the entire training run. 
+ + +### Main Results + +```eval_rst ++-------------------------------+--------------------+ +| Implementation | Throughput (img/s) | ++===============================+====================+ +| |D2| |PT| | 62 | ++-------------------------------+--------------------+ +| mmdetection_ |PT| | 53 | ++-------------------------------+--------------------+ +| maskrcnn-benchmark_ |PT| | 53 | ++-------------------------------+--------------------+ +| tensorpack_ |TF| | 50 | ++-------------------------------+--------------------+ +| simpledet_ |mxnet| | 39 | ++-------------------------------+--------------------+ +| Detectron_ |C2| | 19 | ++-------------------------------+--------------------+ +| `matterport/Mask_RCNN`__ |TF| | 14 | ++-------------------------------+--------------------+ + +.. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/ +.. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN +.. _mmdetection: https://github.com/open-mmlab/mmdetection/ +.. _simpledet: https://github.com/TuSimple/simpledet/ +.. _Detectron: https://github.com/facebookresearch/Detectron +__ https://github.com/matterport/Mask_RCNN/ + +.. |D2| image:: https://github.com/facebookresearch/detectron2/raw/master/.github/Detectron2-Logo-Horz.svg?sanitize=true + :height: 15pt + :target: https://github.com/facebookresearch/detectron2/ +.. |PT| image:: https://pytorch.org/assets/images/logo-icon.svg + :width: 15pt + :height: 15pt + :target: https://pytorch.org +.. |TF| image:: https://static.nvidiagrid.net/ngc/containers/tensorflow.png + :width: 15pt + :height: 15pt + :target: https://tensorflow.org +.. |mxnet| image:: https://github.com/dmlc/web-data/raw/master/mxnet/image/mxnet_favicon.png + :width: 15pt + :height: 15pt + :target: https://mxnet.apache.org/ +.. |C2| image:: https://caffe2.ai/static/logo.svg + :width: 15pt + :height: 15pt + :target: https://caffe2.ai +``` + + +Details for each implementation: + +* __Detectron2__: with release v0.1.2, run: + ``` + python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8 + ``` + +* __mmdetection__: at commit `b0d845f`, run + ``` + ./tools/dist_train.sh configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py 8 + ``` + +* __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i โ€˜s/torch.uint8/torch.bool/gโ€™ **/*.py; sed -i 's/AT_CHECK/TORCH_CHECK/g' **/*.cu` + to make it compatible with PyTorch 1.5. Then, run training with + ``` + python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml + ``` + The speed we observed is faster than its model zoo, likely due to different software versions. + +* __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run + ``` + mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz + ``` + +* __SimpleDet__: at commit `9187a1`, run + ``` + python detection_train.py --config config/mask_r50v1_fpn_1x.py + ``` + +* __Detectron__: run + ``` + python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml + ``` + Note that many of its ops run on CPUs, therefore the performance is limited. 
+ +* __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run + ``` + python coco.py train --dataset=/data/coco/ --model=imagenet + ``` + Note that many small details in this implementation might be different + from Detectron's standards. + +
+ + (diff to make it use the same hyperparameters - click to expand) + + + ```diff + diff --git i/mrcnn/model.py w/mrcnn/model.py + index 62cb2b0..61d7779 100644 + --- i/mrcnn/model.py + +++ w/mrcnn/model.py + @@ -2367,8 +2367,8 @@ class MaskRCNN(): + epochs=epochs, + steps_per_epoch=self.config.STEPS_PER_EPOCH, + callbacks=callbacks, + - validation_data=val_generator, + - validation_steps=self.config.VALIDATION_STEPS, + + #validation_data=val_generator, + + #validation_steps=self.config.VALIDATION_STEPS, + max_queue_size=100, + workers=workers, + use_multiprocessing=True, + diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py + index d2bf53b..060172a 100644 + --- i/mrcnn/parallel_model.py + +++ w/mrcnn/parallel_model.py + @@ -32,6 +32,7 @@ class ParallelModel(KM.Model): + keras_model: The Keras model to parallelize + gpu_count: Number of GPUs. Must be > 1 + """ + + super().__init__() + self.inner_model = keras_model + self.gpu_count = gpu_count + merged_outputs = self.make_parallel() + diff --git i/samples/coco/coco.py w/samples/coco/coco.py + index 5d172b5..239ed75 100644 + --- i/samples/coco/coco.py + +++ w/samples/coco/coco.py + @@ -81,7 +81,10 @@ class CocoConfig(Config): + IMAGES_PER_GPU = 2 + + # Uncomment to train on 8 GPUs (default is 1) + - # GPU_COUNT = 8 + + GPU_COUNT = 8 + + BACKBONE = "resnet50" + + STEPS_PER_EPOCH = 50 + + TRAIN_ROIS_PER_IMAGE = 512 + + # Number of classes (including background) + NUM_CLASSES = 1 + 80 # COCO has 80 classes + @@ -496,29 +499,10 @@ if __name__ == '__main__': + # *** This training schedule is an example. Update to your needs *** + + # Training - Stage 1 + - print("Training network heads") + model.train(dataset_train, dataset_val, + learning_rate=config.LEARNING_RATE, + epochs=40, + - layers='heads', + - augmentation=augmentation) + - + - # Training - Stage 2 + - # Finetune layers from ResNet stage 4 and up + - print("Fine tune Resnet stage 4 and up") + - model.train(dataset_train, dataset_val, + - learning_rate=config.LEARNING_RATE, + - epochs=120, + - layers='4+', + - augmentation=augmentation) + - + - # Training - Stage 3 + - # Fine tune all layers + - print("Fine tune all layers") + - model.train(dataset_train, dataset_val, + - learning_rate=config.LEARNING_RATE / 10, + - epochs=160, + - layers='all', + + layers='3+', + augmentation=augmentation) + + elif args.command == "evaluate": + ``` + +
diff --git a/preprocess/mhp_extension/detectron2/docs/notes/changelog.md b/preprocess/mhp_extension/detectron2/docs/notes/changelog.md new file mode 100644 index 0000000000000000000000000000000000000000..c0d4f5900bc64dbc4d2ce2d9bd31d32b9ee39f8f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/notes/changelog.md @@ -0,0 +1,26 @@ +# Change Log + +### Releases +See release log at +[https://github.com/facebookresearch/detectron2/releases](https://github.com/facebookresearch/detectron2/releases). + +### Notable Backward Incompatible Changes: + +* 03/30/2020: Custom box head's `output_size` changed to `output_shape`. +* 02/14/2020,02/18/2020: Mask head and keypoint head now include logic for losses & inference. Custom heads + should overwrite the feature computation by `layers()` method. +* 11/11/2019: `detectron2.data.detection_utils.read_image` transposes images with exif information. + +### Config Version Change Log + +* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`. +* v2: A batch of rename of many configurations before release. + +### Silent Regression in Historical Versions: + +We list a few silent regressions since they may silently produce incorrect results and will be hard to debug. + +* 04/01/2020 - 05/11/2020: Bad accuracy if `TRAIN_ON_PRED_BOXES` is set to True. +* 03/30/2020 - 04/01/2020: ResNets are not correctly built. +* 12/19/2019 - 12/26/2019: Using aspect ratio grouping causes a drop in accuracy. +* release - 11/9/2019: Test time augmentation does not predict the last category. diff --git a/preprocess/mhp_extension/detectron2/docs/notes/compatibility.md b/preprocess/mhp_extension/detectron2/docs/notes/compatibility.md new file mode 100644 index 0000000000000000000000000000000000000000..f7b66c2e384b162864fb96a2fed44ba3084b8226 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/notes/compatibility.md @@ -0,0 +1,83 @@ +# Compatibility with Other Libraries + +## Compatibility with Detectron (and maskrcnn-benchmark) + +Detectron2 addresses some legacy issues left in Detectron. As a result, their models +are not compatible: +running inference with the same model weights will produce different results in the two code bases. + +The major differences regarding inference are: + +- The height and width of a box with corners (x1, y1) and (x2, y2) is now computed more naturally as + width = x2 - x1 and height = y2 - y1; + In Detectron, a "+ 1" was added both height and width. + + Note that the relevant ops in Caffe2 have [adopted this change of convention](https://github.com/pytorch/pytorch/pull/20550) + with an extra option. + So it is still possible to run inference with a Detectron2-trained model in Caffe2. + + The change in height/width calculations most notably changes: + - encoding/decoding in bounding box regression. + - non-maximum suppression. The effect here is very negligible, though. + +- RPN now uses simpler anchors with fewer quantization artifacts. + + In Detectron, the anchors were quantized and + [do not have accurate areas](https://github.com/facebookresearch/Detectron/issues/227). + In Detectron2, the anchors are center-aligned to feature grid points and not quantized. + +- Classification layers have a different ordering of class labels. + + This involves any trainable parameter with shape (..., num_categories + 1, ...). + In Detectron2, integer labels [0, K-1] correspond to the K = num_categories object categories + and the label "K" corresponds to the special "background" category. 
+ In Detectron, label "0" means background, and labels [1, K] correspond to the K categories. + +- ROIAlign is implemented differently. The new implementation is [available in Caffe2](https://github.com/pytorch/pytorch/pull/23706). + + 1. All the ROIs are shifted by half a pixel compared to Detectron in order to create better image-feature-map alignment. + See `layers/roi_align.py` for details. + To enable the old behavior, use `ROIAlign(aligned=False)`, or `POOLER_TYPE=ROIAlign` instead of + `ROIAlignV2` (the default). + + 1. The ROIs are not required to have a minimum size of 1. + This will lead to tiny differences in the output, but should be negligible. + +- Mask inference function is different. + + In Detectron2, the "paste_mask" function is different and should be more accurate than in Detectron. This change + can improve mask AP on COCO by ~0.5% absolute. + +There are some other differences in training as well, but they won't affect +model-level compatibility. The major ones are: + +- We fixed a [bug](https://github.com/facebookresearch/Detectron/issues/459) in + Detectron, by making `RPN.POST_NMS_TOPK_TRAIN` per-image, rather than per-batch. + The fix may lead to a small accuracy drop for a few models (e.g. keypoint + detection) and will require some parameter tuning to match the Detectron results. +- For simplicity, we change the default loss in bounding box regression to L1 loss, instead of smooth L1 loss. + We have observed that this tends to slightly decrease box AP50 while improving box AP for higher + overlap thresholds (and leading to a slight overall improvement in box AP). +- We interpret the coordinates in COCO bounding box and segmentation annotations + as coordinates in range `[0, width]` or `[0, height]`. The coordinates in + COCO keypoint annotations are interpreted as pixel indices in range `[0, width - 1]` or `[0, height - 1]`. + Note that this affects how flip augmentation is implemented. + + +We will later share more details and rationale behind the above mentioned issues +about pixels, coordinates, and "+1"s. + + +## Compatibility with Caffe2 + +As mentioned above, despite the incompatibilities with Detectron, the relevant +ops have been implemented in Caffe2. +Therefore, models trained with detectron2 can be converted in Caffe2. +See [Deployment](../tutorials/deployment.md) for the tutorial. + +## Compatibility with TensorFlow + +Most ops are available in TensorFlow, although some tiny differences in +the implementation of resize / ROIAlign / padding need to be addressed. +A working conversion script is provided by [tensorpack FasterRCNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2) +to run a standard detectron2 model in TensorFlow. diff --git a/preprocess/mhp_extension/detectron2/docs/notes/contributing.md b/preprocess/mhp_extension/detectron2/docs/notes/contributing.md new file mode 100644 index 0000000000000000000000000000000000000000..81936dfedb495dd5cd21da2bfcf9819b97ed1dff --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/notes/contributing.md @@ -0,0 +1,49 @@ +# Contributing to detectron2 + +## Issues +We use GitHub issues to track public bugs and questions. +Please make sure to follow one of the +[issue templates](https://github.com/facebookresearch/detectron2/issues/new/choose) +when reporting any issues. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. 
In those cases, please go through the process +outlined on that page and do not file a public issue. + +## Pull Requests +We actively welcome your pull requests. + +However, if you're adding any significant features (e.g. > 50 lines), please +make sure to have a corresponding issue to discuss your motivation and proposals, +before sending a PR. We do not always accept new features, and we take the following +factors into consideration: + +1. Whether the same feature can be achieved without modifying detectron2. +Detectron2 is designed so that you can implement many extensions from the outside, e.g. +those in [projects](https://github.com/facebookresearch/detectron2/tree/master/projects). +If some part is not as extensible, you can also bring up the issue to make it more extensible. +2. Whether the feature is potentially useful to a large audience, or only to a small portion of users. +3. Whether the proposed solution has a good design / interface. +4. Whether the proposed solution adds extra mental/practical overhead to users who don't + need such feature. +5. Whether the proposed solution breaks existing APIs. + +When sending a PR, please do: + +1. If a PR contains multiple orthogonal changes, split it to several PRs. +2. If you've added code that should be tested, add tests. +3. For PRs that need experiments (e.g. adding a new model or new methods), + you don't need to update model zoo, but do provide experiment results in the description of the PR. +4. If APIs are changed, update the documentation. +5. Make sure your code lints with `./dev/linter.sh`. + + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## License +By contributing to detectron2, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. diff --git a/preprocess/mhp_extension/detectron2/docs/notes/index.rst b/preprocess/mhp_extension/detectron2/docs/notes/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..63cf907be7bb15f5316af6d44a46df601755a86b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/notes/index.rst @@ -0,0 +1,10 @@ +Notes +====================================== + +.. toctree:: + :maxdepth: 2 + + benchmarks + compatibility + contributing + changelog diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/README.md b/preprocess/mhp_extension/detectron2/docs/tutorials/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1ca9c94d042ef838143a45490fe6b4556c19f3c9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/README.md @@ -0,0 +1,4 @@ +# Read the docs: + +The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). +Documents in this directory are not meant to be read on github. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/builtin_datasets.md b/preprocess/mhp_extension/detectron2/docs/tutorials/builtin_datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..1a2633f95e6f6a5e54c8beca102a490036478587 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/builtin_datasets.md @@ -0,0 +1,99 @@ +# Setup Builtin Datasets + +Detectron2 has builtin support for a few datasets. 
+The datasets are assumed to exist in a directory specified by the environment variable +`DETECTRON2_DATASETS`. +Under this directory, detectron2 expects to find datasets in the structure described below. + +You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. +If left unset, the default is `./datasets` relative to your current working directory. + +The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) +contains configs and models that use these builtin datasets. + +## Expected dataset structure for COCO instance/keypoint detection: + +``` +coco/ + annotations/ + instances_{train,val}2017.json + person_keypoints_{train,val}2017.json + {train,val}2017/ + # image files that are mentioned in the corresponding json +``` + +You can use the 2014 version of the dataset as well. + +Some of the builtin tests (`dev/run_*_tests.sh`) uses a tiny version of the COCO dataset, +which you can download with `./prepare_for_tests.sh`. + +## Expected dataset structure for PanopticFPN: + +``` +coco/ + annotations/ + panoptic_{train,val}2017.json + panoptic_{train,val}2017/ # png annotations + panoptic_stuff_{train,val}2017/ # generated by the script mentioned below +``` + +Install panopticapi by: +``` +pip install git+https://github.com/cocodataset/panopticapi.git +``` +Then, run `python prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations. + +## Expected dataset structure for LVIS instance segmentation: +``` +coco/ + {train,val,test}2017/ +lvis/ + lvis_v0.5_{train,val}.json + lvis_v0.5_image_info_test.json +``` + +Install lvis-api by: +``` +pip install git+https://github.com/lvis-dataset/lvis-api.git +``` + +Run `python prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations for evaluation of models trained on the COCO dataset. + +## Expected dataset structure for cityscapes: +``` +cityscapes/ + gtFine/ + train/ + aachen/ + color.png, instanceIds.png, labelIds.png, polygons.json, + labelTrainIds.png + ... + val/ + test/ + leftImg8bit/ + train/ + val/ + test/ +``` +Install cityscapes scripts by: +``` +pip install git+https://github.com/mcordts/cityscapesScripts.git +``` + +Note: labelTrainIds.png are created using cityscapesescript with: +``` +CITYSCAPES_DATASET=$DETECTRON2_DATASETS/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py +``` +They are not needed for instance segmentation. + +## Expected dataset structure for Pascal VOC: +``` +VOC20{07,12}/ + Annotations/ + ImageSets/ + Main/ + trainval.txt + test.txt + # train.txt or val.txt, if you use these splits + JPEGImages/ +``` diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/configs.md b/preprocess/mhp_extension/detectron2/docs/tutorials/configs.md new file mode 100644 index 0000000000000000000000000000000000000000..ea82583825b51955993ca87d14c17ffb3ab031f4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/configs.md @@ -0,0 +1,58 @@ +# Configs + +Detectron2 provides a key-value based config system that can be +used to obtain standard, common behaviors. + +Detectron2's config system uses YAML and [yacs](https://github.com/rbgirshick/yacs). +In addition to the [basic operations](../modules/config.html#detectron2.config.CfgNode) +that access and update a config, we provide the following extra functionalities: + +1. The config can have `_BASE_: base.yaml` field, which will load a base config first. 
+ Values in the base config will be overwritten in sub-configs, if there are any conflicts. + We provided several base configs for standard model architectures. +2. We provide config versioning, for backward compatibility. + If your config file is versioned with a config line like `VERSION: 2`, + detectron2 will still recognize it even if we change some keys in the future. + +"Config" is a very limited abstraction. +We do not expect all features in detectron2 to be available through configs. +If you need something that's not available in the config space, +please write code using detectron2's API. + +### Basic Usage + +Some basic usage of the `CfgNode` object is shown here. See more in [documentation](../modules/config.html#detectron2.config.CfgNode). +```python +from detectron2.config import get_cfg +cfg = get_cfg() # obtain detectron2's default config +cfg.xxx = yyy # add new configs for your own custom components +cfg.merge_from_file("my_cfg.yaml") # load values from a file + +cfg.merge_from_list(["MODEL.WEIGHTS", "weights.pth"]) # can also load values from a list of str +print(cfg.dump()) # print formatted configs +``` + +Many builtin tools in detectron2 accepts command line config overwrite: +Key-value pairs provided in the command line will overwrite the existing values in the config file. +For example, [demo.py](../../demo/demo.py) can be used with +``` +./demo.py --config-file config.yaml [--other-options] \ + --opts MODEL.WEIGHTS /path/to/weights INPUT.MIN_SIZE_TEST 1000 +``` + +To see a list of available configs in detectron2 and what they mean, +check [Config References](../modules/config.html#config-references) + + +### Best Practice with Configs + +1. Treat the configs you write as "code": avoid copying them or duplicating them; use `_BASE_` + to share common parts between configs. + +2. Keep the configs you write simple: don't include keys that do not affect the experimental setting. + +3. Keep a version number in your configs (or the base config), e.g., `VERSION: 2`, + for backward compatibility. + We print a warning when reading a config without version number. + The official configs do not include version number because they are meant to + be always up-to-date. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/data_loading.md b/preprocess/mhp_extension/detectron2/docs/tutorials/data_loading.md new file mode 100644 index 0000000000000000000000000000000000000000..bb037ca534ccbb0cf82c456d0cd54544520b3a3f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/data_loading.md @@ -0,0 +1,77 @@ + +# Use Custom Dataloaders + +## How the Existing Dataloader Works + +Detectron2 contains a builtin data loading pipeline. +It's good to understand how it works, in case you need to write a custom one. + +Detectron2 provides two functions +[build_detection_{train,test}_loader](../modules/data.html#detectron2.data.build_detection_train_loader) +that create a default data loader from a given config. +Here is how `build_detection_{train,test}_loader` work: + +1. It takes the name of a registered dataset (e.g., "coco_2017_train") and loads a `list[dict]` representing the dataset items + in a lightweight, canonical format. These dataset items are not yet ready to be used by the model (e.g., images are + not loaded into memory, random augmentations have not been applied, etc.). + Details about the dataset format and dataset registration can be found in + [datasets](./datasets.md). +2. 
Each dict in this list is mapped by a function ("mapper"): + * Users can customize this mapping function by specifying the "mapper" argument in + `build_detection_{train,test}_loader`. The default mapper is [DatasetMapper](../modules/data.html#detectron2.data.DatasetMapper). + * The output format of such function can be arbitrary, as long as it is accepted by the consumer of this data loader (usually the model). + The outputs of the default mapper, after batching, follow the default model input format documented in + [Use Models](./models.html#model-input-format). + * The role of the mapper is to transform the lightweight, canonical representation of a dataset item into a format + that is ready for the model to consume (including, e.g., read images, perform random data augmentation and convert to torch Tensors). + If you would like to perform custom transformations to data, you often want a custom mapper. +3. The outputs of the mapper are batched (simply into a list). +4. This batched data is the output of the data loader. Typically, it's also the input of + `model.forward()`. + + +## Write a Custom Dataloader + +Using a different "mapper" with `build_detection_{train,test}_loader(mapper=)` works for most use cases +of custom data loading. +For example, if you want to resize all images to a fixed size for Mask R-CNN training, write this: + +```python +from detectron2.data import build_detection_train_loader +from detectron2.data import transforms as T +from detectron2.data import detection_utils as utils + +def mapper(dataset_dict): + # Implement a mapper, similar to the default DatasetMapper, but with your own customizations + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format="BGR") + image, transforms = T.apply_transform_gens([T.Resize((800, 800))], image) + dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32")) + + annos = [ + utils.transform_instance_annotations(obj, transforms, image.shape[:2]) + for obj in dataset_dict.pop("annotations") + if obj.get("iscrowd", 0) == 0 + ] + instances = utils.annotations_to_instances(annos, image.shape[:2]) + dataset_dict["instances"] = utils.filter_empty_instances(instances) + return dataset_dict + +data_loader = build_detection_train_loader(cfg, mapper=mapper) +# use this dataloader instead of the default +``` +Refer to [API documentation of detectron2.data](../modules/data) for details. + +If you want to change not only the mapper (e.g., to write different sampling or batching logic), +you can write your own data loader. The data loader is simply a +python iterator that produces [the format](./models.md) your model accepts. +You can implement it using any tools you like. + +## Use a Custom Dataloader + +If you use [DefaultTrainer](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer), +you can overwrite its `build_{train,test}_loader` method to use your own dataloader. +See the [densepose dataloader](../../projects/DensePose/train_net.py) +for an example. + +If you write your own training loop, you can plug in your data loader easily. 
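+
+For reference, a working data loader can be a thin wrapper around `torch.utils.data` that yields the
+`list[dict]` format the model accepts. The sketch below is only an illustration (not detectron2's own
+implementation): it assumes a dataset registered as "my_dataset" and reuses the `mapper` defined above
+(which itself also needs `import copy` and `import torch` to run); the class name is made up.
+
+```python
+import torch
+from detectron2.data import DatasetCatalog
+
+class MappedDataset(torch.utils.data.Dataset):
+    """Lazily applies a mapper to the lightweight dataset dicts."""
+    def __init__(self, dataset_dicts, mapper):
+        self.dataset_dicts = dataset_dicts
+        self.mapper = mapper
+
+    def __len__(self):
+        return len(self.dataset_dicts)
+
+    def __getitem__(self, idx):
+        return self.mapper(self.dataset_dicts[idx])
+
+dataset_dicts = DatasetCatalog.get("my_dataset")  # list[dict] in the standard format
+data_loader = torch.utils.data.DataLoader(
+    MappedDataset(dataset_dicts, mapper),
+    batch_size=2,
+    shuffle=True,
+    collate_fn=lambda batch: batch,  # the model consumes a plain list[dict]
+)
+```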
diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/datasets.md b/preprocess/mhp_extension/detectron2/docs/tutorials/datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..8dc1c0c55598887e4de73e988567753ebf4538e2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/datasets.md @@ -0,0 +1,221 @@ +# Use Custom Datasets + +Datasets that have builtin support in detectron2 are listed in [datasets](../../datasets). +If you want to use a custom dataset while also reusing detectron2's data loaders, +you will need to + +1. __Register__ your dataset (i.e., tell detectron2 how to obtain your dataset). +2. Optionally, __register metadata__ for your dataset. + +Next, we explain the above two concepts in detail. + +The [Colab tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +has a live example of how to register and train on a dataset of custom formats. + +### Register a Dataset + +To let detectron2 know how to obtain a dataset named "my_dataset", you will implement +a function that returns the items in your dataset and then tell detectron2 about this +function: +```python +def my_dataset_function(): + ... + return list[dict] in the following format + +from detectron2.data import DatasetCatalog +DatasetCatalog.register("my_dataset", my_dataset_function) +``` + +Here, the snippet associates a dataset "my_dataset" with a function that returns the data. +The registration stays effective until the process exists. + +The function can processes data from its original format into either one of the following: +1. Detectron2's standard dataset dict, described below. This will work with many other builtin + features in detectron2, so it's recommended to use it when it's sufficient for your task. +2. Your custom dataset dict. You can also return arbitrary dicts in your own format, + such as adding extra keys for new tasks. + Then you will need to handle them properly downstream as well. + See below for more details. + +#### Standard Dataset Dicts + +For standard tasks +(instance detection, instance/semantic/panoptic segmentation, keypoint detection), +we load the original dataset into `list[dict]` with a specification similar to COCO's json annotations. +This is our standard representation for a dataset. + +Each dict contains information about one image. +The dict may have the following fields, +and the required fields vary based on what the dataloader or the task needs (see more below). + ++ `file_name`: the full path to the image file. Will apply rotation and flipping if the image has such exif information. ++ `height`, `width`: integer. The shape of image. ++ `image_id` (str or int): a unique id that identifies this image. Used + during evaluation to identify the images, but a dataset may use it for different purposes. ++ `annotations` (list[dict]): each dict corresponds to annotations of one instance + in this image. Required by instance detection/segmentation or keypoint detection tasks. + + Images with empty `annotations` will by default be removed from training, + but can be included using `DATALOADER.FILTER_EMPTY_ANNOTATIONS`. + + Each dict contains the following keys, of which `bbox`,`bbox_mode` and `category_id` are required: + + `bbox` (list[float]): list of 4 numbers representing the bounding box of the instance. + + `bbox_mode` (int): the format of bbox. + It must be a member of + [structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode). 
+ Currently supports: `BoxMode.XYXY_ABS`, `BoxMode.XYWH_ABS`. + + `category_id` (int): an integer in the range [0, num_categories) representing the category label. + The value num_categories is reserved to represent the "background" category, if applicable. + + `segmentation` (list[list[float]] or dict): the segmentation mask of the instance. + + If `list[list[float]]`, it represents a list of polygons, one for each connected component + of the object. Each `list[float]` is one simple polygon in the format of `[x1, y1, ..., xn, yn]`. + The Xs and Ys are either relative coordinates in [0, 1], or absolute coordinates, + depend on whether "bbox_mode" is relative. + + If `dict`, it represents the per-pixel segmentation mask in COCO's RLE format. The dict should have + keys "size" and "counts". You can convert a uint8 segmentation mask of 0s and 1s into + RLE format by `pycocotools.mask.encode(np.asarray(mask, order="F"))`. + + `keypoints` (list[float]): in the format of [x1, y1, v1,..., xn, yn, vn]. + v[i] means the [visibility](http://cocodataset.org/#format-data) of this keypoint. + `n` must be equal to the number of keypoint categories. + The Xs and Ys are either relative coordinates in [0, 1], or absolute coordinates, + depend on whether "bbox_mode" is relative. + + Note that the coordinate annotations in COCO format are integers in range [0, H-1 or W-1]. + By default, detectron2 adds 0.5 to absolute keypoint coordinates to convert them from discrete + pixel indices to floating point coordinates. + + `iscrowd`: 0 (default) or 1. Whether this instance is labeled as COCO's "crowd + region". Don't include this field if you don't know what it means. ++ `sem_seg_file_name`: the full path to the ground truth semantic segmentation file. + Required by semantic segmentation task. + It should be an image whose pixel values are integer labels. + + +Fast R-CNN (with precomputed proposals) is rarely used today. +To train a Fast R-CNN, the following extra keys are needed: + ++ `proposal_boxes` (array): 2D numpy array with shape (K, 4) representing K precomputed proposal boxes for this image. ++ `proposal_objectness_logits` (array): numpy array with shape (K, ), which corresponds to the objectness + logits of proposals in 'proposal_boxes'. ++ `proposal_bbox_mode` (int): the format of the precomputed proposal bbox. + It must be a member of + [structures.BoxMode](../modules/structures.html#detectron2.structures.BoxMode). + Default is `BoxMode.XYXY_ABS`. + +#### Custom Dataset Dicts for New Tasks + +In the `list[dict]` that your dataset function returns, the dictionary can also have arbitrary custom data. +This will be useful for a new task that needs extra information not supported +by the standard dataset dicts. In this case, you need to make sure the downstream code can handle your data +correctly. Usually this requires writing a new `mapper` for the dataloader (see [Use Custom Dataloaders](./data_loading.md)). + +When designing a custom format, note that all dicts are stored in memory +(sometimes serialized and with multiple copies). +To save memory, each dict is meant to contain small but sufficient information +about each sample, such as file names and annotations. +Loading full samples typically happens in the data loader. + +For attributes shared among the entire dataset, use `Metadata` (see below). +To avoid extra memory, do not save such information repeatly for each sample. 
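+
+Whether you use the standard dicts or a custom format, registration looks the same. Below is a minimal,
+hypothetical example that returns standard dataset dicts; the directory layout, the per-image json files
+and all field values are invented purely for illustration:
+
+```python
+import json
+import os
+
+from detectron2.data import DatasetCatalog
+from detectron2.structures import BoxMode
+
+def my_dataset_function(root="/path/to/my_dataset"):
+    dataset_dicts = []
+    image_dir = os.path.join(root, "images")
+    for idx, fname in enumerate(sorted(os.listdir(image_dir))):
+        # Hypothetical layout: annotations/<image name>.json stores sizes, XYWH boxes and labels.
+        with open(os.path.join(root, "annotations", fname + ".json")) as f:
+            ann = json.load(f)
+        dataset_dicts.append({
+            "file_name": os.path.join(image_dir, fname),
+            "image_id": idx,
+            "height": ann["height"],
+            "width": ann["width"],
+            "annotations": [
+                {
+                    "bbox": box,                    # [x, y, w, h]
+                    "bbox_mode": BoxMode.XYWH_ABS,
+                    "category_id": label,           # in [0, num_categories)
+                }
+                for box, label in zip(ann["boxes"], ann["labels"])
+            ],
+        })
+    return dataset_dicts
+
+DatasetCatalog.register("my_dataset", my_dataset_function)
+```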
+ +### "Metadata" for Datasets + +Each dataset is associated with some metadata, accessible through +`MetadataCatalog.get(dataset_name).some_metadata`. +Metadata is a key-value mapping that contains information that's shared among +the entire dataset, and usually is used to interpret what's in the dataset, e.g., +names of classes, colors of classes, root of files, etc. +This information will be useful for augmentation, evaluation, visualization, logging, etc. +The structure of metadata depends on the what is needed from the corresponding downstream code. + +If you register a new dataset through `DatasetCatalog.register`, +you may also want to add its corresponding metadata through +`MetadataCatalog.get(dataset_name).some_key = some_value`, to enable any features that need the metadata. +You can do it like this (using the metadata key "thing_classes" as an example): + +```python +from detectron2.data import MetadataCatalog +MetadataCatalog.get("my_dataset").thing_classes = ["person", "dog"] +``` + +Here is a list of metadata keys that are used by builtin features in detectron2. +If you add your own dataset without these metadata, some features may be +unavailable to you: + +* `thing_classes` (list[str]): Used by all instance detection/segmentation tasks. + A list of names for each instance/thing category. + If you load a COCO format dataset, it will be automatically set by the function `load_coco_json`. + +* `thing_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each thing category. + Used for visualization. If not given, random colors are used. + +* `stuff_classes` (list[str]): Used by semantic and panoptic segmentation tasks. + A list of names for each stuff category. + +* `stuff_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each stuff category. + Used for visualization. If not given, random colors are used. + +* `keypoint_names` (list[str]): Used by keypoint localization. A list of names for each keypoint. + +* `keypoint_flip_map` (list[tuple[str]]): Used by the keypoint localization task. A list of pairs of names, + where each pair are the two keypoints that should be flipped if the image is + flipped horizontally during augmentation. +* `keypoint_connection_rules`: list[tuple(str, str, (r, g, b))]. Each tuple specifies a pair of keypoints + that are connected and the color to use for the line between them when visualized. + +Some additional metadata that are specific to the evaluation of certain datasets (e.g. COCO): + +* `thing_dataset_id_to_contiguous_id` (dict[int->int]): Used by all instance detection/segmentation tasks in the COCO format. + A mapping from instance class ids in the dataset to contiguous ids in range [0, #class). + Will be automatically set by the function `load_coco_json`. + +* `stuff_dataset_id_to_contiguous_id` (dict[int->int]): Used when generating prediction json files for + semantic/panoptic segmentation. + A mapping from semantic segmentation class ids in the dataset + to contiguous ids in [0, num_categories). It is useful for evaluation only. + +* `json_file`: The COCO annotation json file. Used by COCO evaluation for COCO-format datasets. +* `panoptic_root`, `panoptic_json`: Used by panoptic evaluation. +* `evaluator_type`: Used by the builtin main training script to select + evaluator. Don't use it in a new training script. + You can just provide the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator) + for your dataset directly in your main script. 
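+
+Putting the catalogs together, a short sketch that fills in several of the keys above for the
+"my_dataset" example (all values here are placeholders):
+
+```python
+from detectron2.data import MetadataCatalog
+
+meta = MetadataCatalog.get("my_dataset")
+meta.thing_classes = ["person", "dog"]
+meta.thing_colors = [(0, 255, 0), (0, 0, 255)]          # optional, used by the visualizer
+meta.thing_dataset_id_to_contiguous_id = {1: 0, 2: 1}   # mapping used for COCO-format evaluation, if needed
+```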
+ +NOTE: For background on the concept of "thing" and "stuff", see +[On Seeing Stuff: The Perception of Materials by Humans and Machines](http://persci.mit.edu/pub_pdfs/adelson_spie_01.pdf). +In detectron2, the term "thing" is used for instance-level tasks, +and "stuff" is used for semantic segmentation tasks. +Both are used in panoptic segmentation. + +### Register a COCO Format Dataset + +If your dataset is already a json file in the COCO format, +the dataset and its associated metadata can be registered easily with: +```python +from detectron2.data.datasets import register_coco_instances +register_coco_instances("my_dataset", {}, "json_annotation.json", "path/to/image/dir") +``` + +If your dataset is in COCO format but with extra custom per-instance annotations, +the [load_coco_json](../modules/data.html#detectron2.data.datasets.load_coco_json) +function might be useful. + +### Update the Config for New Datasets + +Once you've registered the dataset, you can use the name of the dataset (e.g., "my_dataset" in +example above) in `cfg.DATASETS.{TRAIN,TEST}`. +There are other configs you might want to change to train or evaluate on new datasets: + +* `MODEL.ROI_HEADS.NUM_CLASSES` and `MODEL.RETINANET.NUM_CLASSES` are the number of thing classes + for R-CNN and RetinaNet models, respectively. +* `MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS` sets the number of keypoints for Keypoint R-CNN. + You'll also need to set [Keypoint OKS](http://cocodataset.org/#keypoints-eval) + with `TEST.KEYPOINT_OKS_SIGMAS` for evaluation. +* `MODEL.SEM_SEG_HEAD.NUM_CLASSES` sets the number of stuff classes for Semantic FPN & Panoptic FPN. +* If you're training Fast R-CNN (with precomputed proposals), `DATASETS.PROPOSAL_FILES_{TRAIN,TEST}` + need to match the datasets. The format of proposal files are documented + [here](../modules/data.html#detectron2.data.load_proposals_into_dataset). + +New models +(e.g. [TensorMask](../../projects/TensorMask), +[PointRend](../../projects/PointRend)) +often have similar configs of their own that need to be changed as well. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/deployment.md b/preprocess/mhp_extension/detectron2/docs/tutorials/deployment.md new file mode 100644 index 0000000000000000000000000000000000000000..a473247abf7df74e35b6de71c018f1aa34eaf435 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/deployment.md @@ -0,0 +1,92 @@ +# Deployment + +## Caffe2 Deployment +We currently support converting a detectron2 model to Caffe2 format through ONNX. +The converted Caffe2 model is able to run without detectron2 dependency in either Python or C++. +It has a runtime optimized for CPU & mobile inference, but not for GPU inference. + +Caffe2 conversion requires PyTorch โ‰ฅ 1.4 and ONNX โ‰ฅ 1.6. + +### Coverage + +It supports 3 most common meta architectures: `GeneralizedRCNN`, `RetinaNet`, `PanopticFPN`, +and most official models under these 3 meta architectures. + +Users' custom extensions under these architectures (added through registration) are supported +as long as they do not contain control flow or operators not available in Caffe2 (e.g. deformable convolution). +For example, custom backbones and heads are often supported out of the box. + +### Usage + +The conversion APIs are documented at [the API documentation](../modules/export). +We provide a tool, `caffe2_converter.py` as an example that uses +these APIs to convert a standard model. 
+ +To convert an official Mask R-CNN trained on COCO, first +[prepare the COCO dataset](../../datasets/), then pick the model from [Model Zoo](../../MODEL_ZOO.md), and run: +``` +cd tools/deploy/ && ./caffe2_converter.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + --output ./caffe2_model --run-eval \ + MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \ + MODEL.DEVICE cpu +``` + +Note that: +1. The conversion needs valid sample inputs & weights to trace the model. That's why the script requires the dataset. + You can modify the script to obtain sample inputs in other ways. +2. With the `--run-eval` flag, it will evaluate the converted models to verify its accuracy. + The accuracy is typically slightly different (within 0.1 AP) from PyTorch due to + numerical precisions between different implementations. + It's recommended to always verify the accuracy in case your custom model is not supported by the + conversion. + +The converted model is available at the specified `caffe2_model/` directory. Two files `model.pb` +and `model_init.pb` that contain network structure and network parameters are necessary for deployment. +These files can then be loaded in C++ or Python using Caffe2's APIs. + +The script generates `model.svg` file which contains a visualization of the network. +You can also load `model.pb` to tools such as [netron](https://github.com/lutzroeder/netron) to visualize it. + +### Use the model in C++/Python + +The model can be loaded in C++. An example [caffe2_mask_rcnn.cpp](../../tools/deploy/) is given, +which performs CPU/GPU inference using `COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x`. + +The C++ example needs to be built with: +* PyTorch with caffe2 inside +* gflags, glog, opencv +* protobuf headers that match the version of your caffe2 +* MKL headers if caffe2 is built with MKL + +The following can compile the example inside [official detectron2 docker](../../docker/): +``` +sudo apt update && sudo apt install libgflags-dev libgoogle-glog-dev libopencv-dev +pip install mkl-include +wget https://github.com/protocolbuffers/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz +tar xf protobuf-cpp-3.6.1.tar.gz +export CPATH=$(readlink -f ./protobuf-3.6.1/src/):$HOME/.local/include +export CMAKE_PREFIX_PATH=$HOME/.local/lib/python3.6/site-packages/torch/ +mkdir build && cd build +cmake -DTORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST .. && make + +# To run: +./caffe2_mask_rcnn --predict_net=./model.pb --init_net=./model_init.pb --input=input.jpg +``` + +Note that: + +* All converted models (the .pb files) take two input tensors: + "data" is an NCHW image, and "im_info" is an Nx3 tensor consisting of (height, width, 1.0) for + each image (the shape of "data" might be larger than that in "im_info" due to padding). + +* The converted models do not contain post-processing operations that + transform raw layer outputs into formatted predictions. + The example only produces raw outputs (28x28 masks) from the final + layers that are not post-processed, because in actual deployment, an application often needs + its custom lightweight post-processing (e.g. full-image masks for every detected object is often not necessary). + +We also provide a python wrapper around the converted model, in the +[Caffe2Model.\_\_call\_\_](../modules/export.html#detectron2.export.Caffe2Model.__call__) method. 
+This method has an interface that's identical to the [pytorch versions of models](./models.md), +and it internally applies pre/post-processing code to match the formats. +They can serve as a reference for pre/post-processing in actual deployment. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/evaluation.md b/preprocess/mhp_extension/detectron2/docs/tutorials/evaluation.md new file mode 100644 index 0000000000000000000000000000000000000000..c71adb7eb2e554e5ea848f1feb44bbee01a13f8e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/evaluation.md @@ -0,0 +1,43 @@ + +# Evaluation + +Evaluation is a process that takes a number of inputs/outputs pairs and aggregate them. +You can always [use the model](./models.md) directly and just parse its inputs/outputs manually to perform +evaluation. +Alternatively, evaluation is implemented in detectron2 using the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator) +interface. + +Detectron2 includes a few `DatasetEvaluator` that computes metrics using standard dataset-specific +APIs (e.g., COCO, LVIS). +You can also implement your own `DatasetEvaluator` that performs some other jobs +using the inputs/outputs pairs. +For example, to count how many instances are detected on the validation set: + +``` +class Counter(DatasetEvaluator): + def reset(self): + self.count = 0 + def process(self, inputs, outputs): + for output in outputs: + self.count += len(output["instances"]) + def evaluate(self): + # save self.count somewhere, or print it, or return it. + return {"count": self.count} +``` + +Once you have some `DatasetEvaluator`, you can run it with +[inference_on_dataset](../modules/evaluation.html#detectron2.evaluation.inference_on_dataset). +For example, + +```python +val_results = inference_on_dataset( + model, + val_data_loader, + DatasetEvaluators([COCOEvaluator(...), Counter()])) +``` +Compared to running the evaluation manually using the model, the benefit of this function is that +you can merge evaluators together using [DatasetEvaluators](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluators). +In this way you can run all evaluations without having to go through the dataset multiple times. + +The `inference_on_dataset` function also provides accurate speed benchmarks for the +given model and dataset. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/extend.md b/preprocess/mhp_extension/detectron2/docs/tutorials/extend.md new file mode 100644 index 0000000000000000000000000000000000000000..4232185757139e45078bf58c4f0fffb5fa0e4c04 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/extend.md @@ -0,0 +1,53 @@ +# Extend Detectron2's Defaults + +__Research is about doing things in new ways__. +This brings a tension in how to create abstractions in code, +which is a challenge for any research engineering project of a significant size: + +1. On one hand, it needs to have very thin abstractions to allow for the possibility of doing + everything in new ways. It should be reasonably easy to break existing + abstractions and replace them with new ones. + +2. On the other hand, such a project also needs reasonably high-level + abstractions, so that users can easily do things in standard ways, + without worrying too much about the details that only certain researchers care about. + +In detectron2, there are two types of interfaces that address this tension together: + +1. 
Functions and classes that take a config (`cfg`) argument + (sometimes with only a few extra arguments). + + Such functions and classes implement + the "standard default" behavior: it will read what it needs from the + config and do the "standard" thing. + Users only need to load a given config and pass it around, without having to worry about + which arguments are used and what they all mean. + +2. Functions and classes that have well-defined explicit arguments. + + Each of these is a small building block of the entire system. + They require users' expertise to understand what each argument should be, + and require more effort to stitch together to a larger system. + But they can be stitched together in more flexible ways. + + When you need to implement something not supported by the "standard defaults" + included in detectron2, these well-defined components can be reused. + +3. (experimental) A few classes are implemented with the + [@configurable](../../modules/config.html#detectron2.config.configurable) + decorator - they can be called with either a config, or with explicit arguments. + Their explicit argument interfaces are currently __experimental__ and subject to change. + + +If you only need the standard behavior, the [Beginner's Tutorial](./getting_started.md) +should suffice. If you need to extend detectron2 to your own needs, +see the following tutorials for more details: + +* Detectron2 includes a few standard datasets. To use custom ones, see + [Use Custom Datasets](./datasets.md). +* Detectron2 contains the standard logic that creates a data loader for training/testing from a + dataset, but you can write your own as well. See [Use Custom Data Loaders](./data_loading.md). +* Detectron2 implements many standard detection models, and provide ways for you + to overwrite their behaviors. See [Use Models](./models.md) and [Write Models](./write-models.md). +* Detectron2 provides a default training loop that is good for common training tasks. + You can customize it with hooks, or write your own loop instead. See [training](./training.md). diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/getting_started.md b/preprocess/mhp_extension/detectron2/docs/tutorials/getting_started.md new file mode 100644 index 0000000000000000000000000000000000000000..acaf13f02c906b45ffc2f49ee5a0ce01d82b4786 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/getting_started.md @@ -0,0 +1,79 @@ +## Getting Started with Detectron2 + +This document provides a brief intro of the usage of builtin command-line tools in detectron2. + +For a tutorial that involves actual coding with the API, +see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +which covers how to run inference with an +existing model, and how to train a builtin model on a custom dataset. + +For more advanced tutorials, refer to our [documentation](https://detectron2.readthedocs.io/tutorials/extend.html). + + +### Inference Demo with Pre-trained Models + +1. Pick a model and its config file from + [model zoo](MODEL_ZOO.md), + for example, `mask_rcnn_R_50_FPN_3x.yaml`. +2. We provide `demo.py` that is able to run builtin standard models. 
Run it with: +``` +cd demo/ +python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ + --input input1.jpg input2.jpg \ + [--other-options] + --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl +``` +The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation. +This command will run the inference and show visualizations in an OpenCV window. + +For details of the command line arguments, see `demo.py -h` or look at its source code +to understand its behavior. Some common arguments are: +* To run __on your webcam__, replace `--input files` with `--webcam`. +* To run __on a video__, replace `--input files` with `--video-input video.mp4`. +* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`. +* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`. + + +### Training & Evaluation in Command Line + +We provide a script in "tools/{,plain_}train_net.py", that is made to train +all the configs provided in detectron2. +You may want to use it as a reference to write your own training script. + +To train a model with "train_net.py", first +setup the corresponding datasets following +[datasets/README.md](./datasets/README.md), +then run: +``` +cd tools/ +./train_net.py --num-gpus 8 \ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml +``` + +The configs are made for 8-GPU training. +To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.: +``` +./train_net.py \ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ + --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 +``` + +For most models, CPU training is not supported. + +To evaluate a model's performance, use +``` +./train_net.py \ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ + --eval-only MODEL.WEIGHTS /path/to/checkpoint_file +``` +For more options, see `./train_net.py -h`. + +### Use Detectron2 APIs in Your Code + +See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +to learn how to use detectron2 APIs to: +1. run inference with an existing model +2. train a builtin model on a custom dataset + +See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/master/projects) +for more ways to build your project on detectron2. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/index.rst b/preprocess/mhp_extension/detectron2/docs/tutorials/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..896e71e64139a35a566bbdd76e4b57006af35e2d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/index.rst @@ -0,0 +1,18 @@ +Tutorials +====================================== + +.. 
toctree:: + :maxdepth: 2 + + install + getting_started + builtin_datasets + extend + datasets + data_loading + models + write-models + training + evaluation + configs + deployment diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/install.md b/preprocess/mhp_extension/detectron2/docs/tutorials/install.md new file mode 100644 index 0000000000000000000000000000000000000000..3985f8ae4f5ecde26b310b4ab01c49b922f742e9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/install.md @@ -0,0 +1,184 @@ +## Installation + +Our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) +has step-by-step instructions that install detectron2. +The [Dockerfile](docker) +also installs detectron2 with a few simple commands. + +### Requirements +- Linux or macOS with Python โ‰ฅ 3.6 +- PyTorch โ‰ฅ 1.4 +- [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. + You can install them together at [pytorch.org](https://pytorch.org) to make sure of this. +- OpenCV, optional, needed by demo and visualization +- pycocotools: `pip install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'` + + +### Build Detectron2 from Source + +gcc & g++ โ‰ฅ 5 are required. [ninja](https://ninja-build.org/) is recommended for faster build. +After having them, run: +``` +python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' +# (add --user if you don't have permission) + +# Or, to install it from a local clone: +git clone https://github.com/facebookresearch/detectron2.git +python -m pip install -e detectron2 + +# Or if you are on macOS +# CC=clang CXX=clang++ python -m pip install -e . +``` + +To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the +old build first. You often need to rebuild detectron2 after reinstalling PyTorch. + +### Install Pre-Built Detectron2 (Linux only) +``` +# for CUDA 10.1: +python -m pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html +``` +You can replace cu101 with "cu{100,92}" or "cpu". + +Note that: +1. Such installation has to be used with certain version of official PyTorch release. + See [releases](https://github.com/facebookresearch/detectron2/releases) for requirements. + It will not work with a different version of PyTorch or a non-official build of PyTorch. +2. Such installation is out-of-date w.r.t. master branch of detectron2. It may not be + compatible with the master branch of a research project that uses detectron2 (e.g. those in + [projects](projects) or [meshrcnn](https://github.com/facebookresearch/meshrcnn/)). + +### Common Installation Issues + +If you met issues using the pre-built detectron2, please uninstall it and try building it from source. + +Click each issue for its solutions: + +
+ +Undefined torch/aten/caffe2 symbols, or segmentation fault immediately when running the library. + +
+ +This usually happens when detectron2 or torchvision is not +compiled with the version of PyTorch you're running. + +Pre-built torchvision or detectron2 has to work with the corresponding official release of pytorch. +If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them +following [pytorch.org](http://pytorch.org). So the versions will match. + +If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases) +to see the corresponding pytorch version required for each pre-built detectron2. + +If the error comes from detectron2 or torchvision that you built manually from source, +remove files you built (`build/`, `**/*.so`) and rebuild it so it can pick up the version of pytorch currently in your environment. + +If you cannot resolve this problem, please include the output of `gdb -ex "r" -ex "bt" -ex "quit" --args python -m detectron2.utils.collect_env` +in your issue. +
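+
+A quick way to see the versions that need to agree is a short Python check (a minimal sketch; it only assumes that `torch` and `torchvision` can be imported):
+
+```python
+import torch
+import torchvision
+
+# These versions (and the CUDA version PyTorch was built with) must match what the
+# pre-built torchvision / detectron2 wheels were compiled against.
+print("torch:", torch.__version__, "built with CUDA:", torch.version.cuda)
+print("torchvision:", torchvision.__version__)
+print("CUDA available at runtime:", torch.cuda.is_available())
+```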
+ +
+ +Undefined C++ symbols (e.g. `GLIBCXX`) or C++ symbols not found. + +
+Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime. + +This often happens with old anaconda. +Try `conda update libgcc`. Then rebuild detectron2. + +The fundamental solution is to run the code with proper C++ runtime. +One way is to use `LD_PRELOAD=/path/to/libstdc++.so`. + +
+ +
+ +"Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available". + +
+CUDA is not found when building detectron2. +You should make sure + +``` +python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)' +``` + +print valid outputs at the time you build detectron2. + +Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config. +
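+
+CPU inference can also be configured programmatically. A minimal sketch (the config and weight paths below are placeholders, not files shipped with detectron2):
+
+```python
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultPredictor
+
+cfg = get_cfg()
+cfg.merge_from_file("path/to/config.yaml")     # placeholder config path
+cfg.MODEL.WEIGHTS = "path/to/model_final.pkl"  # placeholder checkpoint path
+cfg.MODEL.DEVICE = "cpu"                       # run the model on CPU
+predictor = DefaultPredictor(cfg)
+```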
+ +
+ +"invalid device function" or "no kernel image is available for execution". + +
+Two possibilities: + +* You build detectron2 with one version of CUDA but run it with a different version. + + To check whether it is the case, + use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. + In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" + to contain cuda libraries of the same version. + + When they are inconsistent, + you need to either install a different build of PyTorch (or build by yourself) + to match your local CUDA installation, or install a different version of CUDA to match PyTorch. + +* Detectron2 or PyTorch/torchvision is not built for the correct GPU architecture (compute compatibility). + + The GPU architecture for PyTorch/detectron2/torchvision is available in the "architecture flags" in + `python -m detectron2.utils.collect_env`. + + The GPU architecture flags of detectron2/torchvision by default matches the GPU model detected + during compilation. This means the compiled code may not work on a different GPU model. + To overwrite the GPU architecture for detectron2/torchvision, use `TORCH_CUDA_ARCH_LIST` environment variable during compilation. + + For example, `export TORCH_CUDA_ARCH_LIST=6.0,7.0` makes it compile for both P100s and V100s. + Visit [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus) to find out + the correct compute compatibility number for your device. + +
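+
+To look up the compute capability of your GPU from Python, a small sketch (assuming only a working PyTorch installation):
+
+```python
+import torch
+
+if torch.cuda.is_available():
+    # e.g. (6, 0) for P100 or (7, 0) for V100; these are the numbers to put in TORCH_CUDA_ARCH_LIST
+    print(torch.cuda.get_device_name(0), torch.cuda.get_device_capability(0))
+```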
+ +
+ +Undefined CUDA symbols; cannot open libcudart.so; other nvcc failures. + +
+The version of NVCC you use to build detectron2 or torchvision does +not match the version of CUDA you are running with. +This often happens when using anaconda's CUDA runtime. + +Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. +In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" +to contain cuda libraries of the same version. + +When they are inconsistent, +you need to either install a different build of PyTorch (or build by yourself) +to match your local CUDA installation, or install a different version of CUDA to match PyTorch. +
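+
+A quick way to compare the two versions side by side (a hedged sketch; it assumes `nvcc` is on your `PATH`):
+
+```python
+import subprocess
+import torch
+
+print("PyTorch built with CUDA:", torch.version.cuda)
+# The "release" line printed by nvcc should match the version above.
+print(subprocess.run(["nvcc", "--version"], capture_output=True, text=True).stdout)
+```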
+ + +
+ +"ImportError: cannot import name '_C'". + +
+Please build and install detectron2 following the instructions above. + +If you are running code from detectron2's root directory, `cd` to a different one. +Otherwise you may not import the code that you installed. +
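+
+To check which copy of detectron2 Python is importing (a minimal sketch; helpful when the source tree shadows the installed package):
+
+```python
+import detectron2
+
+# A path inside the git checkout (rather than site-packages) means you are importing
+# the source tree, which does not contain the compiled `_C` extension.
+print(detectron2.__file__)
+```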
+ +
+ +ONNX conversion segfault after some "TraceWarning". + +
+The ONNX package is compiled with too old compiler. + +Please build and install ONNX from its source code using a compiler +whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`). +
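+
+The compiler PyTorch itself was built with can be printed directly (a one-line sketch assuming only a PyTorch installation):
+
+```python
+import torch
+
+# The build summary includes the GCC version used to compile PyTorch.
+print(torch.__config__.show())
+```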
diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/models.md b/preprocess/mhp_extension/detectron2/docs/tutorials/models.md new file mode 100644 index 0000000000000000000000000000000000000000..456f36d1c03f657ba0b63eb6f26506c4b1b0d60f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/models.md @@ -0,0 +1,151 @@ +# Use Models + +Models (and their sub-models) in detectron2 are built by +functions such as `build_model`, `build_backbone`, `build_roi_heads`: +```python +from detectron2.modeling import build_model +model = build_model(cfg) # returns a torch.nn.Module +``` + +`build_model` only builds the model structure, and fill it with random parameters. +See below for how to load an existing checkpoint to the model, +and how to use the `model` object. + +### Load/Save a Checkpoint +```python +from detectron2.checkpoint import DetectionCheckpointer +DetectionCheckpointer(model).load(file_path) # load a file to model + +checkpointer = DetectionCheckpointer(model, save_dir="output") +checkpointer.save("model_999") # save to output/model_999.pth +``` + +Detectron2's checkpointer recognizes models in pytorch's `.pth` format, as well as the `.pkl` files +in our model zoo. +See [API doc](../modules/checkpoint.html#detectron2.checkpoint.DetectionCheckpointer) +for more details about its usage. + +The model files can be arbitrarily manipulated using `torch.{load,save}` for `.pth` files or +`pickle.{dump,load}` for `.pkl` files. + +### Use a Model + +A model can be called by `outputs = model(inputs)`, where `inputs` is a `list[dict]`. +Each dict corresponds to one image and the required keys +depend on the type of model, and whether the model is in training or evaluation mode. +For example, in order to do inference, +all existing models expect the "image" key, and optionally "height" and "width". +The detailed format of inputs and outputs of existing models are explained below. + +When in training mode, all models are required to be used under an `EventStorage`. +The training statistics will be put into the storage: +```python +from detectron2.utils.events import EventStorage +with EventStorage() as storage: + losses = model(inputs) +``` + +If you only want to do simple inference using an existing model, +[DefaultPredictor](../modules/engine.html#detectron2.engine.defaults.DefaultPredictor) +is a wrapper around model that provides such basic functionality. +It includes default behavior including model loading, preprocessing, +and operates on single image rather than batches. + +### Model Input Format + +Users can implement custom models that support any arbitrary input format. +Here we describe the standard input format that all builtin models support in detectron2. +They all take a `list[dict]` as the inputs. Each dict +corresponds to information about one image. + +The dict may contain the following keys: + +* "image": `Tensor` in (C, H, W) format. The meaning of channels are defined by `cfg.INPUT.FORMAT`. + Image normalization, if any, will be performed inside the model using + `cfg.MODEL.PIXEL_{MEAN,STD}`. +* "instances": an [Instances](../modules/structures.html#detectron2.structures.Instances) + object, with the following fields: + + "gt_boxes": a [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each instance. + + "gt_classes": `Tensor` of long type, a vector of N labels, in range [0, num_categories). 
+ + "gt_masks": a [PolygonMasks](../modules/structures.html#detectron2.structures.PolygonMasks) + or [BitMasks](../modules/structures.html#detectron2.structures.BitMasks) object storing N masks, one for each instance. + + "gt_keypoints": a [Keypoints](../modules/structures.html#detectron2.structures.Keypoints) + object storing N keypoint sets, one for each instance. +* "proposals": an [Instances](../modules/structures.html#detectron2.structures.Instances) + object used only in Fast R-CNN style models, with the following fields: + + "proposal_boxes": a [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing P proposal boxes. + + "objectness_logits": `Tensor`, a vector of P scores, one for each proposal. +* "height", "width": the **desired** output height and width, which is not necessarily the same + as the height or width of the `image` input field. + For example, the `image` input field might be a resized image, + but you may want the outputs to be in **original** resolution. + + If provided, the model will produce output in this resolution, + rather than in the resolution of the `image` as input into the model. This is more efficient and accurate. +* "sem_seg": `Tensor[int]` in (H, W) format. The semantic segmentation ground truth. + Values represent category labels starting from 0. + + +#### How it connects to data loader: + +The output of the default [DatasetMapper]( ../modules/data.html#detectron2.data.DatasetMapper) is a dict +that follows the above format. +After the data loader performs batching, it becomes `list[dict]` which the builtin models support. + + +### Model Output Format + +When in training mode, the builtin models output a `dict[str->ScalarTensor]` with all the losses. + +When in inference mode, the builtin models output a `list[dict]`, one dict for each image. +Based on the tasks the model is doing, each dict may contain the following fields: + +* "instances": [Instances](../modules/structures.html#detectron2.structures.Instances) + object with the following fields: + * "pred_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each detected instance. + * "scores": `Tensor`, a vector of N scores. + * "pred_classes": `Tensor`, a vector of N labels in range [0, num_categories). + + "pred_masks": a `Tensor` of shape (N, H, W), masks for each detected instance. + + "pred_keypoints": a `Tensor` of shape (N, num_keypoint, 3). + Each row in the last dimension is (x, y, score). Scores are larger than 0. +* "sem_seg": `Tensor` of (num_categories, H, W), the semantic segmentation prediction. +* "proposals": [Instances](../modules/structures.html#detectron2.structures.Instances) + object with the following fields: + * "proposal_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) + object storing N boxes. + * "objectness_logits": a torch vector of N scores. +* "panoptic_seg": A tuple of `(Tensor, list[dict])`. The tensor has shape (H, W), where each element + represent the segment id of the pixel. Each dict describes one segment id and has the following fields: + * "id": the segment id + * "isthing": whether the segment is a thing or stuff + * "category_id": the category id of this segment. It represents the thing + class id when `isthing==True`, and the stuff class id otherwise. + + +### Partially execute a model: + +Sometimes you may want to obtain an intermediate tensor inside a model. 
+Since there are typically hundreds of intermediate tensors, there isn't an API that provides you +the intermediate result you need. +You have the following options: + +1. Write a (sub)model. Following the [tutorial](./write-models.md), you can + rewrite a model component (e.g. a head of a model), such that it + does the same thing as the existing component, but returns the output + you need. +2. Partially execute a model. You can create the model as usual, + but use custom code to execute it instead of its `forward()`. For example, + the following code obtains mask features before the mask head. + +```python +images = ImageList.from_tensors(...) # preprocessed input tensor +model = build_model(cfg) +features = model.backbone(images.tensor) +proposals, _ = model.proposal_generator(images, features) +instances = model.roi_heads._forward_box(features, proposals) +mask_features = [features[f] for f in model.roi_heads.in_features] +mask_features = model.roi_heads.mask_pooler(mask_features, [x.pred_boxes for x in instances]) +``` + +Note that both options require you to read the existing forward code to understand +how to write code to obtain the outputs you need. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/training.md b/preprocess/mhp_extension/detectron2/docs/tutorials/training.md new file mode 100644 index 0000000000000000000000000000000000000000..dc7d537254c398252e3b91c25e33489aa91709c4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/training.md @@ -0,0 +1,50 @@ +# Training + +From the previous tutorials, you may now have a custom model and data loader. + +You are free to create your own optimizer and write the training logic: it's +usually easy with PyTorch, and allows researchers to see the entire training +logic more clearly and have full control. +One such example is provided in [tools/plain_train_net.py](../../tools/plain_train_net.py). + +We also provide a standardized "trainer" abstraction with a +[minimal hook system](../modules/engine.html#detectron2.engine.HookBase) +that helps simplify the standard types of training. + +You can use +[SimpleTrainer().train()](../modules/engine.html#detectron2.engine.SimpleTrainer), +which provides minimal abstraction for single-cost single-optimizer single-data-source training. +The builtin `train_net.py` script uses +[DefaultTrainer().train()](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer), +which includes more standard default behavior that one might want to opt in to, +including default configurations for the learning rate schedule, +logging, evaluation, checkpointing, etc. +This also means that it's less likely to support some non-standard behavior +you might want during research. + +To customize the training loop, you can: + +1. If your customization is similar to what `DefaultTrainer` is already doing, +you can change the behavior of `DefaultTrainer` by overwriting [its methods](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer) +in a subclass, like what [tools/train_net.py](../../tools/train_net.py) does. +2. If you need something very novel, you can start from [tools/plain_train_net.py](../../tools/plain_train_net.py) and implement it yourself. + +### Logging of Metrics + +During training, metrics are saved to a centralized [EventStorage](../modules/utils.html#detectron2.utils.events.EventStorage). 
+You can use the following code to access it and log metrics to it: +``` +from detectron2.utils.events import get_event_storage + +# inside the model: +if self.training: + value = # compute the value from inputs + storage = get_event_storage() + storage.put_scalar("some_accuracy", value) +``` + +Refer to its documentation for more details. + +Metrics are then saved to various destinations with [EventWriter](../modules/utils.html#module-detectron2.utils.events). +DefaultTrainer enables a few `EventWriter` with default configurations. +See above for how to customize them. diff --git a/preprocess/mhp_extension/detectron2/docs/tutorials/write-models.md b/preprocess/mhp_extension/detectron2/docs/tutorials/write-models.md new file mode 100644 index 0000000000000000000000000000000000000000..bb87d586d609ca94240f32f2eaab7eadb0d07b93 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/docs/tutorials/write-models.md @@ -0,0 +1,39 @@ +# Write Models + +If you are trying to do something completely new, you may wish to implement +a model entirely from scratch within detectron2. However, in many situations you may +be interested in modifying or extending some components of an existing model. +Therefore, we also provide a registration mechanism that lets you override the +behavior of certain internal components of standard models. + +For example, to add a new backbone, import this code in your code: +```python +from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec + +@BACKBONE_REGISTRY.register() +class ToyBackBone(Backbone): + def __init__(self, cfg, input_shape): + # create your own backbone + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=16, padding=3) + + def forward(self, image): + return {"conv1": self.conv1(image)} + + def output_shape(self): + return {"conv1": ShapeSpec(channels=64, stride=16)} +``` +Then, you can use `cfg.MODEL.BACKBONE.NAME = 'ToyBackBone'` in your config object. +`build_model(cfg)` will then call your `ToyBackBone` instead. + +As another example, to add new abilities to the ROI heads in the Generalized R-CNN meta-architecture, +you can implement a new +[ROIHeads](../modules/modeling.html#detectron2.modeling.ROIHeads) subclass and put it in the `ROI_HEADS_REGISTRY`. +See [densepose in detectron2](../../projects/DensePose) +and [meshrcnn](https://github.com/facebookresearch/meshrcnn) +for examples that implement new ROIHeads to perform new tasks. +And [projects/](../../projects/) +contains more examples that implement different architectures. + +A complete list of registries can be found in [API documentation](../modules/modeling.html#model-registries). +You can register components in these registries to customize different parts of a model, or the +entire model. diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/README.md b/preprocess/mhp_extension/detectron2/projects/DensePose/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fd2f1ee3382365ab53ae44471c90266dff42d883 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/README.md @@ -0,0 +1,54 @@ +# DensePose in Detectron2 +**Dense Human Pose Estimation In The Wild** + +_Rฤฑza Alp Gรผler, Natalia Neverova, Iasonas Kokkinos_ + +[[`densepose.org`](https://densepose.org)] [[`arXiv`](https://arxiv.org/abs/1802.00434)] [[`BibTeX`](#CitingDensePose)] + +Dense human pose estimation aims at mapping all human pixels of an RGB image to the 3D surface of the human body. + +
+ +
+ +In this repository, we provide the code to train and evaluate DensePose-RCNN. We also provide tools to visualize +DensePose annotation and results. + +# Quick Start + +See [ Getting Started ](doc/GETTING_STARTED.md) + +# Model Zoo and Baselines + +We provide a number of baseline results and trained models available for download. See [Model Zoo](doc/MODEL_ZOO.md) for details. + +# License + +Detectron2 is released under the [Apache 2.0 license](../../LICENSE) + +## Citing DensePose + +If you use DensePose, please take the references from the following BibTeX entries: + +For DensePose with estimated confidences: + +``` +@InProceedings{Neverova2019DensePoseConfidences, + title = {Correlated Uncertainty for Learning Dense Correspondences from Noisy Labels}, + author = {Neverova, Natalia and Novotny, David and Vedaldi, Andrea}, + journal = {Advances in Neural Information Processing Systems}, + year = {2019}, +} +``` + +For the original DensePose: + +``` +@InProceedings{Guler2018DensePose, + title={DensePose: Dense Human Pose Estimation In The Wild}, + author={R\{i}za Alp G\"uler, Natalia Neverova, Iasonas Kokkinos}, + journal={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2018} +} +``` + diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/apply_net.py b/preprocess/mhp_extension/detectron2/projects/DensePose/apply_net.py new file mode 100755 index 0000000000000000000000000000000000000000..7262f7c059b42225b809429654d34f29dbd2801f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/apply_net.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import argparse +import glob +import logging +import os +import pickle +import sys +from typing import Any, ClassVar, Dict, List +import torch + +from detectron2.config import get_cfg +from detectron2.data.detection_utils import read_image +from detectron2.engine.defaults import DefaultPredictor +from detectron2.structures.boxes import BoxMode +from detectron2.structures.instances import Instances +from detectron2.utils.logger import setup_logger + +from densepose import add_densepose_config +from densepose.utils.logger import verbosity_to_level +from densepose.vis.base import CompoundVisualizer +from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer +from densepose.vis.densepose import ( + DensePoseResultsContourVisualizer, + DensePoseResultsFineSegmentationVisualizer, + DensePoseResultsUVisualizer, + DensePoseResultsVVisualizer, +) +from densepose.vis.extractor import CompoundExtractor, create_extractor + +DOC = """Apply Net - a tool to print / visualize DensePose results +""" + +LOGGER_NAME = "apply_net" +logger = logging.getLogger(LOGGER_NAME) + +_ACTION_REGISTRY: Dict[str, "Action"] = {} + + +class Action(object): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + parser.add_argument( + "-v", + "--verbosity", + action="count", + help="Verbose mode. 
Multiple -v options increase the verbosity.", + ) + + +def register_action(cls: type): + """ + Decorator for action classes to automate action registration + """ + global _ACTION_REGISTRY + _ACTION_REGISTRY[cls.COMMAND] = cls + return cls + + +class InferenceAction(Action): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(InferenceAction, cls).add_arguments(parser) + parser.add_argument("cfg", metavar="", help="Config file") + parser.add_argument("model", metavar="", help="Model file") + parser.add_argument("input", metavar="", help="Input data") + parser.add_argument( + "--opts", + help="Modify config options using the command-line 'KEY VALUE' pairs", + default=[], + nargs=argparse.REMAINDER, + ) + + @classmethod + def execute(cls: type, args: argparse.Namespace): + logger.info(f"Loading config from {args.cfg}") + opts = [] + cfg = cls.setup_config(args.cfg, args.model, args, opts) + logger.info(f"Loading model from {args.model}") + predictor = DefaultPredictor(cfg) + logger.info(f"Loading data from {args.input}") + file_list = cls._get_input_file_list(args.input) + if len(file_list) == 0: + logger.warning(f"No input images for {args.input}") + return + context = cls.create_context(args) + for file_name in file_list: + img = read_image(file_name, format="BGR") # predictor expects BGR image. + with torch.no_grad(): + outputs = predictor(img)["instances"] + cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs) + cls.postexecute(context) + + @classmethod + def setup_config( + cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str] + ): + cfg = get_cfg() + add_densepose_config(cfg) + cfg.merge_from_file(config_fpath) + cfg.merge_from_list(args.opts) + if opts: + cfg.merge_from_list(opts) + cfg.MODEL.WEIGHTS = model_fpath + cfg.freeze() + return cfg + + @classmethod + def _get_input_file_list(cls: type, input_spec: str): + if os.path.isdir(input_spec): + file_list = [ + os.path.join(input_spec, fname) + for fname in os.listdir(input_spec) + if os.path.isfile(os.path.join(input_spec, fname)) + ] + elif os.path.isfile(input_spec): + file_list = [input_spec] + else: + file_list = glob.glob(input_spec) + return file_list + + +@register_action +class DumpAction(InferenceAction): + """ + Dump action that outputs results to a pickle file + """ + + COMMAND: ClassVar[str] = "dump" + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(DumpAction, cls).add_arguments(parser) + parser.add_argument( + "--output", + metavar="", + default="results.pkl", + help="File name to save dump to", + ) + + @classmethod + def execute_on_outputs( + cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances + ): + image_fpath = entry["file_name"] + logger.info(f"Processing {image_fpath}") + result = {"file_name": image_fpath} + if outputs.has("scores"): + result["scores"] = outputs.get("scores").cpu() + if outputs.has("pred_boxes"): + result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu() + if outputs.has("pred_densepose"): + boxes_XYWH = BoxMode.convert( + result["pred_boxes_XYXY"], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS + ) + result["pred_densepose"] = outputs.get("pred_densepose").to_result(boxes_XYWH) + 
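+        # Each processed image contributes one dict to the accumulated results list,
+        # which postexecute() later pickles to the file given by --output.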
context["results"].append(result) + + @classmethod + def create_context(cls: type, args: argparse.Namespace): + context = {"results": [], "out_fname": args.output} + return context + + @classmethod + def postexecute(cls: type, context: Dict[str, Any]): + out_fname = context["out_fname"] + out_dir = os.path.dirname(out_fname) + if len(out_dir) > 0 and not os.path.exists(out_dir): + os.makedirs(out_dir) + with open(out_fname, "wb") as hFile: + pickle.dump(context["results"], hFile) + logger.info(f"Output saved to {out_fname}") + + +@register_action +class ShowAction(InferenceAction): + """ + Show action that visualizes selected entries on an image + """ + + COMMAND: ClassVar[str] = "show" + VISUALIZERS: ClassVar[Dict[str, object]] = { + "dp_contour": DensePoseResultsContourVisualizer, + "dp_segm": DensePoseResultsFineSegmentationVisualizer, + "dp_u": DensePoseResultsUVisualizer, + "dp_v": DensePoseResultsVVisualizer, + "bbox": ScoredBoundingBoxVisualizer, + } + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(ShowAction, cls).add_arguments(parser) + parser.add_argument( + "visualizations", + metavar="", + help="Comma separated list of visualizations, possible values: " + "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))), + ) + parser.add_argument( + "--min_score", + metavar="", + default=0.8, + type=float, + help="Minimum detection score to visualize", + ) + parser.add_argument( + "--nms_thresh", metavar="", default=None, type=float, help="NMS threshold" + ) + parser.add_argument( + "--output", + metavar="", + default="outputres.png", + help="File name to save output to", + ) + + @classmethod + def setup_config( + cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str] + ): + opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST") + opts.append(str(args.min_score)) + if args.nms_thresh is not None: + opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST") + opts.append(str(args.nms_thresh)) + cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts) + return cfg + + @classmethod + def execute_on_outputs( + cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances + ): + import cv2 + import numpy as np + + visualizer = context["visualizer"] + extractor = context["extractor"] + image_fpath = entry["file_name"] + logger.info(f"Processing {image_fpath}") + image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY) + image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) + data = extractor(outputs) + image_vis = visualizer.visualize(image, data) + entry_idx = context["entry_idx"] + 1 + out_fname = cls._get_out_fname(entry_idx, context["out_fname"]) + out_dir = os.path.dirname(out_fname) + if len(out_dir) > 0 and not os.path.exists(out_dir): + os.makedirs(out_dir) + cv2.imwrite(out_fname, image_vis) + logger.info(f"Output saved to {out_fname}") + context["entry_idx"] += 1 + + @classmethod + def postexecute(cls: type, context: Dict[str, Any]): + pass + + @classmethod + def _get_out_fname(cls: type, entry_idx: int, fname_base: str): + base, ext = os.path.splitext(fname_base) + return base + ".{0:04d}".format(entry_idx) + ext + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + vis_specs = args.visualizations.split(",") + 
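+        # Build one visualizer and a matching extractor per requested visualization
+        # (e.g. "dp_segm,bbox"), then combine them into compound objects below.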
visualizers = [] + extractors = [] + for vis_spec in vis_specs: + vis = cls.VISUALIZERS[vis_spec]() + visualizers.append(vis) + extractor = create_extractor(vis) + extractors.append(extractor) + visualizer = CompoundVisualizer(visualizers) + extractor = CompoundExtractor(extractors) + context = { + "extractor": extractor, + "visualizer": visualizer, + "out_fname": args.output, + "entry_idx": 0, + } + return context + + +def create_argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=DOC, + formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120), + ) + parser.set_defaults(func=lambda _: parser.print_help(sys.stdout)) + subparsers = parser.add_subparsers(title="Actions") + for _, action in _ACTION_REGISTRY.items(): + action.add_parser(subparsers) + return parser + + +def main(): + parser = create_argument_parser() + args = parser.parse_args() + verbosity = args.verbosity if hasattr(args, "verbosity") else None + global logger + logger = setup_logger(name=LOGGER_NAME) + logger.setLevel(verbosity_to_level(verbosity)) + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ed1bcd68744a22472cc8b391993e4175013dc42 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml @@ -0,0 +1,47 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + POOLER_TYPE: "ROIAlign" + NUM_COARSE_SEGM_CHANNELS: 2 +DATASETS: + TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival") + TEST: ("densepose_coco_2014_minival",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.01 + STEPS: (60000, 80000) + MAX_ITER: 90000 + WARMUP_FACTOR: 0.1 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15475b1ac3bb7272a7ebc0061a55119ffd2591b9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7546b967ab89129c9a276f19b1cf2d6b59f1a462 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..045f7f02f1b4eb0c0ef1733c3ac65e3aa70168de --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ace62094fbc4ce2024810333c11c7a955d8eeb22 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml @@ -0,0 +1,16 @@ 
+_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..766c098f6dcdd1fb3f67957d7d1d982b37747b96 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af44fb767edf9bf093463e62f93e070d0d019c5a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e79a1b9549cf19ed4a43cf9caf3dc88f6133310 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml @@ -0,0 +1,17 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 + ROI_DENSEPOSE_HEAD: + NUM_COARSE_SEGM_CHANNELS: 15 + POOLER_RESOLUTION: 14 + HEATMAP_SIZE: 56 + INDEX_WEIGHTS: 2.0 + PART_WEIGHTS: 0.3 + POINT_REGRESSION_WEIGHTS: 0.1 + DECODER_ON: False +SOLVER: + BASE_LR: 0.002 + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f3720eff56ce042a68da6c99f484b963cae2c7d9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git 
a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a47cc05e6e9dc882778c6b502d93cbcec88fb88 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52a170b4a28289ad943314f77256e34800d23121 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d36e54256ac22f1b01604e54430da24972f06eeb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e880d469564a3757ba3f4d708054074cefda49b6 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 130000 + STEPS: (100000, 120000) + WARMUP_FACTOR: 0.025 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d2dd14c6f92f3850b99e6f1c828c0fcee52120e1 --- /dev/null +++ 
b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c5391f3b3c3d437312a290d29b0656cb3804b25 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml @@ -0,0 +1,17 @@ +_BASE_: "Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + NUM_COARSE_SEGM_CHANNELS: 15 + POOLER_RESOLUTION: 14 + HEATMAP_SIZE: 56 + INDEX_WEIGHTS: 2.0 + PART_WEIGHTS: 0.3 + POINT_REGRESSION_WEIGHTS: 0.1 + DECODER_ON: False +SOLVER: + BASE_LR: 0.002 + MAX_ITER: 130000 + STEPS: (100000, 120000) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/evolution/Base-RCNN-FPN-MC.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/evolution/Base-RCNN-FPN-MC.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a20882a9fd275bac3e3cf49c128684c73085ca1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/evolution/Base-RCNN-FPN-MC.yaml @@ -0,0 +1,91 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("base_coco_2017_train",) + TEST: ("base_coco_2017_val", "densepose_chimps") + CATEGORY_MAPS: + "base_coco_2017_train": + "16": 1 # bird -> person + "17": 1 # cat -> person + "18": 1 # dog -> person + "19": 1 # horse -> person + "20": 1 # sheep -> person + "21": 1 # cow -> person + "22": 1 # elephant -> person + "23": 1 # bear -> person + "24": 1 # zebra -> person + "25": 1 # girafe -> person + "base_coco_2017_val": + "16": 1 # bird -> person + "17": 1 # cat -> person + "18": 1 # dog -> person + "19": 1 # horse -> person + "20": 1 # sheep -> person + "21": 1 # cow -> person + "22": 1 # elephant -> person + "23": 1 # bear -> person + "24": 1 # zebra -> person + "25": 1 # girafe -> person + WHITELISTED_CATEGORIES: + "base_coco_2017_train": + - 1 # person + - 16 # bird + - 17 # cat + - 18 # dog + - 19 # horse + - 20 # sheep + - 21 # cow + - 22 # elephant + - 23 # bear + - 24 # zebra + - 25 # girafe + "base_coco_2017_val": + - 1 # person + - 16 # bird + - 17 # cat + - 18 # dog + - 19 # horse + - 20 # sheep + - 21 # cow + - 22 # elephant + - 23 # bear + - 24 # zebra + - 25 # girafe +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/evolution/faster_rcnn_R_50_FPN_1x_MC.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/evolution/faster_rcnn_R_50_FPN_1x_MC.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80139ad9e40c09fdd862cdac80aa18c5cabc0a1e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/evolution/faster_rcnn_R_50_FPN_1x_MC.yaml @@ -0,0 +1,7 @@ +_BASE_: "Base-RCNN-FPN-MC.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + DENSEPOSE_ON: False + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b90989eef81e27d23119d2cd4627e8cea211ac51 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml @@ -0,0 +1,11 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseDeepLabHead" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7d412740340d924bacc3baa57f32bfea0b871511 --- /dev/null +++ 
b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl" +DATASETS: + TRAIN: () + TEST: ("densepose_coco_2014_minival_100",) +TEST: + AUG: + ENABLED: True + MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) + MAX_SIZE: 4000 + FLIP: True + EXPECTED_RESULTS: [["bbox_TTA", "AP", 61.74, 0.03], ["densepose_gps_TTA", "AP", 60.22, 0.03], ["densepose_gpsm_TTA", "AP", 63.85, 0.03]] diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0fe61151adf255baba717f3e65ff6fab52829a6 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "iid_iso" + POINT_REGRESSION_WEIGHTS: 0.0005 +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 40 + STEPS: (30,) + WARMUP_FACTOR: 0.025 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0d9358c8846452314697a19b5e2ea9e075ddaeb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 + ROI_DENSEPOSE_HEAD: + UV_CONFIDENCE: + ENABLED: True + TYPE: "indep_aniso" + POINT_REGRESSION_WEIGHTS: 0.0005 +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + CLIP_GRADIENTS: + ENABLED: True + MAX_ITER: 40 + STEPS: (30,) + WARMUP_FACTOR: 0.025 diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c5a7d20989e774cbba2b443e3026a2361201d0f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,8 @@ +_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml" +MODEL: + WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl" +DATASETS: + TRAIN: () + TEST: ("densepose_coco_2014_minival_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 59.27, 0.025], ["densepose_gps", "AP", 60.11, 0.02], ["densepose_gpsm", "AP", 64.20, 0.02]] diff --git 
a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..057c8768186e8a818228aa2f028ba3007374c571 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b991160c79e5a95feac22be30deea10d200178d4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-DensePose-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + NUM_CLASSES: 1 +DATASETS: + TRAIN: ("densepose_coco_2014_minival",) + TEST: ("densepose_coco_2014_minival",) +SOLVER: + MAX_ITER: 6000 + STEPS: (5500, 5800) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 58.27, 1.0], ["densepose_gps", "AP", 42.47, 1.5], ["densepose_gpsm", "AP", 49.20, 1.5]] + diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/__init__.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aea5a1a9c3e63ce168a41545322599ccc4adbbb8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .data.datasets import builtin # just to register data +from .config import add_densepose_config, add_dataset_category_config +from .densepose_head import ROI_DENSEPOSE_HEAD_REGISTRY +from .evaluator import DensePoseCOCOEvaluator +from .roi_head import DensePoseROIHeads +from .data.structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData +from .modeling.test_time_augmentation import DensePoseGeneralizedRCNNWithTTA +from .utils.transform import load_from_cfg diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/config.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/config.py new file mode 100644 index 0000000000000000000000000000000000000000..2d76056b362beb7c0832e775b9e3415dd42767a5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/config.py @@ -0,0 +1,68 @@ +# -*- coding = utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +from detectron2.config import CfgNode as CN + + +def add_dataset_category_config(cfg: CN): + """ + Add config for additional category-related dataset options + - category whitelisting + - category mapping + """ + _C = cfg + _C.DATASETS.CATEGORY_MAPS = CN(new_allowed=True) + _C.DATASETS.WHITELISTED_CATEGORIES = CN(new_allowed=True) + + +def add_densepose_config(cfg: CN): + """ + Add config for densepose head. + """ + _C = cfg + + _C.MODEL.DENSEPOSE_ON = True + + _C.MODEL.ROI_DENSEPOSE_HEAD = CN() + _C.MODEL.ROI_DENSEPOSE_HEAD.NAME = "" + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8 + # Number of parts used for point labels + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4 + _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512 + _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3 + _C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2 + _C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 112 + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2" + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 28 + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2 + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS = 2 # 15 or 2 + # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD) + _C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7 + # Loss weights for annotation masks.(14 Parts) + _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 5.0 + # Loss weights for surface parts. (24 Parts) + _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 1.0 + # Loss weights for UV regression. + _C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.01 + # For Decoder + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON = True + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = "" + _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4 + # For DeepLab head + _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB = CN() + _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM = "GN" + _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON = 0 + # Confidences + # Enable learning confidences (variances) along with the actual values + _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE = CN({"ENABLED": False}) + # UV confidence lower bound + _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01 + # Statistical model type for confidence learning, possible values: + # - "iid_iso": statistically independent identically distributed residuals + # with isotropic covariance + # - "indep_aniso": statistically independent residuals with anisotropic + # covariances + _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso" diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/densepose_coco_evaluation.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/densepose_coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..489e7b006da436531e37ebeb1f01f13bad60874d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/densepose_coco_evaluation.py @@ -0,0 +1,1138 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# This is a modified version of cocoeval.py where we also have the densepose evaluation. 
+ +__author__ = "tsungyi" + +import copy +import datetime +import itertools +import logging +import numpy as np +import pickle +import time +from collections import defaultdict +from enum import Enum +from typing import Any, Dict, Tuple +import scipy.spatial.distance as ssd +from fvcore.common.file_io import PathManager +from pycocotools import mask as maskUtils +from scipy.io import loadmat +from scipy.ndimage import zoom as spzoom + +from .data.structures import DensePoseDataRelative, DensePoseResult + +logger = logging.getLogger(__name__) + + +class DensePoseEvalMode(str, Enum): + # use both masks and geodesic distances (GPS * IOU) to compute scores + GPSM = "gpsm" + # use only geodesic distances (GPS) to compute scores + GPS = "gps" + # use only masks (IOU) to compute scores + IOU = "iou" + + +class DensePoseDataMode(str, Enum): + # use estimated IUV data (default mode) + IUV_DT = "iuvdt" + # use ground truth IUV data + IUV_GT = "iuvgt" + # use ground truth labels I and set UV to 0 + I_GT_UV_0 = "igtuv0" + # use ground truth labels I and estimated UV coordinates + I_GT_UV_DT = "igtuvdt" + # use estimated labels I and set UV to 0 + I_DT_UV_0 = "idtuv0" + + +class DensePoseCocoEval(object): + # Interface for evaluating detection on the Microsoft COCO dataset. + # + # The usage for CocoEval is as follows: + # cocoGt=..., cocoDt=... # load dataset and results + # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object + # E.params.recThrs = ...; # set parameters as desired + # E.evaluate(); # run per image evaluation + # E.accumulate(); # accumulate per image results + # E.summarize(); # display summary metrics of results + # For example usage see evalDemo.m and http://mscoco.org/. + # + # The evaluation parameters are as follows (defaults in brackets): + # imgIds - [all] N img ids to use for evaluation + # catIds - [all] K cat ids to use for evaluation + # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation + # recThrs - [0:.01:1] R=101 recall thresholds for evaluation + # areaRng - [...] A=4 object area ranges for evaluation + # maxDets - [1 10 100] M=3 thresholds on max detections per image + # iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose' + # iouType replaced the now DEPRECATED useSegm parameter. + # useCats - [1] if true use category labels for evaluation + # Note: if useCats=0 category labels are ignored as in proposal scoring. + # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. + # + # evaluate(): evaluates detections on every image and every category and + # concats the results into the "evalImgs" with fields: + # dtIds - [1xD] id for each of the D detections (dt) + # gtIds - [1xG] id for each of the G ground truths (gt) + # dtMatches - [TxD] matching gt id at each IoU or 0 + # gtMatches - [TxG] matching dt id at each IoU or 0 + # dtScores - [1xD] confidence of each dt + # gtIgnore - [1xG] ignore flag for each gt + # dtIgnore - [TxD] ignore flag for each dt at each IoU + # + # accumulate(): accumulates the per-image, per-category evaluation + # results in "evalImgs" into the dictionary "eval" with fields: + # params - parameters used for evaluation + # date - date evaluation was performed + # counts - [T,R,K,A,M] parameter dimensions (see above) + # precision - [TxRxKxAxM] precision for every evaluation setting + # recall - [TxKxAxM] max recall for every evaluation setting + # Note: precision and recall==-1 for settings with no gt objects.
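+ # + # Note: for iouType 'densepose', detections are expected to carry a 'densepose' + # result and ground truths the dp_x/dp_y/dp_I/dp_U/dp_V point annotations; + # similarity is computed by computeOgps() (combined with a mask IoU in GPSM mode).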
+ # + # See also coco, mask, pycocoDemo, pycocoEvalDemo + # + # Microsoft COCO Toolbox. version 2.0 + # Data, paper, and tutorials available at: http://mscoco.org/ + # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. + # Licensed under the Simplified BSD License [see coco/license.txt] + def __init__( + self, + cocoGt=None, + cocoDt=None, + iouType: str = "densepose", + dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS, + dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT, + ): + """ + Initialize CocoEval using coco APIs for gt and dt + :param cocoGt: coco object with ground truth annotations + :param cocoDt: coco object with detection results + :return: None + """ + self.cocoGt = cocoGt # ground truth COCO API + self.cocoDt = cocoDt # detections COCO API + self._dpEvalMode = dpEvalMode + self._dpDataMode = dpDataMode + self.params = {} # evaluation parameters + self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI] + self.eval = {} # accumulated evaluation results + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self.params = Params(iouType=iouType) # parameters + self._paramsEval = {} # parameters for evaluation + self.stats = [] # result summarization + self.ious = {} # ious between all gts and dts + if cocoGt is not None: + self.params.imgIds = sorted(cocoGt.getImgIds()) + self.params.catIds = sorted(cocoGt.getCatIds()) + self.ignoreThrBB = 0.7 + self.ignoreThrUV = 0.9 + + def _loadGEval(self): + smpl_subdiv_fpath = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat" + ) + pdist_transform_fpath = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat" + ) + pdist_matrix_fpath = PathManager.get_local_path( + "https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl", timeout_sec=120 + ) + SMPL_subdiv = loadmat(smpl_subdiv_fpath) + self.PDIST_transform = loadmat(pdist_transform_fpath) + self.PDIST_transform = self.PDIST_transform["index"].squeeze() + UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze() + ClosestVertInds = np.arange(UV.shape[1]) + 1 + self.Part_UVs = [] + self.Part_ClosestVertInds = [] + for i in np.arange(24): + self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]) + self.Part_ClosestVertInds.append( + ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)] + ) + + with open(pdist_matrix_fpath, "rb") as hFile: + arrays = pickle.load(hFile, encoding="latin1") + self.Pdist_matrix = arrays["Pdist_matrix"] + self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze()) + # Mean geodesic distances for parts. + self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150]) + # Coarse Part labels. 
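+ # CoarseParts maps the background label 0 and the 24 fine part labels onto + # 8 coarse regions; computeOgps() uses it to look up the per-point + # normalization distance in Mean_Distances above.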
+ self.CoarseParts = np.array( + [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8] + ) + + def _prepare(self): + """ + Prepare ._gts and ._dts for evaluation based on params + :return: None + """ + + def _toMask(anns, coco): + # modify ann['segmentation'] by reference + for ann in anns: + rle = coco.annToRLE(ann) + ann["segmentation"] = rle + + def _getIgnoreRegion(iid, coco): + img = coco.imgs[iid] + + if "ignore_regions_x" not in img.keys(): + return None + + if len(img["ignore_regions_x"]) == 0: + return None + + rgns_merged = [] + for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"]): + rgns = [iter(region_x), iter(region_y)] + rgns_merged.append([next(it) for it in itertools.cycle(rgns)]) + rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"]) + rle = maskUtils.merge(rles) + return maskUtils.decode(rle) + + def _checkIgnore(dt, iregion): + if iregion is None: + return True + + bb = np.array(dt["bbox"]).astype(np.int) + x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3] + x2 = min([x2, iregion.shape[1]]) + y2 = min([y2, iregion.shape[0]]) + + if bb[2] * bb[3] == 0: + return False + + crop_iregion = iregion[y1:y2, x1:x2] + + if crop_iregion.sum() == 0: + return True + + if "densepose" not in dt.keys(): # filtering boxes + return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB + + # filtering UVs + ignoremask = np.require(crop_iregion, requirements=["F"]) + mask = self._extract_mask(dt) + uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"]) + uvmask_ = maskUtils.encode(uvmask) + ignoremask_ = maskUtils.encode(ignoremask) + uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0] + return uviou < self.ignoreThrUV + + p = self.params + + if p.useCats: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + else: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) + + imns = self.cocoGt.loadImgs(p.imgIds) + self.size_mapping = {} + for im in imns: + self.size_mapping[im["id"]] = [im["height"], im["width"]] + + # if iouType == 'uv', add point gt annotations + if p.iouType == "densepose": + self._loadGEval() + + # convert ground truth to mask if iouType == 'segm' + if p.iouType == "segm": + _toMask(gts, self.cocoGt) + _toMask(dts, self.cocoDt) + + # set ignore flag + for gt in gts: + gt["ignore"] = gt["ignore"] if "ignore" in gt else 0 + gt["ignore"] = "iscrowd" in gt and gt["iscrowd"] + if p.iouType == "keypoints": + gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"] + if p.iouType == "densepose": + gt["ignore"] = ("dp_x" in gt) == 0 + + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self._igrgns = defaultdict(list) + + for gt in gts: + iid = gt["image_id"] + if iid not in self._igrgns.keys(): + self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt) + if _checkIgnore(gt, self._igrgns[iid]): + self._gts[iid, gt["category_id"]].append(gt) + for dt in dts: + iid = dt["image_id"] + if (iid not in self._igrgns) or _checkIgnore(dt, self._igrgns[iid]): + self._dts[iid, dt["category_id"]].append(dt) + + self.evalImgs = defaultdict(list) # per-image per-category evaluation results + self.eval = {} # accumulated evaluation results + + def evaluate(self): + """ + Run per image evaluation on given images and store results (a list of 
dict) in self.evalImgs + :return: None + """ + tic = time.time() + logger.info("Running per image DensePose evaluation... {}".format(self.params.iouType)) + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = "segm" if p.useSegm == 1 else "bbox" + logger.info("useSegm (deprecated) is not None. Running DensePose evaluation") + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType in ["segm", "bbox"]: + computeIoU = self.computeIoU + elif p.iouType == "keypoints": + computeIoU = self.computeOks + elif p.iouType == "densepose": + computeIoU = self.computeOgps + if self._dpEvalMode == DensePoseEvalMode.GPSM: + self.real_ious = { + (imgId, catId): self.computeDPIoU(imgId, catId) + for imgId in p.imgIds + for catId in catIds + } + + self.ious = { + (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds + } + + evaluateImg = self.evaluateImg + maxDet = p.maxDets[-1] + self.evalImgs = [ + evaluateImg(imgId, catId, areaRng, maxDet) + for catId in catIds + for areaRng in p.areaRng + for imgId in p.imgIds + ] + self._paramsEval = copy.deepcopy(self.params) + toc = time.time() + logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic)) + + def getDensePoseMask(self, polys): + maskGen = np.zeros([256, 256]) + for i in range(1, 15): + if polys[i - 1]: + currentMask = maskUtils.decode(polys[i - 1]) + maskGen[currentMask > 0] = i + return maskGen + + def _generate_rlemask_on_image(self, mask, imgId, data): + bbox_xywh = np.array(data["bbox"]) + x, y, w, h = bbox_xywh + im_h, im_w = self.size_mapping[imgId] + im_mask = np.zeros((im_h, im_w), dtype=np.uint8) + if mask is not None: + x0 = max(int(x), 0) + x1 = min(int(x + w), im_w, int(x) + mask.shape[1]) + y0 = max(int(y), 0) + y1 = min(int(y + h), im_h, int(y) + mask.shape[0]) + y = int(y) + x = int(x) + im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x] + im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, requirements=["F"]) + rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order="F"))[0] + return rle_mask + + def computeDPIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + gtmasks = [] + for g in gt: + if DensePoseDataRelative.S_KEY in g: + mask = self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]) + _, _, w, h = g["bbox"] + scale_x = float(max(w, 1)) / mask.shape[1] + scale_y = float(max(h, 1)) / mask.shape[0] + mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False) + mask = np.array(mask > 0.5, dtype=np.uint8) + rle_mask = self._generate_rlemask_on_image(mask, imgId, g) + elif "segmentation" in g: + segmentation = g["segmentation"] + if isinstance(segmentation, list) and segmentation: + # polygons + im_h, im_w = self.size_mapping[imgId] + rles = maskUtils.frPyObjects(segmentation, im_h, im_w) + rle_mask = maskUtils.merge(rles) + elif isinstance(segmentation, dict): + if 
isinstance(segmentation["counts"], list): + # uncompressed RLE + im_h, im_w = self.size_mapping[imgId] + rle_mask = maskUtils.frPyObjects(segmentation, im_h, im_w) + else: + # compressed RLE + rle_mask = segmentation + else: + rle_mask = self._generate_rlemask_on_image(None, imgId, g) + else: + rle_mask = self._generate_rlemask_on_image(None, imgId, g) + gtmasks.append(rle_mask) + + dtmasks = [] + for d in dt: + mask = self._extract_mask(d) + mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"]) + rle_mask = self._generate_rlemask_on_image(mask, imgId, d) + dtmasks.append(rle_mask) + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in gt] + iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd) + return iousDP + + def computeIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + if p.iouType == "segm": + g = [g["segmentation"] for g in gt] + d = [d["segmentation"] for d in dt] + elif p.iouType == "bbox": + g = [g["bbox"] for g in gt] + d = [d["bbox"] for d in dt] + else: + raise Exception("unknown iouType for iou computation") + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in gt] + ious = maskUtils.iou(d, g, iscrowd) + return ious + + def computeOks(self, imgId, catId): + p = self.params + # dimension here should be Nxm + gts = self._gts[imgId, catId] + dts = self._dts[imgId, catId] + inds = np.argsort([-d["score"] for d in dts], kind="mergesort") + dts = [dts[i] for i in inds] + if len(dts) > p.maxDets[-1]: + dts = dts[0 : p.maxDets[-1]] + # if len(gts) == 0 and len(dts) == 0: + if len(gts) == 0 or len(dts) == 0: + return [] + ious = np.zeros((len(dts), len(gts))) + sigmas = ( + np.array( + [ + 0.26, + 0.25, + 0.25, + 0.35, + 0.35, + 0.79, + 0.79, + 0.72, + 0.72, + 0.62, + 0.62, + 1.07, + 1.07, + 0.87, + 0.87, + 0.89, + 0.89, + ] + ) + / 10.0 + ) + vars = (sigmas * 2) ** 2 + k = len(sigmas) + # compute oks between each detection and ground truth object + for j, gt in enumerate(gts): + # create bounds for ignore regions(double the gt bbox) + g = np.array(gt["keypoints"]) + xg = g[0::3] + yg = g[1::3] + vg = g[2::3] + k1 = np.count_nonzero(vg > 0) + bb = gt["bbox"] + x0 = bb[0] - bb[2] + x1 = bb[0] + bb[2] * 2 + y0 = bb[1] - bb[3] + y1 = bb[1] + bb[3] * 2 + for i, dt in enumerate(dts): + d = np.array(dt["keypoints"]) + xd = d[0::3] + yd = d[1::3] + if k1 > 0: + # measure the per-keypoint distance if keypoints visible + dx = xd - xg + dy = yd - yg + else: + # measure minimum distance to keypoints in (x0,y0) & (x1,y1) + z = np.zeros(k) + dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0) + dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0) + e = (dx ** 2 + dy ** 2) / vars / (gt["area"] + np.spacing(1)) / 2 + if k1 > 0: + e = e[vg > 0] + ious[i, j] = np.sum(np.exp(-e)) / e.shape[0] + return ious + + def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray: + (densepose_shape, densepose_data_encoded), densepose_bbox_xywh = dt["densepose"] + densepose_data = DensePoseResult.decode_png_data(densepose_shape, densepose_data_encoded) + return densepose_data[0] + + def _extract_iuv( + 
self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any] + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Extract arrays of I, U and V values at given points as numpy arrays + given the data mode stored in self._dpDataMode + """ + if self._dpDataMode == DensePoseDataMode.IUV_DT: + # estimated labels and UV (default) + ipoints = densepose_data[0, py, px] + upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. + vpoints = densepose_data[2, py, px] / 255.0 + elif self._dpDataMode == DensePoseDataMode.IUV_GT: + # ground truth + ipoints = np.array(gt["dp_I"]) + upoints = np.array(gt["dp_U"]) + vpoints = np.array(gt["dp_V"]) + elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0: + # ground truth labels, UV = 0 + ipoints = np.array(gt["dp_I"]) + upoints = np.zeros_like(ipoints, dtype=float) + vpoints = np.zeros_like(ipoints, dtype=float) + elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT: + # ground truth labels, estimated UV + ipoints = np.array(gt["dp_I"]) + upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. + vpoints = densepose_data[2, py, px] / 255.0 + elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0: + # estimated labels, UV = 0 + ipoints = densepose_data[0, py, px] + upoints = np.zeros_like(ipoints, dtype=float) + vpoints = np.zeros_like(ipoints, dtype=float) + else: + raise ValueError(f"Unknown data mode: {self._dpDataMode}") + return ipoints, upoints, vpoints + + def computeOgps(self, imgId, catId): + p = self.params + # dimension here should be Nxm + g = self._gts[imgId, catId] + d = self._dts[imgId, catId] + inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort") + d = [d[i] for i in inds] + if len(d) > p.maxDets[-1]: + d = d[0 : p.maxDets[-1]] + # if len(gts) == 0 and len(dts) == 0: + if len(g) == 0 or len(d) == 0: + return [] + ious = np.zeros((len(d), len(g))) + # compute opgs between each detection and ground truth object + # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5 + # 1 # dist = 0.3m corresponds to ogps = 0.96 + # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5) + for j, gt in enumerate(g): + if not gt["ignore"]: + g_ = gt["bbox"] + for i, dt in enumerate(d): + # + dy = int(dt["bbox"][3]) + dx = int(dt["bbox"][2]) + dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0 + dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0 + py = (dp_y + g_[1] - dt["bbox"][1]).astype(np.int) + px = (dp_x + g_[0] - dt["bbox"][0]).astype(np.int) + # + pts = np.zeros(len(px)) + pts[px >= dx] = -1 + pts[py >= dy] = -1 + pts[px < 0] = -1 + pts[py < 0] = -1 + if len(pts) < 1: + ogps = 0.0 + elif np.max(pts) == -1: + ogps = 0.0 + else: + px[pts == -1] = 0 + py[pts == -1] = 0 + (densepose_shape, densepose_data_encoded), densepose_bbox_xywh = dt[ + "densepose" + ] + densepose_data = DensePoseResult.decode_png_data( + densepose_shape, densepose_data_encoded + ) + assert densepose_data.shape[2] == dx, ( + "DensePoseData width {} should be equal to " + "detection bounding box width {}".format(densepose_data.shape[2], dx) + ) + assert densepose_data.shape[1] == dy, ( + "DensePoseData height {} should be equal to " + "detection bounding box height {}".format(densepose_data.shape[1], dy) + ) + ipoints, upoints, vpoints = self._extract_iuv(densepose_data, py, px, gt) + ipoints[pts == -1] = 0 + # Find closest vertices in subsampled mesh. + cVerts, cVertsGT = self.findAllClosestVerts(gt, upoints, vpoints, ipoints) + # Get pairwise geodesic distances between gt and estimated mesh points. + dist = self.getDistances(cVertsGT, cVerts) + # Compute the Ogps measure.
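+ # ogps(p) = exp(-dist(p)^2 / (2 * k(p)^2)) for each annotated GT point p, + # averaged over the instance's points; k(p) is the per-part + # normalization distance selected below.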
+ # Find the mean geodesic normalization distance for + # each GT point, based on which part it is on. + Current_Mean_Distances = self.Mean_Distances[ + self.CoarseParts[self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]] + ] + # Compute gps + ogps_values = np.exp(-(dist ** 2) / (2 * (Current_Mean_Distances ** 2))) + # + if len(dist) > 0: + ogps = np.sum(ogps_values) / len(dist) + ious[i, j] = ogps + + gbb = [gt["bbox"] for gt in g] + dbb = [dt["bbox"] for dt in d] + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in g] + ious_bb = maskUtils.iou(dbb, gbb, iscrowd) + return ious, ious_bb + + def evaluateImg(self, imgId, catId, aRng, maxDet): + """ + perform evaluation for single category and image + :return: dict (single image results) + """ + + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return None + + for g in gt: + # g['_ignore'] = g['ignore'] + if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]): + g["_ignore"] = True + else: + g["_ignore"] = False + + # sort dt highest score first, sort gt ignore last + gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort") + gt = [gt[i] for i in gtind] + dtind = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in dtind[0:maxDet]] + iscrowd = [int(o["iscrowd"]) for o in gt] + # load computed ious + if p.iouType == "densepose": + # print('Checking the length', len(self.ious[imgId, catId])) + # if len(self.ious[imgId, catId]) == 0: + # print(self.ious[imgId, catId]) + ious = ( + self.ious[imgId, catId][0][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + ioubs = ( + self.ious[imgId, catId][1][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + if self._dpEvalMode == DensePoseEvalMode.GPSM: + iousM = ( + self.real_ious[imgId, catId][:, gtind] + if len(self.real_ious[imgId, catId]) > 0 + else self.real_ious[imgId, catId] + ) + else: + ious = ( + self.ious[imgId, catId][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + + T = len(p.iouThrs) + G = len(gt) + D = len(dt) + gtm = np.zeros((T, G)) + dtm = np.zeros((T, D)) + gtIg = np.array([g["_ignore"] for g in gt]) + dtIg = np.zeros((T, D)) + if np.all(gtIg) and p.iouType == "densepose": + dtIg = np.logical_or(dtIg, True) + + if len(ious) > 0: # and not p.iouType == 'densepose': + for tind, t in enumerate(p.iouThrs): + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + iou = min([t, 1 - 1e-10]) + m = -1 + for gind, _g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # if dt matched to reg gt, and on ignore gt, stop + if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1: + break + if p.iouType == "densepose": + if self._dpEvalMode == DensePoseEvalMode.GPSM: + new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind]) + elif self._dpEvalMode == DensePoseEvalMode.IOU: + new_iou = iousM[dind, gind] + elif self._dpEvalMode == DensePoseEvalMode.GPS: + new_iou = ious[dind, gind] + else: + new_iou = ious[dind, gind] + if new_iou < iou: + continue + if new_iou == 0.0: + continue + # if match successful and best so far, store appropriately + iou = new_iou + m = gind + # if match made store id of match 
for both dt and gt + if m == -1: + continue + dtIg[tind, dind] = gtIg[m] + dtm[tind, dind] = gt[m]["id"] + gtm[tind, m] = d["id"] + + if p.iouType == "densepose": + if not len(ioubs) == 0: + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + if dtm[tind, dind] == 0: + ioub = 0.8 + m = -1 + for gind, _g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # continue to next gt unless better match made + if ioubs[dind, gind] < ioub: + continue + # if match successful and best so far, store appropriately + ioub = ioubs[dind, gind] + m = gind + # if match made store id of match for both dt and gt + if m > -1: + dtIg[:, dind] = gtIg[m] + if gtIg[m]: + dtm[tind, dind] = gt[m]["id"] + gtm[tind, m] = d["id"] + # set unmatched detections outside of area range to ignore + a = np.array([d["area"] < aRng[0] or d["area"] > aRng[1] for d in dt]).reshape((1, len(dt))) + dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0))) + # store results for given image and category + # print('Done with the function', len(self.ious[imgId, catId])) + return { + "image_id": imgId, + "category_id": catId, + "aRng": aRng, + "maxDet": maxDet, + "dtIds": [d["id"] for d in dt], + "gtIds": [g["id"] for g in gt], + "dtMatches": dtm, + "gtMatches": gtm, + "dtScores": [d["score"] for d in dt], + "gtIgnore": gtIg, + "dtIgnore": dtIg, + } + + def accumulate(self, p=None): + """ + Accumulate per image evaluation results and store the result in self.eval + :param p: input params for evaluation + :return: None + """ + logger.info("Accumulating evaluation results...") + tic = time.time() + if not self.evalImgs: + logger.info("Please run evaluate() first") + # allows input customized parameters + if p is None: + p = self.params + p.catIds = p.catIds if p.useCats == 1 else [-1] + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) if p.useCats else 1 + A = len(p.areaRng) + M = len(p.maxDets) + precision = -(np.ones((T, R, K, A, M))) # -1 for the precision of absent categories + recall = -(np.ones((T, K, A, M))) + + # create dictionary for future indexing + logger.info("Categories: {}".format(p.catIds)) + _pe = self._paramsEval + catIds = _pe.catIds if _pe.useCats else [-1] + setK = set(catIds) + setA = set(map(tuple, _pe.areaRng)) + setM = set(_pe.maxDets) + setI = set(_pe.imgIds) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIds) if i in setI] + I0 = len(_pe.imgIds) + A0 = len(_pe.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. 
+ inds = np.argsort(-dtScores, kind="mergesort") + + dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e["gtIgnore"] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + tps = np.logical_and(dtm, np.logical_not(dtIg)) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + q = np.zeros((R,)) + + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + except Exception: + pass + precision[t, :, k, a, m] = np.array(q) + logger.info( + "Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision)) + ) + self.eval = { + "params": p, + "counts": [T, R, K, A, M], + "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + "precision": precision, + "recall": recall, + } + toc = time.time() + logger.info("DONE (t={:0.2f}s).".format(toc - tic)) + + def summarize(self): + """ + Compute and display summary metrics for evaluation results. + Note this function can *only* be applied on the default parameter setting + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + measure = "IoU" + if self.params.iouType == "keypoints": + measure = "OKS" + elif self.params.iouType == "densepose": + measure = "OGPS" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + stats[0] = _summarize(1) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = _summarize(0, 
maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + def _summarizeUvs(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) + stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[3] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") + stats[4] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") + stats[5] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[7] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[8] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") + stats[9] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") + return stats + + def _summarizeUvsOld(): + stats = np.zeros((18,)) + stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) + stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55) + stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60) + stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65) + stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70) + stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80) + stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85) + stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90) + stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95) + stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") + stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") + stats[13] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") + stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType in ["segm", "bbox"]: + summarize = _summarizeDets + elif iouType in ["keypoints"]: + summarize = _summarizeKps + elif iouType in ["densepose"]: + summarize = _summarizeUvs + self.stats = summarize() + + def __str__(self): + self.summarize() + + # ================ functions for dense pose ============================== + def findAllClosestVerts(self, gt, U_points, V_points, 
Index_points): + # + I_gt = np.array(gt["dp_I"]) + U_gt = np.array(gt["dp_U"]) + V_gt = np.array(gt["dp_V"]) + # + # print(I_gt) + # + ClosestVerts = np.ones(Index_points.shape) * -1 + for i in np.arange(24): + # + if sum(Index_points == (i + 1)) > 0: + UVs = np.array( + [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]] + ) + Current_Part_UVs = self.Part_UVs[i] + Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] + D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() + ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[ + np.argmin(D, axis=0) + ] + # + ClosestVertsGT = np.ones(Index_points.shape) * -1 + for i in np.arange(24): + if sum(I_gt == (i + 1)) > 0: + UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]]) + Current_Part_UVs = self.Part_UVs[i] + Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] + D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() + ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)] + # + return ClosestVerts, ClosestVertsGT + + def getDistances(self, cVertsGT, cVerts): + + ClosestVertsTransformed = self.PDIST_transform[cVerts.astype(int) - 1] + ClosestVertsGTTransformed = self.PDIST_transform[cVertsGT.astype(int) - 1] + # + ClosestVertsTransformed[cVerts < 0] = 0 + ClosestVertsGTTransformed[cVertsGT < 0] = 0 + # + cVertsGT = ClosestVertsGTTransformed + cVerts = ClosestVertsTransformed + # + n = 27554 + dists = [] + for d in range(len(cVertsGT)): + if cVertsGT[d] > 0: + if cVerts[d] > 0: + i = cVertsGT[d] - 1 + j = cVerts[d] - 1 + if j == i: + dists.append(0) + elif j > i: + ccc = i + i = j + j = ccc + i = n - i - 1 + j = n - j - 1 + k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 + k = (n * n - n) / 2 - k - 1 + dists.append(self.Pdist_matrix[int(k)][0]) + else: + i = n - i - 1 + j = n - j - 1 + k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 + k = (n * n - n) / 2 - k - 1 + dists.append(self.Pdist_matrix[int(k)][0]) + else: + dists.append(np.inf) + return np.atleast_1d(np.array(dists).squeeze()) + + +class Params: + """ + Params for coco evaluation api + """ + + def setDetParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) + self.maxDets = [1, 10, 100] + self.areaRng = [ + [0 ** 2, 1e5 ** 2], + [0 ** 2, 32 ** 2], + [32 ** 2, 96 ** 2], + [96 ** 2, 1e5 ** 2], + ] + self.areaRngLbl = ["all", "small", "medium", "large"] + self.useCats = 1 + + def setKpParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. 
the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] + self.areaRngLbl = ["all", "medium", "large"] + self.useCats = 1 + + def setUvParams(self): + self.imgIds = [] + self.catIds = [] + self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] + self.areaRngLbl = ["all", "medium", "large"] + self.useCats = 1 + + def __init__(self, iouType="segm"): + if iouType == "segm" or iouType == "bbox": + self.setDetParams() + elif iouType == "keypoints": + self.setKpParams() + elif iouType == "densepose": + self.setUvParams() + else: + raise Exception("iouType not supported") + self.iouType = iouType + # useSegm is deprecated + self.useSegm = None diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/densepose_head.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/densepose_head.py new file mode 100644 index 0000000000000000000000000000000000000000..363970681db36a41d5bc5b1960960a2a8bf23855 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/densepose_head.py @@ -0,0 +1,1216 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +from dataclasses import dataclass +from enum import Enum +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.config import CfgNode +from detectron2.layers import Conv2d, ConvTranspose2d, interpolate +from detectron2.structures.boxes import matched_boxlist_iou +from detectron2.utils.registry import Registry + +from .data.structures import DensePoseOutput + +ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD") + + +class DensePoseUVConfidenceType(Enum): + """ + Statistical model type for confidence learning, possible values: + - "iid_iso": statistically independent identically distributed residuals + with isotropic covariance + - "indep_aniso": statistically independent residuals with anisotropic + covariances + For details, see: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc.
NIPS 2019 + """ + + # fmt: off + IID_ISO = "iid_iso" + INDEP_ANISO = "indep_aniso" + # fmt: on + + +@dataclass +class DensePoseUVConfidenceConfig: + """ + Configuration options for confidence on UV data + """ + + enabled: bool = False + # lower bound on UV confidences + epsilon: float = 0.01 + type: DensePoseUVConfidenceType = DensePoseUVConfidenceType.IID_ISO + + +@dataclass +class DensePoseConfidenceModelConfig: + """ + Configuration options for confidence models + """ + + # confidence for U and V values + uv_confidence: DensePoseUVConfidenceConfig + + @staticmethod + def from_cfg(cfg: CfgNode) -> "DensePoseConfidenceModelConfig": + return DensePoseConfidenceModelConfig( + uv_confidence=DensePoseUVConfidenceConfig( + enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED, + epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON, + type=DensePoseUVConfidenceType(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE), + ) + ) + + +def initialize_module_params(module): + for name, param in module.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + + +@ROI_DENSEPOSE_HEAD_REGISTRY.register() +class DensePoseDeepLabHead(nn.Module): + def __init__(self, cfg, input_channels): + super(DensePoseDeepLabHead, self).__init__() + # fmt: off + hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL + norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM + self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS + self.use_nonlocal = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON + # fmt: on + pad_size = kernel_size // 2 + n_channels = input_channels + + self.ASPP = ASPP(input_channels, [6, 12, 56], n_channels) # 6, 12, 56 + self.add_module("ASPP", self.ASPP) + + if self.use_nonlocal: + self.NLBlock = NONLocalBlock2D(input_channels, bn_layer=True) + self.add_module("NLBlock", self.NLBlock) + # weight_init.c2_msra_fill(self.ASPP) + + for i in range(self.n_stacked_convs): + norm_module = nn.GroupNorm(32, hidden_dim) if norm == "GN" else None + layer = Conv2d( + n_channels, + hidden_dim, + kernel_size, + stride=1, + padding=pad_size, + bias=not norm, + norm=norm_module, + ) + weight_init.c2_msra_fill(layer) + n_channels = hidden_dim + layer_name = self._get_layer_name(i) + self.add_module(layer_name, layer) + self.n_out_channels = hidden_dim + # initialize_module_params(self) + + def forward(self, features): + x0 = features + x = self.ASPP(x0) + if self.use_nonlocal: + x = self.NLBlock(x) + output = x + for i in range(self.n_stacked_convs): + layer_name = self._get_layer_name(i) + x = getattr(self, layer_name)(x) + x = F.relu(x) + output = x + return output + + def _get_layer_name(self, i): + layer_name = "body_conv_fcn{}".format(i + 1) + return layer_name + + +# Copied from +# https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py +# See https://arxiv.org/pdf/1706.05587.pdf for details +class ASPPConv(nn.Sequential): + def __init__(self, in_channels, out_channels, dilation): + modules = [ + nn.Conv2d( + in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False + ), + nn.GroupNorm(32, out_channels), + nn.ReLU(), + ] + super(ASPPConv, self).__init__(*modules) + + +class ASPPPooling(nn.Sequential): + def __init__(self, in_channels, out_channels): + super(ASPPPooling, self).__init__( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + 
nn.GroupNorm(32, out_channels), + nn.ReLU(), + ) + + def forward(self, x): + size = x.shape[-2:] + x = super(ASPPPooling, self).forward(x) + return F.interpolate(x, size=size, mode="bilinear", align_corners=False) + + +class ASPP(nn.Module): + def __init__(self, in_channels, atrous_rates, out_channels): + super(ASPP, self).__init__() + modules = [] + modules.append( + nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.GroupNorm(32, out_channels), + nn.ReLU(), + ) + ) + + rate1, rate2, rate3 = tuple(atrous_rates) + modules.append(ASPPConv(in_channels, out_channels, rate1)) + modules.append(ASPPConv(in_channels, out_channels, rate2)) + modules.append(ASPPConv(in_channels, out_channels, rate3)) + modules.append(ASPPPooling(in_channels, out_channels)) + + self.convs = nn.ModuleList(modules) + + self.project = nn.Sequential( + nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + # nn.BatchNorm2d(out_channels), + nn.ReLU() + # nn.Dropout(0.5) + ) + + def forward(self, x): + res = [] + for conv in self.convs: + res.append(conv(x)) + res = torch.cat(res, dim=1) + return self.project(res) + + +# copied from +# https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_embedded_gaussian.py +# See https://arxiv.org/abs/1711.07971 for details +class _NonLocalBlockND(nn.Module): + def __init__( + self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True + ): + super(_NonLocalBlockND, self).__init__() + + assert dimension in [1, 2, 3] + + self.dimension = dimension + self.sub_sample = sub_sample + + self.in_channels = in_channels + self.inter_channels = inter_channels + + if self.inter_channels is None: + self.inter_channels = in_channels // 2 + if self.inter_channels == 0: + self.inter_channels = 1 + + if dimension == 3: + conv_nd = nn.Conv3d + max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2)) + bn = nn.GroupNorm # (32, hidden_dim) #nn.BatchNorm3d + elif dimension == 2: + conv_nd = nn.Conv2d + max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2)) + bn = nn.GroupNorm # (32, hidden_dim)nn.BatchNorm2d + else: + conv_nd = nn.Conv1d + max_pool_layer = nn.MaxPool1d(kernel_size=2) + bn = nn.GroupNorm # (32, hidden_dim)nn.BatchNorm1d + + self.g = conv_nd( + in_channels=self.in_channels, + out_channels=self.inter_channels, + kernel_size=1, + stride=1, + padding=0, + ) + + if bn_layer: + self.W = nn.Sequential( + conv_nd( + in_channels=self.inter_channels, + out_channels=self.in_channels, + kernel_size=1, + stride=1, + padding=0, + ), + bn(32, self.in_channels), + ) + nn.init.constant_(self.W[1].weight, 0) + nn.init.constant_(self.W[1].bias, 0) + else: + self.W = conv_nd( + in_channels=self.inter_channels, + out_channels=self.in_channels, + kernel_size=1, + stride=1, + padding=0, + ) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + self.theta = conv_nd( + in_channels=self.in_channels, + out_channels=self.inter_channels, + kernel_size=1, + stride=1, + padding=0, + ) + self.phi = conv_nd( + in_channels=self.in_channels, + out_channels=self.inter_channels, + kernel_size=1, + stride=1, + padding=0, + ) + + if sub_sample: + self.g = nn.Sequential(self.g, max_pool_layer) + self.phi = nn.Sequential(self.phi, max_pool_layer) + + def forward(self, x): + """ + :param x: (b, c, t, h, w) + :return: + """ + + batch_size = x.size(0) + + g_x = self.g(x).view(batch_size, self.inter_channels, -1) + g_x = g_x.permute(0, 2, 1) + + theta_x = self.theta(x).view(batch_size, self.inter_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + 
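# Embedded-Gaussian non-local attention: affinities f = theta(x)^T phi(x) are + # softmax-normalized and used to aggregate the g(x) features (y = softmax(f) @ g(x)). +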
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1) + f = torch.matmul(theta_x, phi_x) + f_div_C = F.softmax(f, dim=-1) + + y = torch.matmul(f_div_C, g_x) + y = y.permute(0, 2, 1).contiguous() + y = y.view(batch_size, self.inter_channels, *x.size()[2:]) + W_y = self.W(y) + z = W_y + x + + return z + + +class NONLocalBlock2D(_NonLocalBlockND): + def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True): + super(NONLocalBlock2D, self).__init__( + in_channels, + inter_channels=inter_channels, + dimension=2, + sub_sample=sub_sample, + bn_layer=bn_layer, + ) + + +@ROI_DENSEPOSE_HEAD_REGISTRY.register() +class DensePoseV1ConvXHead(nn.Module): + def __init__(self, cfg, input_channels): + super(DensePoseV1ConvXHead, self).__init__() + # fmt: off + hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL + self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS + # fmt: on + pad_size = kernel_size // 2 + n_channels = input_channels + for i in range(self.n_stacked_convs): + layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size) + layer_name = self._get_layer_name(i) + self.add_module(layer_name, layer) + n_channels = hidden_dim + self.n_out_channels = n_channels + initialize_module_params(self) + + def forward(self, features): + x = features + output = x + for i in range(self.n_stacked_convs): + layer_name = self._get_layer_name(i) + x = getattr(self, layer_name)(x) + x = F.relu(x) + output = x + return output + + def _get_layer_name(self, i): + layer_name = "body_conv_fcn{}".format(i + 1) + return layer_name + + +class DensePosePredictor(nn.Module): + def __init__(self, cfg, input_channels): + + super(DensePosePredictor, self).__init__() + dim_in = input_channels + n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + self.ann_index_lowres = ConvTranspose2d( + dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.index_uv_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.u_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.v_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE + self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) + self._initialize_confidence_estimation_layers(cfg, self.confidence_model_cfg, dim_in) + initialize_module_params(self) + + def forward(self, head_outputs): + ann_index_lowres = self.ann_index_lowres(head_outputs) + index_uv_lowres = self.index_uv_lowres(head_outputs) + u_lowres = self.u_lowres(head_outputs) + v_lowres = self.v_lowres(head_outputs) + + def interp2d(input): + return interpolate( + input, scale_factor=self.scale_factor, mode="bilinear", align_corners=False + ) + + ann_index = interp2d(ann_index_lowres) + index_uv = interp2d(index_uv_lowres) + u = interp2d(u_lowres) + v = interp2d(v_lowres) + ( + (sigma_1, sigma_2, kappa_u, kappa_v), + (sigma_1_lowres, sigma_2_lowres, kappa_u_lowres, kappa_v_lowres), + (ann_index, index_uv), + ) = self._forward_confidence_estimation_layers( + self.confidence_model_cfg, head_outputs, interp2d, ann_index, index_uv + ) + return ( + (ann_index, 
index_uv, u, v), + (ann_index_lowres, index_uv_lowres, u_lowres, v_lowres), + (sigma_1, sigma_2, kappa_u, kappa_v), + (sigma_1_lowres, sigma_2_lowres, kappa_u_lowres, kappa_v_lowres), + ) + + def _initialize_confidence_estimation_layers( + self, cfg: CfgNode, confidence_model_cfg: DensePoseConfidenceModelConfig, dim_in: int + ): + dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + if confidence_model_cfg.uv_confidence.enabled: + if confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: + self.sigma_2_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + elif confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO: + self.sigma_2_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.kappa_u_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.kappa_v_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + else: + raise ValueError( + f"Unknown confidence model type: {confidence_model_cfg.confidence_model_type}" + ) + + def _forward_confidence_estimation_layers( + self, confidence_model_cfg, head_outputs, interp2d, ann_index, index_uv + ): + sigma_1, sigma_2, kappa_u, kappa_v = None, None, None, None + sigma_1_lowres, sigma_2_lowres, kappa_u_lowres, kappa_v_lowres = None, None, None, None + if confidence_model_cfg.uv_confidence.enabled: + if confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: + sigma_2_lowres = self.sigma_2_lowres(head_outputs) + sigma_2 = interp2d(sigma_2_lowres) + elif confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO: + sigma_2_lowres = self.sigma_2_lowres(head_outputs) + kappa_u_lowres = self.kappa_u_lowres(head_outputs) + kappa_v_lowres = self.kappa_v_lowres(head_outputs) + sigma_2 = interp2d(sigma_2_lowres) + kappa_u = interp2d(kappa_u_lowres) + kappa_v = interp2d(kappa_v_lowres) + else: + raise ValueError( + f"Unknown confidence model type: {confidence_model_cfg.confidence_model_type}" + ) + return ( + (sigma_1, sigma_2, kappa_u, kappa_v), + (sigma_1_lowres, sigma_2_lowres, kappa_u_lowres, kappa_v_lowres), + (ann_index, index_uv), + ) + + +class DensePoseDataFilter(object): + def __init__(self, cfg): + self.iou_threshold = cfg.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD + + @torch.no_grad() + def __call__(self, proposals_with_targets): + """ + Filters proposals with targets to keep only the ones relevant for + DensePose training + proposals: list(Instances), each element of the list corresponds to + various instances (proposals, GT for boxes and densepose) for one + image + """ + proposals_filtered = [] + for proposals_per_image in proposals_with_targets: + if not hasattr(proposals_per_image, "gt_densepose"): + continue + assert hasattr(proposals_per_image, "gt_boxes") + assert hasattr(proposals_per_image, "proposal_boxes") + gt_boxes = proposals_per_image.gt_boxes + est_boxes = proposals_per_image.proposal_boxes + # apply match threshold for densepose head + iou = matched_boxlist_iou(gt_boxes, est_boxes) + iou_select = iou > self.iou_threshold + proposals_per_image = proposals_per_image[iou_select] + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes) + # filter out any target without densepose annotation + 
gt_densepose = proposals_per_image.gt_densepose + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.gt_densepose) + selected_indices = [ + i for i, dp_target in enumerate(gt_densepose) if dp_target is not None + ] + if len(selected_indices) != len(gt_densepose): + proposals_per_image = proposals_per_image[selected_indices] + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes) + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.gt_densepose) + proposals_filtered.append(proposals_per_image) + return proposals_filtered + + +def build_densepose_head(cfg, input_channels): + head_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME + return ROI_DENSEPOSE_HEAD_REGISTRY.get(head_name)(cfg, input_channels) + + +def build_densepose_predictor(cfg, input_channels): + predictor = DensePosePredictor(cfg, input_channels) + return predictor + + +def build_densepose_data_filter(cfg): + dp_filter = DensePoseDataFilter(cfg) + return dp_filter + + +def densepose_inference(densepose_outputs, densepose_confidences, detections): + """ + Infer dense pose estimate based on outputs from the DensePose head + and detections. The estimate for each detection instance is stored in its + "pred_densepose" attribute. + + Args: + densepose_outputs (tuple(`torch.Tensor`)): iterable containing 4 elements: + - s (:obj: `torch.Tensor`): coarse segmentation tensor of size (N, A, H, W), + - i (:obj: `torch.Tensor`): fine segmentation tensor of size (N, C, H, W), + - u (:obj: `torch.Tensor`): U coordinates for each class of size (N, C, H, W), + - v (:obj: `torch.Tensor`): V coordinates for each class of size (N, C, H, W), + where N is the total number of detections in a batch, + A is the number of coarse segmentations labels + (e.g. 15 for coarse body parts + background), + C is the number of fine segmentation labels + (e.g. 25 for fine body parts + background), + W is the resolution along the X axis + H is the resolution along the Y axis + densepose_confidences (tuple(`torch.Tensor`)): iterable containing 4 elements: + - sigma_1 (:obj: `torch.Tensor`): global confidences for UV coordinates + of size (N, C, H, W) + - sigma_2 (:obj: `torch.Tensor`): individual confidences for UV coordinates + of size (N, C, H, W) + - kappa_u (:obj: `torch.Tensor`): first component of confidence direction + vector of size (N, C, H, W) + - kappa_v (:obj: `torch.Tensor`): second component of confidence direction + vector of size (N, C, H, W) + detections (list[Instances]): A list of N Instances, where N is the number of images + in the batch. Instances are modified by this method: "pred_densepose" attribute + is added to each instance, the attribute contains the corresponding + DensePoseOutput object. + """ + # DensePose outputs: segmentation, body part indices, U, V + s, index_uv, u, v = densepose_outputs + sigma_1, sigma_2, kappa_u, kappa_v = densepose_confidences + k = 0 + for detection in detections: + n_i = len(detection) + s_i = s[k : k + n_i] + index_uv_i = index_uv[k : k + n_i] + u_i = u[k : k + n_i] + v_i = v[k : k + n_i] + _local_vars = locals() + confidences = { + name: _local_vars[name] + for name in ("sigma_1", "sigma_2", "kappa_u", "kappa_v") + if _local_vars.get(name) is not None + } + densepose_output_i = DensePoseOutput(s_i, index_uv_i, u_i, v_i, confidences) + detection.pred_densepose = densepose_output_i + k += n_i + + +def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z): + """ + Computes utility values for linear interpolation at points v. 
+ The points are given as normalized offsets in the source interval + (v0_src, v0_src + size_src), more precisely: + v = v0_src + v_norm * size_src / 256.0 + The computed utilities include lower points v_lo, upper points v_hi, + interpolation weights v_w and flags j_valid indicating whether the + points falls into the destination interval (v0_dst, v0_dst + size_dst). + + Args: + v_norm (:obj: `torch.Tensor`): tensor of size N containing + normalized point offsets + v0_src (:obj: `torch.Tensor`): tensor of size N containing + left bounds of source intervals for normalized points + size_src (:obj: `torch.Tensor`): tensor of size N containing + source interval sizes for normalized points + v0_dst (:obj: `torch.Tensor`): tensor of size N containing + left bounds of destination intervals + size_dst (:obj: `torch.Tensor`): tensor of size N containing + destination interval sizes + size_z (int): interval size for data to be interpolated + + Returns: + v_lo (:obj: `torch.Tensor`): int tensor of size N containing + indices of lower values used for interpolation, all values are + integers from [0, size_z - 1] + v_hi (:obj: `torch.Tensor`): int tensor of size N containing + indices of upper values used for interpolation, all values are + integers from [0, size_z - 1] + v_w (:obj: `torch.Tensor`): float tensor of size N containing + interpolation weights + j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing + 0 for points outside the estimation interval + (v0_est, v0_est + size_est) and 1 otherwise + """ + v = v0_src + v_norm * size_src / 256.0 + j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst) + v_grid = (v - v0_dst) * size_z / size_dst + v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1) + v_hi = (v_lo + 1).clamp(max=size_z - 1) + v_grid = torch.min(v_hi.float(), v_grid) + v_w = v_grid - v_lo.float() + return v_lo, v_hi, v_w, j_valid + + +def _grid_sampling_utilities( + zh, zw, bbox_xywh_est, bbox_xywh_gt, index_gt, x_norm, y_norm, index_bbox +): + """ + Prepare tensors used in grid sampling. + + Args: + z_est (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with estimated + values of Z to be extracted for the points X, Y and channel + indices I + bbox_xywh_est (:obj: `torch.Tensor`): tensor of size (N, 4) containing + estimated bounding boxes in format XYWH + bbox_xywh_gt (:obj: `torch.Tensor`): tensor of size (N, 4) containing + matched ground truth bounding boxes in format XYWH + index_gt (:obj: `torch.Tensor`): tensor of size K with point labels for + ground truth points + x_norm (:obj: `torch.Tensor`): tensor of size K with X normalized + coordinates of ground truth points. Image X coordinates can be + obtained as X = Xbbox + x_norm * Wbbox / 255 + y_norm (:obj: `torch.Tensor`): tensor of size K with Y normalized + coordinates of ground truth points. Image Y coordinates can be + obtained as Y = Ybbox + y_norm * Hbbox / 255 + index_bbox (:obj: `torch.Tensor`): tensor of size K with bounding box + indices for each ground truth point. 
The values are thus in + [0, N-1] + + Returns: + j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing + 0 for points to be discarded and 1 for points to be selected + y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values + in z_est for each point + y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values + in z_est for each point + x_lo (:obj: `torch.Tensor`): int tensor of indices of left values + in z_est for each point + x_hi (:obj: `torch.Tensor`): int tensor of indices of right values + in z_est for each point + w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M; + contains upper-left value weight for each point + w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M; + contains upper-right value weight for each point + w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M; + contains lower-left value weight for each point + w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M; + contains lower-right value weight for each point + """ + + x0_gt, y0_gt, w_gt, h_gt = bbox_xywh_gt[index_bbox].unbind(dim=1) + x0_est, y0_est, w_est, h_est = bbox_xywh_est[index_bbox].unbind(dim=1) + x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities( + x_norm, x0_gt, w_gt, x0_est, w_est, zw + ) + y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities( + y_norm, y0_gt, h_gt, y0_est, h_est, zh + ) + j_valid = jx_valid * jy_valid + + w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w) + w_ylo_xhi = x_w * (1.0 - y_w) + w_yhi_xlo = (1.0 - x_w) * y_w + w_yhi_xhi = x_w * y_w + + return j_valid, y_lo, y_hi, x_lo, x_hi, w_ylo_xlo, w_ylo_xhi, w_yhi_xlo, w_yhi_xhi + + +def _extract_at_points_packed( + z_est, + index_bbox_valid, + slice_index_uv, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, +): + """ + Extract ground truth values z_gt for valid point indices and estimated + values z_est using bilinear interpolation over top-left (y_lo, x_lo), + top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right + (y_hi, x_hi) values in z_est with corresponding weights: + w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi. 
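The four corner weights computed in `_grid_sampling_utilities` above (and consumed by `_extract_at_points_packed`) are ordinary bilinear interpolation weights, so for any in-cell offsets they sum to one; a tiny numeric sketch with arbitrary example values:

```python
# Fractional offsets of a sampled point inside its grid cell
x_w, y_w = 0.25, 0.60

w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w)   # top-left:     0.30
w_ylo_xhi = x_w * (1.0 - y_w)           # top-right:    0.10
w_yhi_xlo = (1.0 - x_w) * y_w           # bottom-left:  0.45
w_yhi_xhi = x_w * y_w                   # bottom-right: 0.15

assert abs(w_ylo_xlo + w_ylo_xhi + w_yhi_xlo + w_yhi_xhi - 1.0) < 1e-9
```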
+ Use slice_index_uv to slice dim=1 in z_est + """ + z_est_sampled = ( + z_est[index_bbox_valid, slice_index_uv, y_lo, x_lo] * w_ylo_xlo + + z_est[index_bbox_valid, slice_index_uv, y_lo, x_hi] * w_ylo_xhi + + z_est[index_bbox_valid, slice_index_uv, y_hi, x_lo] * w_yhi_xlo + + z_est[index_bbox_valid, slice_index_uv, y_hi, x_hi] * w_yhi_xhi + ) + return z_est_sampled + + +def _resample_data( + z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode="nearest", padding_mode="zeros" +): + """ + Args: + z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be + resampled + bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing + source bounding boxes in format XYWH + bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing + destination bounding boxes in format XYWH + Return: + zresampled (:obj: `torch.Tensor`): tensor of size (N, C, Hout, Wout) + with resampled values of z, where D is the discretization size + """ + n = bbox_xywh_src.size(0) + assert n == bbox_xywh_dst.size(0), ( + "The number of " + "source ROIs for resampling ({}) should be equal to the number " + "of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0)) + ) + x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1) + x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1) + x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1 + y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1 + x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1 + y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1 + grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout + grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / hout + grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout) + grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout) + dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout) + dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout) + x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout) + y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout) + grid_x = grid_w_expanded * dx_expanded + x0_expanded + grid_y = grid_h_expanded * dy_expanded + y0_expanded + grid = torch.stack((grid_x, grid_y), dim=3) + # resample Z from (N, C, H, W) into (N, C, Hout, Wout) + zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True) + return zresampled + + +def _extract_single_tensors_from_matches_one_image( + proposals_targets, bbox_with_dp_offset, bbox_global_offset +): + i_gt_all = [] + x_norm_all = [] + y_norm_all = [] + u_gt_all = [] + v_gt_all = [] + s_gt_all = [] + bbox_xywh_gt_all = [] + bbox_xywh_est_all = [] + # Ibbox_all == k should be true for all data that corresponds + # to bbox_xywh_gt[k] and bbox_xywh_est[k] + # index k here is global wrt images + i_bbox_all = [] + # at offset k (k is global) contains index of bounding box data + # within densepose output tensor + i_with_dp = [] + + boxes_xywh_est = proposals_targets.proposal_boxes.clone() + boxes_xywh_gt = proposals_targets.gt_boxes.clone() + n_i = len(boxes_xywh_est) + assert n_i == len(boxes_xywh_gt) + + if n_i: + boxes_xywh_est.tensor[:, 2] -= boxes_xywh_est.tensor[:, 0] + boxes_xywh_est.tensor[:, 3] -= boxes_xywh_est.tensor[:, 1] + boxes_xywh_gt.tensor[:, 2] -= boxes_xywh_gt.tensor[:, 0] + boxes_xywh_gt.tensor[:, 3] -= boxes_xywh_gt.tensor[:, 1] + if hasattr(proposals_targets, "gt_densepose"): + densepose_gt = proposals_targets.gt_densepose + for k, box_xywh_est, box_xywh_gt, dp_gt in zip( + range(n_i), 
boxes_xywh_est.tensor, boxes_xywh_gt.tensor, densepose_gt + ): + if (dp_gt is not None) and (len(dp_gt.x) > 0): + i_gt_all.append(dp_gt.i) + x_norm_all.append(dp_gt.x) + y_norm_all.append(dp_gt.y) + u_gt_all.append(dp_gt.u) + v_gt_all.append(dp_gt.v) + s_gt_all.append(dp_gt.segm.unsqueeze(0)) + bbox_xywh_gt_all.append(box_xywh_gt.view(-1, 4)) + bbox_xywh_est_all.append(box_xywh_est.view(-1, 4)) + i_bbox_k = torch.full_like(dp_gt.i, bbox_with_dp_offset + len(i_with_dp)) + i_bbox_all.append(i_bbox_k) + i_with_dp.append(bbox_global_offset + k) + return ( + i_gt_all, + x_norm_all, + y_norm_all, + u_gt_all, + v_gt_all, + s_gt_all, + bbox_xywh_gt_all, + bbox_xywh_est_all, + i_bbox_all, + i_with_dp, + ) + + +def _extract_single_tensors_from_matches(proposals_with_targets): + i_img = [] + i_gt_all = [] + x_norm_all = [] + y_norm_all = [] + u_gt_all = [] + v_gt_all = [] + s_gt_all = [] + bbox_xywh_gt_all = [] + bbox_xywh_est_all = [] + i_bbox_all = [] + i_with_dp_all = [] + n = 0 + for i, proposals_targets_per_image in enumerate(proposals_with_targets): + n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0) + if not n_i: + continue + ( + i_gt_img, + x_norm_img, + y_norm_img, + u_gt_img, + v_gt_img, + s_gt_img, + bbox_xywh_gt_img, + bbox_xywh_est_img, + i_bbox_img, + i_with_dp_img, + ) = _extract_single_tensors_from_matches_one_image( # noqa + proposals_targets_per_image, len(i_with_dp_all), n + ) + i_gt_all.extend(i_gt_img) + x_norm_all.extend(x_norm_img) + y_norm_all.extend(y_norm_img) + u_gt_all.extend(u_gt_img) + v_gt_all.extend(v_gt_img) + s_gt_all.extend(s_gt_img) + bbox_xywh_gt_all.extend(bbox_xywh_gt_img) + bbox_xywh_est_all.extend(bbox_xywh_est_img) + i_bbox_all.extend(i_bbox_img) + i_with_dp_all.extend(i_with_dp_img) + i_img.extend([i] * len(i_with_dp_img)) + n += n_i + # concatenate all data into a single tensor + if (n > 0) and (len(i_with_dp_all) > 0): + i_gt = torch.cat(i_gt_all, 0).long() + x_norm = torch.cat(x_norm_all, 0) + y_norm = torch.cat(y_norm_all, 0) + u_gt = torch.cat(u_gt_all, 0) + v_gt = torch.cat(v_gt_all, 0) + s_gt = torch.cat(s_gt_all, 0) + bbox_xywh_gt = torch.cat(bbox_xywh_gt_all, 0) + bbox_xywh_est = torch.cat(bbox_xywh_est_all, 0) + i_bbox = torch.cat(i_bbox_all, 0).long() + else: + i_gt = None + x_norm = None + y_norm = None + u_gt = None + v_gt = None + s_gt = None + bbox_xywh_gt = None + bbox_xywh_est = None + i_bbox = None + return ( + i_img, + i_with_dp_all, + bbox_xywh_est, + bbox_xywh_gt, + i_gt, + x_norm, + y_norm, + u_gt, + v_gt, + s_gt, + i_bbox, + ) + + +class IIDIsotropicGaussianUVLoss(nn.Module): + """ + Loss for the case of iid residuals with isotropic covariance: + $Sigma_i = sigma_i^2 I$ + The loss (negative log likelihood) is then: + $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, + where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates + difference between estimated and ground truth UV values + For details, see: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. 
NIPS 2019 + """ + + def __init__(self, sigma_lower_bound: float): + super(IIDIsotropicGaussianUVLoss, self).__init__() + self.sigma_lower_bound = sigma_lower_bound + self.log2pi = math.log(2 * math.pi) + + def forward( + self, + u: torch.Tensor, + v: torch.Tensor, + sigma_u: torch.Tensor, + target_u: torch.Tensor, + target_v: torch.Tensor, + ): + # compute $\sigma_i^2$ + # use sigma_lower_bound to avoid degenerate solution for variance + # (sigma -> 0) + sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound + # compute \|delta_i\|^2 + delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2 + # the total loss from the formula above: + loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2) + return loss.sum() + + +class IndepAnisotropicGaussianUVLoss(nn.Module): + """ + Loss for the case of independent residuals with anisotropic covariances: + $Sigma_i = sigma_i^2 I + r_i r_i^T$ + The loss (negative log likelihood) is then: + $1/2 sum_{i=1}^n (log(2 pi) + + log sigma_i^2 (sigma_i^2 + ||r_i||^2) + + ||delta_i||^2 / sigma_i^2 + - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$, + where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates + difference between estimated and ground truth UV values + For details, see: + N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning + Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 + """ + + def __init__(self, sigma_lower_bound: float): + super(IndepAnisotropicGaussianUVLoss, self).__init__() + self.sigma_lower_bound = sigma_lower_bound + self.log2pi = math.log(2 * math.pi) + + def forward( + self, + u: torch.Tensor, + v: torch.Tensor, + sigma_u: torch.Tensor, + kappa_u_est: torch.Tensor, + kappa_v_est: torch.Tensor, + target_u: torch.Tensor, + target_v: torch.Tensor, + ): + # compute $\sigma_i^2$ + sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound + # compute \|r_i\|^2 + r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2 + delta_u = u - target_u + delta_v = v - target_v + # compute \|delta_i\|^2 + delta_sqnorm = delta_u ** 2 + delta_v ** 2 + delta_u_r_u = delta_u * kappa_u_est + delta_v_r_v = delta_v * kappa_v_est + # compute the scalar product <delta_i, r_i> + delta_r = delta_u_r_u + delta_v_r_v + # compute squared scalar product <delta_i, r_i>^2 + delta_r_sqnorm = delta_r ** 2 + denom2 = sigma2 * (sigma2 + r_sqnorm2) + loss = 0.5 * ( + self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2 + ) + return loss.sum() + + +class DensePoseLosses(object): + def __init__(self, cfg): + # fmt: off + self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE + self.w_points = cfg.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS + self.w_part = cfg.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS + self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS + self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS + # fmt: on + self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) + if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: + self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss( + self.confidence_model_cfg.uv_confidence.epsilon + ) + elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO: + self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss( + self.confidence_model_cfg.uv_confidence.epsilon + ) + + def __call__(self, proposals_with_gt, densepose_outputs, densepose_confidences): + losses = {} + # densepose outputs are computed for all images and all bounding boxes; +
# i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively, + # the outputs will have size(0) == 3+1+2+1 == 7 + s, index_uv, u, v = densepose_outputs + sigma_1, sigma_2, kappa_u, kappa_v = densepose_confidences + conf_type = self.confidence_model_cfg.uv_confidence.type + assert u.size(2) == v.size(2) + assert u.size(3) == v.size(3) + assert u.size(2) == index_uv.size(2) + assert u.size(3) == index_uv.size(3) + + with torch.no_grad(): + ( + index_uv_img, + i_with_dp, + bbox_xywh_est, + bbox_xywh_gt, + index_gt_all, + x_norm, + y_norm, + u_gt_all, + v_gt_all, + s_gt, + index_bbox, + ) = _extract_single_tensors_from_matches( # noqa + proposals_with_gt + ) + n_batch = len(i_with_dp) + + # NOTE: we need to keep the same computation graph on all the GPUs to + # perform reduction properly. Hence even if we have no data on one + # of the GPUs, we still need to generate the computation graph. + # Add fake (zero) loss in the form Tensor.sum() * 0 + if not n_batch: + losses["loss_densepose_I"] = index_uv.sum() * 0 + losses["loss_densepose_S"] = s.sum() * 0 + if self.confidence_model_cfg.uv_confidence.enabled: + losses["loss_densepose_UV"] = (u.sum() + v.sum()) * 0 + if conf_type == DensePoseUVConfidenceType.IID_ISO: + losses["loss_densepose_UV"] += sigma_2.sum() * 0 + elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO: + losses["loss_densepose_UV"] += ( + sigma_2.sum() + kappa_u.sum() + kappa_v.sum() + ) * 0 + else: + losses["loss_densepose_U"] = u.sum() * 0 + losses["loss_densepose_V"] = v.sum() * 0 + return losses + + zh = u.size(2) + zw = u.size(3) + + ( + j_valid, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) = _grid_sampling_utilities( # noqa + zh, zw, bbox_xywh_est, bbox_xywh_gt, index_gt_all, x_norm, y_norm, index_bbox + ) + + j_valid_fg = j_valid * (index_gt_all > 0) + + u_gt = u_gt_all[j_valid_fg] + u_est_all = _extract_at_points_packed( + u[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + u_est = u_est_all[j_valid_fg] + + v_gt = v_gt_all[j_valid_fg] + v_est_all = _extract_at_points_packed( + v[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + v_est = v_est_all[j_valid_fg] + + index_uv_gt = index_gt_all[j_valid] + index_uv_est_all = _extract_at_points_packed( + index_uv[i_with_dp], + index_bbox, + slice(None), + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo[:, None], + w_ylo_xhi[:, None], + w_yhi_xlo[:, None], + w_yhi_xhi[:, None], + ) + index_uv_est = index_uv_est_all[j_valid, :] + + if self.confidence_model_cfg.uv_confidence.enabled: + sigma_2_est_all = _extract_at_points_packed( + sigma_2[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + sigma_2_est = sigma_2_est_all[j_valid_fg] + if conf_type in [DensePoseUVConfidenceType.INDEP_ANISO]: + kappa_u_est_all = _extract_at_points_packed( + kappa_u[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + kappa_u_est = kappa_u_est_all[j_valid_fg] + kappa_v_est_all = _extract_at_points_packed( + kappa_v[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + kappa_v_est = kappa_v_est_all[j_valid_fg] + + # Resample everything to the estimated data size, no need to resample + # S_est then: + s_est = 
s[i_with_dp] + with torch.no_grad(): + s_gt = _resample_data( + s_gt.unsqueeze(1), + bbox_xywh_gt, + bbox_xywh_est, + self.heatmap_size, + self.heatmap_size, + mode="nearest", + padding_mode="zeros", + ).squeeze(1) + + # add point-based losses: + if self.confidence_model_cfg.uv_confidence.enabled: + if conf_type == DensePoseUVConfidenceType.IID_ISO: + uv_loss = ( + self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt) + * self.w_points + ) + losses["loss_densepose_UV"] = uv_loss + elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO: + uv_loss = ( + self.uv_loss_with_confidences( + u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt + ) + * self.w_points + ) + losses["loss_densepose_UV"] = uv_loss + else: + raise ValueError(f"Unknown confidence model type: {conf_type}") + else: + u_loss = F.smooth_l1_loss(u_est, u_gt, reduction="sum") * self.w_points + losses["loss_densepose_U"] = u_loss + v_loss = F.smooth_l1_loss(v_est, v_gt, reduction="sum") * self.w_points + losses["loss_densepose_V"] = v_loss + index_uv_loss = F.cross_entropy(index_uv_est, index_uv_gt.long()) * self.w_part + losses["loss_densepose_I"] = index_uv_loss + + if self.n_segm_chan == 2: + s_gt = s_gt > 0 + s_loss = F.cross_entropy(s_est, s_gt.long()) * self.w_segm + losses["loss_densepose_S"] = s_loss + return losses + + +def build_densepose_losses(cfg): + losses = DensePoseLosses(cfg) + return losses diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/evaluator.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb002b5093365f12edf5f4610ab261491d12bc8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/evaluator.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import contextlib +import copy +import io +import itertools +import json +import logging +import os +from collections import OrderedDict +import torch +from fvcore.common.file_io import PathManager +from pycocotools.coco import COCO + +from detectron2.data import MetadataCatalog +from detectron2.evaluation import DatasetEvaluator +from detectron2.structures import BoxMode +from detectron2.utils.comm import all_gather, is_main_process, synchronize +from detectron2.utils.logger import create_small_table + +from .densepose_coco_evaluation import DensePoseCocoEval, DensePoseEvalMode + + +class DensePoseCOCOEvaluator(DatasetEvaluator): + def __init__(self, dataset_name, distributed, output_dir=None): + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + The :class:`Instances` object needs to have `densepose` field. 
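For orientation, a hypothetical sketch of plugging this evaluator into detectron2's standard evaluation loop; `cfg`, `model`, and the dataset name are assumed placeholders rather than values defined in this diff:

```python
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import inference_on_dataset

# "densepose_coco_2014_minival" stands in for any registered DensePose split
evaluator = DensePoseCOCOEvaluator("densepose_coco_2014_minival", distributed=False,
                                   output_dir="./output")
val_loader = build_detection_test_loader(cfg, "densepose_coco_2014_minival")
metrics = inference_on_dataset(model, val_loader, evaluator)  # OrderedDict with GPS / GPSm APs
```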
+ """ + for input, output in zip(inputs, outputs): + instances = output["instances"].to(self._cpu_device) + + boxes = instances.pred_boxes.tensor.clone() + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + instances.pred_densepose = instances.pred_densepose.to_result(boxes) + + json_results = prediction_to_json(instances, input["image_id"]) + self._predictions.extend(json_results) + + def evaluate(self): + if self._distributed: + synchronize() + predictions = all_gather(self._predictions) + predictions = list(itertools.chain(*predictions)) + if not is_main_process(): + return + else: + predictions = self._predictions + + return copy.deepcopy(self._eval_predictions(predictions)) + + def _eval_predictions(self, predictions): + """ + Evaluate predictions on densepose. + Return results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_densepose_results.json") + with open(file_path, "w") as f: + json.dump(predictions, f) + f.flush() + os.fsync(f.fileno()) + + self._logger.info("Evaluating predictions ...") + res = OrderedDict() + results_gps, results_gpsm = _evaluate_predictions_on_coco(self._coco_api, predictions) + res["densepose_gps"] = results_gps + res["densepose_gpsm"] = results_gpsm + return res + + +def prediction_to_json(instances, img_id): + """ + Args: + instances (Instances): the output of the model + img_id (str): the image id in COCO + + Returns: + list[dict]: the results in densepose evaluation format + """ + scores = instances.scores.tolist() + + results = [] + for k in range(len(instances)): + densepose = instances.pred_densepose[k] + result = { + "image_id": img_id, + "category_id": 1, # densepose only has one class + "bbox": densepose[1], + "score": scores[k], + "densepose": densepose, + } + results.append(result) + return results + + +def _evaluate_predictions_on_coco(coco_gt, coco_results): + metrics = ["AP", "AP50", "AP75", "APm", "APl"] + + logger = logging.getLogger(__name__) + + if len(coco_results) == 0: # cocoapi does not handle empty results very well + logger.warn("No predictions from the model! 
Set scores to -1") + results_gps = {metric: -1 for metric in metrics} + results_gpsm = {metric: -1 for metric in metrics} + return results_gps, results_gpsm + + coco_dt = coco_gt.loadRes(coco_results) + results_gps = _evaluate_predictions_on_coco_gps(coco_gt, coco_dt, metrics) + logger.info( + "Evaluation results for densepose, GPS metric: \n" + create_small_table(results_gps) + ) + results_gpsm = _evaluate_predictions_on_coco_gpsm(coco_gt, coco_dt, metrics) + logger.info( + "Evaluation results for densepose, GPSm metric: \n" + create_small_table(results_gpsm) + ) + return results_gps, results_gpsm + + +def _evaluate_predictions_on_coco_gps(coco_gt, coco_dt, metrics): + coco_eval = DensePoseCocoEval(coco_gt, coco_dt, "densepose", dpEvalMode=DensePoseEvalMode.GPS) + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)} + return results + + +def _evaluate_predictions_on_coco_gpsm(coco_gt, coco_dt, metrics): + coco_eval = DensePoseCocoEval(coco_gt, coco_dt, "densepose", dpEvalMode=DensePoseEvalMode.GPSM) + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)} + return results diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf69db1b6e4c687bc4e284e2795cab61ebf043f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py @@ -0,0 +1,75 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA + + +class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA): + def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1): + """ + Args: + cfg (CfgNode): + model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. + transform_data (DensePoseTransformData): contains symmetry label + transforms used for horizontal flip + tta_mapper (callable): takes a dataset dict and returns a list of + augmented versions of the dataset dict. Defaults to + `DatasetMapperTTA(cfg)`. + batch_size (int): batch the augmented images into this batch size for inference. 
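A hypothetical usage sketch for this wrapper; `cfg` and `model` are assumed to come from the usual detectron2 setup, and the transform loader shown is the `load_from_cfg` helper added in `densepose/utils/transform.py` later in this diff:

```python
from densepose.utils.transform import load_from_cfg  # assumed import path for this repo

# Symmetry-label transform data used when averaging horizontally flipped predictions
transform_data = load_from_cfg(cfg)
model_tta = DensePoseGeneralizedRCNNWithTTA(cfg, model, transform_data, batch_size=1)
```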
+ """ + self._transform_data = transform_data + super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size) + + # the implementation follows closely the one from detectron2/modeling + def _inference_one_image(self, input): + """ + Args: + input (dict): one dataset dict + + Returns: + dict: one output dict + """ + + augmented_inputs, aug_vars = self._get_augmented_inputs(input) + # Detect boxes from all augmented versions + with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]): + # temporarily disable roi heads + all_boxes, all_scores, all_classes = self._get_augmented_boxes( + augmented_inputs, aug_vars + ) + merged_instances = self._merge_detections( + all_boxes, all_scores, all_classes, (aug_vars["height"], aug_vars["width"]) + ) + + if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON: + # Use the detected boxes to obtain new fields + augmented_instances = self._rescale_detected_boxes( + augmented_inputs, merged_instances, aug_vars + ) + # run forward on the detected boxes + outputs = self._batch_inference( + augmented_inputs, augmented_instances, do_postprocess=False + ) + # Delete now useless variables to avoid being out of memory + del augmented_inputs, augmented_instances, merged_instances + # average the predictions + if self.cfg.MODEL.MASK_ON: + outputs[0].pred_masks = self._reduce_pred_masks(outputs, aug_vars) + if self.cfg.MODEL.DENSEPOSE_ON: + outputs[0].pred_densepose = self._reduce_pred_densepose(outputs, aug_vars) + # postprocess + output = self._detector_postprocess(outputs[0], aug_vars) + return {"instances": output} + else: + return {"instances": merged_instances} + + def _reduce_pred_densepose(self, outputs, aug_vars): + for idx, output in enumerate(outputs): + if aug_vars["do_hflip"][idx]: + output.pred_densepose.hflip(self._transform_data) + # Less memory-intensive averaging + for attr in "SIUV": + setattr( + outputs[0].pred_densepose, + attr, + sum(getattr(o.pred_densepose, attr) for o in outputs) / len(outputs), + ) + return outputs[0].pred_densepose diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/roi_head.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/roi_head.py new file mode 100644 index 0000000000000000000000000000000000000000..023119760b77cf5294ed18292e77e7f495099770 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/roi_head.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import numpy as np +from typing import Dict +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn as nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ShapeSpec, get_norm +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads +from detectron2.modeling.poolers import ROIPooler +from detectron2.modeling.roi_heads import select_foreground_proposals + +from .densepose_head import ( + build_densepose_data_filter, + build_densepose_head, + build_densepose_losses, + build_densepose_predictor, + densepose_inference, +) + + +class Decoder(nn.Module): + """ + A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper + (https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from + all levels of the FPN into single output. 
+ """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features): + super(Decoder, self).__init__() + + # fmt: off + self.in_features = in_features + feature_strides = {k: v.stride for k, v in input_shape.items()} + feature_channels = {k: v.channels for k, v in input_shape.items()} + num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES + conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS + self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE + norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM + # fmt: on + + self.scale_heads = [] + for in_feature in self.in_features: + head_ops = [] + head_length = max( + 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)) + ) + for k in range(head_length): + conv = Conv2d( + feature_channels[in_feature] if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not norm, + norm=get_norm(norm, conv_dims), + activation=F.relu, + ) + weight_init.c2_msra_fill(conv) + head_ops.append(conv) + if feature_strides[in_feature] != self.common_stride: + head_ops.append( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) + ) + self.scale_heads.append(nn.Sequential(*head_ops)) + self.add_module(in_feature, self.scale_heads[-1]) + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + weight_init.c2_msra_fill(self.predictor) + + def forward(self, features): + for i, _ in enumerate(self.in_features): + if i == 0: + x = self.scale_heads[i](features[i]) + else: + x = x + self.scale_heads[i](features[i]) + x = self.predictor(x) + return x + + +@ROI_HEADS_REGISTRY.register() +class DensePoseROIHeads(StandardROIHeads): + """ + A Standard ROIHeads which contains an addition of DensePose head. + """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape) + self._init_densepose_head(cfg, input_shape) + + def _init_densepose_head(self, cfg, input_shape): + # fmt: off + self.densepose_on = cfg.MODEL.DENSEPOSE_ON + if not self.densepose_on: + return + self.densepose_data_filter = build_densepose_data_filter(cfg) + dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION + dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO + dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE + self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON + # fmt: on + if self.use_decoder: + dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,) + else: + dp_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features) + in_channels = [input_shape[f].channels for f in self.in_features][0] + + if self.use_decoder: + self.decoder = Decoder(cfg, input_shape, self.in_features) + + self.densepose_pooler = ROIPooler( + output_size=dp_pooler_resolution, + scales=dp_pooler_scales, + sampling_ratio=dp_pooler_sampling_ratio, + pooler_type=dp_pooler_type, + ) + self.densepose_head = build_densepose_head(cfg, in_channels) + self.densepose_predictor = build_densepose_predictor( + cfg, self.densepose_head.n_out_channels + ) + self.densepose_losses = build_densepose_losses(cfg) + + def _forward_densepose(self, features, instances): + """ + Forward logic of the densepose prediction branch. + + Args: + features (list[Tensor]): #level input features for densepose prediction + instances (list[Instances]): the per-image instances to train/predict densepose. + In training, they can be the proposals. + In inference, they can be the predicted boxes. 
+ + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "densepose" and return it. + """ + if not self.densepose_on: + return {} if self.training else instances + + features = [features[f] for f in self.in_features] + if self.training: + proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposals_dp = self.densepose_data_filter(proposals) + if len(proposals_dp) > 0: + # NOTE may deadlock in DDP if certain workers have empty proposals_dp + proposal_boxes = [x.proposal_boxes for x in proposals_dp] + + if self.use_decoder: + features = [self.decoder(features)] + + features_dp = self.densepose_pooler(features, proposal_boxes) + densepose_head_outputs = self.densepose_head(features_dp) + densepose_outputs, _, confidences, _ = self.densepose_predictor( + densepose_head_outputs + ) + densepose_loss_dict = self.densepose_losses( + proposals_dp, densepose_outputs, confidences + ) + return densepose_loss_dict + else: + pred_boxes = [x.pred_boxes for x in instances] + + if self.use_decoder: + features = [self.decoder(features)] + + features_dp = self.densepose_pooler(features, pred_boxes) + if len(features_dp) > 0: + densepose_head_outputs = self.densepose_head(features_dp) + densepose_outputs, _, confidences, _ = self.densepose_predictor( + densepose_head_outputs + ) + else: + # If no detection occurred instances + # set densepose_outputs to empty tensors + empty_tensor = torch.zeros(size=(0, 0, 0, 0), device=features_dp.device) + densepose_outputs = tuple([empty_tensor] * 4) + confidences = tuple([empty_tensor] * 4) + + densepose_inference(densepose_outputs, confidences, instances) + return instances + + def forward(self, images, features, proposals, targets=None): + instances, losses = super().forward(images, features, proposals, targets) + del targets, images + + if self.training: + losses.update(self._forward_densepose(features, instances)) + return instances, losses + + def forward_with_given_boxes(self, features, instances): + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + This is useful for downstream tasks where a box is known, but need to obtain + other attributes (outputs of other heads). + Test-time augmentation also uses this. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (list[Instances]): + the same `Instances` objects, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + + instances = super().forward_with_given_boxes(features, instances) + instances = self._forward_densepose(features, instances) + return instances diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/dbhelper.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/dbhelper.py new file mode 100644 index 0000000000000000000000000000000000000000..b28862cdede26c13200d928118d5bc5c00e3d2aa --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/dbhelper.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from typing import Any, Dict, Optional, Tuple + + +class EntrySelector(object): + """ + Base class for entry selectors + """ + + @staticmethod + def from_string(spec: str) -> "EntrySelector": + if spec == "*": + return AllEntrySelector() + return FieldEntrySelector(spec) + + +class AllEntrySelector(EntrySelector): + """ + Selector that accepts all entries + """ + + SPECIFIER = "*" + + def __call__(self, entry): + return True + + +class FieldEntrySelector(EntrySelector): + """ + Selector that accepts only entries that match provided field + specifier(s). Only a limited set of specifiers is supported for now: + <specifiers>::=<specifier>[<comma><specifiers>] + <specifier>::=<field_name>[<type_delim><type>]<equal><value_or_range> + <field_name> is a valid identifier + <type> ::= "int" | "str" + <equal> ::= "=" + <comma> ::= "," + <type_delim> ::= ":" + <value_or_range> ::= <value> | <range> + <range> ::= <value><range_delim><value> + <range_delim> ::= "-" + <value> is a string without spaces and special symbols + (e.g. <comma>, <equal>, <type_delim>, <range_delim>) + """ + + _SPEC_DELIM = "," + _TYPE_DELIM = ":" + _RANGE_DELIM = "-" + _EQUAL = "=" + _ERROR_PREFIX = "Invalid field selector specifier" + + class _FieldEntryValuePredicate(object): + """ + Predicate that checks strict equality for the specified entry field + """ + + def __init__(self, name: str, typespec: str, value: str): + import builtins + + self.name = name + self.type = getattr(builtins, typespec) if typespec is not None else str + self.value = value + + def __call__(self, entry): + return entry[self.name] == self.type(self.value) + + class _FieldEntryRangePredicate(object): + """ + Predicate that checks whether an entry field falls into the specified range + """ + + def __init__(self, name: str, typespec: str, vmin: str, vmax: str): + import builtins + + self.name = name + self.type = getattr(builtins, typespec) if typespec is not None else str + self.vmin = vmin + self.vmax = vmax + + def __call__(self, entry): + return (entry[self.name] >= self.type(self.vmin)) and ( + entry[self.name] <= self.type(self.vmax) + ) + + def __init__(self, spec: str): + self._predicates = self._parse_specifier_into_predicates(spec) + + def __call__(self, entry: Dict[str, Any]): + for predicate in self._predicates: + if not predicate(entry): + return False + return True + + def _parse_specifier_into_predicates(self, spec: str): + predicates = [] + specs = spec.split(self._SPEC_DELIM) + for subspec in specs: + eq_idx = subspec.find(self._EQUAL) + if eq_idx > 0: + field_name_with_type = subspec[:eq_idx] + field_name, field_type = self._parse_field_name_type(field_name_with_type) + field_value_or_range = subspec[eq_idx + 1 :] + if self._is_range_spec(field_value_or_range): + vmin, vmax = self._get_range_spec(field_value_or_range) + predicate = FieldEntrySelector._FieldEntryRangePredicate( + field_name, field_type, vmin, vmax + ) + else: + predicate = FieldEntrySelector._FieldEntryValuePredicate( + field_name, field_type, field_value_or_range + ) + predicates.append(predicate) + elif eq_idx == 0: + self._parse_error(f'"{subspec}", field name is empty!') + else: + self._parse_error(f'"{subspec}", should have format ' "<field_name>=<value_or_range>!") + return predicates + + def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]: + type_delim_idx = field_name_with_type.find(self._TYPE_DELIM) + if type_delim_idx > 0: + field_name = field_name_with_type[:type_delim_idx] + field_type = field_name_with_type[type_delim_idx + 1 :] + elif type_delim_idx == 0: + self._parse_error(f'"{field_name_with_type}", field name is empty!') + else: + field_name = field_name_with_type + field_type = None + return field_name, field_type + + def _is_range_spec(self, field_value_or_range): + delim_idx =
field_value_or_range.find(self._RANGE_DELIM) + return delim_idx > 0 + + def _get_range_spec(self, field_value_or_range): + if self._is_range_spec(field_value_or_range): + delim_idx = field_value_or_range.find(self._RANGE_DELIM) + vmin = field_value_or_range[:delim_idx] + vmax = field_value_or_range[delim_idx + 1 :] + return vmin, vmax + else: + self._parse_error('"field_value_or_range", range of values expected!') + + def _parse_error(self, msg): + raise ValueError(f"{self._ERROR_PREFIX}: {msg}") diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/logger.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..e3fa45e0c0218bdd2e79c08b0d8ff83abc3e4308 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/logger.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging + + +def verbosity_to_level(verbosity): + if verbosity is not None: + if verbosity == 0: + return logging.WARNING + elif verbosity == 1: + return logging.INFO + elif verbosity >= 2: + return logging.DEBUG + return logging.WARNING diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/transform.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..b7cfe097234dbd3ff19b84ecdfb63fd8bf5fd4b6 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/utils/transform.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from fvcore.common.file_io import PathManager + +from detectron2.data import MetadataCatalog + +from densepose import DensePoseTransformData + + +def load_for_dataset(dataset_name): + path = MetadataCatalog.get(dataset_name).densepose_transform_src + densepose_transform_data_fpath = PathManager.get_local_path(path) + return DensePoseTransformData.load(densepose_transform_data_fpath) + + +def load_from_cfg(cfg): + return load_for_dataset(cfg.DATASETS.TEST[0]) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/base.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/base.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa3e6e9f44ae2ce888f6e24dd11c8428734417b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/base.py @@ -0,0 +1,191 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import numpy as np +import cv2 +import torch + +Image = np.ndarray +Boxes = torch.Tensor + + +class MatrixVisualizer(object): + """ + Base visualizer for matrix data + """ + + def __init__( + self, + inplace=True, + cmap=cv2.COLORMAP_PARULA, + val_scale=1.0, + alpha=0.7, + interp_method_matrix=cv2.INTER_LINEAR, + interp_method_mask=cv2.INTER_NEAREST, + ): + self.inplace = inplace + self.cmap = cmap + self.val_scale = val_scale + self.alpha = alpha + self.interp_method_matrix = interp_method_matrix + self.interp_method_mask = interp_method_mask + + def visualize(self, image_bgr, mask, matrix, bbox_xywh): + self._check_image(image_bgr) + self._check_mask_matrix(mask, matrix) + if self.inplace: + image_target_bgr = image_bgr + else: + image_target_bgr = image_bgr * 0 + x, y, w, h = [int(v) for v in bbox_xywh] + if w <= 0 or h <= 0: + return image_bgr + mask, matrix = self._resize(mask, matrix, w, h) + mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3]) + matrix_scaled = matrix.astype(np.float32) * self.val_scale + _EPSILON = 1e-6 + if np.any(matrix_scaled > 255 + _EPSILON): + logger = logging.getLogger(__name__) + logger.warning( + f"Matrix has values > {255 + _EPSILON} after " f"scaling, clipping to [0..255]" + ) + matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8) + matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap) + matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg] + image_target_bgr[y : y + h, x : x + w, :] = ( + image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha) + matrix_vis * self.alpha + ) + return image_target_bgr.astype(np.uint8) + + def _resize(self, mask, matrix, w, h): + if (w != mask.shape[1]) or (h != mask.shape[0]): + mask = cv2.resize(mask, (w, h), self.interp_method_mask) + if (w != matrix.shape[1]) or (h != matrix.shape[0]): + matrix = cv2.resize(matrix, (w, h), self.interp_method_matrix) + return mask, matrix + + def _check_image(self, image_rgb): + assert len(image_rgb.shape) == 3 + assert image_rgb.shape[2] == 3 + assert image_rgb.dtype == np.uint8 + + def _check_mask_matrix(self, mask, matrix): + assert len(matrix.shape) == 2 + assert len(mask.shape) == 2 + assert mask.dtype == np.uint8 + + +class RectangleVisualizer(object): + + _COLOR_GREEN = (18, 127, 15) + + def __init__(self, color=_COLOR_GREEN, thickness=1): + self.color = color + self.thickness = thickness + + def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None): + x, y, w, h = bbox_xywh + color = color or self.color + thickness = thickness or self.thickness + cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness) + return image_bgr + + +class PointsVisualizer(object): + + _COLOR_GREEN = (18, 127, 15) + + def __init__(self, color_bgr=_COLOR_GREEN, r=5): + self.color_bgr = color_bgr + self.r = r + + def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None): + for j, pt_xy in enumerate(pts_xy): + x, y = pt_xy + color_bgr = colors_bgr[j] if colors_bgr is not None else self.color_bgr + r = rs[j] if rs is not None else self.r + cv2.circle(image_bgr, (x, y), r, color_bgr, -1) + return image_bgr + + +class TextVisualizer(object): + + _COLOR_GRAY = (218, 227, 218) + _COLOR_WHITE = (255, 255, 255) + + def __init__( + self, + font_face=cv2.FONT_HERSHEY_SIMPLEX, + font_color_bgr=_COLOR_GRAY, + font_scale=0.35, + font_line_type=cv2.LINE_AA, + font_line_thickness=1, + fill_color_bgr=_COLOR_WHITE, + fill_color_transparency=1.0, + frame_color_bgr=_COLOR_WHITE, + 
frame_color_transparency=1.0, + frame_thickness=1, + ): + self.font_face = font_face + self.font_color_bgr = font_color_bgr + self.font_scale = font_scale + self.font_line_type = font_line_type + self.font_line_thickness = font_line_thickness + self.fill_color_bgr = fill_color_bgr + self.fill_color_transparency = fill_color_transparency + self.frame_color_bgr = frame_color_bgr + self.frame_color_transparency = frame_color_transparency + self.frame_thickness = frame_thickness + + def visualize(self, image_bgr, txt, topleft_xy): + txt_w, txt_h = self.get_text_size_wh(txt) + topleft_xy = tuple(map(int, topleft_xy)) + x, y = topleft_xy + if self.frame_color_transparency < 1.0: + t = self.frame_thickness + image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = ( + image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] + * self.frame_color_transparency + + np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency) + ).astype(np.float) + if self.fill_color_transparency < 1.0: + image_bgr[y : y + txt_h, x : x + txt_w, :] = ( + image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency + + np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency) + ).astype(np.float) + cv2.putText( + image_bgr, + txt, + topleft_xy, + self.font_face, + self.font_scale, + self.font_color_bgr, + self.font_line_thickness, + self.font_line_type, + ) + return image_bgr + + def get_text_size_wh(self, txt): + ((txt_w, txt_h), _) = cv2.getTextSize( + txt, self.font_face, self.font_scale, self.font_line_thickness + ) + return txt_w, txt_h + + +class CompoundVisualizer(object): + def __init__(self, visualizers): + self.visualizers = visualizers + + def visualize(self, image_bgr, data): + assert len(data) == len( + self.visualizers + ), "The number of datas {} should match the number of visualizers" " {}".format( + len(data), len(self.visualizers) + ) + image = image_bgr + for i, visualizer in enumerate(self.visualizers): + image = visualizer.visualize(image, data[i]) + return image + + def __str__(self): + visualizer_str = ", ".join([str(v) for v in self.visualizers]) + return "Compound Visualizer [{}]".format(visualizer_str) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/bounding_box.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/bounding_box.py new file mode 100644 index 0000000000000000000000000000000000000000..d7951d69e4a92d638debc79458dd2cfe58c650e3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/bounding_box.py @@ -0,0 +1,37 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from .base import RectangleVisualizer, TextVisualizer + + +class BoundingBoxVisualizer(object): + def __init__(self): + self.rectangle_visualizer = RectangleVisualizer() + + def visualize(self, image_bgr, boxes_xywh): + for bbox_xywh in boxes_xywh: + image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh) + return image_bgr + + +class ScoredBoundingBoxVisualizer(object): + def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None): + if bbox_visualizer_params is None: + bbox_visualizer_params = {} + if score_visualizer_params is None: + score_visualizer_params = {} + self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params) + self.visualizer_score = TextVisualizer(**score_visualizer_params) + + def visualize(self, image_bgr, scored_bboxes): + boxes_xywh, box_scores = scored_bboxes + assert len(boxes_xywh) == len( + box_scores + ), "Number of bounding boxes {} should be equal to the number of scores {}".format( + len(boxes_xywh), len(box_scores) + ) + for i, box_xywh in enumerate(boxes_xywh): + score_i = box_scores[i] + image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh) + score_txt = "{0:6.4f}".format(score_i) + topleft_xy = box_xywh[0], box_xywh[1] + image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy) + return image_bgr diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/densepose.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/densepose.py new file mode 100644 index 0000000000000000000000000000000000000000..f2e77dc2d8e0f8c041ac1217978c639a826f0857 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/densepose.py @@ -0,0 +1,593 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import numpy as np +from typing import Iterable, Optional, Tuple +import cv2 + +from ..data.structures import DensePoseDataRelative, DensePoseOutput, DensePoseResult +from .base import Boxes, Image, MatrixVisualizer, PointsVisualizer + + +class DensePoseResultsVisualizer(object): + def visualize(self, image_bgr: Image, densepose_result: Optional[DensePoseResult]) -> Image: + if densepose_result is None: + return image_bgr + context = self.create_visualization_context(image_bgr) + for i, result_encoded_w_shape in enumerate(densepose_result.results): + iuv_arr = DensePoseResult.decode_png_data(*result_encoded_w_shape) + bbox_xywh = densepose_result.boxes_xywh[i] + self.visualize_iuv_arr(context, iuv_arr, bbox_xywh) + image_bgr = self.context_to_image_bgr(context) + return image_bgr + + +class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer): + def __init__( + self, + data_extractor, + segm_extractor, + inplace=True, + cmap=cv2.COLORMAP_PARULA, + alpha=0.7, + val_scale=1.0, + ): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha + ) + self.data_extractor = data_extractor + self.segm_extractor = segm_extractor + + def create_visualization_context(self, image_bgr: Image): + return image_bgr + + def context_to_image_bgr(self, context): + return context + + def get_image_bgr_from_context(self, context): + return context + + def visualize_iuv_arr(self, context, iuv_arr, bbox_xywh): + image_bgr = self.get_image_bgr_from_context(context) + matrix = self.data_extractor(iuv_arr) + segm = self.segm_extractor(iuv_arr) + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[segm > 0] = 1 + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh) + return image_bgr + + +def _extract_i_from_iuvarr(iuv_arr): + return iuv_arr[0, :, :] + + +def _extract_u_from_iuvarr(iuv_arr): + return iuv_arr[1, :, :] + + +def _extract_v_from_iuvarr(iuv_arr): + return iuv_arr[2, :, :] + + +class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer): + def __init__(self, levels=10, **kwargs): + self.levels = levels + self.plot_args = kwargs + + def create_visualization_context(self, image_bgr: Image): + import matplotlib.pyplot as plt + from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + + context = {} + context["image_bgr"] = image_bgr + dpi = 100 + height_inches = float(image_bgr.shape[0]) / dpi + width_inches = float(image_bgr.shape[1]) / dpi + fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi) + plt.axes([0, 0, 1, 1]) + plt.axis("off") + context["fig"] = fig + canvas = FigureCanvas(fig) + context["canvas"] = canvas + extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0) + plt.imshow(image_bgr[:, :, ::-1], extent=extent) + return context + + def context_to_image_bgr(self, context): + fig = context["fig"] + w, h = map(int, fig.get_size_inches() * fig.get_dpi()) + canvas = context["canvas"] + canvas.draw() + image_1d = np.fromstring(canvas.tostring_rgb(), dtype="uint8") + image_rgb = image_1d.reshape(h, w, 3) + image_bgr = image_rgb[:, :, ::-1].copy() + return image_bgr + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> Image: + import matplotlib.pyplot as plt + + u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0 + v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0 + extent = ( + bbox_xywh[0], + bbox_xywh[0] + bbox_xywh[2], + bbox_xywh[1], + bbox_xywh[1] + bbox_xywh[3], + ) + plt.contour(u, 
self.levels, extent=extent, **self.plot_args) + plt.contour(v, self.levels, extent=extent, **self.plot_args) + + +class DensePoseResultsCustomContourVisualizer(DensePoseResultsVisualizer): + """ + Contour visualization using marching squares + """ + + def __init__(self, levels=10, **kwargs): + # TODO: colormap is hardcoded + cmap = cv2.COLORMAP_PARULA + if isinstance(levels, int): + self.levels = np.linspace(0, 1, levels) + else: + self.levels = levels + if "linewidths" in kwargs: + self.linewidths = kwargs["linewidths"] + else: + self.linewidths = [1] * len(self.levels) + self.plot_args = kwargs + img_colors_bgr = cv2.applyColorMap((self.levels * 255).astype(np.uint8), cmap) + self.level_colors_bgr = [ + [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr + ] + + def create_visualization_context(self, image_bgr: Image): + return image_bgr + + def context_to_image_bgr(self, context): + return context + + def get_image_bgr_from_context(self, context): + return context + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> Image: + image_bgr = self.get_image_bgr_from_context(context) + segm = _extract_i_from_iuvarr(iuv_arr) + u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0 + v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0 + self._contours(image_bgr, u, segm, bbox_xywh) + self._contours(image_bgr, v, segm, bbox_xywh) + + def _contours(self, image_bgr, arr, segm, bbox_xywh): + for part_idx in range(1, DensePoseDataRelative.N_PART_LABELS + 1): + mask = segm == part_idx + if not np.any(mask): + continue + arr_min = np.amin(arr[mask]) + arr_max = np.amax(arr[mask]) + I, J = np.nonzero(mask) + i0 = np.amin(I) + i1 = np.amax(I) + 1 + j0 = np.amin(J) + j1 = np.amax(J) + 1 + if (j1 == j0 + 1) or (i1 == i0 + 1): + continue + Nw = arr.shape[1] - 1 + Nh = arr.shape[0] - 1 + for level_idx, level in enumerate(self.levels): + if (level < arr_min) or (level > arr_max): + continue + vp = arr[i0:i1, j0:j1] >= level + bin_codes = vp[:-1, :-1] + vp[1:, :-1] * 2 + vp[1:, 1:] * 4 + vp[:-1, 1:] * 8 + mp = mask[i0:i1, j0:j1] + bin_mask_codes = mp[:-1, :-1] + mp[1:, :-1] * 2 + mp[1:, 1:] * 4 + mp[:-1, 1:] * 8 + it = np.nditer(bin_codes, flags=["multi_index"]) + color_bgr = self.level_colors_bgr[level_idx] + linewidth = self.linewidths[level_idx] + while not it.finished: + if (it[0] != 0) and (it[0] != 15): + i, j = it.multi_index + if bin_mask_codes[i, j] != 0: + self._draw_line( + image_bgr, + arr, + mask, + level, + color_bgr, + linewidth, + it[0], + it.multi_index, + bbox_xywh, + Nw, + Nh, + (i0, j0), + ) + it.iternext() + + def _draw_line( + self, + image_bgr, + arr, + mask, + v, + color_bgr, + linewidth, + bin_code, + multi_idx, + bbox_xywh, + Nw, + Nh, + offset, + ): + lines = self._bin_code_2_lines(arr, v, bin_code, multi_idx, Nw, Nh, offset) + x0, y0, w, h = bbox_xywh + x1 = x0 + w + y1 = y0 + h + for line in lines: + x0r, y0r = line[0] + x1r, y1r = line[1] + pt0 = (int(x0 + x0r * (x1 - x0)), int(y0 + y0r * (y1 - y0))) + pt1 = (int(x0 + x1r * (x1 - x0)), int(y0 + y1r * (y1 - y0))) + cv2.line(image_bgr, pt0, pt1, color_bgr, linewidth) + + def _bin_code_2_lines(self, arr, v, bin_code, multi_idx, Nw, Nh, offset): + i0, j0 = offset + i, j = multi_idx + i += i0 + j += j0 + v0, v1, v2, v3 = arr[i, j], arr[i + 1, j], arr[i + 1, j + 1], arr[i, j + 1] + x0i = float(j) / Nw + y0j = float(i) / Nh + He = 1.0 / Nh + We = 1.0 / Nw + if (bin_code == 1) or (bin_code == 14): + a = (v - v0) / (v1 - v0) + b = (v - v0) / (v3 - v0) + pt1 = (x0i, y0j + a * 
He) + pt2 = (x0i + b * We, y0j) + return [(pt1, pt2)] + elif (bin_code == 2) or (bin_code == 13): + a = (v - v0) / (v1 - v0) + b = (v - v1) / (v2 - v1) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + b * We, y0j + He) + return [(pt1, pt2)] + elif (bin_code == 3) or (bin_code == 12): + a = (v - v0) / (v3 - v0) + b = (v - v1) / (v2 - v1) + pt1 = (x0i + a * We, y0j) + pt2 = (x0i + b * We, y0j + He) + return [(pt1, pt2)] + elif (bin_code == 4) or (bin_code == 11): + a = (v - v1) / (v2 - v1) + b = (v - v3) / (v2 - v3) + pt1 = (x0i + a * We, y0j + He) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif (bin_code == 6) or (bin_code == 9): + a = (v - v0) / (v1 - v0) + b = (v - v3) / (v2 - v3) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif (bin_code == 7) or (bin_code == 8): + a = (v - v0) / (v3 - v0) + b = (v - v3) / (v2 - v3) + pt1 = (x0i + a * We, y0j) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif bin_code == 5: + a1 = (v - v0) / (v1 - v0) + b1 = (v - v1) / (v2 - v1) + pt11 = (x0i, y0j + a1 * He) + pt12 = (x0i + b1 * We, y0j + He) + a2 = (v - v0) / (v3 - v0) + b2 = (v - v3) / (v2 - v3) + pt21 = (x0i + a2 * We, y0j) + pt22 = (x0i + We, y0j + b2 * He) + return [(pt11, pt12), (pt21, pt22)] + elif bin_code == 10: + a1 = (v - v0) / (v3 - v0) + b1 = (v - v0) / (v1 - v0) + pt11 = (x0i + a1 * We, y0j) + pt12 = (x0i, y0j + b1 * He) + a2 = (v - v1) / (v2 - v1) + b2 = (v - v3) / (v2 - v3) + pt21 = (x0i + a2 * We, y0j + He) + pt22 = (x0i + We, y0j + b2 * He) + return [(pt11, pt12), (pt21, pt22)] + return [] + + +try: + import matplotlib + + matplotlib.use("Agg") + DensePoseResultsContourVisualizer = DensePoseResultsMplContourVisualizer +except ModuleNotFoundError: + logger = logging.getLogger(__name__) + logger.warning("Could not import matplotlib, using custom contour visualizer") + DensePoseResultsContourVisualizer = DensePoseResultsCustomContourVisualizer + + +class DensePoseResultsFineSegmentationVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + super(DensePoseResultsFineSegmentationVisualizer, self).__init__( + _extract_i_from_iuvarr, + _extract_i_from_iuvarr, + inplace, + cmap, + alpha, + val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS, + ) + + +class DensePoseResultsUVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + super(DensePoseResultsUVisualizer, self).__init__( + _extract_u_from_iuvarr, _extract_i_from_iuvarr, inplace, cmap, alpha, val_scale=1.0 + ) + + +class DensePoseResultsVVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + super(DensePoseResultsVVisualizer, self).__init__( + _extract_v_from_iuvarr, _extract_i_from_iuvarr, inplace, cmap, alpha, val_scale=1.0 + ) + + +class DensePoseOutputsFineSegmentationVisualizer(object): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, + cmap=cmap, + val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS, + alpha=alpha, + ) + + def visualize( + self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]] + ) -> Image: + if dp_output_with_bboxes is None: + return image_bgr + densepose_output, bboxes_xywh = dp_output_with_bboxes + S = densepose_output.S + I = densepose_output.I # noqa + U = densepose_output.U + V = densepose_output.V + N = S.size(0) 
+ assert N == I.size( + 0 + ), "densepose outputs S {} and I {}" " should have equal first dim size".format( + S.size(), I.size() + ) + assert N == U.size( + 0 + ), "densepose outputs S {} and U {}" " should have equal first dim size".format( + S.size(), U.size() + ) + assert N == V.size( + 0 + ), "densepose outputs S {} and V {}" " should have equal first dim size".format( + S.size(), V.size() + ) + assert N == len( + bboxes_xywh + ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format( + len(bboxes_xywh), N + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + matrix = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[matrix > 0] = 1 + bbox_xywh = bboxes_xywh[n] + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh) + return image_bgr + + +class DensePoseOutputsUVisualizer(object): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha + ) + + def visualize( + self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]] + ) -> Image: + if dp_output_with_bboxes is None: + return image_bgr + densepose_output, bboxes_xywh = dp_output_with_bboxes + assert isinstance( + densepose_output, DensePoseOutput + ), "DensePoseOutput expected, {} encountered".format(type(densepose_output)) + S = densepose_output.S + I = densepose_output.I # noqa + U = densepose_output.U + V = densepose_output.V + N = S.size(0) + assert N == I.size( + 0 + ), "densepose outputs S {} and I {}" " should have equal first dim size".format( + S.size(), I.size() + ) + assert N == U.size( + 0 + ), "densepose outputs S {} and U {}" " should have equal first dim size".format( + S.size(), U.size() + ) + assert N == V.size( + 0 + ), "densepose outputs S {} and V {}" " should have equal first dim size".format( + S.size(), V.size() + ) + assert N == len( + bboxes_xywh + ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format( + len(bboxes_xywh), N + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + segmentation = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(segmentation.shape, dtype=np.uint8) + mask[segmentation > 0] = 1 + Un = U[n].cpu().numpy().astype(np.float32) + Uvis = np.zeros(segmentation.shape, dtype=np.float32) + for partId in range(Un.shape[0]): + Uvis[segmentation == partId] = Un[partId][segmentation == partId].clip(0, 1) * 255 + bbox_xywh = bboxes_xywh[n] + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Uvis, bbox_xywh) + return image_bgr + + +class DensePoseOutputsVVisualizer(object): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha + ) + + def visualize( + self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]] + ) -> Image: + if dp_output_with_bboxes is None: + return image_bgr + densepose_output, bboxes_xywh = dp_output_with_bboxes + assert isinstance( + densepose_output, DensePoseOutput + ), "DensePoseOutput expected, {} encountered".format(type(densepose_output)) + S = densepose_output.S + I = densepose_output.I # noqa + U = densepose_output.U + V = densepose_output.V + N = S.size(0) + assert N == I.size( + 0 + ), "densepose outputs S {} and I {}" " should have equal first dim 
size".format( + S.size(), I.size() + ) + assert N == U.size( + 0 + ), "densepose outputs S {} and U {}" " should have equal first dim size".format( + S.size(), U.size() + ) + assert N == V.size( + 0 + ), "densepose outputs S {} and V {}" " should have equal first dim size".format( + S.size(), V.size() + ) + assert N == len( + bboxes_xywh + ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format( + len(bboxes_xywh), N + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + segmentation = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(segmentation.shape, dtype=np.uint8) + mask[segmentation > 0] = 1 + Vn = V[n].cpu().numpy().astype(np.float32) + Vvis = np.zeros(segmentation.shape, dtype=np.float32) + for partId in range(Vn.size(0)): + Vvis[segmentation == partId] = Vn[partId][segmentation == partId].clip(0, 1) * 255 + bbox_xywh = bboxes_xywh[n] + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Vvis, bbox_xywh) + return image_bgr + + +class DensePoseDataCoarseSegmentationVisualizer(object): + """ + Visualizer for ground truth segmentation + """ + + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, + cmap=cmap, + val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS, + alpha=alpha, + ) + + def visualize( + self, + image_bgr: Image, + bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]], + ) -> Image: + if bbox_densepose_datas is None: + return image_bgr + for bbox_xywh, densepose_data in zip(*bbox_densepose_datas): + matrix = densepose_data.segm.numpy() + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[matrix > 0] = 1 + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy()) + return image_bgr + + +class DensePoseDataPointsVisualizer(object): + def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA): + self.points_visualizer = PointsVisualizer() + self.densepose_data_to_value_fn = densepose_data_to_value_fn + self.cmap = cmap + + def visualize( + self, + image_bgr: Image, + bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]], + ) -> Image: + if bbox_densepose_datas is None: + return image_bgr + for bbox_xywh, densepose_data in zip(*bbox_densepose_datas): + x0, y0, w, h = bbox_xywh.numpy() + x = densepose_data.x.numpy() * w / 255.0 + x0 + y = densepose_data.y.numpy() * h / 255.0 + y0 + pts_xy = zip(x, y) + if self.densepose_data_to_value_fn is None: + image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy) + else: + v = self.densepose_data_to_value_fn(densepose_data) + img_colors_bgr = cv2.applyColorMap(v, self.cmap) + colors_bgr = [ + [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr + ] + image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr) + return image_bgr + + +def _densepose_data_u_for_cmap(densepose_data): + u = np.clip(densepose_data.u.numpy(), 0, 1) * 255.0 + return u.astype(np.uint8) + + +def _densepose_data_v_for_cmap(densepose_data): + v = np.clip(densepose_data.v.numpy(), 0, 1) * 255.0 + return v.astype(np.uint8) + + +def _densepose_data_i_for_cmap(densepose_data): + i = ( + np.clip(densepose_data.i.numpy(), 0.0, DensePoseDataRelative.N_PART_LABELS) + * 255.0 + / DensePoseDataRelative.N_PART_LABELS + ) + return i.astype(np.uint8) + + +class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer): + def __init__(self): + 
super(DensePoseDataPointsUVisualizer, self).__init__( + densepose_data_to_value_fn=_densepose_data_u_for_cmap + ) + + +class DensePoseDataPointsVVisualizer(DensePoseDataPointsVisualizer): + def __init__(self): + super(DensePoseDataPointsVVisualizer, self).__init__( + densepose_data_to_value_fn=_densepose_data_v_for_cmap + ) + + +class DensePoseDataPointsIVisualizer(DensePoseDataPointsVisualizer): + def __init__(self): + super(DensePoseDataPointsIVisualizer, self).__init__( + densepose_data_to_value_fn=_densepose_data_i_for_cmap + ) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/extractor.py b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..b715a4451e096d6d6c086f9bcf60f92d2ae692f8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/densepose/vis/extractor.py @@ -0,0 +1,152 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +from typing import Sequence +import torch + +from detectron2.layers.nms import batched_nms +from detectron2.structures.instances import Instances + +from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer +from densepose.vis.densepose import DensePoseResultsVisualizer + +from .base import CompoundVisualizer + +Scores = Sequence[float] + + +def extract_scores_from_instances(instances: Instances, select=None): + if instances.has("scores"): + return instances.scores if select is None else instances.scores[select] + return None + + +def extract_boxes_xywh_from_instances(instances: Instances, select=None): + if instances.has("pred_boxes"): + boxes_xywh = instances.pred_boxes.tensor.clone() + boxes_xywh[:, 2] -= boxes_xywh[:, 0] + boxes_xywh[:, 3] -= boxes_xywh[:, 1] + return boxes_xywh if select is None else boxes_xywh[select] + return None + + +def create_extractor(visualizer: object): + """ + Create an extractor for the provided visualizer + """ + if isinstance(visualizer, CompoundVisualizer): + extractors = [create_extractor(v) for v in visualizer.visualizers] + return CompoundExtractor(extractors) + elif isinstance(visualizer, DensePoseResultsVisualizer): + return DensePoseResultExtractor() + elif isinstance(visualizer, ScoredBoundingBoxVisualizer): + return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances]) + elif isinstance(visualizer, BoundingBoxVisualizer): + return extract_boxes_xywh_from_instances + else: + logger = logging.getLogger(__name__) + logger.error(f"Could not create extractor for {visualizer}") + return None + + +class BoundingBoxExtractor(object): + """ + Extracts bounding boxes from instances + """ + + def __call__(self, instances: Instances): + boxes_xywh = extract_boxes_xywh_from_instances(instances) + return boxes_xywh + + +class ScoredBoundingBoxExtractor(object): + """ + Extracts bounding boxes from instances + """ + + def __call__(self, instances: Instances, select=None): + scores = extract_scores_from_instances(instances) + boxes_xywh = extract_boxes_xywh_from_instances(instances) + if (scores is None) or (boxes_xywh is None): + return (boxes_xywh, scores) + if select is not None: + scores = scores[select] + boxes_xywh = boxes_xywh[select] + return (boxes_xywh, scores) + + +class DensePoseResultExtractor(object): + """ + Extracts DensePose result from instances + """ + + def __call__(self, instances: Instances, select=None): + boxes_xywh = 
extract_boxes_xywh_from_instances(instances) + if instances.has("pred_densepose") and (boxes_xywh is not None): + dpout = instances.pred_densepose + if select is not None: + dpout = dpout[select] + boxes_xywh = boxes_xywh[select] + return dpout.to_result(boxes_xywh) + else: + return None + + +class CompoundExtractor(object): + """ + Extracts data for CompoundVisualizer + """ + + def __init__(self, extractors): + self.extractors = extractors + + def __call__(self, instances: Instances, select=None): + datas = [] + for extractor in self.extractors: + data = extractor(instances, select) + datas.append(data) + return datas + + +class NmsFilteredExtractor(object): + """ + Extracts data in the format accepted by NmsFilteredVisualizer + """ + + def __init__(self, extractor, iou_threshold): + self.extractor = extractor + self.iou_threshold = iou_threshold + + def __call__(self, instances: Instances, select=None): + scores = extract_scores_from_instances(instances) + boxes_xywh = extract_boxes_xywh_from_instances(instances) + if boxes_xywh is None: + return None + select_local_idx = batched_nms( + boxes_xywh, + scores, + torch.zeros(len(scores), dtype=torch.int32), + iou_threshold=self.iou_threshold, + ).squeeze() + select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device) + select_local[select_local_idx] = True + select = select_local if select is None else (select & select_local) + return self.extractor(instances, select=select) + + +class ScoreThresholdedExtractor(object): + """ + Extracts data in the format accepted by ScoreThresholdedVisualizer + """ + + def __init__(self, extractor, min_score): + self.extractor = extractor + self.min_score = min_score + + def __call__(self, instances: Instances, select=None): + scores = extract_scores_from_instances(instances) + if scores is None: + return None + select_local = scores > self.min_score + select = select_local if select is None else (select & select_local) + data = self.extractor(instances, select=select) + return data diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/dev/README.md b/preprocess/mhp_extension/detectron2/projects/DensePose/dev/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e3a94b67ed4b4d0c2934f074802cd00f3660f9a9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/dev/README.md @@ -0,0 +1,7 @@ + +## Some scripts for developers to use, include: + +- `run_instant_tests.sh`: run training for a few iterations. +- `run_inference_tests.sh`: run inference on a small dataset. +- `../../dev/linter.sh`: lint the codebase before commit +- `../../dev/parse_results.sh`: parse results from log file. diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/dev/run_inference_tests.sh b/preprocess/mhp_extension/detectron2/projects/DensePose/dev/run_inference_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..34f47d5a07a90c411e830c98a346845fa618f836 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/dev/run_inference_tests.sh @@ -0,0 +1,33 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +BIN="python train_net.py" +OUTPUT="inference_test_output" +NUM_GPUS=2 +IMS_PER_GPU=2 +IMS_PER_BATCH=$(( NUM_GPUS * IMS_PER_GPU )) + +CFG_LIST=( "${@:1}" ) + +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN \ + --eval-only \ + --num-gpus $NUM_GPUS \ + --config-file "$cfg" \ + OUTPUT_DIR "$OUTPUT" \ + SOLVER.IMS_PER_BATCH $IMS_PER_BATCH + rm -rf $OUTPUT +done + diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh b/preprocess/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..a53785180974a70bce7fdb0c9da4024166efd596 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/dev/run_instant_tests.sh @@ -0,0 +1,28 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +BIN="python train_net.py" +OUTPUT="instant_test_output" +NUM_GPUS=2 +SOLVER_IMS_PER_BATCH=$((NUM_GPUS * 2)) + +CFG_LIST=( "${@:1}" ) +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ + SOLVER.IMS_PER_BATCH $SOLVER_IMS_PER_BATCH \ + OUTPUT_DIR "$OUTPUT" + rm -rf "$OUTPUT" +done + diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/doc/GETTING_STARTED.md b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/GETTING_STARTED.md new file mode 100644 index 0000000000000000000000000000000000000000..a6bcbedee42835c99fa5aa1110309329dfbff6f0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/GETTING_STARTED.md @@ -0,0 +1,58 @@ +# Getting Started with DensePose + +## Inference with Pre-trained Models + +1. Pick a model and its config file from [Model Zoo](MODEL_ZOO.md), for example [densepose_rcnn_R_50_FPN_s1x.yaml](../configs/densepose_rcnn_R_50_FPN_s1x.yaml) +2. Run the [Apply Net](TOOL_APPLY_NET.md) tool to visualize the results or save the to disk. For example, to use contour visualization for DensePose, one can run: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml densepose_rcnn_R_50_FPN_s1x.pkl image.jpg dp_contour,bbox --output image_densepose_contour.png +``` +Please see [Apply Net](TOOL_APPLY_NET.md) for more details on the tool. + +## Training + +First, prepare the [dataset](http://densepose.org/#dataset) into the following structure under the directory you'll run training scripts: +
+datasets/coco/
+  annotations/
+    densepose_{train,minival,valminusminival}2014.json
+    densepose_minival2014_100.json   (optional, for testing only)
+  {train,val}2014/
+    # image files that are mentioned in the corresponding json
+
+ +To train a model one can use the [train_net.py](../train_net.py) script. +This script was used to train all DensePose models in [Model Zoo](MODEL_ZOO.md). +For example, to launch end-to-end DensePose-RCNN training with ResNet-50 FPN backbone +on 8 GPUs following the s1x schedule, one can run +```bash +python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml --num-gpus 8 +``` +The configs are made for 8-GPU training. To train on 1 GPU, one can apply the +[linear learning rate scaling rule](https://arxiv.org/abs/1706.02677): +```bash +python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \ + SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 +``` + +## Evaluation + +Model testing can be done in the same way as training, except for an additional flag `--eval-only` and +model location specification through `MODEL.WEIGHTS model.pth` in the command line +```bash +python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x.yaml \ + --eval-only MODEL.WEIGHTS model.pth +``` + +## Tools + +We provide tools which allow one to: + - easily view DensePose annotated data in a dataset; + - perform DensePose inference on a set of images; + - visualize DensePose model results; + +`query_db` is a tool to print or visualize DensePose data in a dataset. +Please refer to [Query DB](TOOL_QUERY_DB.md) for more details on this tool + +`apply_net` is a tool to print or visualize DensePose results. +Please refer to [Apply Net](TOOL_APPLY_NET.md) for more details on this tool diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/doc/MODEL_ZOO.md b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/MODEL_ZOO.md new file mode 100644 index 0000000000000000000000000000000000000000..c26308417de03efea3872b44fec43c74ead529e9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/MODEL_ZOO.md @@ -0,0 +1,277 @@ +# Model Zoo and Baselines + +# Introduction + +We provide baselines trained with Detectron2 DensePose. The corresponding +configuration files can be found in the [configs](../configs) directory. +All models were trained on COCO `train2014` + `valminusminival2014` and +evaluated on COCO `minival2014`. For the details on common settings in which +baselines were trained, please check [Detectron 2 Model Zoo](../../../MODEL_ZOO.md). + +## License + +All models available for download through this document are licensed under the +[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/) + +## COCO DensePose Baselines with DensePose-RCNN + +### Legacy Models + +Baselines trained using schedules from [Gรผler et al, 2018](https://arxiv.org/pdf/1802.00434.pdf) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | dp. AP GPS | dp. AP GPSm | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R_50_FPN_s1x_legacy | s1x | 0.307 | 0.051 | 3.2 | 58.1 | 52.1 | 54.9 | 164832157 | model \| metrics |
+| R_101_FPN_s1x_legacy | s1x | 0.390 | 0.063 | 4.3 | 59.5 | 53.2 | 56.1 | 164832182 | model \| metrics |
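+
+For example, a checkpoint from the table above can be evaluated with the [train_net.py](../train_net.py) script, as described in [Getting Started](GETTING_STARTED.md). This is only a sketch: the config name and the local checkpoint path `model_final.pkl` are placeholders, substitute the files matching the model you downloaded:
+```bash
+python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml \
+    --eval-only MODEL.WEIGHTS model_final.pkl
+```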
+ +### Improved Baselines, Original Fully Convolutional Head + +These models use an improved training schedule and Panoptic FPN head from [Kirillov et al, 2019](https://arxiv.org/abs/1901.02446). +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | dp. AP GPS | dp. AP GPSm | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R_50_FPN_s1x | s1x | 0.359 | 0.066 | 4.5 | 61.2 | 63.7 | 65.3 | 165712039 | model \| metrics |
+| R_101_FPN_s1x | s1x | 0.428 | 0.079 | 5.8 | 62.3 | 64.5 | 66.4 | 165712084 | model \| metrics |
+ +### Improved Baselines, DeepLabV3 Head + +These models use an improved training schedule, Panoptic FPN head from [Kirillov et al, 2019](https://arxiv.org/abs/1901.02446) and DeepLabV3 head from [Chen et al, 2017](https://arxiv.org/abs/1706.05587). +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | dp. AP GPS | dp. AP GPSm | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R_50_FPN_DL_s1x | s1x | 0.392 | 0.070 | 6.7 | 61.1 | 65.6 | 66.8 | 165712097 | model \| metrics |
+| R_101_FPN_DL_s1x | s1x | 0.478 | 0.083 | 7.0 | 62.3 | 66.3 | 67.7 | 165712116 | model \| metrics |
+ +### Baselines with Confidence Estimation + +These models perform additional estimation of confidence in regressed UV coordinates, along the lines of [Neverova et al., 2019](https://papers.nips.cc/paper/8378-correlated-uncertainty-for-learning-dense-correspondences-from-noisy-labels). +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | dp. AP GPS | dp. AP GPSm | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R_50_FPN_WC1_s1x | s1x | 0.353 | 0.064 | 4.6 | 60.5 | 64.2 | 65.6 | 173862049 | model \| metrics |
+| R_50_FPN_WC2_s1x | s1x | 0.364 | 0.066 | 4.8 | 60.7 | 64.2 | 65.7 | 173861455 | model \| metrics |
+| R_50_FPN_DL_WC1_s1x | s1x | 0.397 | 0.068 | 6.7 | 61.1 | 65.8 | 67.1 | 173067973 | model \| metrics |
+| R_50_FPN_DL_WC2_s1x | s1x | 0.410 | 0.070 | 6.8 | 60.8 | 65.6 | 66.7 | 173859335 | model \| metrics |
+| R_101_FPN_WC1_s1x | s1x | 0.435 | 0.076 | 5.7 | 62.5 | 64.9 | 66.5 | 171402969 | model \| metrics |
+| R_101_FPN_WC2_s1x | s1x | 0.450 | 0.078 | 5.7 | 62.3 | 64.8 | 66.6 | 173860702 | model \| metrics |
+| R_101_FPN_DL_WC1_s1x | s1x | 0.479 | 0.081 | 7.9 | 62.0 | 66.2 | 67.4 | 173858525 | model \| metrics |
+| R_101_FPN_DL_WC2_s1x | s1x | 0.491 | 0.082 | 7.6 | 61.7 | 65.9 | 67.3 | 173294801 | model \| metrics |
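+
+As a sketch of how these variants are selected, training uses the same entry point as the other baselines and only the config file changes (the config name below is an assumption; pick the one matching the row of interest):
+```bash
+python train_net.py --config-file configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml --num-gpus 8
+```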
+ +## Old Baselines + +It is still possible to use some baselines from [DensePose 1](https://github.com/facebookresearch/DensePose). +Below are evaluation metrics for the baselines recomputed in the current framework: + +| Model | bbox AP | AP | AP50 | AP75 | APm |APl | +|-----|-----|-----|--- |--- |--- |--- | +| [`ResNet50_FPN_s1x-e2e`](https://dl.fbaipublicfiles.com/densepose/DensePose_ResNet50_FPN_s1x-e2e.pkl) | 54.673 | 48.894 | 84.963 | 50.717 | 43.132 | 50.433 | +| [`ResNet101_FPN_s1x-e2e`](https://dl.fbaipublicfiles.com/densepose/DensePose_ResNet101_FPN_s1x-e2e.pkl) | 56.032 | 51.088 | 86.250 | 55.057 | 46.542 | 52.563 | + +Note: these scores are close, but not strictly equal to the ones reported in the [DensePose 1 Model Zoo](https://github.com/facebookresearch/DensePose/blob/master/MODEL_ZOO.md), +which is due to small incompatibilities between the frameworks. diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/doc/TOOL_APPLY_NET.md b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/TOOL_APPLY_NET.md new file mode 100644 index 0000000000000000000000000000000000000000..f5cf2579a83811e4b192b3688f241b570f62bcb5 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/TOOL_APPLY_NET.md @@ -0,0 +1,130 @@ +# Apply Net + +`apply_net` is a tool to print or visualize DensePose results on a set of images. +It has two modes: `dump` to save DensePose model results to a pickle file +and `show` to visualize them on images. + +## Dump Mode + +The general command form is: +```bash +python apply_net.py dump [-h] [-v] [--output ] +``` + +There are three mandatory arguments: + - ``, configuration file for a given model; + - ``, model file with trained parameters + - ``, input image file name, pattern or folder + +One can additionally provide `--output` argument to define the output file name, +which defaults to `output.pkl`. + + +Examples: + +1. Dump results of a DensePose model with ResNet-50 FPN backbone for images + in a folder `images` to file `dump.pkl`: +```bash +python apply_net.py dump configs/densepose_rcnn_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl images --output dump.pkl -v +``` + +2. Dump results of a DensePose model with ResNet-50 FPN backbone for images + with file name matching a pattern `image*.jpg` to file `results.pkl`: +```bash +python apply_net.py dump configs/densepose_rcnn_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl "image*.jpg" --output results.pkl -v +``` + +If you want to load the pickle file generated by the above command: +``` +# make sure DensePose is in your PYTHONPATH, or use the following line to add it: +sys.path.append("/your_detectron2_path/detectron2_repo/projects/DensePose/") + +f = open('/your_result_path/results.pkl', 'rb') +data = pickle.load(f) +``` + +The file `results.pkl` contains the list of results per image, for each image the result is a dictionary: +``` +data: [{'file_name': '/your_path/image1.jpg', + 'scores': tensor([0.9884]), + 'pred_boxes_XYXY': tensor([[ 69.6114, 0.0000, 706.9797, 706.0000]]), + 'pred_densepose': }, + {'file_name': '/your_path/image2.jpg', + 'scores': tensor([0.9999, 0.5373, 0.3991]), + 'pred_boxes_XYXY': tensor([[ 59.5734, 7.7535, 579.9311, 932.3619], + [612.9418, 686.1254, 612.9999, 704.6053], + [164.5081, 407.4034, 598.3944, 920.4266]]), + 'pred_densepose': }] +``` + +We can use the following code, to parse the outputs of the first +detected instance on the first image. 
+``` +img_id, instance_id = 0, 0 # Look at the first image and the first detected instance +bbox_xyxy = data[img_id]['pred_boxes_XYXY'][instance_id] +result_encoded = data[img_id]['pred_densepose'].results[instance_id] +iuv_arr = DensePoseResult.decode_png_data(*result_encoded) +``` +The array `bbox_xyxy` contains (x0, y0, x1, y1) of the bounding box. + +The shape of `iuv_arr` is `[3, H, W]`, where (H, W) is the shape of the bounding box. +- `iuv_arr[0,:,:]`: The patch index of image points, indicating which of the 24 surface patches the point is on. +- `iuv_arr[1,:,:]`: The U-coordinate value of image points. +- `iuv_arr[2,:,:]`: The V-coordinate value of image points. + + +## Visualization Mode + +The general command form is: +```bash +python apply_net.py show [-h] [-v] [--min_score ] [--nms_thresh ] [--output ] +``` + +There are four mandatory arguments: + - ``, configuration file for a given model; + - ``, model file with trained parameters + - ``, input image file name, pattern or folder + - ``, visualizations specifier; currently available visualizations are: + * `bbox` - bounding boxes of detected persons; + * `dp_segm` - segmentation masks for detected persons; + * `dp_u` - each body part is colored according to the estimated values of the + U coordinate in part parameterization; + * `dp_v` - each body part is colored according to the estimated values of the + V coordinate in part parameterization; + * `dp_contour` - plots contours with color-coded U and V coordinates + + +One can additionally provide the following optional arguments: + - `--min_score` to only show detections with sufficient scores that are not lower than provided value + - `--nms_thresh` to additionally apply non-maximum suppression to detections at a given threshold + - `--output` to define visualization file name template, which defaults to `output.png`. + To distinguish output file names for different images, the tool appends 1-based entry index, + e.g. output.0001.png, output.0002.png, etc... + + +The following examples show how to output results of a DensePose model +with ResNet-50 FPN backbone using different visualizations for image `image.jpg`: + +1. Show bounding box and segmentation: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg bbox,dp_segm -v +``` +![Bounding Box + Segmentation Visualization](images/res_bbox_dp_segm.jpg) + +2. Show bounding box and estimated U coordinates for body parts: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg bbox,dp_u -v +``` +![Bounding Box + U Coordinate Visualization](images/res_bbox_dp_u.jpg) + +3. Show bounding box and estimated V coordinates for body parts: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg bbox,dp_v -v +``` +![Bounding Box + V Coordinate Visualization](images/res_bbox_dp_v.jpg) + +4. 
Show bounding box and estimated U and V coordinates via contour plots: +```bash +python apply_net.py show configs/densepose_rcnn_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg dp_contour,bbox -v +``` +![Bounding Box + Contour Visualization](images/res_bbox_dp_contour.jpg) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md new file mode 100644 index 0000000000000000000000000000000000000000..b0a764b8740597c6af634127b80b53d28913726f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md @@ -0,0 +1,105 @@ + +# Query Dataset + +`query_db` is a tool to print or visualize DensePose data from a dataset. +It has two modes: `print` and `show` to output dataset entries to standard +output or to visualize them on images. + +## Print Mode + +The general command form is: +```bash +python query_db.py print [-h] [-v] [--max-entries N] +``` + +There are two mandatory arguments: + - ``, DensePose dataset specification, from which to select + the entries (e.g. `densepose_coco_2014_train`). + - ``, dataset entry selector which can be a single specification, + or a comma-separated list of specifications of the form + `field[:type]=value` for exact match with the value + or `field[:type]=min-max` for a range of values + +One can additionally limit the maximum number of entries to output +by providing `--max-entries` argument. + +Examples: + +1. Output at most 10 first entries from the `densepose_coco_2014_train` dataset: +```bash +python query_db.py print densepose_coco_2014_train \* --max-entries 10 -v +``` + +2. Output all entries with `file_name` equal to `COCO_train2014_000000000036.jpg`: +```bash +python query_db.py print densepose_coco_2014_train file_name=COCO_train2014_000000000036.jpg -v +``` + +3. Output all entries with `image_id` between 36 and 156: +```bash +python query_db.py print densepose_coco_2014_train image_id:int=36-156 -v +``` + +## Visualization Mode + +The general command form is: +```bash +python query_db.py show [-h] [-v] [--max-entries N] [--output ] +``` + +There are three mandatory arguments: + - ``, DensePose dataset specification, from which to select + the entries (e.g. `densepose_coco_2014_train`). + - ``, dataset entry selector which can be a single specification, + or a comma-separated list of specifications of the form + `field[:type]=value` for exact match with the value + or `field[:type]=min-max` for a range of values + - ``, visualizations specifier; currently available visualizations are: + * `bbox` - bounding boxes of annotated persons; + * `dp_i` - annotated points colored according to the containing part; + * `dp_pts` - annotated points in green color; + * `dp_segm` - segmentation masks for annotated persons; + * `dp_u` - annotated points colored according to their U coordinate in part parameterization; + * `dp_v` - annotated points colored according to their V coordinate in part parameterization; + +One can additionally provide one of the two optional arguments: + - `--max_entries` to limit the maximum number of entries to visualize + - `--output` to provide visualization file name template, which defaults + to `output.png`. To distinguish file names for different dataset + entries, the tool appends 1-based entry index to the output file name, + e.g. output.0001.png, output.0002.png, etc. 
+ +The following examples show how to output different visualizations for image with `id = 322` +from `densepose_coco_2014_train` dataset: + +1. Show bounding box and segmentation: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_segm -v +``` +![Bounding Box + Segmentation Visualization](images/vis_bbox_dp_segm.jpg) + +2. Show bounding box and points colored according to the containing part: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_i -v +``` +![Bounding Box + Point Label Visualization](images/vis_bbox_dp_i.jpg) + +3. Show bounding box and annotated points in green color: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_segm -v +``` +![Bounding Box + Point Visualization](images/vis_bbox_dp_pts.jpg) + +4. Show bounding box and annotated points colored according to their U coordinate in part parameterization: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_u -v +``` +![Bounding Box + Point U Visualization](images/vis_bbox_dp_u.jpg) + +5. Show bounding box and annotated points colored according to their V coordinate in part parameterization: +```bash +python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_v -v +``` +![Bounding Box + Point V Visualization](images/vis_bbox_dp_v.jpg) + + diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/query_db.py b/preprocess/mhp_extension/detectron2/projects/DensePose/query_db.py new file mode 100755 index 0000000000000000000000000000000000000000..6d3ea2ffdff7559a8cd78df95a5fb7f308f33e1e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/query_db.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import argparse +import logging +import os +import sys +from timeit import default_timer as timer +from typing import Any, ClassVar, Dict, List +import torch +from fvcore.common.file_io import PathManager + +from detectron2.data.catalog import DatasetCatalog +from detectron2.utils.logger import setup_logger + +from densepose.data.structures import DensePoseDataRelative +from densepose.utils.dbhelper import EntrySelector +from densepose.utils.logger import verbosity_to_level +from densepose.vis.base import CompoundVisualizer +from densepose.vis.bounding_box import BoundingBoxVisualizer +from densepose.vis.densepose import ( + DensePoseDataCoarseSegmentationVisualizer, + DensePoseDataPointsIVisualizer, + DensePoseDataPointsUVisualizer, + DensePoseDataPointsVisualizer, + DensePoseDataPointsVVisualizer, +) + +DOC = """Query DB - a tool to print / visualize data from a database +""" + +LOGGER_NAME = "query_db" + +logger = logging.getLogger(LOGGER_NAME) + +_ACTION_REGISTRY: Dict[str, "Action"] = {} + + +class Action(object): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + parser.add_argument( + "-v", + "--verbosity", + action="count", + help="Verbose mode. Multiple -v options increase the verbosity.", + ) + + +def register_action(cls: type): + """ + Decorator for action classes to automate action registration + """ + global _ACTION_REGISTRY + _ACTION_REGISTRY[cls.COMMAND] = cls + return cls + + +class EntrywiseAction(Action): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(EntrywiseAction, cls).add_arguments(parser) + parser.add_argument( + "dataset", metavar="", help="Dataset name (e.g. 
densepose_coco_2014_train)" + ) + parser.add_argument( + "selector", + metavar="", + help="Dataset entry selector in the form field1[:type]=value1[," + "field2[:type]=value_min-value_max...] which selects all " + "entries from the dataset that satisfy the constraints", + ) + parser.add_argument( + "--max-entries", metavar="N", help="Maximum number of entries to process", type=int + ) + + @classmethod + def execute(cls: type, args: argparse.Namespace): + dataset = setup_dataset(args.dataset) + entry_selector = EntrySelector.from_string(args.selector) + context = cls.create_context(args) + if args.max_entries is not None: + for _, entry in zip(range(args.max_entries), dataset): + if entry_selector(entry): + cls.execute_on_entry(entry, context) + else: + for entry in dataset: + if entry_selector(entry): + cls.execute_on_entry(entry, context) + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + context = {} + return context + + +@register_action +class PrintAction(EntrywiseAction): + """ + Print action that outputs selected entries to stdout + """ + + COMMAND: ClassVar[str] = "print" + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Output selected entries to stdout. ") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(PrintAction, cls).add_arguments(parser) + + @classmethod + def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]): + import pprint + + printer = pprint.PrettyPrinter(indent=2, width=200, compact=True) + printer.pprint(entry) + + +@register_action +class ShowAction(EntrywiseAction): + """ + Show action that visualizes selected entries on an image + """ + + COMMAND: ClassVar[str] = "show" + VISUALIZERS: ClassVar[Dict[str, object]] = { + "dp_segm": DensePoseDataCoarseSegmentationVisualizer(), + "dp_i": DensePoseDataPointsIVisualizer(), + "dp_u": DensePoseDataPointsUVisualizer(), + "dp_v": DensePoseDataPointsVVisualizer(), + "dp_pts": DensePoseDataPointsVisualizer(), + "bbox": BoundingBoxVisualizer(), + } + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(ShowAction, cls).add_arguments(parser) + parser.add_argument( + "visualizations", + metavar="", + help="Comma separated list of visualizations, possible values: " + "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))), + ) + parser.add_argument( + "--output", + metavar="", + default="output.png", + help="File name to save output to", + ) + + @classmethod + def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]): + import cv2 + import numpy as np + + image_fpath = PathManager.get_local_path(entry["file_name"]) + image = cv2.imread(image_fpath, cv2.IMREAD_GRAYSCALE) + image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) + datas = cls._extract_data_for_visualizers_from_entry(context["vis_specs"], entry) + visualizer = context["visualizer"] + image_vis = visualizer.visualize(image, datas) + entry_idx = context["entry_idx"] + 1 + out_fname = cls._get_out_fname(entry_idx, context["out_fname"]) + cv2.imwrite(out_fname, image_vis) + logger.info(f"Output saved to {out_fname}") + 
context["entry_idx"] += 1 + + @classmethod + def _get_out_fname(cls: type, entry_idx: int, fname_base: str): + base, ext = os.path.splitext(fname_base) + return base + ".{0:04d}".format(entry_idx) + ext + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + vis_specs = args.visualizations.split(",") + visualizers = [] + for vis_spec in vis_specs: + vis = cls.VISUALIZERS[vis_spec] + visualizers.append(vis) + context = { + "vis_specs": vis_specs, + "visualizer": CompoundVisualizer(visualizers), + "out_fname": args.output, + "entry_idx": 0, + } + return context + + @classmethod + def _extract_data_for_visualizers_from_entry( + cls: type, vis_specs: List[str], entry: Dict[str, Any] + ): + dp_list = [] + bbox_list = [] + for annotation in entry["annotations"]: + is_valid, _ = DensePoseDataRelative.validate_annotation(annotation) + if not is_valid: + continue + bbox = torch.as_tensor(annotation["bbox"]) + bbox_list.append(bbox) + dp_data = DensePoseDataRelative(annotation) + dp_list.append(dp_data) + datas = [] + for vis_spec in vis_specs: + datas.append(bbox_list if "bbox" == vis_spec else (bbox_list, dp_list)) + return datas + + +def setup_dataset(dataset_name): + logger.info("Loading dataset {}".format(dataset_name)) + start = timer() + dataset = DatasetCatalog.get(dataset_name) + stop = timer() + logger.info("Loaded dataset {} in {:.3f}s".format(dataset_name, stop - start)) + return dataset + + +def create_argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=DOC, + formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120), + ) + parser.set_defaults(func=lambda _: parser.print_help(sys.stdout)) + subparsers = parser.add_subparsers(title="Actions") + for _, action in _ACTION_REGISTRY.items(): + action.add_parser(subparsers) + return parser + + +def main(): + parser = create_argument_parser() + args = parser.parse_args() + verbosity = args.verbosity if hasattr(args, "verbosity") else None + global logger + logger = setup_logger(name=LOGGER_NAME) + logger.setLevel(verbosity_to_level(verbosity)) + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/tests/common.py b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/common.py new file mode 100644 index 0000000000000000000000000000000000000000..13bf0dd3ca113e0756d3023e36272675c6b972f9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/common.py @@ -0,0 +1,110 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ +import os +import torch + +from detectron2.config import get_cfg +from detectron2.engine import default_setup +from detectron2.modeling import build_model + +from densepose import add_dataset_category_config, add_densepose_config + +_BASE_CONFIG_DIR = "configs" +_EVOLUTION_CONFIG_SUB_DIR = "evolution" +_QUICK_SCHEDULES_CONFIG_SUB_DIR = "quick_schedules" +_BASE_CONFIG_FILE_PREFIX = "Base-" +_CONFIG_FILE_EXT = ".yaml" + + +def _get_base_config_dir(): + """ + Return the base directory for configurations + """ + return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", _BASE_CONFIG_DIR) + + +def _get_evolution_config_dir(): + """ + Return the base directory for evolution configurations + """ + return os.path.join(_get_base_config_dir(), _EVOLUTION_CONFIG_SUB_DIR) + + +def _get_quick_schedules_config_dir(): + """ + Return the base directory for quick schedules configurations + """ + return os.path.join(_get_base_config_dir(), _QUICK_SCHEDULES_CONFIG_SUB_DIR) + + +def _collect_config_files(config_dir): + """ + Collect all configuration files (i.e. densepose_*.yaml) directly in the specified directory + """ + start = _get_base_config_dir() + results = [] + for entry in os.listdir(config_dir): + path = os.path.join(config_dir, entry) + if not os.path.isfile(path): + continue + _, ext = os.path.splitext(entry) + if ext != _CONFIG_FILE_EXT: + continue + if entry.startswith(_BASE_CONFIG_FILE_PREFIX): + continue + config_file = os.path.relpath(path, start) + results.append(config_file) + return results + + +def get_config_files(): + """ + Get all the configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_base_config_dir()) + + +def get_evolution_config_files(): + """ + Get all the evolution configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_evolution_config_dir()) + + +def get_quick_schedules_config_files(): + """ + Get all the quick schedules configuration files (relative to the base configuration directory) + """ + return _collect_config_files(_get_quick_schedules_config_dir()) + + +def _get_model_config(config_file): + """ + Load and return the configuration from the specified file (relative to the base configuration + directory) + """ + cfg = get_cfg() + add_dataset_category_config(cfg) + add_densepose_config(cfg) + path = os.path.join(_get_base_config_dir(), config_file) + cfg.merge_from_file(path) + if not torch.cuda.is_available(): + cfg.MODEL_DEVICE = "cpu" + return cfg + + +def get_model(config_file): + """ + Get the model from the specified file (relative to the base configuration directory) + """ + cfg = _get_model_config(config_file) + return build_model(cfg) + + +def setup(config_file): + """ + Setup the configuration from the specified file (relative to the base configuration directory) + """ + cfg = _get_model_config(config_file) + cfg.freeze() + default_setup(cfg, {}) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_model_e2e.py b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_model_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..eed131080547d84185c1d33913014a2c977b119f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_model_e2e.py @@ -0,0 +1,43 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ +import unittest +import torch + +from detectron2.structures import BitMasks, Boxes, Instances + +from .common import get_model + + +# TODO(plabatut): Modularize detectron2 tests and re-use +def make_model_inputs(image, instances=None): + if instances is None: + return {"image": image} + + return {"image": image, "instances": instances} + + +def make_empty_instances(h, w): + instances = Instances((h, w)) + instances.gt_boxes = Boxes(torch.rand(0, 4)) + instances.gt_classes = torch.tensor([]).to(dtype=torch.int64) + instances.gt_masks = BitMasks(torch.rand(0, h, w)) + return instances + + +class ModelE2ETest(unittest.TestCase): + CONFIG_PATH = "" + + def setUp(self): + self.model = get_model(self.CONFIG_PATH) + + def _test_eval(self, sizes): + inputs = [make_model_inputs(torch.rand(3, size[0], size[1])) for size in sizes] + self.model.eval() + self.model(inputs) + + +class DensePoseRCNNE2ETest(ModelE2ETest): + CONFIG_PATH = "densepose_rcnn_R_101_FPN_s1x.yaml" + + def test_empty_data(self): + self._test_eval([(200, 250), (200, 249)]) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_setup.py b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_setup.py new file mode 100644 index 0000000000000000000000000000000000000000..96827f14b3a71d571c2109791233b5bcf7ef35f8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_setup.py @@ -0,0 +1,30 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import unittest + +from .common import ( + get_config_files, + get_evolution_config_files, + get_quick_schedules_config_files, + setup, +) + + +class TestSetup(unittest.TestCase): + def _test_setup(self, config_file): + setup(config_file) + + def test_setup_configs(self): + config_files = get_config_files() + for config_file in config_files: + self._test_setup(config_file) + + def test_setup_evolution_configs(self): + config_files = get_evolution_config_files() + for config_file in config_files: + self._test_setup(config_file) + + def test_setup_quick_schedules_configs(self): + config_files = get_quick_schedules_config_files() + for config_file in config_files: + self._test_setup(config_file) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_structures.py b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..ad97c23a43a9a72db566ec272b10f5bbda874695 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/tests/test_structures.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ +import unittest + +from densepose.data.structures import normalized_coords_transform + + +class TestStructures(unittest.TestCase): + def test_normalized_coords_transform(self): + bbox = (32, 24, 288, 216) + x0, y0, w, h = bbox + xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h + f = normalized_coords_transform(*bbox) + # Top-left + expected_p, actual_p = (-1, -1), f((xmin, ymin)) + self.assertEqual(expected_p, actual_p) + # Top-right + expected_p, actual_p = (1, -1), f((xmax, ymin)) + self.assertEqual(expected_p, actual_p) + # Bottom-left + expected_p, actual_p = (-1, 1), f((xmin, ymax)) + self.assertEqual(expected_p, actual_p) + # Bottom-right + expected_p, actual_p = (1, 1), f((xmax, ymax)) + self.assertEqual(expected_p, actual_p) diff --git a/preprocess/mhp_extension/detectron2/projects/DensePose/train_net.py b/preprocess/mhp_extension/detectron2/projects/DensePose/train_net.py new file mode 100755 index 0000000000000000000000000000000000000000..9d2e7bd8b92964f752620d92e7acb662c0b86fa7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/DensePose/train_net.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +DensePose Training Script. + +This script is similar to the training script in detectron2/tools. + +It is an example of how a user might use detectron2 for a new project. +""" + +import logging +import os +from collections import OrderedDict +from fvcore.common.file_io import PathManager + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import CfgNode, get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch +from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results +from detectron2.modeling import DatasetMapperTTA +from detectron2.utils.logger import setup_logger + +from densepose import ( + DensePoseCOCOEvaluator, + DensePoseGeneralizedRCNNWithTTA, + add_dataset_category_config, + add_densepose_config, + load_from_cfg, +) +from densepose.data import DatasetMapper, build_detection_test_loader, build_detection_train_loader + + +class Trainer(DefaultTrainer): + @classmethod + def build_evaluator(cls, cfg: CfgNode, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)] + if cfg.MODEL.DENSEPOSE_ON: + evaluators.append(DensePoseCOCOEvaluator(dataset_name, True, output_folder)) + return DatasetEvaluators(evaluators) + + @classmethod + def build_test_loader(cls, cfg: CfgNode, dataset_name): + return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False)) + + @classmethod + def build_train_loader(cls, cfg: CfgNode): + return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True)) + + @classmethod + def test_with_TTA(cls, cfg: CfgNode, model): + logger = logging.getLogger("detectron2.trainer") + # In the end of training, run an evaluation with TTA + # Only support some R-CNN models. 
+ logger.info("Running inference with test-time augmentation ...") + transform_data = load_from_cfg(cfg) + model = DensePoseGeneralizedRCNNWithTTA(cfg, model, transform_data, DatasetMapperTTA(cfg)) + evaluators = [ + cls.build_evaluator( + cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") + ) + for name in cfg.DATASETS.TEST + ] + res = cls.test(cfg, model, evaluators) + res = OrderedDict({k + "_TTA": v for k, v in res.items()}) + return res + + +def setup(args): + cfg = get_cfg() + add_dataset_category_config(cfg) + add_densepose_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + # Setup logger for "densepose" module + setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose") + return cfg + + +def main(args): + cfg = setup(args) + # disable strict kwargs checking: allow one to specify path handle + # hints through kwargs, like timeout in DP evaluation + PathManager.set_strict_kwargs_checking(False) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + if cfg.TEST.AUG.ENABLED: + trainer.register_hooks( + [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] + ) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/README.md b/preprocess/mhp_extension/detectron2/projects/PointRend/README.md new file mode 100644 index 0000000000000000000000000000000000000000..443736fff35cc49e02807a7b941da19c0bdfa666 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/README.md @@ -0,0 +1,135 @@ +# PointRend: Image Segmentation as Rendering + +Alexander Kirillov, Yuxin Wu, Kaiming He, Ross Girshick + +[[`arXiv`](https://arxiv.org/abs/1912.08193)] [[`BibTeX`](#CitingPointRend)] + +
+ +In this repository, we release code for PointRend in Detectron2. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. + +## Installation +Install Detectron 2 following [INSTALL.md](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). You are ready to go! + +## Quick start and visualization + +This [Colab Notebook](https://colab.research.google.com/drive/1isGPL5h5_cKoPPhVL9XhMokRtHDvmMVL) tutorial contains examples of PointRend usage and visualizations of its point sampling stages. + +## Training + +To train a model with 8 GPUs run: +```bash +cd /path/to/detectron2/projects/PointRend +python train_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly: +```bash +cd /path/to/detectron2/projects/PointRend +python train_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint +``` + +# Pretrained Models + +## Instance Segmentation +#### COCO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Mask head | Backbone | lr sched | Output resolution | mask AP | mask AP* | model id | download |
|:---------:|:--------:|:--------:|:-----------------:|:-------:|:--------:|:--------:|:--------:|
| PointRend | R50-FPN | 1× | 224×224 | 36.2 | 39.7 | 164254221 | model \| metrics |
| PointRend | R50-FPN | 3× | 224×224 | 38.3 | 41.6 | 164955410 | model \| metrics |
+ +AP* is COCO mask AP evaluated against the higher-quality LVIS annotations; see the paper for details. Run `python detectron2/datasets/prepare_cocofied_lvis.py` to prepare GT files for AP* evaluation. Since LVIS annotations are not exhaustive `lvis-api` and not `cocoapi` should be used to evaluate AP*. + +#### Cityscapes +Cityscapes model is trained with ImageNet pretraining. + + + + + + + + + + + + + + + + + + + + +
| Mask head | Backbone | lr sched | Output resolution | mask AP | model id | download |
|:---------:|:--------:|:--------:|:-----------------:|:-------:|:--------:|:--------:|
| PointRend | R50-FPN | 1× | 224×224 | 35.9 | 164255101 | model \| metrics |
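
The instance segmentation checkpoints above can be loaded with detectron2's standard predictor. The snippet below is a minimal sketch, not part of the upstream project: it assumes the COCO config from this directory, a locally downloaded checkpoint path (placeholder), and the `add_pointrend_config` helper from the `point_rend` package included in this project.

```python
# Minimal inference sketch for a pretrained PointRend instance segmentation model.
# The checkpoint and image paths are placeholders; substitute your own.
import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

from point_rend import add_pointrend_config

cfg = get_cfg()
add_pointrend_config(cfg)  # register PointRend-specific config keys before merging
cfg.merge_from_file("configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml")
cfg.MODEL.WEIGHTS = "/path/to/model_checkpoint"
predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))
masks = outputs["instances"].pred_masks  # (N, H, W) boolean masks, one per detected instance
```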
+ + +## Semantic Segmentation + +#### Cityscapes +Cityscapes model is trained with ImageNet pretraining. + + + + + + + + + + + + + + + + + + +
| Method | Backbone | Output resolution | mIoU | model id | download |
|:------:|:--------:|:-----------------:|:----:|:--------:|:--------:|
| SemanticFPN + PointRend | R101-FPN | 1024×2048 | 78.6 | 186480235 | model \| metrics |
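
The semantic segmentation checkpoint can be used through the same predictor interface; the sketch below makes the same assumptions as the instance segmentation example above (config from this directory, placeholder checkpoint path) and reads the per-pixel prediction.

```python
# Sketch: semantic segmentation inference with PointRend; paths are placeholders.
import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

from point_rend import add_pointrend_config

cfg = get_cfg()
add_pointrend_config(cfg)
cfg.merge_from_file("configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml")
cfg.MODEL.WEIGHTS = "/path/to/model_checkpoint"
predictor = DefaultPredictor(cfg)
sem_seg = predictor(cv2.imread("input.jpg"))["sem_seg"]  # (num_classes, H, W) logits
labels = sem_seg.argmax(dim=0)                           # per-pixel class ids
```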
+ +## Citing PointRend + +If you use PointRend, please use the following BibTeX entry. + +```BibTeX +@InProceedings{kirillov2019pointrend, + title={{PointRend}: Image Segmentation as Rendering}, + author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick}, + journal={ArXiv:1912.08193}, + year={2019} +} +``` diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d3917188afe04c7626e539f7c0bc28df4118a290 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/Base-PointRend-RCNN-FPN.yaml @@ -0,0 +1,21 @@ +_BASE_: "../../../../configs/Base-RCNN-FPN.yaml" +MODEL: + ROI_HEADS: + NAME: "PointRendROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + ROI_BOX_HEAD: + TRAIN_ON_PRED_BOXES: True + ROI_MASK_HEAD: + NAME: "CoarseMaskHead" + FC_DIM: 1024 + NUM_FC: 2 + OUTPUT_SIDE_RESOLUTION: 7 + IN_FEATURES: ["p2"] + POINT_HEAD_ON: True + POINT_HEAD: + FC_DIM: 256 + NUM_FC: 3 + IN_FEATURES: ["p2"] +INPUT: + # PointRend for instance segmenation does not work with "polygon" mask_format. + MASK_FORMAT: "bitmask" diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c23dbe1c8463d16f6be110ef49acd8c6142c3aa8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes.yaml @@ -0,0 +1,23 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + MASK_ON: true + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 8 + POINT_HEAD: + NUM_CLASSES: 8 +DATASETS: + TEST: ("cityscapes_fine_instance_seg_val",) + TRAIN: ("cityscapes_fine_instance_seg_train",) +SOLVER: + BASE_LR: 0.01 + IMS_PER_BATCH: 8 + MAX_ITER: 24000 + STEPS: (18000,) +INPUT: + MAX_SIZE_TEST: 2048 + MAX_SIZE_TRAIN: 2048 + MIN_SIZE_TEST: 1024 + MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9fc573bf544de8610a65a7cda2a0df57aec0abf --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml @@ -0,0 +1,9 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + MASK_ON: true + RESNETS: + DEPTH: 50 +# To add COCO AP evaluation against the higher-quality LVIS annotations. 
+# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f013f32aeb4122f50c5c4030e9738d9d474ba34 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml @@ -0,0 +1,13 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + MASK_ON: true + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 +# To add COCO AP evaluation against the higher-quality LVIS annotations. +# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") + diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_parsing.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_parsing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4af81dab7b47371454a273ecf962ea47ac21d49 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_parsing.yaml @@ -0,0 +1,20 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + MASK_ON: true + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1 + POINT_HEAD: + NUM_CLASSES: 1 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 + IMS_PER_BATCH: 1 +# To add COCO AP evaluation against the higher-quality LVIS annotations. +# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") +DATASETS: + TRAIN: ("CIHP_train",) + TEST: ("CIHP_val",) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e52d82e39400f08f86a6e1a92e3e1c471403624 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml @@ -0,0 +1,28 @@ +_BASE_: Base-PointRend-RCNN-FPN.yaml +MODEL: + WEIGHTS: "./X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + MASK_ON: true + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1 + POINT_HEAD: + NUM_CLASSES: 1 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 + IMS_PER_BATCH: 1 +# To add COCO AP evaluation against the higher-quality LVIS annotations. 
+# DATASETS: +# TEST: ("coco_2017_val", "lvis_v0.5_val_cocofied") +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 +DATASETS: + TRAIN: ("CIHP_train",) + TEST: ("CIHP_val",) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml new file mode 100644 index 0000000000000000000000000000000000000000..00562a92363dc47c6ebe9ef8bebb89cd5e5b8502 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/Base-PointRend-Semantic-FPN.yaml @@ -0,0 +1,19 @@ +_BASE_: "../../../../configs/Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + BACKBONE: + FREEZE_AT: 0 + SEM_SEG_HEAD: + NAME: "PointRendSemSegHead" + POINT_HEAD: + NUM_CLASSES: 54 + FC_DIM: 256 + NUM_FC: 3 + IN_FEATURES: ["p2"] + TRAIN_NUM_POINTS: 1024 + SUBDIVISION_STEPS: 2 + SUBDIVISION_NUM_POINTS: 8192 + COARSE_SEM_SEG_HEAD_NAME: "SemSegFPNHead" +DATASETS: + TRAIN: ("coco_2017_train_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4965b068c11bc568317ea3cc8c83d8c44234b936 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_101_FPN_1x_cityscapes.yaml @@ -0,0 +1,33 @@ +_BASE_: Base-PointRend-Semantic-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-101.pkl + RESNETS: + DEPTH: 101 + SEM_SEG_HEAD: + NUM_CLASSES: 19 + POINT_HEAD: + NUM_CLASSES: 19 + TRAIN_NUM_POINTS: 2048 + SUBDIVISION_NUM_POINTS: 8192 +DATASETS: + TRAIN: ("cityscapes_fine_sem_seg_train",) + TEST: ("cityscapes_fine_sem_seg_val",) +SOLVER: + BASE_LR: 0.01 + STEPS: (40000, 55000) + MAX_ITER: 65000 + IMS_PER_BATCH: 32 +INPUT: + MIN_SIZE_TRAIN: (512, 768, 1024, 1280, 1536, 1792, 2048) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 4096 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (512, 1024) + SINGLE_CATEGORY_MAX_AREA: 0.75 + COLOR_AUG_SSD: True +DATALOADER: + NUM_WORKERS: 16 diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_50_FPN_1x_coco.yaml b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_50_FPN_1x_coco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7948bd808ea9888b20d1e118abf6bb630c485f39 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/configs/SemanticSegmentation/pointrend_semantic_R_50_FPN_1x_coco.yaml @@ -0,0 +1,5 @@ +_BASE_: Base-PointRend-Semantic-FPN.yaml +MODEL: + WEIGHTS: detectron2://ImageNetPretrained/MSRA/R-50.pkl + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/finetune_net.py b/preprocess/mhp_extension/detectron2/projects/PointRend/finetune_net.py new file mode 100755 index 0000000000000000000000000000000000000000..b99baf939b3788a2ee9e339beaa503cfa4d6a14f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/finetune_net.py @@ 
-0,0 +1,139 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +PointRend Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os +import torch + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + DatasetEvaluators, + LVISEvaluator, + SemSegEvaluator, + verify_results, +) + +from point_rend import SemSegDatasetMapper, add_pointrend_config + +os.environ['CUDA_VISIBLE_DEVICES'] = '4' +# Register Custom Dataset +from detectron2.data.datasets import register_coco_instances +register_coco_instances("CIHP_train", {}, "/data03/v_xuyunqiu/multi_parsing/data/msrcnn_finetune_annotations/CIHP_train.json", "/data03/v_xuyunqiu/data/instance-level_human_parsing/Training/Images") +register_coco_instances("CIHP_val", {}, "/data03/v_xuyunqiu/multi_parsing/data/msrcnn_finetune_annotations/CIHP_val.json", "/data03/v_xuyunqiu/data/instance-level_human_parsing/Validation/Images") + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains a number pre-defined logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if evaluator_type == "coco": + return COCOEvaluator(dataset_name, cfg, True, output_folder) + if evaluator_type == "sem_seg": + return SemSegEvaluator( + dataset_name, + distributed=True, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + output_dir=output_folder, + ) + if evaluator_type == "cityscapes_instance": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." 
+ return CityscapesSemSegEvaluator(dataset_name) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def build_train_loader(cls, cfg): + if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE: + mapper = SemSegDatasetMapper(cfg, True) + else: + mapper = None + return build_detection_train_loader(cfg, mapper=mapper) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_pointrend_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/logs/hadoop.kylin.libdfs.log b/preprocess/mhp_extension/detectron2/projects/PointRend/logs/hadoop.kylin.libdfs.log new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/__init__.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4020fe0a287f87cb3bd2487b5b40b7e1e2647aa8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .config import add_pointrend_config +from .coarse_mask_head import CoarseMaskHead +from .roi_heads import PointRendROIHeads +from .dataset_mapper import SemSegDatasetMapper +from .semantic_seg import PointRendSemSegHead diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/coarse_mask_head.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/coarse_mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1cffb4c985dc3121a863eb7b378965b718a19d --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/coarse_mask_head.py @@ -0,0 +1,92 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ShapeSpec +from detectron2.modeling import ROI_MASK_HEAD_REGISTRY + + +@ROI_MASK_HEAD_REGISTRY.register() +class CoarseMaskHead(nn.Module): + """ + A mask head with fully connected layers. Given pooled features it first reduces channels and + spatial dimensions with conv layers and then uses FC layers to predict coarse masks analogously + to the standard box head. 
+ """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + conv_dim: the output dimension of the conv layers + fc_dim: the feature dimenstion of the FC layers + num_fc: the number of FC layers + output_side_resolution: side resolution of the output square mask prediction + """ + super(CoarseMaskHead, self).__init__() + + # fmt: off + self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES + conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM + self.fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM + num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC + self.output_side_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION + self.input_channels = input_shape.channels + self.input_h = input_shape.height + self.input_w = input_shape.width + # fmt: on + + self.conv_layers = [] + if self.input_channels > conv_dim: + self.reduce_channel_dim_conv = Conv2d( + self.input_channels, + conv_dim, + kernel_size=1, + stride=1, + padding=0, + bias=True, + activation=F.relu, + ) + self.conv_layers.append(self.reduce_channel_dim_conv) + + self.reduce_spatial_dim_conv = Conv2d( + conv_dim, conv_dim, kernel_size=2, stride=2, padding=0, bias=True, activation=F.relu + ) + self.conv_layers.append(self.reduce_spatial_dim_conv) + + input_dim = conv_dim * self.input_h * self.input_w + input_dim //= 4 + + self.fcs = [] + for k in range(num_fc): + fc = nn.Linear(input_dim, self.fc_dim) + self.add_module("coarse_mask_fc{}".format(k + 1), fc) + self.fcs.append(fc) + input_dim = self.fc_dim + + output_dim = self.num_classes * self.output_side_resolution * self.output_side_resolution + + self.prediction = nn.Linear(self.fc_dim, output_dim) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.prediction.weight, std=0.001) + nn.init.constant_(self.prediction.bias, 0) + + for layer in self.conv_layers: + weight_init.c2_msra_fill(layer) + for layer in self.fcs: + weight_init.c2_xavier_fill(layer) + + def forward(self, x): + # unlike BaseMaskRCNNHead, this head only outputs intermediate + # features, because the features will be used later by PointHead. + N = x.shape[0] + x = x.view(N, self.input_channels, self.input_h, self.input_w) + for layer in self.conv_layers: + x = layer(x) + x = torch.flatten(x, start_dim=1) + for layer in self.fcs: + x = F.relu(layer(x)) + return self.prediction(x).view( + N, self.num_classes, self.output_side_resolution, self.output_side_resolution + ) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/color_augmentation.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/color_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..27344c470adac143186e61c8a5b0f39900937634 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/color_augmentation.py @@ -0,0 +1,98 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import random +import cv2 +from fvcore.transforms.transform import Transform + + +class ColorAugSSDTransform(Transform): + """ + A color related data augmentation used in Single Shot Multibox Detector (SSD). + + Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, + Scott Reed, Cheng-Yang Fu, Alexander C. Berg. + SSD: Single Shot MultiBox Detector. ECCV 2016. 
+ + Implementation based on: + + https://github.com/weiliu89/caffe/blob + /4817bf8b4200b35ada8ed0dc378dceaf38c539e4 + /src/caffe/util/im_transforms.cpp + + https://github.com/chainer/chainercv/blob + /7159616642e0be7c5b3ef380b848e16b7e99355b/chainercv + /links/model/ssd/transforms.py + """ + + def __init__( + self, + img_format, + brightness_delta=32, + contrast_low=0.5, + contrast_high=1.5, + saturation_low=0.5, + saturation_high=1.5, + hue_delta=18, + ): + super().__init__() + assert img_format in ["BGR", "RGB"] + self.is_rgb = img_format == "RGB" + del img_format + self._set_attributes(locals()) + + def apply_coords(self, coords): + return coords + + def apply_segmentation(self, segmentation): + return segmentation + + def apply_image(self, img, interp=None): + if self.is_rgb: + img = img[:, :, [2, 1, 0]] + img = self.brightness(img) + if random.randrange(2): + img = self.contrast(img) + img = self.saturation(img) + img = self.hue(img) + else: + img = self.saturation(img) + img = self.hue(img) + img = self.contrast(img) + if self.is_rgb: + img = img[:, :, [2, 1, 0]] + return img + + def convert(self, img, alpha=1, beta=0): + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def brightness(self, img): + if random.randrange(2): + return self.convert( + img, beta=random.uniform(-self.brightness_delta, self.brightness_delta) + ) + return img + + def contrast(self, img): + if random.randrange(2): + return self.convert(img, alpha=random.uniform(self.contrast_low, self.contrast_high)) + return img + + def saturation(self, img): + if random.randrange(2): + img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + img[:, :, 1] = self.convert( + img[:, :, 1], alpha=random.uniform(self.saturation_low, self.saturation_high) + ) + return cv2.cvtColor(img, cv2.COLOR_HSV2BGR) + return img + + def hue(self, img): + if random.randrange(2): + img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + img[:, :, 0] = ( + img[:, :, 0].astype(int) + random.randint(-self.hue_delta, self.hue_delta) + ) % 180 + return cv2.cvtColor(img, cv2.COLOR_HSV2BGR) + return img diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/config.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/config.py new file mode 100644 index 0000000000000000000000000000000000000000..74f63672bba7cd25679054b19ff87254a0e24974 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/config.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from detectron2.config import CfgNode as CN + + +def add_pointrend_config(cfg): + """ + Add config for PointRend. + """ + # We retry random cropping until no single category in semantic segmentation GT occupies more + # than `SINGLE_CATEGORY_MAX_AREA` part of the crop. + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0 + # Color augmentatition from SSD paper for semantic segmentation model during training. + cfg.INPUT.COLOR_AUG_SSD = False + + # Names of the input feature maps to be used by a coarse mask head. + cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES = ("p2",) + cfg.MODEL.ROI_MASK_HEAD.FC_DIM = 1024 + cfg.MODEL.ROI_MASK_HEAD.NUM_FC = 2 + # The side size of a coarse mask head prediction. + cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION = 7 + # True if point head is used. 
+ cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON = False + + cfg.MODEL.POINT_HEAD = CN() + cfg.MODEL.POINT_HEAD.NAME = "StandardPointHead" + cfg.MODEL.POINT_HEAD.NUM_CLASSES = 80 + # Names of the input feature maps to be used by a mask point head. + cfg.MODEL.POINT_HEAD.IN_FEATURES = ("p2",) + # Number of points sampled during training for a mask point head. + cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS = 14 * 14 + # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the + # original paper. + cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO = 3 + # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in + # the original paper. + cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO = 0.75 + # Number of subdivision steps during inference. + cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS = 5 + # Maximum number of points selected at each subdivision step (N). + cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS = 28 * 28 + cfg.MODEL.POINT_HEAD.FC_DIM = 256 + cfg.MODEL.POINT_HEAD.NUM_FC = 3 + cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK = False + # If True, then coarse prediction features are used as inout for each layer in PointRend's MLP. + cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER = True + cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME = "SemSegFPNHead" diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/dataset_mapper.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/dataset_mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..76b64ee79b679741d547c5d1ffca55ac756051ae --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/dataset_mapper.py @@ -0,0 +1,121 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging +import numpy as np +import torch +from fvcore.common.file_io import PathManager +from fvcore.transforms.transform import CropTransform +from PIL import Image + +from detectron2.data import detection_utils as utils +from detectron2.data import transforms as T + +from .color_augmentation import ColorAugSSDTransform + +""" +This file contains the mapping that's applied to "dataset dicts" for semantic segmentation models. +Unlike the default DatasetMapper this mapper uses cropping as the last transformation. +""" + +__all__ = ["SemSegDatasetMapper"] + + +class SemSegDatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by semantic segmentation models. + + The callable currently does the following: + + 1. Read the image from "file_name" + 2. Applies geometric transforms to the image and annotation + 3. Find and applies suitable cropping to the image and annotation + 4. 
Prepare image and annotation to Tensors + """ + + def __init__(self, cfg, is_train=True): + if cfg.INPUT.CROP.ENABLED and is_train: + self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE) + logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen)) + else: + self.crop_gen = None + + self.tfm_gens = utils.build_transform_gen(cfg, is_train) + + if cfg.INPUT.COLOR_AUG_SSD: + self.tfm_gens.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT)) + logging.getLogger(__name__).info( + "Color augmnetation used in training: " + str(self.tfm_gens[-1]) + ) + + # fmt: off + self.img_format = cfg.INPUT.FORMAT + self.single_category_max_area = cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA + self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE + # fmt: on + + self.is_train = is_train + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format=self.img_format) + utils.check_image_size(dataset_dict, image) + assert "sem_seg_file_name" in dataset_dict + + image, transforms = T.apply_transform_gens(self.tfm_gens, image) + if self.is_train: + with PathManager.open(dataset_dict.pop("sem_seg_file_name"), "rb") as f: + sem_seg_gt = Image.open(f) + sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8") + sem_seg_gt = transforms.apply_segmentation(sem_seg_gt) + if self.crop_gen: + image, sem_seg_gt = crop_transform( + image, + sem_seg_gt, + self.crop_gen, + self.single_category_max_area, + self.ignore_value, + ) + dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) + + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + + if not self.is_train: + dataset_dict.pop("sem_seg_file_name", None) + return dataset_dict + + return dataset_dict + + +def crop_transform(image, sem_seg, crop_gen, single_category_max_area, ignore_value): + """ + Find a cropping window such that no single category occupies more than + `single_category_max_area` in `sem_seg`. The function retries random cropping 10 times max. 
+ """ + if single_category_max_area >= 1.0: + crop_tfm = crop_gen.get_transform(image) + sem_seg_temp = crop_tfm.apply_segmentation(sem_seg) + else: + h, w = sem_seg.shape + crop_size = crop_gen.get_crop_size((h, w)) + for _ in range(10): + y0 = np.random.randint(h - crop_size[0] + 1) + x0 = np.random.randint(w - crop_size[1] + 1) + sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]] + labels, cnt = np.unique(sem_seg_temp, return_counts=True) + cnt = cnt[labels != ignore_value] + if len(cnt) > 1 and np.max(cnt) / np.sum(cnt) < single_category_max_area: + break + crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0]) + image = crop_tfm.apply_image(image) + return image, sem_seg_temp diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/point_features.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/point_features.py new file mode 100644 index 0000000000000000000000000000000000000000..320a33de8505572eedcfa94d355bf2772ab75528 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/point_features.py @@ -0,0 +1,216 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch +from torch.nn import functional as F + +from detectron2.layers import cat +from detectron2.structures import Boxes + + +""" +Shape shorthand in this module: + + N: minibatch dimension size, i.e. the number of RoIs for instance segmenation or the + number of images for semantic segmenation. + R: number of ROIs, combined over all images, in the minibatch + P: number of points +""" + + +def point_sample(input, point_coords, **kwargs): + """ + A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors. + Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside + [0, 1] x [0, 1] square. + + Args: + input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid. + point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains + [0, 1] x [0, 1] normalized point coordinates. + + Returns: + output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains + features for points in `point_coords`. The features are obtained via bilinear + interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`. + """ + add_dim = False + if point_coords.dim() == 3: + add_dim = True + point_coords = point_coords.unsqueeze(2) + output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs) + if add_dim: + output = output.squeeze(3) + return output + + +def generate_regular_grid_point_coords(R, side_size, device): + """ + Generate regular square grid of points in [0, 1] x [0, 1] coordinate space. + + Args: + R (int): The number of grids to sample, one for each region. + side_size (int): The side size of the regular grid. + device (torch.device): Desired device of returned tensor. + + Returns: + (Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates + for the regular grids. + """ + aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device) + r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False) + return r.view(1, -1, 2).expand(R, -1, -1) + + +def get_uncertain_point_coords_with_randomness( + coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio +): + """ + Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. 
The unceratinties + are calculated for each point using 'uncertainty_func' function that takes point's logit + prediction as input. + See PointRend paper for details. + + Args: + coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for + class-specific or class-agnostic prediction. + uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that + contains logit predictions for P points and returns their uncertainties as a Tensor of + shape (N, 1, P). + num_points (int): The number of points P to sample. + oversample_ratio (int): Oversampling parameter. + importance_sample_ratio (float): Ratio of points that are sampled via importnace sampling. + + Returns: + point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P + sampled points. + """ + assert oversample_ratio >= 1 + assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0 + num_boxes = coarse_logits.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device) + point_logits = point_sample(coarse_logits, point_coords, align_corners=False) + # It is crucial to calculate uncertainty based on the sampled prediction value for the points. + # Calculating uncertainties of the coarse predictions first and sampling them for points leads + # to incorrect results. + # To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between + # two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value. + # However, if we calculate uncertainties for the coarse predictions first, + # both will have -1 uncertainty, and the sampled point will get -1 uncertainty. + point_uncertainties = uncertainty_func(point_logits) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange(num_boxes, dtype=torch.long, device=coarse_logits.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + num_boxes, num_uncertain_points, 2 + ) + if num_random_points > 0: + point_coords = cat( + [ + point_coords, + torch.rand(num_boxes, num_random_points, 2, device=coarse_logits.device), + ], + dim=1, + ) + return point_coords + + +def get_uncertain_point_coords_on_grid(uncertainty_map, num_points): + """ + Find `num_points` most uncertain points from `uncertainty_map` grid. + + Args: + uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty + values for a set of points on a regular H x W grid. + num_points (int): The number of points P to select. + + Returns: + point_indices (Tensor): A tensor of shape (N, P) that contains indices from + [0, H x W) of the most uncertain points. + point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized + coordinates of the most uncertain points from the H x W grid. 
+ """ + R, _, H, W = uncertainty_map.shape + h_step = 1.0 / float(H) + w_step = 1.0 / float(W) + + num_points = min(H * W, num_points) + point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1] + point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device) + point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step + point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step + return point_indices, point_coords + + +def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords): + """ + Get features from feature maps in `features_list` that correspond to specific point coordinates + inside each bounding box from `boxes`. + + Args: + features_list (list[Tensor]): A list of feature map tensors to get features from. + feature_scales (list[float]): A list of scales for tensors in `features_list`. + boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all + together. + point_coords (Tensor): A tensor of shape (R, P, 2) that contains + [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. + + Returns: + point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled + from all features maps in feature_list for P sampled points for all R boxes in `boxes`. + point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level + coordinates of P points. + """ + cat_boxes = Boxes.cat(boxes) + num_boxes = [len(b) for b in boxes] + + point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords) + split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes) + + point_features = [] + for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image): + point_features_per_image = [] + for idx_feature, feature_map in enumerate(features_list): + h, w = feature_map.shape[-2:] + scale = torch.tensor([w, h], device=feature_map.device) / feature_scales[idx_feature] + point_coords_scaled = point_coords_wrt_image_per_image / scale + point_features_per_image.append( + point_sample( + feature_map[idx_img].unsqueeze(0), + point_coords_scaled.unsqueeze(0), + align_corners=False, + ) + .squeeze(0) + .transpose(1, 0) + ) + point_features.append(cat(point_features_per_image, dim=1)) + + return cat(point_features, dim=0), point_coords_wrt_image + + +def get_point_coords_wrt_image(boxes_coords, point_coords): + """ + Convert box-normalized [0, 1] x [0, 1] point cooordinates to image-level coordinates. + + Args: + boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding boxes. + coordinates. + point_coords (Tensor): A tensor of shape (R, P, 2) that contains + [0, 1] x [0, 1] box-normalized coordinates of the P sampled points. + + Returns: + point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains + image-normalized coordinates of P sampled points. 
+ """ + with torch.no_grad(): + point_coords_wrt_image = point_coords.clone() + point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * ( + boxes_coords[:, None, 2] - boxes_coords[:, None, 0] + ) + point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * ( + boxes_coords[:, None, 3] - boxes_coords[:, None, 1] + ) + point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0] + point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1] + return point_coords_wrt_image diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/point_head.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/point_head.py new file mode 100644 index 0000000000000000000000000000000000000000..6f35baea064fbee14d9bcd0b57e354f82bf54a8c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/point_head.py @@ -0,0 +1,154 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import ShapeSpec, cat +from detectron2.structures import BitMasks +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +from .point_features import point_sample + +POINT_HEAD_REGISTRY = Registry("POINT_HEAD") +POINT_HEAD_REGISTRY.__doc__ = """ +Registry for point heads, which makes prediction for a given set of per-point features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +def roi_mask_point_loss(mask_logits, instances, points_coord): + """ + Compute the point-based loss for instance segmentation mask predictions. + + Args: + mask_logits (Tensor): A tensor of shape (R, C, P) or (R, 1, P) for class-specific or + class-agnostic, where R is the total number of predicted masks in all images, C is the + number of foreground classes, and P is the number of points sampled for each mask. + The values are logits. + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 correspondence with the `mask_logits`. So, i_th + elememt of the list contains R_i objects and R_1 + ... + R_N is equal to R. + The ground-truth labels (class, box, mask, ...) associated with each instance are stored + in fields. + points_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of + predicted masks and P is the number of points for each mask. The coordinates are in + the image pixel coordinate space, i.e. [0, H] x [0, W]. + Returns: + point_loss (Tensor): A scalar tensor containing the loss. + """ + assert len(instances) == 0 or isinstance( + instances[0].gt_masks, BitMasks + ), "Point head works with GT in 'bitmask' format only. Set INPUT.MASK_FORMAT to 'bitmask'." 
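+    # The block below builds ground-truth point labels by bilinearly sampling each
+    # instance's bit mask at the same image-space point coordinates used for the
+    # predictions (after normalizing them to [0, 1] x [0, 1]); the loss is then a
+    # per-point binary cross-entropy between the selected class's logits and those labels.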
+ with torch.no_grad(): + cls_agnostic_mask = mask_logits.size(1) == 1 + total_num_masks = mask_logits.size(0) + + gt_classes = [] + gt_mask_logits = [] + idx = 0 + for instances_per_image in instances: + if not cls_agnostic_mask: + gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) + gt_classes.append(gt_classes_per_image) + + gt_bit_masks = instances_per_image.gt_masks.tensor + h, w = instances_per_image.gt_masks.image_size + scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device) + points_coord_grid_sample_format = ( + points_coord[idx : idx + len(instances_per_image)] / scale + ) + idx += len(instances_per_image) + gt_mask_logits.append( + point_sample( + gt_bit_masks.to(torch.float32).unsqueeze(1), + points_coord_grid_sample_format, + align_corners=False, + ).squeeze(1) + ) + gt_mask_logits = cat(gt_mask_logits) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if gt_mask_logits.numel() == 0: + return mask_logits.sum() * 0 + + if cls_agnostic_mask: + mask_logits = mask_logits[:, 0] + else: + indices = torch.arange(total_num_masks) + gt_classes = cat(gt_classes, dim=0) + mask_logits = mask_logits[indices, gt_classes] + + # Log the training accuracy (using gt classes and 0.0 threshold for the logits) + mask_accurate = (mask_logits > 0.0) == gt_mask_logits.to(dtype=torch.uint8) + mask_accuracy = mask_accurate.nonzero().size(0) / mask_accurate.numel() + get_event_storage().put_scalar("point_rend/accuracy", mask_accuracy) + + point_loss = F.binary_cross_entropy_with_logits( + mask_logits, gt_mask_logits.to(dtype=torch.float32), reduction="mean" + ) + return point_loss + + +@POINT_HEAD_REGISTRY.register() +class StandardPointHead(nn.Module): + """ + A point head multi-layer perceptron which we model with conv1d layers with kernel 1. The head + takes both fine-grained and coarse prediction features as its input. 
+ """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + fc_dim: the output dimension of each FC layers + num_fc: the number of FC layers + coarse_pred_each_layer: if True, coarse prediction features are concatenated to each + layer's input + """ + super(StandardPointHead, self).__init__() + # fmt: off + num_classes = cfg.MODEL.POINT_HEAD.NUM_CLASSES + fc_dim = cfg.MODEL.POINT_HEAD.FC_DIM + num_fc = cfg.MODEL.POINT_HEAD.NUM_FC + cls_agnostic_mask = cfg.MODEL.POINT_HEAD.CLS_AGNOSTIC_MASK + self.coarse_pred_each_layer = cfg.MODEL.POINT_HEAD.COARSE_PRED_EACH_LAYER + input_channels = input_shape.channels + # fmt: on + + fc_dim_in = input_channels + num_classes + self.fc_layers = [] + for k in range(num_fc): + fc = nn.Conv1d(fc_dim_in, fc_dim, kernel_size=1, stride=1, padding=0, bias=True) + self.add_module("fc{}".format(k + 1), fc) + self.fc_layers.append(fc) + fc_dim_in = fc_dim + fc_dim_in += num_classes if self.coarse_pred_each_layer else 0 + + num_mask_classes = 1 if cls_agnostic_mask else num_classes + self.predictor = nn.Conv1d(fc_dim_in, num_mask_classes, kernel_size=1, stride=1, padding=0) + + for layer in self.fc_layers: + weight_init.c2_msra_fill(layer) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.predictor.weight, std=0.001) + if self.predictor.bias is not None: + nn.init.constant_(self.predictor.bias, 0) + + def forward(self, fine_grained_features, coarse_features): + x = torch.cat((fine_grained_features, coarse_features), dim=1) + for layer in self.fc_layers: + x = F.relu(layer(x)) + if self.coarse_pred_each_layer: + x = cat((x, coarse_features), dim=1) + return self.predictor(x) + + +def build_point_head(cfg, input_channels): + """ + Build a point head defined by `cfg.MODEL.POINT_HEAD.NAME`. + """ + head_name = cfg.MODEL.POINT_HEAD.NAME + return POINT_HEAD_REGISTRY.get(head_name)(cfg, input_channels) diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/roi_heads.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..4f7225bf10544461bbe1e3c777863557f2ad5808 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/roi_heads.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import torch + +from detectron2.layers import ShapeSpec, cat, interpolate +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads +from detectron2.modeling.roi_heads.mask_head import ( + build_mask_head, + mask_rcnn_inference, + mask_rcnn_loss, +) +from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals + +from .point_features import ( + generate_regular_grid_point_coords, + get_uncertain_point_coords_on_grid, + get_uncertain_point_coords_with_randomness, + point_sample, + point_sample_fine_grained_features, +) +from .point_head import build_point_head, roi_mask_point_loss + + +def calculate_uncertainty(logits, classes): + """ + We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the + foreground class in `classes`. + + Args: + logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or + class-agnostic, where R is the total number of predicted masks in all images and C is + the number of foreground classes. The values are logits. 
+ classes (list): A list of length R that contains either predicted of ground truth class + for eash predicted mask. + + Returns: + scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + if logits.shape[1] == 1: + gt_class_logits = logits.clone() + else: + gt_class_logits = logits[ + torch.arange(logits.shape[0], device=logits.device), classes + ].unsqueeze(1) + return -(torch.abs(gt_class_logits)) + + +@ROI_HEADS_REGISTRY.register() +class PointRendROIHeads(StandardROIHeads): + """ + The RoI heads class for PointRend instance segmentation models. + + In this class we redefine the mask head of `StandardROIHeads` leaving all other heads intact. + To avoid namespace conflict with other heads we use names starting from `mask_` for all + variables that correspond to the mask head in the class's namespace. + """ + + def __init__(self, cfg, input_shape): + # TODO use explicit args style + super().__init__(cfg, input_shape) + self._init_mask_head(cfg, input_shape) + + def _init_mask_head(self, cfg, input_shape): + # fmt: off + self.mask_on = cfg.MODEL.MASK_ON + if not self.mask_on: + return + self.mask_coarse_in_features = cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES + self.mask_coarse_side_size = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION + self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()} + # fmt: on + + in_channels = np.sum([input_shape[f].channels for f in self.mask_coarse_in_features]) + self.mask_coarse_head = build_mask_head( + cfg, + ShapeSpec( + channels=in_channels, + width=self.mask_coarse_side_size, + height=self.mask_coarse_side_size, + ), + ) + self._init_point_head(cfg, input_shape) + + def _init_point_head(self, cfg, input_shape): + # fmt: off + self.mask_point_on = cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON + if not self.mask_point_on: + return + assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES + self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES + self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS + self.mask_point_oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO + self.mask_point_importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO + # next two parameters are use in the adaptive subdivions inference procedure + self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS + self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS + # fmt: on + + in_channels = np.sum([input_shape[f].channels for f in self.mask_point_in_features]) + self.mask_point_head = build_point_head( + cfg, ShapeSpec(channels=in_channels, width=1, height=1) + ) + + def _forward_mask(self, features, instances): + """ + Forward logic of the mask prediction branch. + + Args: + features (dict[str, Tensor]): #level input features for mask prediction + instances (list[Instances]): the per-image instances to train/predict masks. + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_masks" and return it. 
+ """ + if not self.mask_on: + return {} if self.training else instances + + if self.training: + proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposal_boxes = [x.proposal_boxes for x in proposals] + mask_coarse_logits = self._forward_mask_coarse(features, proposal_boxes) + + losses = {"loss_mask": mask_rcnn_loss(mask_coarse_logits, proposals)} + losses.update(self._forward_mask_point(features, mask_coarse_logits, proposals)) + return losses + else: + pred_boxes = [x.pred_boxes for x in instances] + mask_coarse_logits = self._forward_mask_coarse(features, pred_boxes) + + mask_logits = self._forward_mask_point(features, mask_coarse_logits, instances) + mask_rcnn_inference(mask_logits, instances) + return instances + + def _forward_mask_coarse(self, features, boxes): + """ + Forward logic of the coarse mask head. + """ + point_coords = generate_regular_grid_point_coords( + np.sum(len(x) for x in boxes), self.mask_coarse_side_size, boxes[0].device + ) + mask_coarse_features_list = [features[k] for k in self.mask_coarse_in_features] + features_scales = [self._feature_scales[k] for k in self.mask_coarse_in_features] + # For regular grids of points, this function is equivalent to `len(features_list)' calls + # of `ROIAlign` (with `SAMPLING_RATIO=2`), and concat the results. + mask_features, _ = point_sample_fine_grained_features( + mask_coarse_features_list, features_scales, boxes, point_coords + ) + return self.mask_coarse_head(mask_features) + + def _forward_mask_point(self, features, mask_coarse_logits, instances): + """ + Forward logic of the mask point head. + """ + if not self.mask_point_on: + return {} if self.training else mask_coarse_logits + + mask_features_list = [features[k] for k in self.mask_point_in_features] + features_scales = [self._feature_scales[k] for k in self.mask_point_in_features] + + if self.training: + proposal_boxes = [x.proposal_boxes for x in instances] + gt_classes = cat([x.gt_classes for x in instances]) + with torch.no_grad(): + point_coords = get_uncertain_point_coords_with_randomness( + mask_coarse_logits, + lambda logits: calculate_uncertainty(logits, gt_classes), + self.mask_point_train_num_points, + self.mask_point_oversample_ratio, + self.mask_point_importance_sample_ratio, + ) + + fine_grained_features, point_coords_wrt_image = point_sample_fine_grained_features( + mask_features_list, features_scales, proposal_boxes, point_coords + ) + coarse_features = point_sample(mask_coarse_logits, point_coords, align_corners=False) + point_logits = self.mask_point_head(fine_grained_features, coarse_features) + return { + "loss_mask_point": roi_mask_point_loss( + point_logits, instances, point_coords_wrt_image + ) + } + else: + pred_boxes = [x.pred_boxes for x in instances] + pred_classes = cat([x.pred_classes for x in instances]) + # The subdivision code will fail with the empty list of boxes + if len(pred_classes) == 0: + return mask_coarse_logits + + mask_logits = mask_coarse_logits.clone() + for subdivions_step in range(self.mask_point_subdivision_steps): + mask_logits = interpolate( + mask_logits, scale_factor=2, mode="bilinear", align_corners=False + ) + # If `mask_point_subdivision_num_points` is larger or equal to the + # resolution of the next step, then we can skip this step + H, W = mask_logits.shape[-2:] + if ( + self.mask_point_subdivision_num_points >= 4 * H * W + and subdivions_step < self.mask_point_subdivision_steps - 1 + ): + continue + uncertainty_map = calculate_uncertainty(mask_logits, pred_classes) + point_indices, 
point_coords = get_uncertain_point_coords_on_grid( + uncertainty_map, self.mask_point_subdivision_num_points + ) + fine_grained_features, _ = point_sample_fine_grained_features( + mask_features_list, features_scales, pred_boxes, point_coords + ) + coarse_features = point_sample( + mask_coarse_logits, point_coords, align_corners=False + ) + point_logits = self.mask_point_head(fine_grained_features, coarse_features) + + # put mask point predictions to the right places on the upsampled grid. + R, C, H, W = mask_logits.shape + point_indices = point_indices.unsqueeze(1).expand(-1, C, -1) + mask_logits = ( + mask_logits.reshape(R, C, H * W) + .scatter_(2, point_indices, point_logits) + .view(R, C, H, W) + ) + return mask_logits diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/semantic_seg.py b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/semantic_seg.py new file mode 100644 index 0000000000000000000000000000000000000000..670a0ea201a6de82f3126171e6320d56f65e1ba7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/point_rend/semantic_seg.py @@ -0,0 +1,134 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +from typing import Dict +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import ShapeSpec, cat +from detectron2.modeling import SEM_SEG_HEADS_REGISTRY + +from .point_features import ( + get_uncertain_point_coords_on_grid, + get_uncertain_point_coords_with_randomness, + point_sample, +) +from .point_head import build_point_head + + +def calculate_uncertainty(sem_seg_logits): + """ + For each location of the prediction `sem_seg_logits` we estimate uncerainty as the + difference between top first and top second predicted logits. + + Args: + mask_logits (Tensor): A tensor of shape (N, C, ...), where N is the minibatch size and + C is the number of foreground classes. The values are logits. + + Returns: + scores (Tensor): A tensor of shape (N, 1, ...) that contains uncertainty scores with + the most uncertain locations having the highest uncertainty score. + """ + top2_scores = torch.topk(sem_seg_logits, k=2, dim=1)[0] + return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) + + +@SEM_SEG_HEADS_REGISTRY.register() +class PointRendSemSegHead(nn.Module): + """ + A semantic segmentation head that combines a head set in `POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME` + and a point head set in `MODEL.POINT_HEAD.NAME`. 
+ """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE + + self.coarse_sem_seg_head = SEM_SEG_HEADS_REGISTRY.get( + cfg.MODEL.POINT_HEAD.COARSE_SEM_SEG_HEAD_NAME + )(cfg, input_shape) + self._init_point_head(cfg, input_shape) + + def _init_point_head(self, cfg, input_shape: Dict[str, ShapeSpec]): + # fmt: off + assert cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES + feature_channels = {k: v.channels for k, v in input_shape.items()} + self.in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES + self.train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS + self.oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO + self.importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO + self.subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS + self.subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS + # fmt: on + + in_channels = np.sum([feature_channels[f] for f in self.in_features]) + self.point_head = build_point_head(cfg, ShapeSpec(channels=in_channels, width=1, height=1)) + + def forward(self, features, targets=None): + coarse_sem_seg_logits = self.coarse_sem_seg_head.layers(features) + + if self.training: + losses = self.coarse_sem_seg_head.losses(coarse_sem_seg_logits, targets) + + with torch.no_grad(): + point_coords = get_uncertain_point_coords_with_randomness( + coarse_sem_seg_logits, + calculate_uncertainty, + self.train_num_points, + self.oversample_ratio, + self.importance_sample_ratio, + ) + coarse_features = point_sample(coarse_sem_seg_logits, point_coords, align_corners=False) + + fine_grained_features = cat( + [ + point_sample(features[in_feature], point_coords, align_corners=False) + for in_feature in self.in_features + ] + ) + point_logits = self.point_head(fine_grained_features, coarse_features) + point_targets = ( + point_sample( + targets.unsqueeze(1).to(torch.float), + point_coords, + mode="nearest", + align_corners=False, + ) + .squeeze(1) + .to(torch.long) + ) + losses["loss_sem_seg_point"] = F.cross_entropy( + point_logits, point_targets, reduction="mean", ignore_index=self.ignore_value + ) + return None, losses + else: + sem_seg_logits = coarse_sem_seg_logits.clone() + for _ in range(self.subdivision_steps): + sem_seg_logits = F.interpolate( + sem_seg_logits, scale_factor=2, mode="bilinear", align_corners=False + ) + uncertainty_map = calculate_uncertainty(sem_seg_logits) + point_indices, point_coords = get_uncertain_point_coords_on_grid( + uncertainty_map, self.subdivision_num_points + ) + fine_grained_features = cat( + [ + point_sample(features[in_feature], point_coords, align_corners=False) + for in_feature in self.in_features + ] + ) + coarse_features = point_sample( + coarse_sem_seg_logits, point_coords, align_corners=False + ) + point_logits = self.point_head(fine_grained_features, coarse_features) + + # put sem seg point predictions to the right places on the upsampled grid. 
+ N, C, H, W = sem_seg_logits.shape + point_indices = point_indices.unsqueeze(1).expand(-1, C, -1) + sem_seg_logits = ( + sem_seg_logits.reshape(N, C, H * W) + .scatter_(2, point_indices, point_logits) + .view(N, C, H, W) + ) + return sem_seg_logits, {} diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/run.sh b/preprocess/mhp_extension/detectron2/projects/PointRend/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..4ee1614b02f784cb46fa65243174ea3588eb1adc --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/run.sh @@ -0,0 +1,2 @@ +python finetune_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_parsing.yaml --num-gpus 1 +#python finetune_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_parsing.yaml --num-gpus 1 diff --git a/preprocess/mhp_extension/detectron2/projects/PointRend/train_net.py b/preprocess/mhp_extension/detectron2/projects/PointRend/train_net.py new file mode 100755 index 0000000000000000000000000000000000000000..7832867ec668c5715c4124c02b72909a318836e8 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/PointRend/train_net.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +PointRend Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os +import torch + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + DatasetEvaluators, + LVISEvaluator, + SemSegEvaluator, + verify_results, +) + +from point_rend import SemSegDatasetMapper, add_pointrend_config + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains a number pre-defined logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if evaluator_type == "coco": + return COCOEvaluator(dataset_name, cfg, True, output_folder) + if evaluator_type == "sem_seg": + return SemSegEvaluator( + dataset_name, + distributed=True, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + output_dir=output_folder, + ) + if evaluator_type == "cityscapes_instance": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." 
+ return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." + return CityscapesSemSegEvaluator(dataset_name) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def build_train_loader(cls, cfg): + if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE: + mapper = SemSegDatasetMapper(cfg, True) + else: + mapper = None + return build_detection_train_loader(cfg, mapper=mapper) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_pointrend_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/projects/README.md b/preprocess/mhp_extension/detectron2/projects/README.md new file mode 100644 index 0000000000000000000000000000000000000000..36263bd87401a98f273831f4ec98fcb5c65d3412 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/README.md @@ -0,0 +1,31 @@ + +Here are a few projects that are built on detectron2. +They are examples of how to use detectron2 as a library, to make your projects more +maintainable. + +## Projects by Facebook + +Note that these are research projects, and therefore may not have the same level +of support or stability of detectron2. + ++ [DensePose: Dense Human Pose Estimation In The Wild](DensePose) ++ [Scale-Aware Trident Networks for Object Detection](TridentNet) ++ [TensorMask: A Foundation for Dense Object Segmentation](TensorMask) ++ [Mesh R-CNN](https://github.com/facebookresearch/meshrcnn) ++ [PointRend: Image Segmentation as Rendering](PointRend) ++ [Momentum Contrast for Unsupervised Visual Representation Learning](https://github.com/facebookresearch/moco/tree/master/detection) + + +## External Projects + +External projects in the community that use detectron2: + + + ++ [VoVNet backbones](https://github.com/youngwanLEE/vovnet-detectron2). ++ [AdelaiDet](https://github.com/aim-uofa/adet), a detection toolbox from the Universtiy of Adelaide. 
++ [CenterMask : Real-Time Anchor-Free Instance Segmentation](https://github.com/youngwanLEE/centermask2) diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/README.md b/preprocess/mhp_extension/detectron2/projects/TensorMask/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6831508b9aea37f0e88bec62c98f2bf2b64240ab --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/README.md @@ -0,0 +1,64 @@ + +# TensorMask in Detectron2 +**A Foundation for Dense Object Segmentation** + +Xinlei Chen, Ross Girshick, Kaiming He, Piotr Dollรกr + +[[`arXiv`](https://arxiv.org/abs/1903.12174)] [[`BibTeX`](#CitingTensorMask)] + +
+ +
+ +In this repository, we release code for TensorMask in Detectron2. +TensorMask is a dense sliding-window instance segmentation framework that, for the first time, achieves results close to the well-developed Mask R-CNN framework -- both qualitatively and quantitatively. It establishes a conceptually complementary direction for object instance segmentation research. + +## Installation +First install Detectron2 following the [documentation](https://detectron2.readthedocs.io/tutorials/install.html) and +[setup the dataset](../../datasets). Then compile the TensorMask-specific op (`swap_align2nat`): +```bash +cd /path/to/detectron2/projects/TensorMask +python setup.py build develop +``` + +## Training + +To train a model, run: +```bash +python /path/to/detectron2/projects/TensorMask/train_net.py --config-file +``` + +For example, to launch TensorMask BiPyramid training (1x schedule) with ResNet-50 backbone on 8 GPUs, +one should execute: +```bash +python /path/to/detectron2/projects/TensorMask/train_net.py --config-file configs/tensormask_R_50_FPN_1x.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly (6x schedule with scale augmentation): +```bash +python /path/to/detectron2/projects/TensorMask/train_net.py --config-file configs/tensormask_R_50_FPN_6x.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint +``` + +# Pretrained Models + +| Backbone | lr sched | AP box | AP mask | download | +| -------- | -------- | -- | --- | -------- | +| R50 | 1x | 37.6 | 32.4 | model \|  metrics | +| R50 | 6x | 41.4 | 35.8 | model \|  metrics | + + +## Citing TensorMask + +If you use TensorMask, please use the following BibTeX entry. + +``` +@InProceedings{chen2019tensormask, + title={Tensormask: A Foundation for Dense Object Segmentation}, + author={Chen, Xinlei and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, + journal={The International Conference on Computer Vision (ICCV)}, + year={2019} +} +``` + diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml b/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7245349b4aa9cfa00f20074cc7cb5cdb02607f9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/Base-TensorMask.yaml @@ -0,0 +1,25 @@ +MODEL: + META_ARCHITECTURE: "TensorMask" + MASK_ON: True + BACKBONE: + NAME: "build_retinanet_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[44, 60], [88, 120], [176, 240], [352, 480], [704, 960], [1408, 1920]] + ASPECT_RATIOS: [[1.0]] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + FUSE_TYPE: "avg" + TENSOR_MASK: + ALIGNED_ON: True + BIPYRAMID_ON: True +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml b/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d5eee135a93149a0c4b2148a47cee02e8aed8eb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-TensorMask.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git 
a/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml b/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..366a965c4adfdbba2482593c0c81f3e6af50dfd2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/configs/tensormask_R_50_FPN_6x.yaml @@ -0,0 +1,11 @@ +_BASE_: "Base-TensorMask.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (480000, 520000) + MAX_ITER: 540000 +INPUT: + MIN_SIZE_TRAIN_SAMPLING: "range" + MIN_SIZE_TRAIN: (640, 800) diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/setup.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..0194e76608966b528ab32879edc40a8e4ac3225f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/setup.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import glob +import os +from setuptools import find_packages, setup +import torch +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + + +def get_extensions(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, "tensormask", "layers", "csrc") + + main_source = os.path.join(extensions_dir, "vision.cpp") + sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp")) + source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob( + os.path.join(extensions_dir, "*.cu") + ) + + sources = [main_source] + sources + + extension = CppExtension + + extra_compile_args = {"cxx": []} + define_macros = [] + + if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1": + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + extra_compile_args["nvcc"] = [ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ] + + # It's better if pytorch can do this by default .. + CC = os.environ.get("CC", None) + if CC is not None: + extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "tensormask._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + + return ext_modules + + +setup( + name="tensormask", + version="0.1", + author="FAIR", + packages=find_packages(exclude=("configs", "tests")), + python_requires=">=3.6", + ext_modules=get_extensions(), + cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, +) diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/__init__.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3b642a55519867dc52ccc57a36c32c72c3d34da --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from .config import add_tensormask_config +from .arch import TensorMask diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/arch.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/arch.py new file mode 100644 index 0000000000000000000000000000000000000000..a3e89c6b4283b28fe8028300e146d7b7543f0da1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/arch.py @@ -0,0 +1,904 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging +import math +from typing import List +import torch +import torch.nn.functional as F +from fvcore.nn import sigmoid_focal_loss_star_jit, smooth_l1_loss +from torch import nn + +from detectron2.layers import ShapeSpec, batched_nms, cat, paste_masks_in_image +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.box_regression import Box2BoxTransform +from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY +from detectron2.modeling.meta_arch.retinanet import ( + permute_all_cls_and_box_to_N_HWA_K_and_concat, + permute_to_N_HWA_K, +) +from detectron2.structures import Boxes, ImageList, Instances +from detectron2.utils.logger import log_first_n + +from tensormask.layers import SwapAlign2Nat + +__all__ = ["TensorMask"] + + +def _assignment_rule( + gt_boxes, + anchor_boxes, + unit_lengths, + min_anchor_size, + scale_thresh=2.0, + spatial_thresh=1.0, + uniqueness_on=True, +): + """ + Given two lists of boxes of N ground truth boxes and M anchor boxes, + compute the assignment between the two, following the assignment rules in + https://arxiv.org/abs/1903.12174. + The box order must be (xmin, ymin, xmax, ymax), so please make sure to convert + to BoxMode.XYXY_ABS before calling this function. + + Args: + gt_boxes, anchor_boxes (Boxes): two Boxes. Contains N & M boxes/anchors, respectively. + unit_lengths (Tensor): Contains the unit lengths of M anchor boxes. + min_anchor_size (float): Minimum size of the anchor, in pixels + scale_thresh (float): The `scale` threshold: the maximum size of the anchor + should not be greater than scale_thresh x max(h, w) of + the ground truth box. + spatial_thresh (float): The `spatial` threshold: the l2 distance between the + center of the anchor and the ground truth box should not + be greater than spatial_thresh x u where u is the unit length. 
+ + Returns: + matches (Tensor[int64]): a vector of length M, where matches[i] is a matched + ground-truth index in [0, N) + match_labels (Tensor[int8]): a vector of length M, where pred_labels[i] indicates + whether a prediction is a true or false positive or ignored + """ + gt_boxes, anchor_boxes = gt_boxes.tensor, anchor_boxes.tensor + N = gt_boxes.shape[0] + M = anchor_boxes.shape[0] + if N == 0 or M == 0: + return ( + gt_boxes.new_full((N,), 0, dtype=torch.int64), + gt_boxes.new_full((N,), -1, dtype=torch.int8), + ) + + # Containment rule + lt = torch.min(gt_boxes[:, None, :2], anchor_boxes[:, :2]) # [N,M,2] + rb = torch.max(gt_boxes[:, None, 2:], anchor_boxes[:, 2:]) # [N,M,2] + union = cat([lt, rb], dim=2) # [N,M,4] + + dummy_gt_boxes = torch.zeros_like(gt_boxes) + anchor = dummy_gt_boxes[:, None, :] + anchor_boxes[:, :] # [N,M,4] + + contain_matrix = torch.all(union == anchor, dim=2) # [N,M] + + # Centrality rule, scale + gt_size_lower = torch.max(gt_boxes[:, 2:] - gt_boxes[:, :2], dim=1)[0] # [N] + gt_size_upper = gt_size_lower * scale_thresh # [N] + # Fall back for small objects + gt_size_upper[gt_size_upper < min_anchor_size] = min_anchor_size + # Due to sampling of locations, the anchor sizes are deducted with sampling strides + anchor_size = ( + torch.max(anchor_boxes[:, 2:] - anchor_boxes[:, :2], dim=1)[0] - unit_lengths + ) # [M] + + size_diff_upper = gt_size_upper[:, None] - anchor_size # [N,M] + scale_matrix = size_diff_upper >= 0 # [N,M] + + # Centrality rule, spatial + gt_center = (gt_boxes[:, 2:] + gt_boxes[:, :2]) / 2 # [N,2] + anchor_center = (anchor_boxes[:, 2:] + anchor_boxes[:, :2]) / 2 # [M,2] + offset_center = gt_center[:, None, :] - anchor_center[:, :] # [N,M,2] + offset_center /= unit_lengths[:, None] # [N,M,2] + spatial_square = spatial_thresh * spatial_thresh + spatial_matrix = torch.sum(offset_center * offset_center, dim=2) <= spatial_square + + assign_matrix = (contain_matrix & scale_matrix & spatial_matrix).int() + + # assign_matrix is N (gt) x M (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = assign_matrix.max(dim=0) + match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) + + match_labels[matched_vals == 0] = 0 + match_labels[matched_vals == 1] = 1 + + # find all the elements that match to ground truths multiple times + not_unique_idxs = assign_matrix.sum(dim=0) > 1 + if uniqueness_on: + match_labels[not_unique_idxs] = 0 + else: + match_labels[not_unique_idxs] = -1 + + return matches, match_labels + + +# TODO make the paste_mask function in d2 core support mask list +def _paste_mask_lists_in_image(masks, boxes, image_shape, threshold=0.5): + """ + Paste a list of masks that are of various resolutions (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Args: + masks (list(Tensor)): A list of Tensor of shape (1, Hmask_i, Wmask_i). + Values are in [0, 1]. The list length, Bimg, is the + number of detected object instances in the image. + boxes (Boxes): A Boxes of length Bimg. boxes.tensor[i] and masks[i] correspond + to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. 
img_masks[i] is a binary mask for object instance i. + """ + if len(masks) == 0: + return torch.empty((0, 1) + image_shape, dtype=torch.uint8) + + # Loop over masks groups. Each group has the same mask prediction size. + img_masks = [] + ind_masks = [] + mask_sizes = torch.tensor([m.shape[-1] for m in masks]) + unique_sizes = torch.unique(mask_sizes) + for msize in unique_sizes.tolist(): + cur_ind = torch.where(mask_sizes == msize)[0] + ind_masks.append(cur_ind) + + cur_masks = cat([masks[i] for i in cur_ind]) + cur_boxes = boxes[cur_ind] + img_masks.append(paste_masks_in_image(cur_masks, cur_boxes, image_shape, threshold)) + + img_masks = cat(img_masks) + ind_masks = cat(ind_masks) + + img_masks_out = torch.empty_like(img_masks) + img_masks_out[ind_masks, :, :] = img_masks + + return img_masks_out + + +def _postprocess(results, result_mask_info, output_height, output_width, mask_threshold=0.5): + """ + Post-process the output boxes for TensorMask. + The input images are often resized when entering an object detector. + As a result, we often need the outputs of the detector in a different + resolution from its inputs. + + This function will postprocess the raw outputs of TensorMask + to produce outputs according to the desired output resolution. + + Args: + results (Instances): the raw outputs from the detector. + `results.image_size` contains the input image resolution the detector sees. + This object might be modified in-place. Note that it does not contain the field + `pred_masks`, which is provided by another input `result_masks`. + result_mask_info (list[Tensor], Boxes): a pair of two items for mask related results. + The first item is a list of #detection tensors, each is the predicted masks. + The second item is the anchors corresponding to the predicted masks. + output_height, output_width: the desired output resolution. + + Returns: + Instances: the postprocessed output from the model, based on the output resolution + """ + scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0]) + results = Instances((output_height, output_width), **results.get_fields()) + + output_boxes = results.pred_boxes + output_boxes.tensor[:, 0::2] *= scale_x + output_boxes.tensor[:, 1::2] *= scale_y + output_boxes.clip(results.image_size) + + inds_nonempty = output_boxes.nonempty() + results = results[inds_nonempty] + result_masks, result_anchors = result_mask_info + if result_masks: + result_anchors.tensor[:, 0::2] *= scale_x + result_anchors.tensor[:, 1::2] *= scale_y + result_masks = [x for (i, x) in zip(inds_nonempty.tolist(), result_masks) if i] + results.pred_masks = _paste_mask_lists_in_image( + result_masks, + result_anchors[inds_nonempty], + results.image_size, + threshold=mask_threshold, + ) + return results + + +class TensorMaskAnchorGenerator(DefaultAnchorGenerator): + """ + For a set of image sizes and feature maps, computes a set of anchors for TensorMask. + It also computes the unit lengths and indexes for each anchor box. 
+ """ + + def grid_anchors_with_unit_lengths_and_indexes(self, grid_sizes): + anchors = [] + unit_lengths = [] + indexes = [] + for lvl, (size, stride, base_anchors) in enumerate( + zip(grid_sizes, self.strides, self.cell_anchors) + ): + grid_height, grid_width = size + device = base_anchors.device + shifts_x = torch.arange( + 0, grid_width * stride, step=stride, dtype=torch.float32, device=device + ) + shifts_y = torch.arange( + 0, grid_height * stride, step=stride, dtype=torch.float32, device=device + ) + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=2) + # Stack anchors in shapes of (HWA, 4) + cur_anchor = (shifts[:, :, None, :] + base_anchors.view(1, 1, -1, 4)).view(-1, 4) + anchors.append(cur_anchor) + unit_lengths.append( + torch.full((cur_anchor.shape[0],), stride, dtype=torch.float32, device=device) + ) + # create mask indexes using mesh grid + shifts_l = torch.full((1,), lvl, dtype=torch.int64, device=device) + shifts_i = torch.zeros((1,), dtype=torch.int64, device=device) + shifts_h = torch.arange(0, grid_height, dtype=torch.int64, device=device) + shifts_w = torch.arange(0, grid_width, dtype=torch.int64, device=device) + shifts_a = torch.arange(0, base_anchors.shape[0], dtype=torch.int64, device=device) + grids = torch.meshgrid(shifts_l, shifts_i, shifts_h, shifts_w, shifts_a) + + indexes.append(torch.stack(grids, dim=5).view(-1, 5)) + + return anchors, unit_lengths, indexes + + def forward(self, features): + """ + Returns: + list[list[Boxes]]: a list of #image elements. Each is a list of #feature level Boxes. + The Boxes contains anchors of this image on the specific feature level. + list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors. + The tensor contains strides, or unit lengths for the anchors. + list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors. + The Tensor contains indexes for the anchors, with the last dimension meaning + (L, N, H, W, A), where L is level, I is image (not set yet), H is height, + W is width, and A is anchor. + """ + num_images = len(features[0]) + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_list, lengths_list, indexes_list = self.grid_anchors_with_unit_lengths_and_indexes( + grid_sizes + ) + + # Convert anchors from Tensor to Boxes + anchors_per_im = [Boxes(x) for x in anchors_list] + + # TODO it can be simplified to not return duplicated information for + # each image, just like detectron2's own AnchorGenerator + anchors = [copy.deepcopy(anchors_per_im) for _ in range(num_images)] + unit_lengths = [copy.deepcopy(lengths_list) for _ in range(num_images)] + indexes = [copy.deepcopy(indexes_list) for _ in range(num_images)] + + return anchors, unit_lengths, indexes + + +@META_ARCH_REGISTRY.register() +class TensorMask(nn.Module): + """ + TensorMask model. Creates FPN backbone, anchors and a head for classification + and box regression. Calculates and applies proper losses to class, box, and + masks. 
+ """ + + def __init__(self, cfg): + super().__init__() + + # fmt: off + self.num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES + self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES + self.anchor_sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES + self.num_levels = len(cfg.MODEL.ANCHOR_GENERATOR.SIZES) + # Loss parameters: + self.focal_loss_alpha = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA + self.focal_loss_gamma = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA + # Inference parameters: + self.score_threshold = cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST + self.topk_candidates = cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST + self.nms_threshold = cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST + self.detections_im = cfg.TEST.DETECTIONS_PER_IMAGE + # Mask parameters: + self.mask_on = cfg.MODEL.MASK_ON + self.mask_loss_weight = cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT + self.mask_pos_weight = torch.tensor(cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT, + dtype=torch.float32) + self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON + # fmt: on + + # build the backbone + self.backbone = build_backbone(cfg) + + backbone_shape = self.backbone.output_shape() + feature_shapes = [backbone_shape[f] for f in self.in_features] + feature_strides = [x.stride for x in feature_shapes] + # build anchors + self.anchor_generator = TensorMaskAnchorGenerator(cfg, feature_shapes) + self.num_anchors = self.anchor_generator.num_cell_anchors[0] + anchors_min_level = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0] + self.mask_sizes = [size // feature_strides[0] for size in anchors_min_level] + self.min_anchor_size = min(anchors_min_level) - feature_strides[0] + + # head of the TensorMask + self.head = TensorMaskHead( + cfg, self.num_levels, self.num_anchors, self.mask_sizes, feature_shapes + ) + # box transform + self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS) + self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + @property + def device(self): + return self.pixel_mean.device + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DetectionTransform` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + image: Tensor, image in (C, H, W) format. + instances: Instances + Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + Returns: + losses (dict[str: Tensor]): mapping from a named loss to a tensor + storing the loss. Used during training only. + """ + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + features = [features[f] for f in self.in_features] + # apply the TensorMask head + pred_logits, pred_deltas, pred_masks = self.head(features) + # generate anchors based on features, is it image specific? 
+ anchors, unit_lengths, indexes = self.anchor_generator(features) + + if self.training: + # get ground truths for class labels and box targets, it will label each anchor + gt_class_info, gt_delta_info, gt_mask_info, num_fg = self.get_ground_truth( + anchors, unit_lengths, indexes, gt_instances + ) + # compute the loss + return self.losses( + gt_class_info, + gt_delta_info, + gt_mask_info, + num_fg, + pred_logits, + pred_deltas, + pred_masks, + ) + else: + # do inference to get the output + results = self.inference(pred_logits, pred_deltas, pred_masks, anchors, indexes, images) + processed_results = [] + for results_im, input_im, image_size in zip( + results, batched_inputs, images.image_sizes + ): + height = input_im.get("height", image_size[0]) + width = input_im.get("width", image_size[1]) + # this is to do post-processing with the image size + result_box, result_mask = results_im + r = _postprocess(result_box, result_mask, height, width) + processed_results.append({"instances": r}) + return processed_results + + def losses( + self, + gt_class_info, + gt_delta_info, + gt_mask_info, + num_fg, + pred_logits, + pred_deltas, + pred_masks, + ): + """ + Args: + For `gt_class_info`, `gt_delta_info`, `gt_mask_info` and `num_fg` parameters, see + :meth:`TensorMask.get_ground_truth`. + For `pred_logits`, `pred_deltas` and `pred_masks`, see + :meth:`TensorMaskHead.forward`. + + Returns: + losses (dict[str: Tensor]): mapping from a named loss to a scalar tensor + storing the loss. Used during training only. The potential dict keys are: + "loss_cls", "loss_box_reg" and "loss_mask". + """ + gt_classes_target, gt_valid_inds = gt_class_info + gt_deltas, gt_fg_inds = gt_delta_info + gt_masks, gt_mask_inds = gt_mask_info + loss_normalizer = torch.tensor(max(1, num_fg), dtype=torch.float32, device=self.device) + + # classification and regression + pred_logits, pred_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat( + pred_logits, pred_deltas, self.num_classes + ) + loss_cls = ( + sigmoid_focal_loss_star_jit( + pred_logits[gt_valid_inds], + gt_classes_target[gt_valid_inds], + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) + / loss_normalizer + ) + + if num_fg == 0: + loss_box_reg = pred_deltas.sum() * 0 + else: + loss_box_reg = ( + smooth_l1_loss(pred_deltas[gt_fg_inds], gt_deltas, beta=0.0, reduction="sum") + / loss_normalizer + ) + losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg} + + # mask prediction + if self.mask_on: + loss_mask = 0 + for lvl in range(self.num_levels): + cur_level_factor = 2 ** lvl if self.bipyramid_on else 1 + for anc in range(self.num_anchors): + cur_gt_mask_inds = gt_mask_inds[lvl][anc] + if cur_gt_mask_inds is None: + loss_mask += pred_masks[lvl][anc][0, 0, 0, 0] * 0 + else: + cur_mask_size = self.mask_sizes[anc] * cur_level_factor + # TODO maybe there are numerical issues when mask sizes are large + cur_size_divider = torch.tensor( + self.mask_loss_weight / (cur_mask_size ** 2), + dtype=torch.float32, + device=self.device, + ) + + cur_pred_masks = pred_masks[lvl][anc][ + cur_gt_mask_inds[:, 0], # N + :, # V x U + cur_gt_mask_inds[:, 1], # H + cur_gt_mask_inds[:, 2], # W + ] + + loss_mask += F.binary_cross_entropy_with_logits( + cur_pred_masks.view(-1, cur_mask_size, cur_mask_size), # V, U + gt_masks[lvl][anc].to(dtype=torch.float32), + reduction="sum", + weight=cur_size_divider, + pos_weight=self.mask_pos_weight, + ) + losses["loss_mask"] = loss_mask / loss_normalizer + return losses + + @torch.no_grad() + def 
get_ground_truth(self, anchors, unit_lengths, indexes, targets): + """ + Args: + anchors (list[list[Boxes]]): a list of N=#image elements. Each is a + list of #feature level Boxes. The Boxes contains anchors of + this image on the specific feature level. + unit_lengths (list[list[Tensor]]): a list of N=#image elements. Each is a + list of #feature level Tensor. The tensor contains unit lengths for anchors of + this image on the specific feature level. + indexes (list[list[Tensor]]): a list of N=#image elements. Each is a + list of #feature level Tensor. The tensor contains the 5D index of + each anchor, the second dimension means (L, N, H, W, A), where L + is level, I is image, H is height, W is width, and A is anchor. + targets (list[Instances]): a list of N `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + + Returns: + gt_class_info (Tensor, Tensor): A pair of two tensors for classification. + The first one is an integer tensor of shape (R, #classes) storing ground-truth + labels for each anchor. R is the total number of anchors in the batch. + The second one is an integer tensor of shape (R,), to indicate which + anchors are valid for loss computation, which anchors are not. + gt_delta_info (Tensor, Tensor): A pair of two tensors for boxes. + The first one, of shape (F, 4). F=#foreground anchors. + The last dimension represents ground-truth box2box transform + targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box. + Only foreground anchors have values in this tensor. Could be `None` if F=0. + The second one, of shape (R,), is an integer tensor indicating which anchors + are foreground ones used for box regression. Could be `None` if F=0. + gt_mask_info (list[list[Tensor]], list[list[Tensor]]): A pair of two lists for masks. + The first one is a list of P=#feature level elements. Each is a + list of A=#anchor tensors. Each tensor contains the ground truth + masks of the same size and for the same feature level. Could be `None`. + The second one is a list of P=#feature level elements. Each is a + list of A=#anchor tensors. Each tensor contains the location of the ground truth + masks of the same size and for the same feature level. The second dimension means + (N, H, W), where N is image, H is height, and W is width. Could be `None`. + num_fg (int): F=#foreground anchors, used later for loss normalization. 
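+ Anchors are matched to ground-truth boxes with the containment and
+ centrality rules implemented in `_assignment_rule` above.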
+ """ + gt_classes = [] + gt_deltas = [] + gt_masks = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)] + gt_mask_inds = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)] + + anchors = [Boxes.cat(anchors_i) for anchors_i in anchors] + unit_lengths = [cat(unit_lengths_i) for unit_lengths_i in unit_lengths] + indexes = [cat(indexes_i) for indexes_i in indexes] + + num_fg = 0 + for i, (anchors_im, unit_lengths_im, indexes_im, targets_im) in enumerate( + zip(anchors, unit_lengths, indexes, targets) + ): + # Initialize all + gt_classes_i = torch.full_like( + unit_lengths_im, self.num_classes, dtype=torch.int64, device=self.device + ) + # Ground truth classes + has_gt = len(targets_im) > 0 + if has_gt: + # Compute the pairwise matrix + gt_matched_inds, anchor_labels = _assignment_rule( + targets_im.gt_boxes, anchors_im, unit_lengths_im, self.min_anchor_size + ) + # Find the foreground instances + fg_inds = anchor_labels == 1 + fg_anchors = anchors_im[fg_inds] + num_fg += len(fg_anchors) + # Find the ground truths for foreground instances + gt_fg_matched_inds = gt_matched_inds[fg_inds] + # Assign labels for foreground instances + gt_classes_i[fg_inds] = targets_im.gt_classes[gt_fg_matched_inds] + # Anchors with label -1 are ignored, others are left as negative + gt_classes_i[anchor_labels == -1] = -1 + + # Boxes + # Ground truth box regression, only for foregrounds + matched_gt_boxes = targets_im[gt_fg_matched_inds].gt_boxes + # Compute box regression offsets for foregrounds only + gt_deltas_i = self.box2box_transform.get_deltas( + fg_anchors.tensor, matched_gt_boxes.tensor + ) + gt_deltas.append(gt_deltas_i) + + # Masks + if self.mask_on: + # Compute masks for each level and each anchor + matched_indexes = indexes_im[fg_inds, :] + for lvl in range(self.num_levels): + ids_lvl = matched_indexes[:, 0] == lvl + if torch.any(ids_lvl): + cur_level_factor = 2 ** lvl if self.bipyramid_on else 1 + for anc in range(self.num_anchors): + ids_lvl_anchor = ids_lvl & (matched_indexes[:, 4] == anc) + if torch.any(ids_lvl_anchor): + gt_masks[lvl][anc].append( + targets_im[ + gt_fg_matched_inds[ids_lvl_anchor] + ].gt_masks.crop_and_resize( + fg_anchors[ids_lvl_anchor].tensor, + self.mask_sizes[anc] * cur_level_factor, + ) + ) + # Select (N, H, W) dimensions + gt_mask_inds_lvl_anc = matched_indexes[ids_lvl_anchor, 1:4] + # Set the image index to the current image + gt_mask_inds_lvl_anc[:, 0] = i + gt_mask_inds[lvl][anc].append(gt_mask_inds_lvl_anc) + gt_classes.append(gt_classes_i) + + # Classes and boxes + gt_classes = cat(gt_classes) + gt_valid_inds = gt_classes >= 0 + gt_fg_inds = gt_valid_inds & (gt_classes < self.num_classes) + gt_classes_target = torch.zeros( + (gt_classes.shape[0], self.num_classes), dtype=torch.float32, device=self.device + ) + gt_classes_target[gt_fg_inds, gt_classes[gt_fg_inds]] = 1 + gt_deltas = cat(gt_deltas) if gt_deltas else None + + # Masks + gt_masks = [[cat(mla) if mla else None for mla in ml] for ml in gt_masks] + gt_mask_inds = [[cat(ila) if ila else None for ila in il] for il in gt_mask_inds] + return ( + (gt_classes_target, gt_valid_inds), + (gt_deltas, gt_fg_inds), + (gt_masks, gt_mask_inds), + num_fg, + ) + + def inference(self, pred_logits, pred_deltas, pred_masks, anchors, indexes, images): + """ + Arguments: + pred_logits, pred_deltas, pred_masks: Same as the output of: + meth:`TensorMaskHead.forward` + anchors, indexes: Same as the input of meth:`TensorMask.get_ground_truth` + images (ImageList): the input images + + Returns: + 
results (List[Instances]): a list of #images elements. + """ + assert len(anchors) == len(images) + results = [] + + pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits] + pred_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_deltas] + + pred_logits = cat(pred_logits, dim=1) + pred_deltas = cat(pred_deltas, dim=1) + + for img_idx, (anchors_im, indexes_im) in enumerate(zip(anchors, indexes)): + # Get the size of the current image + image_size = images.image_sizes[img_idx] + + logits_im = pred_logits[img_idx] + deltas_im = pred_deltas[img_idx] + + if self.mask_on: + masks_im = [[mla[img_idx] for mla in ml] for ml in pred_masks] + else: + masks_im = [None] * self.num_levels + results_im = self.inference_single_image( + logits_im, + deltas_im, + masks_im, + Boxes.cat(anchors_im), + cat(indexes_im), + tuple(image_size), + ) + results.append(results_im) + return results + + def inference_single_image( + self, pred_logits, pred_deltas, pred_masks, anchors, indexes, image_size + ): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Arguments: + pred_logits (list[Tensor]): list of #feature levels. Each entry contains + tensor of size (AxHxW, K) + pred_deltas (list[Tensor]): Same shape as 'pred_logits' except that K becomes 4. + pred_masks (list[list[Tensor]]): List of #feature levels, each is a list of #anchors. + Each entry contains tensor of size (M_i*M_i, H, W). `None` if mask_on=False. + anchors (list[Boxes]): list of #feature levels. Each entry contains + a Boxes object, which contains all the anchors for that + image in that feature level. + image_size (tuple(H, W)): a tuple of the image height and width. + + Returns: + Same as `inference`, but for only one image. + """ + pred_logits = pred_logits.flatten().sigmoid_() + # We get top locations across all levels to accelerate the inference speed, + # which does not seem to affect the accuracy. 
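+ # The logits were flattened, so each retained index encodes both an anchor
+ # location and a class: `idx % num_classes` recovers the class and
+ # `idx // num_classes` the flat HWA anchor position used below to look up
+ # boxes, anchor indexes and mask predictions.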
+ # First select values above the threshold + logits_top_idxs = torch.where(pred_logits > self.score_threshold)[0] + # Then get the top values + num_topk = min(self.topk_candidates, logits_top_idxs.shape[0]) + pred_prob, topk_idxs = pred_logits[logits_top_idxs].sort(descending=True) + # Keep top k scoring values + pred_prob = pred_prob[:num_topk] + # Keep top k values + top_idxs = logits_top_idxs[topk_idxs[:num_topk]] + + # class index + cls_idxs = top_idxs % self.num_classes + # HWA index + top_idxs //= self.num_classes + # predict boxes + pred_boxes = self.box2box_transform.apply_deltas( + pred_deltas[top_idxs], anchors[top_idxs].tensor + ) + # apply nms + keep = batched_nms(pred_boxes, pred_prob, cls_idxs, self.nms_threshold) + # pick the top ones + keep = keep[: self.detections_im] + + results = Instances(image_size) + results.pred_boxes = Boxes(pred_boxes[keep]) + results.scores = pred_prob[keep] + results.pred_classes = cls_idxs[keep] + + # deal with masks + result_masks, result_anchors = [], None + if self.mask_on: + # index and anchors, useful for masks + top_indexes = indexes[top_idxs] + top_anchors = anchors[top_idxs] + result_indexes = top_indexes[keep] + result_anchors = top_anchors[keep] + # Get masks and do sigmoid + for lvl, _, h, w, anc in result_indexes.tolist(): + cur_size = self.mask_sizes[anc] * (2 ** lvl if self.bipyramid_on else 1) + result_masks.append( + torch.sigmoid(pred_masks[lvl][anc][:, h, w].view(1, cur_size, cur_size)) + ) + + return results, (result_masks, result_anchors) + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + return images + + +class TensorMaskHead(nn.Module): + def __init__(self, cfg, num_levels, num_anchors, mask_sizes, input_shape: List[ShapeSpec]): + """ + TensorMask head. 
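+ Builds the classification, box and (optional) mask towers that are shared
+ across feature levels, one 1x1 mask predictor per mask size, and the
+ SwapAlign2Nat layers used when ALIGNED_ON is enabled.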
+ """ + super().__init__() + # fmt: off + self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES + in_channels = input_shape[0].channels + num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES + cls_channels = cfg.MODEL.TENSOR_MASK.CLS_CHANNELS + num_convs = cfg.MODEL.TENSOR_MASK.NUM_CONVS + # box parameters + bbox_channels = cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS + # mask parameters + self.mask_on = cfg.MODEL.MASK_ON + self.mask_sizes = mask_sizes + mask_channels = cfg.MODEL.TENSOR_MASK.MASK_CHANNELS + self.align_on = cfg.MODEL.TENSOR_MASK.ALIGNED_ON + self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON + # fmt: on + + # class subnet + cls_subnet = [] + cur_channels = in_channels + for _ in range(num_convs): + cls_subnet.append( + nn.Conv2d(cur_channels, cls_channels, kernel_size=3, stride=1, padding=1) + ) + cur_channels = cls_channels + cls_subnet.append(nn.ReLU()) + + self.cls_subnet = nn.Sequential(*cls_subnet) + self.cls_score = nn.Conv2d( + cur_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1 + ) + modules_list = [self.cls_subnet, self.cls_score] + + # box subnet + bbox_subnet = [] + cur_channels = in_channels + for _ in range(num_convs): + bbox_subnet.append( + nn.Conv2d(cur_channels, bbox_channels, kernel_size=3, stride=1, padding=1) + ) + cur_channels = bbox_channels + bbox_subnet.append(nn.ReLU()) + + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.bbox_pred = nn.Conv2d( + cur_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1 + ) + modules_list.extend([self.bbox_subnet, self.bbox_pred]) + + # mask subnet + if self.mask_on: + mask_subnet = [] + cur_channels = in_channels + for _ in range(num_convs): + mask_subnet.append( + nn.Conv2d(cur_channels, mask_channels, kernel_size=3, stride=1, padding=1) + ) + cur_channels = mask_channels + mask_subnet.append(nn.ReLU()) + + self.mask_subnet = nn.Sequential(*mask_subnet) + modules_list.append(self.mask_subnet) + for mask_size in self.mask_sizes: + cur_mask_module = "mask_pred_%02d" % mask_size + self.add_module( + cur_mask_module, + nn.Conv2d( + cur_channels, mask_size * mask_size, kernel_size=1, stride=1, padding=0 + ), + ) + modules_list.append(getattr(self, cur_mask_module)) + if self.align_on: + if self.bipyramid_on: + for lvl in range(num_levels): + cur_mask_module = "align2nat_%02d" % lvl + lambda_val = 2 ** lvl + setattr(self, cur_mask_module, SwapAlign2Nat(lambda_val)) + # Also the fusing layer, stay at the same channel size + mask_fuse = [ + nn.Conv2d(cur_channels, cur_channels, kernel_size=3, stride=1, padding=1), + nn.ReLU(), + ] + self.mask_fuse = nn.Sequential(*mask_fuse) + modules_list.append(self.mask_fuse) + else: + self.align2nat = SwapAlign2Nat(1) + + # Initialization + for modules in modules_list: + for layer in modules.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + # Use prior in model initialization to improve stability + bias_value = -(math.log((1 - 0.01) / 0.01)) + torch.nn.init.constant_(self.cls_score.bias, bias_value) + + def forward(self, features): + """ + Arguments: + features (list[Tensor]): FPN feature map tensors in high to low resolution. + Each tensor in the list correspond to different feature levels. + + Returns: + pred_logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). + The tensor predicts the classification probability + at each spatial position for each of the A anchors and K object + classes. 
+ pred_deltas (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). + The tensor predicts 4-vector (dx,dy,dw,dh) box + regression values for every anchor. These values are the + relative offset between the anchor and the ground truth box. + pred_masks (list(list[Tensor])): #lvl list of tensors, each is a list of + A tensors of shape (N, M_{i,a}, Hi, Wi). + The tensor predicts a dense set of M_ixM_i masks at every location. + """ + pred_logits = [self.cls_score(self.cls_subnet(x)) for x in features] + pred_deltas = [self.bbox_pred(self.bbox_subnet(x)) for x in features] + + pred_masks = None + if self.mask_on: + mask_feats = [self.mask_subnet(x) for x in features] + + if self.bipyramid_on: + mask_feat_high_res = mask_feats[0] + H, W = mask_feat_high_res.shape[-2:] + mask_feats_up = [] + for lvl, mask_feat in enumerate(mask_feats): + lambda_val = 2.0 ** lvl + mask_feat_up = mask_feat + if lvl > 0: + mask_feat_up = F.interpolate( + mask_feat, scale_factor=lambda_val, mode="bilinear", align_corners=False + ) + mask_feats_up.append( + self.mask_fuse(mask_feat_up[:, :, :H, :W] + mask_feat_high_res) + ) + mask_feats = mask_feats_up + + pred_masks = [] + for lvl, mask_feat in enumerate(mask_feats): + cur_masks = [] + for mask_size in self.mask_sizes: + cur_mask_module = getattr(self, "mask_pred_%02d" % mask_size) + cur_mask = cur_mask_module(mask_feat) + if self.align_on: + if self.bipyramid_on: + cur_mask_module = getattr(self, "align2nat_%02d" % lvl) + cur_mask = cur_mask_module(cur_mask) + else: + cur_mask = self.align2nat(cur_mask) + cur_masks.append(cur_mask) + pred_masks.append(cur_masks) + return pred_logits, pred_deltas, pred_masks diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/config.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/config.py new file mode 100644 index 0000000000000000000000000000000000000000..44479f211811bd4060c6afef9ed86791b0dcd0d4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/config.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from detectron2.config import CfgNode as CN + + +def add_tensormask_config(cfg): + """ + Add config for TensorMask. + """ + cfg.MODEL.TENSOR_MASK = CN() + + # Anchor parameters + cfg.MODEL.TENSOR_MASK.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6", "p7"] + + # Convolutions to use in the towers + cfg.MODEL.TENSOR_MASK.NUM_CONVS = 4 + + # Number of foreground classes. 
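+ # (80 corresponds to the COCO instance categories; override this when training
+ # on a dataset with a different label set.)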
+ cfg.MODEL.TENSOR_MASK.NUM_CLASSES = 80 + # Channel size for the classification tower + cfg.MODEL.TENSOR_MASK.CLS_CHANNELS = 256 + + cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST = 0.05 + # Only the top (1000 * #levels) candidate boxes across all levels are + # considered jointly during test (to improve speed) + cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST = 6000 + cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST = 0.5 + + # Box parameters + # Channel size for the box tower + cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS = 128 + # Weights on (dx, dy, dw, dh) + cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS = (1.5, 1.5, 0.75, 0.75) + + # Loss parameters + cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA = 3.0 + cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA = 0.3 + + # Mask parameters + # Channel size for the mask tower + cfg.MODEL.TENSOR_MASK.MASK_CHANNELS = 128 + # Mask loss weight + cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT = 2.0 + # weight on positive pixels within the mask + cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT = 1.5 + # Whether to predict in the aligned representation + cfg.MODEL.TENSOR_MASK.ALIGNED_ON = False + # Whether to use the bipyramid architecture + cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON = False diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/__init__.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cbbac429a69ce7cb17872e27b868f5603de5dc64 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .swap_align2nat import SwapAlign2Nat, swap_align2nat + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h new file mode 100644 index 0000000000000000000000000000000000000000..2ec037391f1c5a40e69190bbdb50f71501d54825 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat.h @@ -0,0 +1,54 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#pragma once +#include + +namespace tensormask { + +#ifdef WITH_CUDA +at::Tensor SwapAlign2Nat_forward_cuda( + const at::Tensor& X, + const int lambda_val, + const float pad_val); + +at::Tensor SwapAlign2Nat_backward_cuda( + const at::Tensor& gY, + const int lambda_val, + const int batch_size, + const int channel, + const int height, + const int width); +#endif + +inline at::Tensor SwapAlign2Nat_forward( + const at::Tensor& X, + const int lambda_val, + const float pad_val) { + if (X.type().is_cuda()) { +#ifdef WITH_CUDA + return SwapAlign2Nat_forward_cuda(X, lambda_val, pad_val); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline at::Tensor SwapAlign2Nat_backward( + const at::Tensor& gY, + const int lambda_val, + const int batch_size, + const int channel, + const int height, + const int width) { + if (gY.type().is_cuda()) { +#ifdef WITH_CUDA + return SwapAlign2Nat_backward_cuda( + gY, lambda_val, batch_size, channel, height, width); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +} // namespace tensormask diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat_cuda.cu b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..06de4a4d046523be9959dee73dfc1c2c20852ce1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/SwapAlign2Nat/SwapAlign2Nat_cuda.cu @@ -0,0 +1,526 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__device__ inline T get_pixel_val( + const T* tensor, + const int idx, + const int H, + const int W, + const int y, + const int x, + const int V, + const int U, + const int v, + const int u, + const T pad_val) { + if ((y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || (v >= V) || + (u < 0) || (u >= U)) { + return pad_val; + } else { + return tensor[(((idx * V + v) * U + u) * H + y) * W + x]; + } +} + +template +__device__ inline void add_pixel_val( + T* tensor, + const T val, + const int idx, + const int H, + const int W, + const int y, + const int x, + const int V, + const int U, + const int v, + const int u) { + if ((val == 0.) 
|| (y < 0) || (y >= H) || (x < 0) || (x >= W) || (v < 0) || + (v >= V) || (u < 0) || (u >= U)) { + return; + } else { + atomicAdd(tensor + ((((idx * V + v) * U + u) * H + y) * W + x), val); + } +} + +template +__global__ void SwapAlign2NatForwardFeat( + const int nthreads, + const T* bottom_data, + const int Vout, + const int Uout, + const float hVout, + const float hUout, + const int Vin, + const int Uin, + const float lambda, + const int Hin, + const int Win, + const int Hout, + const int Wout, + const T pad_val, + T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int idx = index; + const int x = idx % Wout; + idx /= Wout; + const int y = idx % Hout; + idx /= Hout; + const int u = idx % Uout; + idx /= Uout; + const int v = idx % Vout; + idx /= Vout; + + const float ox = x * lambda + u - hUout + 0.5; + const int xf = static_cast(floor(ox)); + const int xc = static_cast(ceil(ox)); + const float xwc = ox - xf; + const float xwf = 1. - xwc; + + const float oy = y * lambda + v - hVout + 0.5; + const int yf = static_cast(floor(oy)); + const int yc = static_cast(ceil(oy)); + const float ywc = oy - yf; + const float ywf = 1. - ywc; + + const float ou = (u + 0.5) / lambda - 0.5; + const int uf = static_cast(floor(ou)); + const int uc = static_cast(ceil(ou)); + const float uwc = ou - uf; + const float uwf = 1. - uwc; + + const float ov = (v + 0.5) / lambda - 0.5; + const int vf = static_cast(floor(ov)); + const int vc = static_cast(ceil(ov)); + const float vwc = ov - vf; + const float vwf = 1. - vwc; + + T val = ywf * xwf * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uf, pad_val) + + ywf * xwf * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vf, uc, pad_val) + + ywf * xwf * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uf, pad_val) + + ywf * xwf * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xf, Vin, Uin, vc, uc, pad_val) + + ywf * xwc * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uf, pad_val) + + ywf * xwc * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vf, uc, pad_val) + + ywf * xwc * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uf, pad_val) + + ywf * xwc * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yf, xc, Vin, Uin, vc, uc, pad_val) + + ywc * xwf * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uf, pad_val) + + ywc * xwf * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vf, uc, pad_val) + + ywc * xwf * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uf, pad_val) + + ywc * xwf * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xf, Vin, Uin, vc, uc, pad_val) + + ywc * xwc * vwf * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uf, pad_val) + + ywc * xwc * vwf * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vf, uc, pad_val) + + ywc * xwc * vwc * uwf * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uf, pad_val) + + ywc * xwc * vwc * uwc * + get_pixel_val( + bottom_data, idx, Hin, Win, yc, xc, Vin, Uin, vc, uc, pad_val); + + top_data[index] = val; + } +} + +template +__global__ void SwapAlign2NatBackwardFeat( + const int nthreads, + const T* top_diff, + const int Vout, + const int Uout, + const float hVout, + const float hUout, + const int Vin, + const int Uin, + const float 
lambda, + const int Hin, + const int Win, + const int Hout, + const int Wout, + T* bottom_diff) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int idx = index; + const int x = idx % Wout; + idx /= Wout; + const int y = idx % Hout; + idx /= Hout; + const int u = idx % Uout; + idx /= Uout; + const int v = idx % Vout; + idx /= Vout; + + const float ox = x * lambda + u - hUout + 0.5; + const int xf = static_cast(floor(ox)); + const int xc = static_cast(ceil(ox)); + const float xwc = ox - xf; + const float xwf = 1. - xwc; + + const float oy = y * lambda + v - hVout + 0.5; + const int yf = static_cast(floor(oy)); + const int yc = static_cast(ceil(oy)); + const float ywc = oy - yf; + const float ywf = 1. - ywc; + + const float ou = (u + 0.5) / lambda - 0.5; + const int uf = static_cast(floor(ou)); + const int uc = static_cast(ceil(ou)); + const float uwc = ou - uf; + const float uwf = 1. - uwc; + + const float ov = (v + 0.5) / lambda - 0.5; + const int vf = static_cast(floor(ov)); + const int vc = static_cast(ceil(ov)); + const float vwc = ov - vf; + const float vwf = 1. - vwc; + + const T grad = top_diff[index]; + + add_pixel_val( + bottom_diff, + ywf * xwf * vwf * uwf * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywf * xwf * vwf * uwc * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywf * xwf * vwc * uwf * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywf * xwf * vwc * uwc * grad, + idx, + Hin, + Win, + yf, + xf, + Vin, + Uin, + vc, + uc); + add_pixel_val( + bottom_diff, + ywf * xwc * vwf * uwf * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywf * xwc * vwf * uwc * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywf * xwc * vwc * uwf * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywf * xwc * vwc * uwc * grad, + idx, + Hin, + Win, + yf, + xc, + Vin, + Uin, + vc, + uc); + add_pixel_val( + bottom_diff, + ywc * xwf * vwf * uwf * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywc * xwf * vwf * uwc * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywc * xwf * vwc * uwf * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywc * xwf * vwc * uwc * grad, + idx, + Hin, + Win, + yc, + xf, + Vin, + Uin, + vc, + uc); + add_pixel_val( + bottom_diff, + ywc * xwc * vwf * uwf * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vf, + uf); + add_pixel_val( + bottom_diff, + ywc * xwc * vwf * uwc * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vf, + uc); + add_pixel_val( + bottom_diff, + ywc * xwc * vwc * uwf * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vc, + uf); + add_pixel_val( + bottom_diff, + ywc * xwc * vwc * uwc * grad, + idx, + Hin, + Win, + yc, + xc, + Vin, + Uin, + vc, + uc); + } +} + +namespace tensormask { + +at::Tensor SwapAlign2Nat_forward_cuda( + const at::Tensor& X, + const int lambda_val, + const float pad_val) { + AT_ASSERTM(X.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(X.ndimension() == 4, "input must be a 4D tensor"); + AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1"); + const int N = X.size(0); + const int C = X.size(1); + const int Vin = 
static_cast(sqrt(static_cast(C))); + const int Uin = C / Vin; + AT_ASSERTM( + C == Vin * Uin && Vin == Uin, "#channels should be a square number"); + const int Vout = lambda_val * Vin; + const int Uout = lambda_val * Uin; + const int Hin = X.size(2); + const int Win = X.size(3); + const float lambda = static_cast(lambda_val); + const int Hout = static_cast(ceil(Hin / lambda)); + const int Wout = static_cast(ceil(Win / lambda)); + const float hVout = Vout / 2.; + const float hUout = Uout / 2.; + + at::cuda::CUDAGuard device_guard(X.device()); + + at::Tensor Y = at::empty({N, Vout * Uout, Hout, Wout}, X.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(Y.numel(), 512L), 4096L)); + dim3 block(512); + + if (Y.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return Y; + } + + auto X_ = X.contiguous(); + AT_DISPATCH_FLOATING_TYPES(X.scalar_type(), "SwapAlign2Nat_forward", [&] { + SwapAlign2NatForwardFeat<<>>( + Y.numel(), + X_.data_ptr(), + Vout, + Uout, + hVout, + hUout, + Vin, + Uin, + lambda, + Hin, + Win, + Hout, + Wout, + pad_val, + Y.data_ptr()); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return Y; +} + +at::Tensor SwapAlign2Nat_backward_cuda( + const at::Tensor& gY, + const int lambda_val, + const int batch_size, + const int channel, + const int height, + const int width) { + AT_ASSERTM(gY.device().is_cuda(), "input gradient must be a CUDA tensor"); + AT_ASSERTM(gY.ndimension() == 4, "input gradient must be a 4D tensor"); + AT_ASSERTM(lambda_val >= 1, "lambda should be greater or equal to 1"); + const int Vin = static_cast(sqrt(static_cast(channel))); + const int Uin = channel / Vin; + const int Vout = lambda_val * Vin; + const int Uout = lambda_val * Uin; + const float hVout = Vout / 2.; + const float hUout = Uout / 2.; + const int Hout = gY.size(2); + const int Wout = gY.size(3); + + at::cuda::CUDAGuard device_guard(gY.device()); + + at::Tensor gX = at::zeros({batch_size, channel, height, width}, gY.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(gY.numel(), 512L), 4096L)); + dim3 block(512); + + // handle possibly empty gradients + if (gY.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return gX; + } + + auto gY_ = gY.contiguous(); + AT_DISPATCH_FLOATING_TYPES(gY.scalar_type(), "SwapAlign2Nat_backward", [&] { + SwapAlign2NatBackwardFeat<<>>( + gY.numel(), + gY_.data_ptr(), + Vout, + Uout, + hVout, + hUout, + Vin, + Uin, + static_cast(lambda_val), + height, + width, + Hout, + Wout, + gX.data_ptr()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return gX; +} + +} // namespace tensormask diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ad8e472c2cfc7c10e00cd6b00fc22c0dd9384dd1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/csrc/vision.cpp @@ -0,0 +1,19 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +#include +#include "SwapAlign2Nat/SwapAlign2Nat.h" + +namespace tensormask { + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def( + "swap_align2nat_forward", + &SwapAlign2Nat_forward, + "SwapAlign2Nat_forward"); + m.def( + "swap_align2nat_backward", + &SwapAlign2Nat_backward, + "SwapAlign2Nat_backward"); +} + +} // namespace tensormask diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py new file mode 100644 index 0000000000000000000000000000000000000000..a72c98a968577eff2302d75e4cb41620e4ecf582 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tensormask/layers/swap_align2nat.py @@ -0,0 +1,61 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from tensormask import _C + + +class _SwapAlign2Nat(Function): + @staticmethod + def forward(ctx, X, lambda_val, pad_val): + ctx.lambda_val = lambda_val + ctx.input_shape = X.size() + + Y = _C.swap_align2nat_forward(X, lambda_val, pad_val) + return Y + + @staticmethod + @once_differentiable + def backward(ctx, gY): + lambda_val = ctx.lambda_val + bs, ch, h, w = ctx.input_shape + + gX = _C.swap_align2nat_backward(gY, lambda_val, bs, ch, h, w) + + return gX, None, None + + +swap_align2nat = _SwapAlign2Nat.apply + + +class SwapAlign2Nat(nn.Module): + """ + The op `SwapAlign2Nat` described in https://arxiv.org/abs/1903.12174. + Given an input tensor that predicts masks of shape (N, C=VxU, H, W), + apply the op, it will return masks of shape (N, V'xU', H', W') where + the unit lengths of (V, U) and (H, W) are swapped, and the mask representation + is transformed from aligned to natural. + Args: + lambda_val (int): the relative unit length ratio between (V, U) and (H, W), + as we always have larger unit lengths for (V, U) than (H, W), + lambda_val is always >= 1. + pad_val (float): padding value for the values falling outside of the input + tensor, default set to -6 as sigmoid(-6) is ~0, indicating + that is no masks outside of the tensor. + """ + + def __init__(self, lambda_val, pad_val=-6.0): + super(SwapAlign2Nat, self).__init__() + self.lambda_val = lambda_val + self.pad_val = pad_val + + def forward(self, X): + return swap_align2nat(X, self.lambda_val, self.pad_val) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "lambda_val=" + str(self.lambda_val) + tmpstr += ", pad_val=" + str(self.pad_val) + tmpstr += ")" + return tmpstr diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tests/__init__.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..168f9979a4623806934b0ff1102ac166704e7dec --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/tests/test_swap_align2nat.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/tests/test_swap_align2nat.py new file mode 100755 index 0000000000000000000000000000000000000000..b3d018ce199ddaa19af25e8304d969e8f59c747a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/tests/test_swap_align2nat.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import unittest +import torch +from torch.autograd import gradcheck + +from tensormask.layers.swap_align2nat import SwapAlign2Nat + + +class SwapAlign2NatTest(unittest.TestCase): + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_swap_align2nat_gradcheck_cuda(self): + dtype = torch.float64 + device = torch.device("cuda") + m = SwapAlign2Nat(2).to(dtype=dtype, device=device) + x = torch.rand(2, 4, 10, 10, dtype=dtype, device=device, requires_grad=True) + + self.assertTrue(gradcheck(m, x), "gradcheck failed for SwapAlign2Nat CUDA") + + def _swap_align2nat(self, tensor, lambda_val): + """ + The basic setup for testing Swap_Align + """ + op = SwapAlign2Nat(lambda_val, pad_val=0.0) + input = torch.from_numpy(tensor[None, :, :, :].astype("float32")) + output = op.forward(input.cuda()).cpu().numpy() + return output[0] + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/projects/TensorMask/train_net.py b/preprocess/mhp_extension/detectron2/projects/TensorMask/train_net.py new file mode 100755 index 0000000000000000000000000000000000000000..b898fc77b7f52cae6ff398ac5aec73c59ab928ab --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TensorMask/train_net.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +TensorMask Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator, verify_results + +from tensormask import add_tensormask_config + + +class Trainer(DefaultTrainer): + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + return COCOEvaluator(dataset_name, cfg, True, output_folder) + + +def setup(args): + """ + Create configs and perform basic setups. 
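+    TensorMask-specific options are registered via add_tensormask_config() before the
+    config file and any command-line overrides are merged in.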
+ """ + cfg = get_cfg() + add_tensormask_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/README.md b/preprocess/mhp_extension/detectron2/projects/TridentNet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4b7a90102d008a498e93dff595a09206be5269e7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/README.md @@ -0,0 +1,60 @@ + +# TridentNet in Detectron2 +**Scale-Aware Trident Networks for Object Detection** + +Yanghao Li\*, Yuntao Chen\*, Naiyan Wang, Zhaoxiang Zhang + +[[`TridentNet`](https://github.com/TuSimple/simpledet/tree/master/models/tridentnet)] [[`arXiv`](https://arxiv.org/abs/1901.01892)] [[`BibTeX`](#CitingTridentNet)] + +
+ +
+ +In this repository, we implement TridentNet-Fast in Detectron2. +Trident Network (TridentNet) aims to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. TridentNet-Fast is a fast approximation version of TridentNet that could achieve significant improvements without any additional parameters and computational cost. + +## Training + +To train a model, run +```bash +python /path/to/detectron2/projects/TridentNet/train_net.py --config-file +``` + +For example, to launch end-to-end TridentNet training with ResNet-50 backbone on 8 GPUs, +one should execute: +```bash +python /path/to/detectron2/projects/TridentNet/train_net.py --config-file configs/tridentnet_fast_R_50_C4_1x.yaml --num-gpus 8 +``` + +## Evaluation + +Model evaluation can be done similarly: +```bash +python /path/to/detectron2/projects/TridentNet/train_net.py --config-file configs/tridentnet_fast_R_50_C4_1x.yaml --eval-only MODEL.WEIGHTS model.pth +``` + +## Results on MS-COCO in Detectron2 + +|Model|Backbone|Head|lr sched|AP|AP50|AP75|APs|APm|APl|download| +|-----|--------|----|--------|--|----|----|---|---|---|--------| +|Faster|R50-C4|C5-512ROI|1X|35.7|56.1|38.0|19.2|40.9|48.7|model \| metrics| +|TridentFast|R50-C4|C5-128ROI|1X|38.0|58.1|40.8|19.5|42.2|54.6|model \| metrics| +|Faster|R50-C4|C5-512ROI|3X|38.4|58.7|41.3|20.7|42.7|53.1|model \| metrics| +|TridentFast|R50-C4|C5-128ROI|3X|40.6|60.8|43.6|23.4|44.7|57.1|model \| metrics| +|Faster|R101-C4|C5-512ROI|3X|41.1|61.4|44.0|22.2|45.5|55.9|model \| metrics| +|TridentFast|R101-C4|C5-128ROI|3X|43.6|63.4|47.0|24.3|47.8|60.0|model \| metrics| + + +## Citing TridentNet + +If you use TridentNet, please use the following BibTeX entry. 
+ +``` +@InProceedings{li2019scale, + title={Scale-Aware Trident Networks for Object Detection}, + author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang}, + journal={The International Conference on Computer Vision (ICCV)}, + year={2019} +} +``` + diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c3d80797ba9ae63a5669ccbd74a0d2006fee3b7 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml @@ -0,0 +1,29 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_trident_resnet_backbone" + ROI_HEADS: + NAME: "TridentRes5ROIHeads" + POSITIVE_FRACTION: 0.5 + BATCH_SIZE_PER_IMAGE: 128 + PROPOSAL_APPEND_GT: False + PROPOSAL_GENERATOR: + NAME: "TridentRPN" + RPN: + POST_NMS_TOPK_TRAIN: 500 + TRIDENT: + NUM_BRANCH: 3 + BRANCH_DILATIONS: [1, 2, 3] + TEST_BRANCH_IDX: 1 + TRIDENT_STAGE: "res4" +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +VERSION: 2 diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc83c2f9e7b7653c8982e657b5f116abe6ad6e1f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "Base-TridentNet-Fast-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fda2cb6622d732c0f70d74d567c26182a9a41c44 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "Base-TridentNet-Fast-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ebf89d03ea043810b02e71ecc2c1711c250e161c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "Base-TridentNet-Fast-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/train_net.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/train_net.py new file mode 100755 index 0000000000000000000000000000000000000000..eac2ec5c39e4a3ce2221f354dcea288bffcb1fbb --- /dev/null +++ 
b/preprocess/mhp_extension/detectron2/projects/TridentNet/train_net.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +TridentNet Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" + +import os + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator + +from tridentnet import add_tridentnet_config + + +class Trainer(DefaultTrainer): + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + return COCOEvaluator(dataset_name, cfg, True, output_folder) + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_tridentnet_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/__init__.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fcdeb45a03d3835b3c2498ca8021a11d8cb4758 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .config import add_tridentnet_config +from .trident_backbone import ( + TridentBottleneckBlock, + build_trident_resnet_backbone, + make_trident_stage, +) +from .trident_rpn import TridentRPN +from .trident_rcnn import TridentRes5ROIHeads, TridentStandardROIHeads diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/config.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f33f473cb32633d9ba6582f0406ffe0a929d23c6 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/config.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from detectron2.config import CfgNode as CN + + +def add_tridentnet_config(cfg): + """ + Add config for tridentnet. + """ + _C = cfg + + _C.MODEL.TRIDENT = CN() + + # Number of branches for TridentNet. + _C.MODEL.TRIDENT.NUM_BRANCH = 3 + # Specify the dilations for each branch. + _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3] + # Specify the stage for applying trident blocks. Default stage is Res4 according to the + # TridentNet paper. 
+ _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4" + # Specify the test branch index TridentNet Fast inference: + # - use -1 to aggregate results of all branches during inference. + # - otherwise, only using specified branch for fast inference. Recommended setting is + # to use the middle branch. + _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1 diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_backbone.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..232dfaf1ca01c0395c0ceea544bfbdee0d45ce1a --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_backbone.py @@ -0,0 +1,223 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F + +from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm +from detectron2.modeling import BACKBONE_REGISTRY, ResNet, ResNetBlockBase, make_stage +from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock, DeformBottleneckBlock + +from .trident_conv import TridentConv + +__all__ = ["TridentBottleneckBlock", "make_trident_stage", "build_trident_resnet_backbone"] + + +class TridentBottleneckBlock(ResNetBlockBase): + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + num_branch=3, + dilations=(1, 2, 3), + concat_output=False, + test_branch_idx=-1, + ): + """ + Args: + num_branch (int): the number of branches in TridentNet. + dilations (tuple): the dilations of multiple branches in TridentNet. + concat_output (bool): if concatenate outputs of multiple branches in TridentNet. + Use 'True' for the last trident block. 
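+            test_branch_idx (int): index of the branch to run at test time for TridentNet-Fast;
+                use -1 to keep all branches active during inference.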
+ """ + super().__init__(in_channels, out_channels, stride) + + assert num_branch == len(dilations) + + self.num_branch = num_branch + self.concat_output = concat_output + self.test_branch_idx = test_branch_idx + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = TridentConv( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + paddings=dilations, + bias=False, + groups=num_groups, + dilations=dilations, + num_branch=num_branch, + test_branch_idx=test_branch_idx, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + def forward(self, x): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + if not isinstance(x, list): + x = [x] * num_branch + out = [self.conv1(b) for b in x] + out = [F.relu_(b) for b in out] + + out = self.conv2(out) + out = [F.relu_(b) for b in out] + + out = [self.conv3(b) for b in out] + + if self.shortcut is not None: + shortcut = [self.shortcut(b) for b in x] + else: + shortcut = x + + out = [out_b + shortcut_b for out_b, shortcut_b in zip(out, shortcut)] + out = [F.relu_(b) for b in out] + if self.concat_output: + out = torch.cat(out) + return out + + +def make_trident_stage(block_class, num_blocks, first_stride, **kwargs): + """ + Create a resnet stage by creating many blocks for TridentNet. + """ + blocks = [] + for i in range(num_blocks - 1): + blocks.append(block_class(stride=first_stride if i == 0 else 1, **kwargs)) + kwargs["in_channels"] = kwargs["out_channels"] + blocks.append(block_class(stride=1, concat_output=True, **kwargs)) + return blocks + + +@BACKBONE_REGISTRY.register() +def build_trident_resnet_backbone(cfg, input_shape): + """ + Create a ResNet instance from config for TridentNet. + + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? 
+ norm = cfg.MODEL.RESNETS.NORM + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + + if freeze_at >= 1: + for p in stem.parameters(): + p.requires_grad = False + stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem) + + # fmt: off + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS + trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE + test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX + # fmt: on + assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) + + num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] + + stages = [] + + res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5} + out_stage_idx = [res_stage_idx[f] for f in out_features] + trident_stage_idx = res_stage_idx[trident_stage] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + dilation = res5_dilation if stage_idx == 5 else 1 + first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "first_stride": first_stride, + "in_channels": in_channels, + "bottleneck_channels": bottleneck_channels, + "out_channels": out_channels, + "num_groups": num_groups, + "norm": norm, + "stride_in_1x1": stride_in_1x1, + "dilation": dilation, + } + if stage_idx == trident_stage_idx: + assert not deform_on_per_stage[ + idx + ], "Not support deformable conv in Trident blocks yet." + stage_kargs["block_class"] = TridentBottleneckBlock + stage_kargs["num_branch"] = num_branch + stage_kargs["dilations"] = branch_dilations + stage_kargs["test_branch_idx"] = test_branch_idx + stage_kargs.pop("dilation") + elif deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = ( + make_trident_stage(**stage_kargs) + if stage_idx == trident_stage_idx + else make_stage(**stage_kargs) + ) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + + if freeze_at >= stage_idx: + for block in blocks: + block.freeze() + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features) diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_conv.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..7e2d5252bda5ebb2e9eee10af9c9a14fc72bb8fe --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_conv.py @@ -0,0 +1,107 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.modules.utils import _pair + +from detectron2.layers.wrappers import _NewEmptyTensorOp + + +class TridentConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + paddings=0, + dilations=1, + groups=1, + num_branch=1, + test_branch_idx=-1, + bias=False, + norm=None, + activation=None, + ): + super(TridentConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.num_branch = num_branch + self.stride = _pair(stride) + self.groups = groups + self.with_bias = bias + if isinstance(paddings, int): + paddings = [paddings] * self.num_branch + if isinstance(dilations, int): + dilations = [dilations] * self.num_branch + self.paddings = [_pair(padding) for padding in paddings] + self.dilations = [_pair(dilation) for dilation in dilations] + self.test_branch_idx = test_branch_idx + self.norm = norm + self.activation = activation + + assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1 + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, inputs): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + assert len(inputs) == num_branch + + if inputs[0].numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + inputs[0].shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [input[0].shape[0], self.weight.shape[0]] + output_shape + return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs] + + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation, self.groups) + for input, dilation, padding in zip(inputs, self.dilations, self.paddings) + ] + else: + outputs = [ + F.conv2d( + inputs[0], + self.weight, + self.bias, + self.stride, + self.paddings[self.test_branch_idx], + self.dilations[self.test_branch_idx], + self.groups, + ) + ] + + if self.norm is not None: + outputs = [self.norm(x) for x in outputs] + if self.activation is not None: + outputs = [self.activation(x) for x in outputs] + return outputs + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", num_branch=" + str(self.num_branch) + tmpstr += ", test_branch_idx=" + str(self.test_branch_idx) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", paddings=" + str(self.paddings) + tmpstr += ", dilations=" + str(self.dilations) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..65deb90977c525f9e42ea9b2581944832a9af47e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rcnn.py @@ -0,0 +1,116 @@ +# Copyright (c) 
Facebook, Inc. and its affiliates. All Rights Reserved +from detectron2.layers import batched_nms +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads +from detectron2.modeling.roi_heads.roi_heads import Res5ROIHeads +from detectron2.structures import Instances + + +def merge_branch_instances(instances, num_branch, nms_thresh, topk_per_image): + """ + Merge detection results from different branches of TridentNet. + Return detection results by applying non-maximum suppression (NMS) on bounding boxes + and keep the unsuppressed boxes and other instances (e.g mask) if any. + + Args: + instances (list[Instances]): A list of N * num_branch instances that store detection + results. Contain N images and each image has num_branch instances. + num_branch (int): Number of branches used for merging detection results for each image. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + results: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections after merging results from multiple + branches. + """ + if num_branch == 1: + return instances + + batch_size = len(instances) // num_branch + results = [] + for i in range(batch_size): + instance = Instances.cat([instances[i + batch_size * j] for j in range(num_branch)]) + + # Apply per-class NMS + keep = batched_nms( + instance.pred_boxes.tensor, instance.scores, instance.pred_classes, nms_thresh + ) + keep = keep[:topk_per_image] + result = instance[keep] + + results.append(result) + + return results + + +@ROI_HEADS_REGISTRY.register() +class TridentRes5ROIHeads(Res5ROIHeads): + """ + The TridentNet ROIHeads in a typical "C4" R-CNN model. + See :class:`Res5ROIHeads`. + """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`Res5ROIHeads.forward`. + """ + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + all_targets = targets * num_branch if targets is not None else None + pred_instances, losses = super().forward(images, features, proposals, all_targets) + del images, all_targets, targets + + if self.training: + return pred_instances, losses + else: + pred_instances = merge_branch_instances( + pred_instances, + num_branch, + self.box_predictor.test_nms_thresh, + self.box_predictor.test_topk_per_image, + ) + + return pred_instances, {} + + +@ROI_HEADS_REGISTRY.register() +class TridentStandardROIHeads(StandardROIHeads): + """ + The `StandardROIHeads` for TridentNet. + See :class:`StandardROIHeads`. + """ + + def __init__(self, cfg, input_shape): + super(TridentStandardROIHeads, self).__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`Res5ROIHeads.forward`. + """ + # Use 1 branch if using trident_fast during inference. + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + # Duplicate targets for all branches in TridentNet. 
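+        # Each branch produces its own set of proposals, so the ground-truth instances are
+        # repeated num_branch times to keep proposals and targets aligned per image.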
+ all_targets = targets * num_branch if targets is not None else None + pred_instances, losses = super().forward(images, features, proposals, all_targets) + del images, all_targets, targets + + if self.training: + return pred_instances, losses + else: + pred_instances = merge_branch_instances( + pred_instances, + num_branch, + self.box_predictor.test_nms_thresh, + self.box_predictor.test_topk_per_image, + ) + + return pred_instances, {} diff --git a/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rpn.py b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rpn.py new file mode 100644 index 0000000000000000000000000000000000000000..c30137f312232ccccd86182108949fbe34b97231 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/projects/TridentNet/tridentnet/trident_rpn.py @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY +from detectron2.modeling.proposal_generator.rpn import RPN +from detectron2.structures import ImageList + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class TridentRPN(RPN): + """ + Trident RPN subnetwork. + """ + + def __init__(self, cfg, input_shape): + super(TridentRPN, self).__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, gt_instances=None): + """ + See :class:`RPN.forward`. + """ + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + # Duplicate images and gt_instances for all branches in TridentNet. + all_images = ImageList( + torch.cat([images.tensor] * num_branch), images.image_sizes * num_branch + ) + all_gt_instances = gt_instances * num_branch if gt_instances is not None else None + + return super(TridentRPN, self).forward(all_images, features, all_gt_instances) diff --git a/preprocess/mhp_extension/detectron2/setup.cfg b/preprocess/mhp_extension/detectron2/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b09bba99ca88d5cc900d1cc7fb0947d0443522be --- /dev/null +++ b/preprocess/mhp_extension/detectron2/setup.cfg @@ -0,0 +1,26 @@ +[isort] +line_length=100 +multi_line_output=3 +include_trailing_comma=True +known_standard_library=numpy,setuptools,mock +skip=./datasets,docs +skip_glob=*/__init__.py +known_myself=detectron2 +known_third_party=fvcore,matplotlib,cv2,torch,torchvision,PIL,pycocotools,yacs,termcolor,cityscapesscripts,tabulate,tqdm,scipy,lvis,psutil,pkg_resources,caffe2,onnx +no_lines_before=STDLIB,THIRDPARTY +sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER +default_section=FIRSTPARTY + +[mypy] +python_version=3.6 +ignore_missing_imports = True +warn_unused_configs = True +disallow_untyped_defs = True +check_untyped_defs = True +warn_unused_ignores = True +warn_redundant_casts = True +show_column_numbers = True +follow_imports = silent +allow_redefinition = True +; Require all functions to be annotated +disallow_incomplete_defs = True diff --git a/preprocess/mhp_extension/detectron2/setup.py b/preprocess/mhp_extension/detectron2/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..a863fab1b7658a888df8623b57fe53673698cf60 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/setup.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import glob +import os +import shutil +from os import path +from setuptools import find_packages, setup +from typing import List +import torch +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + +torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] +assert torch_ver >= [1, 4], "Requires PyTorch >= 1.4" + + +def get_version(): + init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py") + init_py = open(init_py_path, "r").readlines() + version_line = [l.strip() for l in init_py if l.startswith("__version__")][0] + version = version_line.split("=")[-1].strip().strip("'\"") + + # The following is used to build release packages. + # Users should never use it. + suffix = os.getenv("D2_VERSION_SUFFIX", "") + version = version + suffix + if os.getenv("BUILD_NIGHTLY", "0") == "1": + from datetime import datetime + + date_str = datetime.today().strftime("%y%m%d") + version = version + ".dev" + date_str + + new_init_py = [l for l in init_py if not l.startswith("__version__")] + new_init_py.append('__version__ = "{}"\n'.format(version)) + with open(init_py_path, "w") as f: + f.write("".join(new_init_py)) + return version + + +def get_extensions(): + this_dir = path.dirname(path.abspath(__file__)) + extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc") + + main_source = path.join(extensions_dir, "vision.cpp") + sources = glob.glob(path.join(extensions_dir, "**", "*.cpp")) + source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob( + path.join(extensions_dir, "*.cu") + ) + + sources = [main_source] + sources + extension = CppExtension + + extra_compile_args = {"cxx": []} + define_macros = [] + + if ( + torch.cuda.is_available() and CUDA_HOME is not None and os.path.isdir(CUDA_HOME) + ) or os.getenv("FORCE_CUDA", "0") == "1": + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + extra_compile_args["nvcc"] = [ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ] + + # It's better if pytorch can do this by default .. + CC = os.environ.get("CC", None) + if CC is not None: + extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "detectron2._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + + return ext_modules + + +def get_model_zoo_configs() -> List[str]: + """ + Return a list of configs to include in package for model zoo. Copy over these configs inside + detectron2/model_zoo. + """ + + # Use absolute paths while symlinking. + source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs") + destination = path.join( + path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs" + ) + # Symlink the config directory inside package to have a cleaner pip install. + + # Remove stale symlink/directory from a previous build. + if path.exists(source_configs_dir): + if path.islink(destination): + os.unlink(destination) + elif path.isdir(destination): + shutil.rmtree(destination) + + if not path.exists(destination): + try: + os.symlink(source_configs_dir, destination) + except OSError: + # Fall back to copying if symlink fails: ex. on Windows. 
+ shutil.copytree(source_configs_dir, destination) + + config_paths = glob.glob("configs/**/*.yaml", recursive=True) + return config_paths + + +setup( + name="detectron2", + version=get_version(), + author="FAIR", + url="https://github.com/facebookresearch/detectron2", + description="Detectron2 is FAIR's next-generation research " + "platform for object detection and segmentation.", + packages=find_packages(exclude=("configs", "tests*")), + package_data={"detectron2.model_zoo": get_model_zoo_configs()}, + python_requires=">=3.6", + install_requires=[ + "termcolor>=1.1", + "Pillow", # you can also use pillow-simd for better performance + "yacs>=0.1.6", + "tabulate", + "cloudpickle", + "matplotlib", + "mock", + "tqdm>4.29.0", + "tensorboard", + "fvcore>=0.1.1", + "future", # used by caffe2 + "pydot", # used to save caffe2 SVGs + ], + extras_require={ + "all": ["shapely", "psutil"], + "dev": [ + "flake8==3.7.9", + "isort", + "black @ git+https://github.com/psf/black@673327449f86fce558adde153bb6cbe54bfebad2", + "flake8-bugbear", + "flake8-comprehensions", + ], + }, + ext_modules=get_extensions(), + cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, +) diff --git a/preprocess/mhp_extension/detectron2/tests/README.md b/preprocess/mhp_extension/detectron2/tests/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f560384045ab4f6bc2beabef1170308fca117eb3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/README.md @@ -0,0 +1,9 @@ +## Unit Tests + +To run the unittests, do: +``` +cd detectron2 +python -m unittest discover -v -s ./tests +``` + +There are also end-to-end inference & training tests, in [dev/run_*_tests.sh](../dev). diff --git a/preprocess/mhp_extension/detectron2/tests/__init__.py b/preprocess/mhp_extension/detectron2/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..168f9979a4623806934b0ff1102ac166704e7dec --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/preprocess/mhp_extension/detectron2/tests/layers/__init__.py b/preprocess/mhp_extension/detectron2/tests/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/preprocess/mhp_extension/detectron2/tests/layers/test_mask_ops.py b/preprocess/mhp_extension/detectron2/tests/layers/test_mask_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d180627354b6b9d8e0776d70f78e91ee5e530210 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/layers/test_mask_ops.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import contextlib +import io +import numpy as np +import unittest +from collections import defaultdict +import torch +import tqdm +from fvcore.common.benchmark import benchmark +from fvcore.common.file_io import PathManager +from pycocotools.coco import COCO +from tabulate import tabulate +from torch.nn import functional as F + +from detectron2.data import MetadataCatalog +from detectron2.layers.mask_ops import ( + pad_masks, + paste_mask_in_image_old, + paste_masks_in_image, + scale_boxes, +) +from detectron2.structures import BitMasks, Boxes, BoxMode, PolygonMasks +from detectron2.structures.masks import polygons_to_bitmask + + +def iou_between_full_image_bit_masks(a, b): + intersect = (a & b).sum() + union = (a | b).sum() + return intersect / union + + +def rasterize_polygons_with_grid_sample(full_image_bit_mask, box, mask_size, threshold=0.5): + x0, y0, x1, y1 = box[0], box[1], box[2], box[3] + + img_h, img_w = full_image_bit_mask.shape + + mask_y = np.arange(0.0, mask_size) + 0.5 # mask y sample coords in [0.5, mask_size - 0.5] + mask_x = np.arange(0.0, mask_size) + 0.5 # mask x sample coords in [0.5, mask_size - 0.5] + mask_y = mask_y / mask_size * (y1 - y0) + y0 + mask_x = mask_x / mask_size * (x1 - x0) + x0 + + mask_x = (mask_x - 0.5) / (img_w - 1) * 2 + -1 + mask_y = (mask_y - 0.5) / (img_h - 1) * 2 + -1 + gy, gx = torch.meshgrid(torch.from_numpy(mask_y), torch.from_numpy(mask_x)) + ind = torch.stack([gx, gy], dim=-1).to(dtype=torch.float32) + + full_image_bit_mask = torch.from_numpy(full_image_bit_mask) + mask = F.grid_sample( + full_image_bit_mask[None, None, :, :].to(dtype=torch.float32), + ind[None, :, :, :], + align_corners=True, + ) + + return mask[0, 0] >= threshold + + +class TestMaskCropPaste(unittest.TestCase): + def setUp(self): + json_file = MetadataCatalog.get("coco_2017_val_100").json_file + if not PathManager.isfile(json_file): + raise unittest.SkipTest("{} not found".format(json_file)) + with contextlib.redirect_stdout(io.StringIO()): + json_file = PathManager.get_local_path(json_file) + self.coco = COCO(json_file) + + def test_crop_paste_consistency(self): + """ + rasterize_polygons_within_box (used in training) + and + paste_masks_in_image (used in inference) + should be inverse operations to each other. + + This function runs several implementation of the above two operations and prints + the reconstruction error. 
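+        The mean IoU between the original and reconstructed masks is reported for each
+        (rasterize, paste) combination and asserted to stay above ~0.94 for the aligned paths.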
+ """ + + anns = self.coco.loadAnns(self.coco.getAnnIds(iscrowd=False)) # avoid crowd annotations + + selected_anns = anns[:100] + + ious = [] + for ann in tqdm.tqdm(selected_anns): + results = self.process_annotation(ann) + ious.append([k[2] for k in results]) + + ious = np.array(ious) + mean_ious = ious.mean(axis=0) + table = [] + res_dic = defaultdict(dict) + for row, iou in zip(results, mean_ious): + table.append((row[0], row[1], iou)) + res_dic[row[0]][row[1]] = iou + print(tabulate(table, headers=["rasterize", "paste", "iou"], tablefmt="simple")) + # assert that the reconstruction is good: + self.assertTrue(res_dic["polygon"]["aligned"] > 0.94) + self.assertTrue(res_dic["roialign"]["aligned"] > 0.95) + + def process_annotation(self, ann, mask_side_len=28): + # Parse annotation data + img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0] + height, width = img_info["height"], img_info["width"] + gt_polygons = [np.array(p, dtype=np.float64) for p in ann["segmentation"]] + gt_bbox = BoxMode.convert(ann["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width) + + # Run rasterize .. + torch_gt_bbox = torch.tensor(gt_bbox).to(dtype=torch.float32).reshape(-1, 4) + box_bitmasks = { + "polygon": PolygonMasks([gt_polygons]).crop_and_resize(torch_gt_bbox, mask_side_len)[0], + "gridsample": rasterize_polygons_with_grid_sample(gt_bit_mask, gt_bbox, mask_side_len), + "roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize( + torch_gt_bbox, mask_side_len + )[0], + } + + # Run paste .. + results = defaultdict(dict) + for k, box_bitmask in box_bitmasks.items(): + padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1) + scaled_boxes = scale_boxes(torch_gt_bbox, scale) + + r = results[k] + r["old"] = paste_mask_in_image_old( + padded_bitmask[0], scaled_boxes[0], height, width, threshold=0.5 + ) + r["aligned"] = paste_masks_in_image( + box_bitmask[None, :, :], Boxes(torch_gt_bbox), (height, width) + )[0] + + table = [] + for rasterize_method, r in results.items(): + for paste_method, mask in r.items(): + mask = np.asarray(mask) + iou = iou_between_full_image_bit_masks(gt_bit_mask.astype("uint8"), mask) + table.append((rasterize_method, paste_method, iou)) + return table + + def test_polygon_area(self): + # Draw polygon boxes + for d in [5.0, 10.0, 1000.0]: + polygon = PolygonMasks([[[0, 0, 0, d, d, d, d, 0]]]) + area = polygon.area()[0] + target = d ** 2 + self.assertEqual(area, target) + + # Draw polygon triangles + for d in [5.0, 10.0, 1000.0]: + polygon = PolygonMasks([[[0, 0, 0, d, d, d]]]) + area = polygon.area()[0] + target = d ** 2 / 2 + self.assertEqual(area, target) + + +def benchmark_paste(): + S = 800 + H, W = image_shape = (S, S) + N = 64 + torch.manual_seed(42) + masks = torch.rand(N, 28, 28) + + center = torch.rand(N, 2) * 600 + 100 + wh = torch.clamp(torch.randn(N, 2) * 40 + 200, min=50) + x0y0 = torch.clamp(center - wh * 0.5, min=0.0) + x1y1 = torch.clamp(center + wh * 0.5, max=S) + boxes = Boxes(torch.cat([x0y0, x1y1], axis=1)) + + def func(device, n=3): + m = masks.to(device=device) + b = boxes.to(device=device) + + def bench(): + for _ in range(n): + paste_masks_in_image(m, b, image_shape) + if device.type == "cuda": + torch.cuda.synchronize() + + return bench + + specs = [{"device": torch.device("cpu"), "n": 3}] + if torch.cuda.is_available(): + specs.append({"device": torch.device("cuda"), "n": 3}) + + benchmark(func, "paste_masks", specs, num_iters=10, warmup_iters=2) + + +if __name__ == "__main__": 
+ benchmark_paste() + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/layers/test_nms_rotated.py b/preprocess/mhp_extension/detectron2/tests/layers/test_nms_rotated.py new file mode 100644 index 0000000000000000000000000000000000000000..94b346c524d2c372273dfe992df045962b9605cd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/layers/test_nms_rotated.py @@ -0,0 +1,188 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from __future__ import absolute_import, division, print_function, unicode_literals +import numpy as np +import unittest +import torch +from torchvision import ops + +from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated + + +def nms_edit_distance(keep1, keep2): + """ + Compare the "keep" results of two NMS calls. + They are allowed to be different in terms of edit distance + due to floating point precision issues, e.g., + if a box happens to have an IoU of 0.5 with another box, + one implementation may choose to keep it while another may discard it. + """ + if torch.equal(keep1, keep2): + # they should be equal most of the time + return 0 + keep1, keep2 = tuple(keep1.cpu()), tuple(keep2.cpu()) + m, n = len(keep1), len(keep2) + + # edit distance with DP + f = [np.arange(n + 1), np.arange(n + 1)] + for i in range(m): + cur_row = i % 2 + other_row = (i + 1) % 2 + f[other_row][0] = i + 1 + for j in range(n): + f[other_row][j + 1] = ( + f[cur_row][j] + if keep1[i] == keep2[j] + else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1 + ) + return f[m % 2][n] + + +class TestNMSRotated(unittest.TestCase): + def reference_horizontal_nms(self, boxes, scores, iou_threshold): + """ + Args: + boxes (N, 4): boxes in corner-form (x1, y1, x2, y2). + scores (N): probability for each box. + iou_threshold: intersection over union threshold. 
+ Returns: + picked: a list of indexes of the kept boxes + """ + picked = [] + _, indexes = scores.sort(descending=True) + while len(indexes) > 0: + current = indexes[0] + picked.append(current.item()) + if len(indexes) == 1: + break + current_box = boxes[current, :] + indexes = indexes[1:] + rest_boxes = boxes[indexes, :] + iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1) + indexes = indexes[iou <= iou_threshold] + + return torch.as_tensor(picked) + + def _create_tensors(self, N): + boxes = torch.rand(N, 4) * 100 + # Note: the implementation of this function in torchvision is: + # boxes[:, 2:] += torch.rand(N, 2) * 100 + # but it does not guarantee non-negative widths/heights constraints: + # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: + boxes[:, 2:] += boxes[:, :2] + scores = torch.rand(N) + return boxes, scores + + def test_batched_nms_rotated_0_degree_cpu(self): + N = 2000 + num_classes = 50 + boxes, scores = self._create_tensors(N) + idxs = torch.randint(0, num_classes, (N,)) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" + for iou in [0.2, 0.5, 0.8]: + backup = boxes.clone() + keep_ref = batched_nms(boxes, scores, idxs, iou) + assert torch.allclose(boxes, backup), "boxes modified by batched_nms" + backup = rotated_boxes.clone() + keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou) + assert torch.allclose( + rotated_boxes, backup + ), "rotated_boxes modified by batched_nms_rotated" + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_batched_nms_rotated_0_degree_cuda(self): + N = 2000 + num_classes = 50 + boxes, scores = self._create_tensors(N) + idxs = torch.randint(0, num_classes, (N,)) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" + for iou in [0.2, 0.5, 0.8]: + backup = boxes.clone() + keep_ref = batched_nms(boxes.cuda(), scores.cuda(), idxs, iou) + self.assertTrue(torch.allclose(boxes, backup), "boxes modified by batched_nms") + backup = rotated_boxes.clone() + keep = batched_nms_rotated(rotated_boxes.cuda(), scores.cuda(), idxs, iou) + self.assertTrue( + torch.allclose(rotated_boxes, backup), + "rotated_boxes modified by batched_nms_rotated", + ) + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou)) + + def test_nms_rotated_0_degree_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.5]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, 
err_msg.format(iou)) + + def test_nms_rotated_90_degrees_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]: + # widths and heights are intentionally swapped here for 90 degrees case + # so that the reference horizontal nms could be used + rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1] + rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0] + + rotated_boxes[:, 4] = torch.ones(N) * 90 + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + def test_nms_rotated_180_degrees_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + rotated_boxes[:, 4] = torch.ones(N) * 180 + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_nms_rotated_0_degree_cuda(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS incompatible between CPU and CUDA for IoU={}" + + for iou in [0.2, 0.5, 0.8]: + r_cpu = nms_rotated(rotated_boxes, scores, iou) + r_cuda = nms_rotated(rotated_boxes.cuda(), scores.cuda(), iou) + + assert torch.equal(r_cpu, r_cuda.cpu()), err_msg.format(iou) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/layers/test_roi_align.py b/preprocess/mhp_extension/detectron2/tests/layers/test_roi_align.py new file mode 100644 index 0000000000000000000000000000000000000000..633d7c29c41b94b8a57c15aff728f23a71b535d1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/layers/test_roi_align.py @@ -0,0 +1,152 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import numpy as np +import unittest +import cv2 +import torch +from fvcore.common.benchmark import benchmark + +from detectron2.layers.roi_align import ROIAlign + + +class ROIAlignTest(unittest.TestCase): + def test_forward_output(self): + input = np.arange(25).reshape(5, 5).astype("float32") + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + + output = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=False) + output_correct = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=True) + + # without correction: + old_results = [ + [7.5, 8, 8.5, 9], + [10, 10.5, 11, 11.5], + [12.5, 13, 13.5, 14], + [15, 15.5, 16, 16.5], + ] + + # with 0.5 correction: + correct_results = [ + [4.5, 5.0, 5.5, 6.0], + [7.0, 7.5, 8.0, 8.5], + [9.5, 10.0, 10.5, 11.0], + [12.0, 12.5, 13.0, 13.5], + ] + # This is an upsampled version of [[6, 7], [11, 12]] + + self.assertTrue(np.allclose(output.flatten(), np.asarray(old_results).flatten())) + self.assertTrue( + np.allclose(output_correct.flatten(), np.asarray(correct_results).flatten()) + ) + + # Also see similar issues in tensorflow at + # https://github.com/tensorflow/tensorflow/issues/26278 + + def test_resize(self): + H, W = 30, 30 + input = np.random.rand(H, W).astype("float32") * 100 + box = [10, 10, 20, 20] + output = self._simple_roialign(input, box, (5, 5), aligned=True) + + input2x = cv2.resize(input, (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) + box2x = [x / 2 for x in box] + output2x = self._simple_roialign(input2x, box2x, (5, 5), aligned=True) + diff = np.abs(output2x - output) + self.assertTrue(diff.max() < 1e-4) + + def _simple_roialign(self, img, box, resolution, aligned=True): + """ + RoiAlign with scale 1.0 and 0 sample ratio. + """ + if isinstance(resolution, int): + resolution = (resolution, resolution) + op = ROIAlign(resolution, 1.0, 0, aligned=aligned) + input = torch.from_numpy(img[None, None, :, :].astype("float32")) + + rois = [0] + list(box) + rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32")) + output = op.forward(input, rois) + if torch.cuda.is_available(): + output_cuda = op.forward(input.cuda(), rois.cuda()).cpu() + self.assertTrue(torch.allclose(output, output_cuda)) + return output[0, 0] + + def _simple_roialign_with_grad(self, img, box, resolution, device): + if isinstance(resolution, int): + resolution = (resolution, resolution) + + op = ROIAlign(resolution, 1.0, 0, aligned=True) + input = torch.from_numpy(img[None, None, :, :].astype("float32")) + + rois = [0] + list(box) + rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32")) + input = input.to(device=device) + rois = rois.to(device=device) + input.requires_grad = True + output = op.forward(input, rois) + return input, output + + def test_empty_box(self): + img = np.random.rand(5, 5) + box = [3, 4, 5, 4] + o = self._simple_roialign(img, box, 7) + self.assertTrue(o.shape == (7, 7)) + self.assertTrue((o == 0).all()) + + for dev in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + input, output = self._simple_roialign_with_grad(img, box, 7, torch.device(dev)) + output.sum().backward() + self.assertTrue(torch.allclose(input.grad, torch.zeros_like(input))) + + def test_empty_batch(self): + input = torch.zeros(0, 3, 10, 10, dtype=torch.float32) + rois = torch.zeros(0, 5, dtype=torch.float32) + op = ROIAlign((7, 7), 1.0, 0, aligned=True) + output = op.forward(input, rois) + self.assertTrue(output.shape == (0, 3, 7, 7)) + + +def benchmark_roi_align(): + from 
detectron2 import _C + + def random_boxes(mean_box, stdev, N, maxsize): + ret = torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float) + ret.clamp_(min=0, max=maxsize) + return ret + + def func(N, C, H, W, nboxes_per_img): + input = torch.rand(N, C, H, W) + boxes = [] + batch_idx = [] + for k in range(N): + b = random_boxes([80, 80, 130, 130], 24, nboxes_per_img, H) + # try smaller boxes: + # b = random_boxes([100, 100, 110, 110], 4, nboxes_per_img, H) + boxes.append(b) + batch_idx.append(torch.zeros(nboxes_per_img, 1, dtype=torch.float32) + k) + boxes = torch.cat(boxes, axis=0) + batch_idx = torch.cat(batch_idx, axis=0) + boxes = torch.cat([batch_idx, boxes], axis=1) + + input = input.cuda() + boxes = boxes.cuda() + + def bench(): + _C.roi_align_forward(input, boxes, 1.0, 7, 7, 0, True) + torch.cuda.synchronize() + + return bench + + args = [dict(N=2, C=512, H=256, W=256, nboxes_per_img=500)] + benchmark(func, "cuda_roialign", args, num_iters=20, warmup_iters=1) + + +if __name__ == "__main__": + if torch.cuda.is_available(): + benchmark_roi_align() + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/layers/test_roi_align_rotated.py b/preprocess/mhp_extension/detectron2/tests/layers/test_roi_align_rotated.py new file mode 100644 index 0000000000000000000000000000000000000000..1915b59ff6774a54ee0e5dbfdbe0ecf89f2e2235 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/layers/test_roi_align_rotated.py @@ -0,0 +1,176 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import cv2 +import torch +from torch.autograd import Variable, gradcheck + +from detectron2.layers.roi_align import ROIAlign +from detectron2.layers.roi_align_rotated import ROIAlignRotated + +logger = logging.getLogger(__name__) + + +class ROIAlignRotatedTest(unittest.TestCase): + def _box_to_rotated_box(self, box, angle): + return [ + (box[0] + box[2]) / 2.0, + (box[1] + box[3]) / 2.0, + box[2] - box[0], + box[3] - box[1], + angle, + ] + + def _rot90(self, img, num): + num = num % 4 # note: -1 % 4 == 3 + for _ in range(num): + img = img.transpose(0, 1).flip(0) + return img + + def test_forward_output_0_90_180_270(self): + for i in range(4): + # i = 0, 1, 2, 3 corresponding to 0, 90, 180, 270 degrees + img = torch.arange(25, dtype=torch.float32).reshape(5, 5) + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + box = [1, 1, 3, 3] + rotated_box = self._box_to_rotated_box(box=box, angle=90 * i) + + result = self._simple_roi_align_rotated(img=img, box=rotated_box, resolution=(4, 4)) + + # Here's an explanation for 0 degree case: + # point 0 in the original input lies at [0.5, 0.5] + # (the center of bin [0, 1] x [0, 1]) + # point 1 in the original input lies at [1.5, 0.5], etc. 
+ # since the resolution is (4, 4) that divides [1, 3] x [1, 3] + # into 4 x 4 equal bins, + # the top-left bin is [1, 1.5] x [1, 1.5], and its center + # (1.25, 1.25) lies at the 3/4 position + # between point 0 and point 1, point 5 and point 6, + # point 0 and point 5, point 1 and point 6, so it can be calculated as + # 0.25*(0*0.25+1*0.75)+(5*0.25+6*0.75)*0.75 = 4.5 + result_expected = torch.tensor( + [ + [4.5, 5.0, 5.5, 6.0], + [7.0, 7.5, 8.0, 8.5], + [9.5, 10.0, 10.5, 11.0], + [12.0, 12.5, 13.0, 13.5], + ] + ) + # This is also an upsampled version of [[6, 7], [11, 12]] + + # When the box is rotated by 90 degrees CCW, + # the result would be rotated by 90 degrees CW, thus it's -i here + result_expected = self._rot90(result_expected, -i) + + assert torch.allclose(result, result_expected) + + def test_resize(self): + H, W = 30, 30 + input = torch.rand(H, W) * 100 + box = [10, 10, 20, 20] + rotated_box = self._box_to_rotated_box(box, angle=0) + output = self._simple_roi_align_rotated(img=input, box=rotated_box, resolution=(5, 5)) + + input2x = cv2.resize(input.numpy(), (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) + input2x = torch.from_numpy(input2x) + box2x = [x / 2 for x in box] + rotated_box2x = self._box_to_rotated_box(box2x, angle=0) + output2x = self._simple_roi_align_rotated(img=input2x, box=rotated_box2x, resolution=(5, 5)) + assert torch.allclose(output2x, output) + + def _simple_roi_align_rotated(self, img, box, resolution): + """ + RoiAlignRotated with scale 1.0 and 0 sample ratio. + """ + op = ROIAlignRotated(output_size=resolution, spatial_scale=1.0, sampling_ratio=0) + input = img[None, None, :, :] + + rois = [0] + list(box) + rois = torch.tensor(rois, dtype=torch.float32)[None, :] + result_cpu = op.forward(input, rois) + if torch.cuda.is_available(): + result_cuda = op.forward(input.cuda(), rois.cuda()) + assert torch.allclose(result_cpu, result_cuda.cpu()) + return result_cpu[0, 0] + + def test_empty_box(self): + img = torch.rand(5, 5) + out = self._simple_roi_align_rotated(img, [2, 3, 0, 0, 0], (7, 7)) + self.assertTrue((out == 0).all()) + + def test_roi_align_rotated_gradcheck_cpu(self): + dtype = torch.float64 + device = torch.device("cpu") + roi_align_rotated_op = ROIAlignRotated( + output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1 + ).to(dtype=dtype, device=device) + x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) + # roi format is (batch index, x_center, y_center, width, height, angle) + rois = torch.tensor( + [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], + dtype=dtype, + device=device, + ) + + def func(input): + return roi_align_rotated_op(input, rois) + + assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU" + assert gradcheck(func, (x.transpose(2, 3),)), "gradcheck failed for RoIAlignRotated CPU" + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_roi_align_rotated_gradient_cuda(self): + """ + Compute gradients for ROIAlignRotated with multiple bounding boxes on the GPU, + and compare the result with ROIAlign + """ + # torch.manual_seed(123) + dtype = torch.float64 + device = torch.device("cuda") + pool_h, pool_w = (5, 5) + + roi_align = ROIAlign(output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2).to( + device=device + ) + + roi_align_rotated = ROIAlignRotated( + output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2 + ).to(device=device) + + x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) + # x_rotated 
= x.clone() won't work (will lead to grad_fun=CloneBackward)! + x_rotated = Variable(x.data.clone(), requires_grad=True) + + # roi_rotated format is (batch index, x_center, y_center, width, height, angle) + rois_rotated = torch.tensor( + [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], + dtype=dtype, + device=device, + ) + + y_rotated = roi_align_rotated(x_rotated, rois_rotated) + s_rotated = y_rotated.sum() + s_rotated.backward() + + # roi format is (batch index, x1, y1, x2, y2) + rois = torch.tensor( + [[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9]], dtype=dtype, device=device + ) + + y = roi_align(x, rois) + s = y.sum() + s.backward() + + assert torch.allclose( + x.grad, x_rotated.grad + ), "gradients for ROIAlign and ROIAlignRotated mismatch on CUDA" + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/__init__.py b/preprocess/mhp_extension/detectron2/tests/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_anchor_generator.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..bc14f0279ee682040082e51f96a41a267269d6ce --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_anchor_generator.py @@ -0,0 +1,121 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.layers import ShapeSpec +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator + +logger = logging.getLogger(__name__) + + +class TestAnchorGenerator(unittest.TestCase): + def test_default_anchor_generator(self): + cfg = get_cfg() + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] + + anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)]) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + anchors = anchor_generator([features["stage3"]]) + expected_anchor_tensor = torch.tensor( + [ + [-32.0, -8.0, 32.0, 8.0], + [-16.0, -16.0, 16.0, 16.0], + [-8.0, -32.0, 8.0, 32.0], + [-64.0, -16.0, 64.0, 16.0], + [-32.0, -32.0, 32.0, 32.0], + [-16.0, -64.0, 16.0, 64.0], + [-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4) + [-12.0, -16.0, 20.0, 16.0], + [-4.0, -32.0, 12.0, 32.0], + [-60.0, -16.0, 68.0, 16.0], + [-28.0, -32.0, 36.0, 32.0], + [-12.0, -64.0, 20.0, 64.0], + ] + ) + + assert torch.allclose(anchors[0].tensor, expected_anchor_tensor) + + def test_default_anchor_generator_centered(self): + # test explicit args + anchor_generator = DefaultAnchorGenerator( + sizes=[32, 64], aspect_ratios=[0.25, 1, 4], strides=[4] + ) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + expected_anchor_tensor = torch.tensor( + [ + [-30.0, -6.0, 34.0, 10.0], + [-14.0, -14.0, 18.0, 18.0], + [-6.0, -30.0, 10.0, 34.0], + [-62.0, -14.0, 66.0, 18.0], + [-30.0, -30.0, 34.0, 34.0], + [-14.0, -62.0, 18.0, 66.0], + [-26.0, -6.0, 38.0, 10.0], + [-10.0, -14.0, 22.0, 18.0], + [-2.0, -30.0, 14.0, 34.0], + [-58.0, -14.0, 70.0, 18.0], + [-26.0, -30.0, 38.0, 34.0], + [-10.0, -62.0, 22.0, 66.0], + ] + ) + + anchors = 
anchor_generator([features["stage3"]]) + assert torch.allclose(anchors[0].tensor, expected_anchor_tensor) + + # doesn't work yet + # anchors = torch.jit.script(anchor_generator)([features["stage3"]]) + # assert torch.allclose(anchors[0].tensor, expected_anchor_tensor) + + def test_rrpn_anchor_generator(self): + cfg = get_cfg() + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] + cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [0, 45] # test single list[float] + anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)]) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + anchors = anchor_generator([features["stage3"]]) + expected_anchor_tensor = torch.tensor( + [ + [0.0, 0.0, 64.0, 16.0, 0.0], + [0.0, 0.0, 64.0, 16.0, 45.0], + [0.0, 0.0, 32.0, 32.0, 0.0], + [0.0, 0.0, 32.0, 32.0, 45.0], + [0.0, 0.0, 16.0, 64.0, 0.0], + [0.0, 0.0, 16.0, 64.0, 45.0], + [0.0, 0.0, 128.0, 32.0, 0.0], + [0.0, 0.0, 128.0, 32.0, 45.0], + [0.0, 0.0, 64.0, 64.0, 0.0], + [0.0, 0.0, 64.0, 64.0, 45.0], + [0.0, 0.0, 32.0, 128.0, 0.0], + [0.0, 0.0, 32.0, 128.0, 45.0], + [4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4) + [4.0, 0.0, 64.0, 16.0, 45.0], + [4.0, 0.0, 32.0, 32.0, 0.0], + [4.0, 0.0, 32.0, 32.0, 45.0], + [4.0, 0.0, 16.0, 64.0, 0.0], + [4.0, 0.0, 16.0, 64.0, 45.0], + [4.0, 0.0, 128.0, 32.0, 0.0], + [4.0, 0.0, 128.0, 32.0, 45.0], + [4.0, 0.0, 64.0, 64.0, 0.0], + [4.0, 0.0, 64.0, 64.0, 45.0], + [4.0, 0.0, 32.0, 128.0, 0.0], + [4.0, 0.0, 32.0, 128.0, 45.0], + ] + ) + + assert torch.allclose(anchors[0].tensor, expected_anchor_tensor) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_box2box_transform.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_box2box_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..9d124d79fc0e17f268f6b5b50fcb8f8dfad59368 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_box2box_transform.py @@ -0,0 +1,64 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import torch + +from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated + +logger = logging.getLogger(__name__) + + +def random_boxes(mean_box, stdev, N): + return torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float) + + +class TestBox2BoxTransform(unittest.TestCase): + def test_reconstruction(self): + weights = (5, 5, 10, 10) + b2b_tfm = Box2BoxTransform(weights=weights) + src_boxes = random_boxes([10, 10, 20, 20], 1, 10) + dst_boxes = random_boxes([10, 10, 20, 20], 1, 10) + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes) + assert torch.allclose(dst_boxes, dst_boxes_reconstructed) + + +def random_rotated_boxes(mean_box, std_length, std_angle, N): + return torch.cat( + [torch.rand(N, 4) * std_length, torch.rand(N, 1) * std_angle], dim=1 + ) + torch.tensor(mean_box, dtype=torch.float) + + +class TestBox2BoxTransformRotated(unittest.TestCase): + def test_reconstruction(self): + weights = (5, 5, 10, 10, 1) + b2b_transform = Box2BoxTransformRotated(weights=weights) + src_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10) + dst_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10) + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_transform.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_transform.apply_deltas(deltas, src_boxes) + assert torch.allclose(dst_boxes[:, :4], dst_boxes_reconstructed[:, :4], atol=1e-5) + # angle difference has to be normalized + assert torch.allclose( + (dst_boxes[:, 4] - dst_boxes_reconstructed[:, 4] + 180.0) % 360.0 - 180.0, + torch.zeros_like(dst_boxes[:, 4]), + atol=1e-4, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_fast_rcnn.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_fast_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..70b64d3db497bac52e127d02a543b14d2e37e8eb --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_fast_rcnn.py @@ -0,0 +1,106 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import torch + +from detectron2.layers import ShapeSpec +from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated +from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers +from detectron2.modeling.roi_heads.rotated_fast_rcnn import RotatedFastRCNNOutputLayers +from detectron2.structures import Boxes, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class FastRCNNTest(unittest.TestCase): + def test_fast_rcnn(self): + torch.manual_seed(132) + + box_head_output_size = 8 + + box_predictor = FastRCNNOutputLayers( + ShapeSpec(channels=box_head_output_size), + box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), + num_classes=5, + ) + feature_pooled = torch.rand(2, box_head_output_size) + predictions = box_predictor(feature_pooled) + + proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32) + gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + proposal = Instances((10, 10)) + proposal.proposal_boxes = Boxes(proposal_boxes) + proposal.gt_boxes = Boxes(gt_boxes) + proposal.gt_classes = torch.tensor([1, 2]) + + with EventStorage(): # capture events in a new storage to discard them + losses = box_predictor.losses(predictions, [proposal]) + + expected_losses = { + "loss_cls": torch.tensor(1.7951188087), + "loss_box_reg": torch.tensor(4.0357131958), + } + for name in expected_losses.keys(): + assert torch.allclose(losses[name], expected_losses[name]) + + def test_fast_rcnn_empty_batch(self, device="cpu"): + box_predictor = FastRCNNOutputLayers( + ShapeSpec(channels=10), + box2box_transform=Box2BoxTransform(weights=(10, 10, 5, 5)), + num_classes=8, + ).to(device=device) + + logits = torch.randn(0, 100, requires_grad=True, device=device) + deltas = torch.randn(0, 4, requires_grad=True, device=device) + losses = box_predictor.losses([logits, deltas], []) + for value in losses.values(): + self.assertTrue(torch.allclose(value, torch.zeros_like(value))) + sum(losses.values()).backward() + self.assertTrue(logits.grad is not None) + self.assertTrue(deltas.grad is not None) + + predictions, _ = box_predictor.inference([logits, deltas], []) + self.assertEqual(len(predictions), 0) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_fast_rcnn_empty_batch_cuda(self): + self.test_fast_rcnn_empty_batch(device=torch.device("cuda")) + + def test_fast_rcnn_rotated(self): + torch.manual_seed(132) + box_head_output_size = 8 + + box_predictor = RotatedFastRCNNOutputLayers( + ShapeSpec(channels=box_head_output_size), + box2box_transform=Box2BoxTransformRotated(weights=(10, 10, 5, 5, 1)), + num_classes=5, + ) + feature_pooled = torch.rand(2, box_head_output_size) + predictions = box_predictor(feature_pooled) + proposal_boxes = torch.tensor( + [[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32 + ) + gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) + proposal = Instances((10, 10)) + proposal.proposal_boxes = RotatedBoxes(proposal_boxes) + proposal.gt_boxes = RotatedBoxes(gt_boxes) + proposal.gt_classes = torch.tensor([1, 2]) + + with EventStorage(): # capture events in a new storage to discard them + losses = box_predictor.losses(predictions, [proposal]) + + # Note: the expected losses are slightly different even if + # the boxes are essentially the same as in the FastRCNNOutput test, because + # 
bbox_pred in FastRCNNOutputLayers have different Linear layers/initialization + # between the two cases. + expected_losses = { + "loss_cls": torch.tensor(1.7920907736), + "loss_box_reg": torch.tensor(4.0410838127), + } + for name in expected_losses.keys(): + assert torch.allclose(losses[name], expected_losses[name]) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_model_e2e.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_model_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..95fe6a09fd15f877544392ddeccd9906025b0fdd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_model_e2e.py @@ -0,0 +1,154 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + + +import unittest +import torch + +import detectron2.model_zoo as model_zoo +from detectron2.config import get_cfg +from detectron2.modeling import build_model +from detectron2.structures import BitMasks, Boxes, ImageList, Instances +from detectron2.utils.events import EventStorage + + +def get_model_zoo(config_path): + """ + Like model_zoo.get, but do not load any weights (even pretrained) + """ + cfg_file = model_zoo.get_config_file(config_path) + cfg = get_cfg() + cfg.merge_from_file(cfg_file) + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + return build_model(cfg) + + +def create_model_input(img, inst=None): + if inst is not None: + return {"image": img, "instances": inst} + else: + return {"image": img} + + +def get_empty_instance(h, w): + inst = Instances((h, w)) + inst.gt_boxes = Boxes(torch.rand(0, 4)) + inst.gt_classes = torch.tensor([]).to(dtype=torch.int64) + inst.gt_masks = BitMasks(torch.rand(0, h, w)) + return inst + + +def get_regular_bitmask_instances(h, w): + inst = Instances((h, w)) + inst.gt_boxes = Boxes(torch.rand(3, 4)) + inst.gt_boxes.tensor[:, 2:] += inst.gt_boxes.tensor[:, :2] + inst.gt_classes = torch.tensor([3, 4, 5]).to(dtype=torch.int64) + inst.gt_masks = BitMasks((torch.rand(3, h, w) > 0.5)) + return inst + + +class ModelE2ETest: + def setUp(self): + torch.manual_seed(43) + self.model = get_model_zoo(self.CONFIG_PATH) + + def _test_eval(self, input_sizes): + inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] + self.model.eval() + self.model(inputs) + + def _test_train(self, input_sizes, instances): + assert len(input_sizes) == len(instances) + inputs = [ + create_model_input(torch.rand(3, s[0], s[1]), inst) + for s, inst in zip(input_sizes, instances) + ] + self.model.train() + with EventStorage(): + losses = self.model(inputs) + sum(losses.values()).backward() + del losses + + def _inf_tensor(self, *shape): + return 1.0 / torch.zeros(*shape, device=self.model.device) + + def _nan_tensor(self, *shape): + return torch.zeros(*shape, device=self.model.device).fill_(float("nan")) + + def test_empty_data(self): + instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)] + self._test_eval([(200, 250), (200, 249)]) + self._test_train([(200, 250), (200, 249)], instances) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_eval_tocpu(self): + model = get_model_zoo(self.CONFIG_PATH).cpu() + model.eval() + input_sizes = [(200, 250), (200, 249)] + inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] + model(inputs) + + +class MaskRCNNE2ETest(ModelE2ETest, unittest.TestCase): + CONFIG_PATH = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + def 
test_half_empty_data(self): + instances = [get_empty_instance(200, 250), get_regular_bitmask_instances(200, 249)] + self._test_train([(200, 250), (200, 249)], instances) + + # This test is flaky because in some environment the output features are zero due to relu + # def test_rpn_inf_nan_data(self): + # self.model.eval() + # for tensor in [self._inf_tensor, self._nan_tensor]: + # images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) + # features = { + # "p2": tensor(1, 256, 256, 256), + # "p3": tensor(1, 256, 128, 128), + # "p4": tensor(1, 256, 64, 64), + # "p5": tensor(1, 256, 32, 32), + # "p6": tensor(1, 256, 16, 16), + # } + # props, _ = self.model.proposal_generator(images, features) + # self.assertEqual(len(props[0]), 0) + + def test_roiheads_inf_nan_data(self): + self.model.eval() + for tensor in [self._inf_tensor, self._nan_tensor]: + images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) + features = { + "p2": tensor(1, 256, 256, 256), + "p3": tensor(1, 256, 128, 128), + "p4": tensor(1, 256, 64, 64), + "p5": tensor(1, 256, 32, 32), + "p6": tensor(1, 256, 16, 16), + } + props = [Instances((510, 510))] + props[0].proposal_boxes = Boxes([[10, 10, 20, 20]]).to(device=self.model.device) + props[0].objectness_logits = torch.tensor([1.0]).reshape(1, 1) + det, _ = self.model.roi_heads(images, features, props) + self.assertEqual(len(det[0]), 0) + + +class RetinaNetE2ETest(ModelE2ETest, unittest.TestCase): + CONFIG_PATH = "COCO-Detection/retinanet_R_50_FPN_1x.yaml" + + def test_inf_nan_data(self): + self.model.eval() + self.model.score_threshold = -999999999 + for tensor in [self._inf_tensor, self._nan_tensor]: + images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) + features = [ + tensor(1, 256, 128, 128), + tensor(1, 256, 64, 64), + tensor(1, 256, 32, 32), + tensor(1, 256, 16, 16), + tensor(1, 256, 8, 8), + ] + anchors = self.model.anchor_generator(features) + box_cls, box_delta = self.model.head(features) + box_cls = [tensor(*k.shape) for k in box_cls] + box_delta = [tensor(*k.shape) for k in box_delta] + det = self.model.inference(box_cls, box_delta, anchors, images.image_sizes) + # all predictions (if any) are infinite or nan + if len(det[0]): + self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0) diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_roi_heads.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0630353ca1c2fbb33d2dee7ddb922d57cad3cd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_roi_heads.py @@ -0,0 +1,108 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.proposal_generator.build import build_proposal_generator +from detectron2.modeling.roi_heads import build_roi_heads +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class ROIHeadsTest(unittest.TestCase): + def test_roi_heads(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.ROI_HEADS.NAME = "StandardROIHeads" + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5) + backbone = build_backbone(cfg) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + + image_shape = (15, 15) + gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + gt_instance0 = Instances(image_shape) + gt_instance0.gt_boxes = Boxes(gt_boxes0) + gt_instance0.gt_classes = torch.tensor([2, 1]) + gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32) + gt_instance1 = Instances(image_shape) + gt_instance1.gt_boxes = Boxes(gt_boxes1) + gt_instance1.gt_classes = torch.tensor([1, 2]) + gt_instances = [gt_instance0, gt_instance1] + + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + roi_heads = build_roi_heads(cfg, backbone.output_shape()) + + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + _, detector_losses = roi_heads(images, features, proposals, gt_instances) + + expected_losses = { + "loss_cls": torch.tensor(4.4236516953), + "loss_box_reg": torch.tensor(0.0091214813), + } + for name in expected_losses.keys(): + self.assertTrue(torch.allclose(detector_losses[name], expected_losses[name])) + + def test_rroi_heads(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" + cfg.MODEL.ROI_HEADS.NAME = "RROIHeads" + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1) + backbone = build_backbone(cfg) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + + image_shape = (15, 15) + gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32) + gt_instance0 = Instances(image_shape) + gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0) + gt_instance0.gt_classes = torch.tensor([2, 1]) + gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]], dtype=torch.float32) + gt_instance1 = Instances(image_shape) + gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1) + gt_instance1.gt_classes = torch.tensor([1, 2]) + gt_instances = [gt_instance0, gt_instance1] + + 
proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + roi_heads = build_roi_heads(cfg, backbone.output_shape()) + + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + _, detector_losses = roi_heads(images, features, proposals, gt_instances) + + expected_losses = { + "loss_cls": torch.tensor(4.381618499755859), + "loss_box_reg": torch.tensor(0.0011829272843897343), + } + for name in expected_losses.keys(): + err_msg = "detector_losses[{}] = {}, expected losses = {}".format( + name, detector_losses[name], expected_losses[name] + ) + self.assertTrue(torch.allclose(detector_losses[name], expected_losses[name]), err_msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_roi_pooler.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_roi_pooler.py new file mode 100644 index 0000000000000000000000000000000000000000..9aa3825c0196e4a6d89162e3d7c797e3d77b23bd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_roi_pooler.py @@ -0,0 +1,85 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import torch + +from detectron2.modeling.poolers import ROIPooler +from detectron2.structures import Boxes, RotatedBoxes + +logger = logging.getLogger(__name__) + + +class TestROIPooler(unittest.TestCase): + def _rand_boxes(self, num_boxes, x_max, y_max): + coords = torch.rand(num_boxes, 4) + coords[:, 0] *= x_max + coords[:, 1] *= y_max + coords[:, 2] *= x_max + coords[:, 3] *= y_max + boxes = torch.zeros(num_boxes, 4) + boxes[:, 0] = torch.min(coords[:, 0], coords[:, 2]) + boxes[:, 1] = torch.min(coords[:, 1], coords[:, 3]) + boxes[:, 2] = torch.max(coords[:, 0], coords[:, 2]) + boxes[:, 3] = torch.max(coords[:, 1], coords[:, 3]) + return boxes + + def _test_roialignv2_roialignrotated_match(self, device): + pooler_resolution = 14 + canonical_level = 4 + canonical_scale_factor = 2 ** canonical_level + pooler_scales = (1.0 / canonical_scale_factor,) + sampling_ratio = 0 + + N, C, H, W = 2, 4, 10, 8 + N_rois = 10 + std = 11 + mean = 0 + feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean + + features = [feature.to(device)] + + rois = [] + rois_rotated = [] + for _ in range(N): + boxes = self._rand_boxes( + num_boxes=N_rois, x_max=W * canonical_scale_factor, y_max=H * canonical_scale_factor + ) + + rotated_boxes = torch.zeros(N_rois, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + rois.append(Boxes(boxes).to(device)) + rois_rotated.append(RotatedBoxes(rotated_boxes).to(device)) + + roialignv2_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignV2", + ) + + roialignv2_out = roialignv2_pooler(features, rois) + + roialignrotated_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignRotated", + ) + + roialignrotated_out = roialignrotated_pooler(features, rois_rotated) + + self.assertTrue(torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4)) + + def test_roialignv2_roialignrotated_match_cpu(self): + self._test_roialignv2_roialignrotated_match(device="cpu") + + @unittest.skipIf(not 
torch.cuda.is_available(), "CUDA not available") + def test_roialignv2_roialignrotated_match_cuda(self): + self._test_roialignv2_roialignrotated_match(device="cuda") + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/modeling/test_rpn.py b/preprocess/mhp_extension/detectron2/tests/modeling/test_rpn.py new file mode 100644 index 0000000000000000000000000000000000000000..967d2102b85f2d66e3f0b32b31805c4ac01afa0c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/modeling/test_rpn.py @@ -0,0 +1,234 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.proposal_generator.build import build_proposal_generator +from detectron2.modeling.proposal_generator.rpn_outputs import find_top_rpn_proposals +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class RPNTest(unittest.TestCase): + def test_rpn(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1) + backbone = build_backbone(cfg) + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + image_shape = (15, 15) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + gt_instances = Instances(image_shape) + gt_instances.gt_boxes = Boxes(gt_boxes) + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator( + images, features, [gt_instances[0], gt_instances[1]] + ) + + expected_losses = { + "loss_rpn_cls": torch.tensor(0.0804563984), + "loss_rpn_loc": torch.tensor(0.0990132466), + } + for name in expected_losses.keys(): + err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( + name, proposal_losses[name], expected_losses[name] + ) + self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) + + expected_proposal_boxes = [ + Boxes(torch.tensor([[0, 0, 10, 10], [7.3365392685, 0, 10, 10]])), + Boxes( + torch.tensor( + [ + [0, 0, 30, 20], + [0, 0, 16.7862777710, 13.1362524033], + [0, 0, 30, 13.3173446655], + [0, 0, 10.8602609634, 20], + [7.7165775299, 0, 27.3875980377, 20], + ] + ) + ), + ] + + expected_objectness_logits = [ + torch.tensor([0.1225359365, -0.0133192837]), + torch.tensor([0.1415634006, 0.0989848152, 0.0565387346, -0.0072308783, -0.0428492837]), + ] + + for proposal, expected_proposal_box, im_size, expected_objectness_logit in zip( + proposals, expected_proposal_boxes, image_sizes, expected_objectness_logits + ): + self.assertEqual(len(proposal), len(expected_proposal_box)) + self.assertEqual(proposal.image_size, im_size) + self.assertTrue( + torch.allclose(proposal.proposal_boxes.tensor, expected_proposal_box.tensor) + ) + self.assertTrue(torch.allclose(proposal.objectness_logits, expected_objectness_logit)) + + def test_rrpn(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" + 
cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]] + cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]] + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" + backbone = build_backbone(cfg) + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + image_shape = (15, 15) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) + gt_instances = Instances(image_shape) + gt_instances.gt_boxes = RotatedBoxes(gt_boxes) + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator( + images, features, [gt_instances[0], gt_instances[1]] + ) + + expected_losses = { + "loss_rpn_cls": torch.tensor(0.043263837695121765), + "loss_rpn_loc": torch.tensor(0.14432406425476074), + } + for name in expected_losses.keys(): + err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( + name, proposal_losses[name], expected_losses[name] + ) + self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) + + expected_proposal_boxes = [ + RotatedBoxes( + torch.tensor( + [ + [0.60189795, 1.24095452, 61.98131943, 18.03621292, -4.07244873], + [15.64940453, 1.69624567, 59.59749603, 16.34339333, 2.62692475], + [-3.02982378, -2.69752932, 67.90952301, 59.62455750, 59.97010040], + [16.71863365, 1.98309708, 35.61507797, 32.81484985, 62.92267227], + [0.49432933, -7.92979717, 67.77606201, 62.93098450, -1.85656738], + [8.00880814, 1.36017394, 121.81007385, 32.74150467, 50.44297409], + [16.44299889, -4.82221127, 63.39775848, 61.22503662, 54.12270737], + [5.00000000, 5.00000000, 10.00000000, 10.00000000, -0.76943970], + [17.64130402, -0.98095351, 61.40377808, 16.28918839, 55.53118134], + [0.13016054, 4.60568953, 35.80157471, 32.30180359, 62.52872086], + [-4.26460743, 0.39604485, 124.30079651, 31.84611320, -1.58203125], + [7.52815342, -0.91636634, 62.39784622, 15.45565224, 60.79549789], + ] + ) + ), + RotatedBoxes( + torch.tensor( + [ + [0.07734215, 0.81635046, 65.33510590, 17.34688377, -1.51821899], + [-3.41833067, -3.11320257, 64.17595673, 60.55617905, 58.27033234], + [20.67383385, -6.16561556, 63.60531998, 62.52315903, 54.85546494], + [15.00000000, 10.00000000, 30.00000000, 20.00000000, -0.18218994], + [9.22646523, -6.84775209, 62.09895706, 65.46472931, -2.74307251], + [15.00000000, 4.93451595, 30.00000000, 9.86903191, -0.60272217], + [8.88342094, 2.65560246, 120.95362854, 32.45022202, 55.75970078], + [16.39088631, 2.33887148, 34.78761292, 35.61492920, 60.81977463], + [9.78298569, 10.00000000, 19.56597137, 20.00000000, -0.86660767], + [1.28576660, 5.49873352, 34.93610382, 33.22600174, 60.51599884], + [17.58912468, -1.63270092, 62.96052551, 16.45713997, 52.91245270], + [5.64749718, -1.90428460, 62.37649155, 16.19474792, 61.09543991], + [0.82255805, 2.34931135, 118.83985901, 32.83671188, 56.50753784], + [-5.33874989, 1.64404404, 125.28501892, 33.35424042, -2.80731201], + ] + ) + ), + ] + + expected_objectness_logits = [ + torch.tensor( + [ + 0.10111768, + 0.09112845, + 0.08466332, + 0.07589971, + 0.06650183, + 0.06350251, + 0.04299347, + 0.01864817, + 0.00986163, + 0.00078543, + -0.04573630, + 
-0.04799230, + ] + ), + torch.tensor( + [ + 0.11373727, + 0.09377633, + 0.05281663, + 0.05143715, + 0.04040275, + 0.03250912, + 0.01307789, + 0.01177734, + 0.00038105, + -0.00540255, + -0.01194804, + -0.01461012, + -0.03061717, + -0.03599222, + ] + ), + ] + + torch.set_printoptions(precision=8, sci_mode=False) + + for proposal, expected_proposal_box, im_size, expected_objectness_logit in zip( + proposals, expected_proposal_boxes, image_sizes, expected_objectness_logits + ): + self.assertEqual(len(proposal), len(expected_proposal_box)) + self.assertEqual(proposal.image_size, im_size) + # It seems that there's some randomness in the result across different machines: + # This test can be run on a local machine for 100 times with exactly the same result, + # However, a different machine might produce slightly different results, + # thus the atol here. + err_msg = "computed proposal boxes = {}, expected {}".format( + proposal.proposal_boxes.tensor, expected_proposal_box.tensor + ) + self.assertTrue( + torch.allclose( + proposal.proposal_boxes.tensor, expected_proposal_box.tensor, atol=1e-5 + ), + err_msg, + ) + + err_msg = "computed objectness logits = {}, expected {}".format( + proposal.objectness_logits, expected_objectness_logit + ) + self.assertTrue( + torch.allclose(proposal.objectness_logits, expected_objectness_logit, atol=1e-5), + err_msg, + ) + + def test_rpn_proposals_inf(self): + N, Hi, Wi, A = 3, 3, 3, 3 + proposals = [torch.rand(N, Hi * Wi * A, 4)] + pred_logits = [torch.rand(N, Hi * Wi * A)] + pred_logits[0][1][3:5].fill_(float("inf")) + images = ImageList.from_tensors([torch.rand(3, 10, 10)] * 3) + find_top_rpn_proposals(proposals, pred_logits, images, 0.5, 1000, 1000, 0, False) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/structures/__init__.py b/preprocess/mhp_extension/detectron2/tests/structures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/preprocess/mhp_extension/detectron2/tests/structures/test_boxes.py b/preprocess/mhp_extension/detectron2/tests/structures/test_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..4d33c3bf9b7471c7e4382bc9e66c26e1fb60e29f --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/structures/test_boxes.py @@ -0,0 +1,182 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import json +import math +import numpy as np +import unittest +import torch + +from detectron2.structures import Boxes, BoxMode, pairwise_iou + + +class TestBoxMode(unittest.TestCase): + def _convert_xy_to_wh(self, x): + return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + + def _convert_xywha_to_xyxy(self, x): + return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS) + + def _convert_xywh_to_xywha(self, x): + return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) + + def test_box_convert_list(self): + for tp in [list, tuple]: + box = tp([5.0, 5.0, 10.0, 10.0]) + output = self._convert_xy_to_wh(box) + self.assertIsInstance(output, tp) + self.assertIsInstance(output[0], float) + self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0])) + + with self.assertRaises(Exception): + self._convert_xy_to_wh([box]) + + def test_box_convert_array(self): + box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]]) + output = self._convert_xy_to_wh(box) + self.assertEqual(output.dtype, box.dtype) + self.assertEqual(output.shape, box.shape) + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + def test_box_convert_cpu_tensor(self): + box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) + output = self._convert_xy_to_wh(box) + self.assertEqual(output.dtype, box.dtype) + self.assertEqual(output.shape, box.shape) + output = output.numpy() + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_box_convert_cuda_tensor(self): + box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda() + output = self._convert_xy_to_wh(box) + self.assertEqual(output.dtype, box.dtype) + self.assertEqual(output.shape, box.shape) + self.assertEqual(output.device, box.device) + output = output.cpu().numpy() + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + def test_box_convert_xywha_to_xyxy_list(self): + for tp in [list, tuple]: + box = tp([50, 50, 30, 20, 0]) + output = self._convert_xywha_to_xyxy(box) + self.assertIsInstance(output, tp) + self.assertEqual(output, tp([35, 40, 65, 60])) + + with self.assertRaises(Exception): + self._convert_xywha_to_xyxy([box]) + + def test_box_convert_xywha_to_xyxy_array(self): + for dtype in [np.float64, np.float32]: + box = np.asarray( + [ + [50, 50, 30, 20, 0], + [50, 50, 30, 20, 90], + [1, 1, math.sqrt(2), math.sqrt(2), -45], + ], + dtype=dtype, + ) + output = self._convert_xywha_to_xyxy(box) + self.assertEqual(output.dtype, box.dtype) + expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype) + self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_box_convert_xywha_to_xyxy_tensor(self): + for dtype in [torch.float32, torch.float64]: + box = torch.tensor( + [ + [50, 50, 30, 20, 0], + [50, 50, 30, 20, 90], + [1, 1, math.sqrt(2), math.sqrt(2), -45], + ], + dtype=dtype, + ) + output = self._convert_xywha_to_xyxy(box) + self.assertEqual(output.dtype, box.dtype) + expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype) + + self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_box_convert_xywh_to_xywha_list(self): + for tp in [list, tuple]: + box = tp([50, 50, 30, 20]) + output = self._convert_xywh_to_xywha(box) + self.assertIsInstance(output, tp) + 
self.assertEqual(output, tp([65, 60, 30, 20, 0])) + + with self.assertRaises(Exception): + self._convert_xywh_to_xywha([box]) + + def test_box_convert_xywh_to_xywha_array(self): + for dtype in [np.float64, np.float32]: + box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype) + output = self._convert_xywh_to_xywha(box) + self.assertEqual(output.dtype, box.dtype) + expected = np.asarray( + [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype + ) + self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_box_convert_xywh_to_xywha_tensor(self): + for dtype in [torch.float32, torch.float64]: + box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype) + output = self._convert_xywh_to_xywha(box) + self.assertEqual(output.dtype, box.dtype) + expected = torch.tensor( + [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype + ) + + self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output)) + + def test_json_serializable(self): + payload = {"box_mode": BoxMode.XYWH_REL} + try: + json.dumps(payload) + except Exception: + self.fail("JSON serialization failed") + + def test_json_deserializable(self): + payload = '{"box_mode": 2}' + obj = json.loads(payload) + try: + obj["box_mode"] = BoxMode(obj["box_mode"]) + except Exception: + self.fail("JSON deserialization failed") + + +class TestBoxIOU(unittest.TestCase): + def test_pairwise_iou(self): + boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) + + boxes2 = torch.tensor( + [ + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.5, 1.0], + [0.0, 0.0, 1.0, 0.5], + [0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.5, 0.5, 1.5, 1.5], + ] + ) + + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ] + ) + + ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2)) + + self.assertTrue(torch.allclose(ious, expected_ious)) + + +class TestBoxes(unittest.TestCase): + def test_empty_cat(self): + x = Boxes.cat([]) + self.assertTrue(x.tensor.shape, (0, 4)) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/structures/test_imagelist.py b/preprocess/mhp_extension/detectron2/tests/structures/test_imagelist.py new file mode 100644 index 0000000000000000000000000000000000000000..abeb35569ddc34a618735f4989dfbfae23d47bc1 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/structures/test_imagelist.py @@ -0,0 +1,38 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import unittest +from typing import Sequence +import torch + +from detectron2.structures import ImageList + + +class TestImageList(unittest.TestCase): + def test_imagelist_padding_shape(self): + class TensorToImageList(torch.nn.Module): + def forward(self, tensors: Sequence[torch.Tensor]): + return ImageList.from_tensors(tensors, 4).tensor + + func = torch.jit.trace( + TensorToImageList(), ([torch.ones((3, 10, 10), dtype=torch.float32)],) + ) + ret = func([torch.ones((3, 15, 20), dtype=torch.float32)]) + self.assertEqual(list(ret.shape), [1, 3, 16, 20], str(ret.shape)) + + func = torch.jit.trace( + TensorToImageList(), + ( + [ + torch.ones((3, 16, 10), dtype=torch.float32), + torch.ones((3, 13, 11), dtype=torch.float32), + ], + ), + ) + ret = func( + [ + torch.ones((3, 25, 20), dtype=torch.float32), + torch.ones((3, 10, 10), dtype=torch.float32), + ] + ) + # does not support calling with different #images + self.assertEqual(list(ret.shape), [2, 3, 28, 20], str(ret.shape)) diff --git a/preprocess/mhp_extension/detectron2/tests/structures/test_instances.py b/preprocess/mhp_extension/detectron2/tests/structures/test_instances.py new file mode 100644 index 0000000000000000000000000000000000000000..79c5249217633d3f144d02f14d11f32d1d4be7c9 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/structures/test_instances.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import unittest +import torch + +from detectron2.structures import Instances + + +class TestInstancesIndexing(unittest.TestCase): + def test_int_indexing(self): + attr1 = torch.tensor([[0.0, 0.0, 1.0], [0.0, 0.0, 0.5], [0.0, 0.0, 1.0], [0.0, 0.5, 0.5]]) + attr2 = torch.tensor([0.1, 0.2, 0.3, 0.4]) + instances = Instances((100, 100)) + instances.attr1 = attr1 + instances.attr2 = attr2 + for i in range(-len(instances), len(instances)): + inst = instances[i] + self.assertEqual((inst.attr1 == attr1[i]).all(), True) + self.assertEqual((inst.attr2 == attr2[i]).all(), True) + + self.assertRaises(IndexError, lambda: instances[len(instances)]) + self.assertRaises(IndexError, lambda: instances[-len(instances) - 1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/structures/test_rotated_boxes.py b/preprocess/mhp_extension/detectron2/tests/structures/test_rotated_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..575ac480e39d7406e55f4ff45b867e6f5c3796a0 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/structures/test_rotated_boxes.py @@ -0,0 +1,357 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from __future__ import absolute_import, division, print_function, unicode_literals +import logging +import math +import random +import unittest +import torch +from fvcore.common.benchmark import benchmark + +from detectron2.layers.rotated_boxes import pairwise_iou_rotated +from detectron2.structures.boxes import Boxes +from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou + +logger = logging.getLogger(__name__) + + +class TestRotatedBoxesLayer(unittest.TestCase): + def test_iou_0_dim_cpu(self): + boxes1 = torch.rand(0, 5, dtype=torch.float32) + boxes2 = torch.rand(10, 5, dtype=torch.float32) + expected_ious = torch.zeros(0, 10, dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious, expected_ious)) + + boxes1 = torch.rand(10, 5, dtype=torch.float32) + boxes2 = torch.rand(0, 5, dtype=torch.float32) + expected_ious = torch.zeros(10, 0, dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious, expected_ious)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_0_dim_cuda(self): + boxes1 = torch.rand(0, 5, dtype=torch.float32) + boxes2 = torch.rand(10, 5, dtype=torch.float32) + expected_ious = torch.zeros(0, 10, dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) + + boxes1 = torch.rand(10, 5, dtype=torch.float32) + boxes2 = torch.rand(0, 5, dtype=torch.float32) + expected_ious = torch.zeros(10, 0, dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) + + def test_iou_half_overlap_cpu(self): + boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) + boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious, expected_ious)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_half_overlap_cuda(self): + boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) + boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious)) + + def test_iou_precision(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device) + boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device) + iou = 8.3 / 10.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(torch.allclose(ious.cpu(), expected_ious)) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_too_many_boxes_cuda(self): + s1, s2 = 5, 1289035 + boxes1 = torch.zeros(s1, 5) + boxes2 = torch.zeros(s2, 5) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2)) + + def test_iou_extreme(self): + # Cause floating point issues in cuda kernels (#1266) + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, 
-37.0]], device=device) + boxes2 = torch.tensor( + [ + [ + -1.117407639806935e17, + 1.3858420478349148e18, + 1000.0000610351562, + 1000.0000610351562, + 1612.0, + ] + ], + device=device, + ) + ious = pairwise_iou_rotated(boxes1, boxes2) + self.assertTrue(ious.min() >= 0, ious) + + +class TestRotatedBoxesStructure(unittest.TestCase): + def test_clip_area_0_degree(self): + for _ in range(50): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + # Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2) + boxes_4d = torch.zeros(num_boxes, 4) + boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0 + boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0 + boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0 + boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0 + + image_size = (500, 600) + test_boxes_4d = Boxes(boxes_4d) + test_boxes_5d = RotatedBoxes(boxes_5d) + # Before clip + areas_4d = test_boxes_4d.area() + areas_5d = test_boxes_5d.area() + self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5)) + # After clip + test_boxes_4d.clip(image_size) + test_boxes_5d.clip(image_size) + areas_4d = test_boxes_4d.area() + areas_5d = test_boxes_5d.area() + self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5)) + + def test_clip_area_arbitrary_angle(self): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + clip_angle_threshold = random.uniform(0, 180) + + image_size = (500, 600) + test_boxes_5d = RotatedBoxes(boxes_5d) + # Before clip + areas_before = test_boxes_5d.area() + # After clip + test_boxes_5d.clip(image_size, clip_angle_threshold) + areas_diff = test_boxes_5d.area() - areas_before + + # the areas should only decrease after clipping + self.assertTrue(torch.all(areas_diff <= 0)) + # whenever the box is clipped (thus the area shrinks), + # the angle for the box must be within the clip_angle_threshold + # Note that the clip function will normalize the angle range + # to be within (-180, 180] + self.assertTrue( + torch.all(torch.abs(boxes_5d[:, 4][torch.where(areas_diff < 0)]) < clip_angle_threshold) + ) + + def test_normalize_angles(self): + # torch.manual_seed(0) + for _ in range(50): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + rotated_boxes = RotatedBoxes(boxes_5d) + normalized_boxes = rotated_boxes.clone() + normalized_boxes.normalize_angles() + self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180)) + self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180)) + # x, y, w, h should not change + self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4])) + # the 
cos/sin values of the angles should stay the same + + self.assertTrue( + torch.allclose( + torch.cos(boxes_5d[:, 4] * math.pi / 180), + torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180), + atol=1e-5, + ) + ) + + self.assertTrue( + torch.allclose( + torch.sin(boxes_5d[:, 4] * math.pi / 180), + torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180), + atol=1e-5, + ) + ) + + def test_pairwise_iou_0_degree(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + boxes1 = torch.tensor( + [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [ + [0.5, 0.5, 1.0, 1.0, 0.0], + [0.25, 0.5, 0.5, 1.0, 0.0], + [0.5, 0.25, 1.0, 0.5, 0.0], + [0.25, 0.25, 0.5, 0.5, 0.0], + [0.75, 0.75, 0.5, 0.5, 0.0], + [1.0, 1.0, 1.0, 1.0, 0.0], + ], + dtype=torch.float32, + device=device, + ) + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ], + dtype=torch.float32, + device=device, + ) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_45_degrees(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + boxes1 = torch.tensor( + [ + [1, 1, math.sqrt(2), math.sqrt(2), 45], + [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], + ], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device) + expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_orthogonal(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device) + boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device) + iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_large_close_boxes(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + boxes1 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], + dtype=torch.float32, + device=device, + ) + iou = 364.259155 / 364.259186 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_many_boxes(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + num_boxes1 = 100 + num_boxes2 = 200 + boxes1 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32, device=device + ) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], + dtype=torch.float32, + device=device, + ) + for i in range(num_boxes2) + ] + ) + expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device) + for i in range(min(num_boxes1, num_boxes2)): + 
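# matching boxes share the same center and width (10); heights are 10 vs. (1 + 9 * i / num_boxes2), so IoU = (1 + 9 * i / num_boxes2) / 10 +                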
expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_issue1207_simplified(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + # Simplified test case of D2-issue-1207 + boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device) + boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device) + iou = 0.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_pairwise_iou_issue1207(self): + for device in ["cpu"] + ["cuda"] if torch.cuda.is_available() else []: + # The original test case in D2-issue-1207 + boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device) + boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device) + + iou = 0.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + self.assertTrue(torch.allclose(ious, expected_ious)) + + def test_empty_cat(self): + x = RotatedBoxes.cat([]) + self.assertTrue(x.tensor.shape, (0, 5)) + + +def benchmark_rotated_iou(): + num_boxes1 = 200 + num_boxes2 = 500 + boxes1 = torch.stack( + [ + torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], dtype=torch.float32 + ) + for i in range(num_boxes2) + ] + ) + + def func(dev, n=1): + b1 = boxes1.to(device=dev) + b2 = boxes2.to(device=dev) + + def bench(): + for _ in range(n): + pairwise_iou_rotated(b1, b2) + if dev.type == "cuda": + torch.cuda.synchronize() + + return bench + + # only run it once per timed loop, since it's slow + args = [{"dev": torch.device("cpu"), "n": 1}] + if torch.cuda.is_available(): + args.append({"dev": torch.device("cuda"), "n": 10}) + + benchmark(func, "rotated_iou", args, warmup_iters=3) + + +if __name__ == "__main__": + unittest.main() + benchmark_rotated_iou() diff --git a/preprocess/mhp_extension/detectron2/tests/test_checkpoint.py b/preprocess/mhp_extension/detectron2/tests/test_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..725b488fdaec5d2b3a5c6d11c11d2c362453a2a4 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/test_checkpoint.py @@ -0,0 +1,48 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import unittest +from collections import OrderedDict +import torch +from torch import nn + +from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts +from detectron2.utils.logger import setup_logger + + +class TestCheckpointer(unittest.TestCase): + def setUp(self): + setup_logger() + + def create_complex_model(self): + m = nn.Module() + m.block1 = nn.Module() + m.block1.layer1 = nn.Linear(2, 3) + m.layer2 = nn.Linear(3, 2) + m.res = nn.Module() + m.res.layer2 = nn.Linear(3, 2) + + state_dict = OrderedDict() + state_dict["layer1.weight"] = torch.rand(3, 2) + state_dict["layer1.bias"] = torch.rand(3) + state_dict["layer2.weight"] = torch.rand(2, 3) + state_dict["layer2.bias"] = torch.rand(2) + state_dict["res.layer2.weight"] = torch.rand(2, 3) + state_dict["res.layer2.bias"] = torch.rand(2) + return m, state_dict + + def test_complex_model_loaded(self): + for add_data_parallel in [False, True]: + model, state_dict = self.create_complex_model() + if add_data_parallel: + model = nn.DataParallel(model) + model_sd = model.state_dict() + + align_and_update_state_dicts(model_sd, state_dict) + for loaded, stored in zip(model_sd.values(), state_dict.values()): + # different tensor references + self.assertFalse(id(loaded) == id(stored)) + # same content + self.assertTrue(loaded.equal(stored)) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/test_config.py b/preprocess/mhp_extension/detectron2/tests/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..650bdf2c42107c7031709653783cb2f3043e1bdf --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/test_config.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + + +import os +import tempfile +import unittest +import torch + +from detectron2.config import configurable, downgrade_config, get_cfg, upgrade_config +from detectron2.layers import ShapeSpec + +_V0_CFG = """ +MODEL: + RPN_HEAD: + NAME: "TEST" +VERSION: 0 +""" + +_V1_CFG = """ +MODEL: + WEIGHT: "/path/to/weight" +""" + + +class TestConfigVersioning(unittest.TestCase): + def test_upgrade_downgrade_consistency(self): + cfg = get_cfg() + # check that custom is preserved + cfg.USER_CUSTOM = 1 + + down = downgrade_config(cfg, to_version=0) + up = upgrade_config(down) + self.assertTrue(up == cfg) + + def _merge_cfg_str(self, cfg, merge_str): + f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) + try: + f.write(merge_str) + f.close() + cfg.merge_from_file(f.name) + finally: + os.remove(f.name) + return cfg + + def test_auto_upgrade(self): + cfg = get_cfg() + latest_ver = cfg.VERSION + cfg.USER_CUSTOM = 1 + + self._merge_cfg_str(cfg, _V0_CFG) + + self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST") + self.assertEqual(cfg.VERSION, latest_ver) + + def test_guess_v1(self): + cfg = get_cfg() + latest_ver = cfg.VERSION + self._merge_cfg_str(cfg, _V1_CFG) + self.assertEqual(cfg.VERSION, latest_ver) + + +class _TestClassA(torch.nn.Module): + @configurable + def __init__(self, arg1, arg2, arg3=3): + super().__init__() + self.arg1 = arg1 + self.arg2 = arg2 + self.arg3 = arg3 + assert arg1 == 1 + assert arg2 == 2 + assert arg3 == 3 + + @classmethod + def from_config(cls, cfg): + args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2} + return args + + +class _TestClassB(_TestClassA): + @configurable + def __init__(self, input_shape, arg1, arg2, arg3=3): + """ + Doc of _TestClassB + """ + assert input_shape == "shape" + super().__init__(arg1, arg2, arg3) + + @classmethod + def from_config(cls, cfg, input_shape): # test extra positional arg in from_config + args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2} + args["input_shape"] = input_shape + return args + + +class _LegacySubClass(_TestClassB): + # an old subclass written in cfg style + def __init__(self, cfg, input_shape, arg4=4): + super().__init__(cfg, input_shape) + assert self.arg1 == 1 + assert self.arg2 == 2 + assert self.arg3 == 3 + + +class _NewSubClassNewInit(_TestClassB): + # test new subclass with a new __init__ + @configurable + def __init__(self, input_shape, arg4=4, **kwargs): + super().__init__(input_shape, **kwargs) + assert self.arg1 == 1 + assert self.arg2 == 2 + assert self.arg3 == 3 + + +class _LegacySubClassNotCfg(_TestClassB): + # an old subclass written in cfg style, but argument is not called "cfg" + def __init__(self, config, input_shape): + super().__init__(config, input_shape) + assert self.arg1 == 1 + assert self.arg2 == 2 + assert self.arg3 == 3 + + +class _TestClassC(_TestClassB): + @classmethod + def from_config(cls, cfg, input_shape, **kwargs): # test extra kwarg overwrite + args = {"arg1": cfg.ARG1, "arg2": cfg.ARG2} + args["input_shape"] = input_shape + args.update(kwargs) + return args + + +class _TestClassD(_TestClassA): + @configurable + def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3): + assert input_shape == "shape" + super().__init__(arg1, arg2, arg3) + + # _TestClassA.from_config does not have input_shape args. 
+ # Test whether input_shape will be forwarded to __init__ + + +class TestConfigurable(unittest.TestCase): + def testInitWithArgs(self): + _ = _TestClassA(arg1=1, arg2=2, arg3=3) + _ = _TestClassB("shape", arg1=1, arg2=2) + _ = _TestClassC("shape", arg1=1, arg2=2) + _ = _TestClassD("shape", arg1=1, arg2=2, arg3=3) + + def testPatchedAttr(self): + self.assertTrue("Doc" in _TestClassB.__init__.__doc__) + self.assertEqual(_TestClassD.__init__.__annotations__["arg1"], int) + + def testInitWithCfg(self): + cfg = get_cfg() + cfg.ARG1 = 1 + cfg.ARG2 = 2 + cfg.ARG3 = 3 + _ = _TestClassA(cfg) + _ = _TestClassB(cfg, input_shape="shape") + _ = _TestClassC(cfg, input_shape="shape") + _ = _TestClassD(cfg, input_shape="shape") + _ = _LegacySubClass(cfg, input_shape="shape") + _ = _NewSubClassNewInit(cfg, input_shape="shape") + _ = _LegacySubClassNotCfg(cfg, input_shape="shape") + with self.assertRaises(TypeError): + # disallow forwarding positional args to __init__ since it's prone to errors + _ = _TestClassD(cfg, "shape") + + # call with kwargs instead + _ = _TestClassA(cfg=cfg) + _ = _TestClassB(cfg=cfg, input_shape="shape") + _ = _TestClassC(cfg=cfg, input_shape="shape") + _ = _TestClassD(cfg=cfg, input_shape="shape") + _ = _LegacySubClass(cfg=cfg, input_shape="shape") + _ = _NewSubClassNewInit(cfg=cfg, input_shape="shape") + _ = _LegacySubClassNotCfg(config=cfg, input_shape="shape") + + def testInitWithCfgOverwrite(self): + cfg = get_cfg() + cfg.ARG1 = 1 + cfg.ARG2 = 999 # wrong config + with self.assertRaises(AssertionError): + _ = _TestClassA(cfg, arg3=3) + + # overwrite arg2 with correct config later: + _ = _TestClassA(cfg, arg2=2, arg3=3) + _ = _TestClassB(cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassC(cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassD(cfg, input_shape="shape", arg2=2, arg3=3) + + # call with kwargs cfg=cfg instead + _ = _TestClassA(cfg=cfg, arg2=2, arg3=3) + _ = _TestClassB(cfg=cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassC(cfg=cfg, input_shape="shape", arg2=2, arg3=3) + _ = _TestClassD(cfg=cfg, input_shape="shape", arg2=2, arg3=3) + + def testInitWithCfgWrongArgs(self): + cfg = get_cfg() + cfg.ARG1 = 1 + cfg.ARG2 = 2 + with self.assertRaises(TypeError): + _ = _TestClassB(cfg, "shape", not_exist=1) + with self.assertRaises(TypeError): + _ = _TestClassC(cfg, "shape", not_exist=1) + with self.assertRaises(TypeError): + _ = _TestClassD(cfg, "shape", not_exist=1) + + def testBadClass(self): + class _BadClass1: + @configurable + def __init__(self, a=1, b=2): + pass + + class _BadClass2: + @configurable + def __init__(self, a=1, b=2): + pass + + def from_config(self, cfg): # noqa + pass + + class _BadClass3: + @configurable + def __init__(self, a=1, b=2): + pass + + # bad name: must be cfg + @classmethod + def from_config(cls, config): # noqa + pass + + with self.assertRaises(AttributeError): + _ = _BadClass1(a=1) + + with self.assertRaises(TypeError): + _ = _BadClass2(a=1) + + with self.assertRaises(TypeError): + _ = _BadClass3(get_cfg()) diff --git a/preprocess/mhp_extension/detectron2/tests/test_export_caffe2.py b/preprocess/mhp_extension/detectron2/tests/test_export_caffe2.py new file mode 100644 index 0000000000000000000000000000000000000000..ad989c4a3d11e6675d26ae2690f06d2ffe30d44c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/test_export_caffe2.py @@ -0,0 +1,71 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# -*- coding: utf-8 -*- + +import copy +import numpy as np +import os +import tempfile +import unittest +import cv2 +import torch +from fvcore.common.file_io import PathManager + +from detectron2 import model_zoo +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import DatasetCatalog +from detectron2.modeling import build_model +from detectron2.utils.logger import setup_logger + + +@unittest.skipIf(os.environ.get("CIRCLECI"), "Require COCO data and model zoo.") +class TestCaffe2Export(unittest.TestCase): + def setUp(self): + setup_logger() + + def _test_model(self, config_path, device="cpu"): + # requires extra dependencies + from detectron2.export import Caffe2Model, add_export_config, export_caffe2_model + + cfg = get_cfg() + cfg.merge_from_file(model_zoo.get_config_file(config_path)) + cfg = add_export_config(cfg) + cfg.MODEL.DEVICE = device + + model = build_model(cfg) + DetectionCheckpointer(model).load(model_zoo.get_checkpoint_url(config_path)) + + inputs = [{"image": self._get_test_image()}] + c2_model = export_caffe2_model(cfg, model, copy.deepcopy(inputs)) + + with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d: + c2_model.save_protobuf(d) + c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs)) + c2_model = Caffe2Model.load_protobuf(d) + c2_model(inputs)[0]["instances"] + + def _get_test_image(self): + try: + file_name = DatasetCatalog.get("coco_2017_train")[0]["file_name"] + assert PathManager.exists(file_name) + except Exception: + self.skipTest("COCO dataset not available.") + + with PathManager.open(file_name, "rb") as f: + buf = f.read() + img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), cv2.IMREAD_COLOR) + assert img is not None, file_name + return torch.from_numpy(img.transpose(2, 0, 1)) + + def testMaskRCNN(self): + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def testMaskRCNNGPU(self): + self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", device="cuda") + + def testRetinaNet(self): + self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml") + + def testPanopticFPN(self): + self._test_model("COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml") diff --git a/preprocess/mhp_extension/detectron2/tests/test_model_analysis.py b/preprocess/mhp_extension/detectron2/tests/test_model_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..0e3f84c9354746fc634aca997abb232424ddebb2 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/test_model_analysis.py @@ -0,0 +1,58 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ + +import unittest +import torch + +import detectron2.model_zoo as model_zoo +from detectron2.config import get_cfg +from detectron2.modeling import build_model +from detectron2.utils.analysis import flop_count_operators, parameter_count + + +def get_model_zoo(config_path): + """ + Like model_zoo.get, but do not load any weights (even pretrained) + """ + cfg_file = model_zoo.get_config_file(config_path) + cfg = get_cfg() + cfg.merge_from_file(cfg_file) + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + return build_model(cfg) + + +class RetinaNetTest(unittest.TestCase): + def setUp(self): + self.model = get_model_zoo("COCO-Detection/retinanet_R_50_FPN_1x.yaml") + + def test_flop(self): + # RetinaNet supports flop-counting with random inputs + inputs = [{"image": torch.rand(3, 800, 800)}] + res = flop_count_operators(self.model, inputs) + self.assertTrue(int(res["conv"]), 146) # 146B flops + + def test_param_count(self): + res = parameter_count(self.model) + self.assertTrue(res[""], 37915572) + self.assertTrue(res["backbone"], 31452352) + + +class FasterRCNNTest(unittest.TestCase): + def setUp(self): + self.model = get_model_zoo("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml") + + def test_flop(self): + # Faster R-CNN supports flop-counting with random inputs + inputs = [{"image": torch.rand(3, 800, 800)}] + res = flop_count_operators(self.model, inputs) + + # This only checks flops for backbone & proposal generator + # Flops for box head is not conv, and depends on #proposals, which is + # almost 0 for random inputs. + self.assertTrue(int(res["conv"]), 117) + + def test_param_count(self): + res = parameter_count(self.model) + self.assertTrue(res[""], 41699936) + self.assertTrue(res["backbone"], 26799296) diff --git a/preprocess/mhp_extension/detectron2/tests/test_model_zoo.py b/preprocess/mhp_extension/detectron2/tests/test_model_zoo.py new file mode 100644 index 0000000000000000000000000000000000000000..2d16c711af2ab797dab04d0573c2ed70e071ebfd --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/test_model_zoo.py @@ -0,0 +1,29 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest + +from detectron2 import model_zoo +from detectron2.modeling import FPN, GeneralizedRCNN + +logger = logging.getLogger(__name__) + + +class TestModelZoo(unittest.TestCase): + def test_get_returns_model(self): + model = model_zoo.get("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml", trained=False) + self.assertIsInstance(model, GeneralizedRCNN) + self.assertIsInstance(model.backbone, FPN) + + def test_get_invalid_model(self): + self.assertRaises(RuntimeError, model_zoo.get, "Invalid/config.yaml") + + def test_get_url(self): + url = model_zoo.get_checkpoint_url("Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml") + self.assertEqual( + url, + "https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl", # noqa + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/preprocess/mhp_extension/detectron2/tests/test_visualizer.py b/preprocess/mhp_extension/detectron2/tests/test_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1cdeddc6733e25d882bede48a404a1d52c0845de --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tests/test_visualizer.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# File: + +import numpy as np +import unittest +import torch + +from detectron2.data import MetadataCatalog +from detectron2.structures import BoxMode, Instances, RotatedBoxes +from detectron2.utils.visualizer import Visualizer + + +class TestVisualizer(unittest.TestCase): + def _random_data(self): + H, W = 100, 100 + N = 10 + img = np.random.rand(H, W, 3) * 255 + boxxy = np.random.rand(N, 2) * (H // 2) + boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1) + + def _rand_poly(): + return np.random.rand(3, 2).flatten() * H + + polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)] + + mask = np.zeros_like(img[:, :, 0], dtype=np.bool) + mask[:10, 10:20] = 1 + + labels = [str(i) for i in range(N)] + return img, boxes, labels, polygons, [mask] * N + + @property + def metadata(self): + return MetadataCatalog.get("coco_2017_train") + + def test_draw_dataset_dict(self): + img = np.random.rand(512, 512, 3) * 255 + dic = { + "annotations": [ + { + "bbox": [ + 368.9946492271106, + 330.891438763377, + 13.148537455410235, + 13.644708680142685, + ], + "bbox_mode": BoxMode.XYWH_ABS, + "category_id": 0, + "iscrowd": 1, + "segmentation": { + "counts": "_jh52m?2N2N2N2O100O10O001N1O2MceP2", + "size": [512, 512], + }, + } + ], + "height": 512, + "image_id": 1, + "width": 512, + } + v = Visualizer(img, self.metadata) + v.draw_dataset_dict(dic) + + def test_overlay_instances(self): + img, boxes, labels, polygons, masks = self._random_data() + + v = Visualizer(img, self.metadata) + output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + # Test 2x scaling + v = Visualizer(img, self.metadata, scale=2.0) + output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape[0], img.shape[0] * 2) + + # Test overlay masks + v = Visualizer(img, self.metadata) + output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + def test_overlay_instances_no_boxes(self): + img, boxes, labels, polygons, _ = self._random_data() + v = Visualizer(img, self.metadata) + v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image() + + def test_draw_instance_predictions(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.asarray(masks)) + + v = Visualizer(img, self.metadata) + v.draw_instance_predictions(inst) + + def test_draw_empty_mask_predictions(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks))) + + v = Visualizer(img, self.metadata) + v.draw_instance_predictions(inst) + + def test_correct_output_shape(self): + img = np.random.rand(928, 928, 3) * 255 + v = Visualizer(img, self.metadata) + out = v.output.get_image() + self.assertEqual(out.shape, img.shape) + + def test_overlay_rotated_instances(self): + H, W = 100, 150 + img = np.random.rand(H, W, 3) * 255 + num_boxes = 50 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = 
torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H)) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H)) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + rotated_boxes = RotatedBoxes(boxes_5d) + labels = [str(i) for i in range(num_boxes)] + + v = Visualizer(img, self.metadata) + output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + def test_draw_no_metadata(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.asarray(masks)) + + v = Visualizer(img, MetadataCatalog.get("asdfasdf")) + v.draw_instance_predictions(inst) diff --git a/preprocess/mhp_extension/detectron2/tools/README.md b/preprocess/mhp_extension/detectron2/tools/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3733863970218bf8bdf9b32420163f4c858e209e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/README.md @@ -0,0 +1,45 @@ + +This directory contains a few scripts that use detectron2. + + +* `train_net.py` + +An example training script that's made to train builtin models of detectron2. + +For usage, see [GETTING_STARTED.md](../GETTING_STARTED.md). + +* `plain_train_net.py` + +Similar to `train_net.py`, but implements a training loop instead of using `Trainer`. +This script includes fewer features but it may be more friendly to hackers. + +* `benchmark.py` + +Benchmark the training speed, inference speed or data loading speed of a given config. + +Usage: +``` +python benchmark.py --config-file config.yaml --task train/eval/data [optional DDP flags] +``` + +* `visualize_json_results.py` + +Visualize the json instance detection/segmentation results dumped by `COCOEvalutor` or `LVISEvaluator` + +Usage: +``` +python visualize_json_results.py --input x.json --output dir/ --dataset coco_2017_val +``` +If not using a builtin dataset, you'll need your own script or modify this script. + +* `visualize_data.py` + +Visualize ground truth raw annotations or training data (after preprocessing/augmentations). + +Usage: +``` +python visualize_data.py --config-file config.yaml --source annotation/dataloader --output-dir dir/ [--show] +``` + +NOTE: the script does not stop by itself when using `--source dataloader` because a training +dataloader is usually infinite. diff --git a/preprocess/mhp_extension/detectron2/tools/analyze_model.py b/preprocess/mhp_extension/detectron2/tools/analyze_model.py new file mode 100755 index 0000000000000000000000000000000000000000..9c06ea4b5fbfd551d85702171976f9bc33f2e275 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/analyze_model.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import logging +import numpy as np +from collections import Counter +import tqdm + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import build_detection_test_loader +from detectron2.engine import default_argument_parser +from detectron2.modeling import build_model +from detectron2.utils.analysis import ( + activation_count_operators, + flop_count_operators, + parameter_count_table, +) +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger("detectron2") + + +def setup(args): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.DATALOADER.NUM_WORKERS = 0 + cfg.merge_from_list(args.opts) + cfg.freeze() + setup_logger() + return cfg + + +def do_flop(cfg): + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + model.eval() + + counts = Counter() + total_flops = [] + for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa + count = flop_count_operators(model, data) + counts += count + total_flops.append(sum(count.values())) + logger.info( + "(G)Flops for Each Type of Operators:\n" + str([(k, v / idx) for k, v in counts.items()]) + ) + logger.info("Total (G)Flops: {}ยฑ{}".format(np.mean(total_flops), np.std(total_flops))) + + +def do_activation(cfg): + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + model.eval() + + counts = Counter() + total_activations = [] + for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa + count = activation_count_operators(model, data) + counts += count + total_activations.append(sum(count.values())) + logger.info( + "(Million) Activations for Each Type of Operators:\n" + + str([(k, v / idx) for k, v in counts.items()]) + ) + logger.info( + "Total (Million) Activations: {}ยฑ{}".format( + np.mean(total_activations), np.std(total_activations) + ) + ) + + +def do_parameter(cfg): + model = build_model(cfg) + logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5)) + + +def do_structure(cfg): + model = build_model(cfg) + logger.info("Model Structure:\n" + str(model)) + + +if __name__ == "__main__": + parser = default_argument_parser( + epilog=""" +Examples: + +To show parameters of a model: +$ ./analyze_model.py --tasks parameter \\ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml + +Flops and activations are data-dependent, therefore inputs and model weights +are needed to count them: + +$ ./analyze_model.py --num-inputs 100 --tasks flop \\ + --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\ + MODEL.WEIGHTS /path/to/model.pkl +""" + ) + parser.add_argument( + "--tasks", + choices=["flop", "activation", "parameter", "structure"], + required=True, + nargs="+", + ) + parser.add_argument( + "--num-inputs", + default=100, + type=int, + help="number of inputs used to compute statistics for flops/activations, " + "both are data dependent.", + ) + args = parser.parse_args() + assert not args.eval_only + assert args.num_gpus == 1 + + cfg = setup(args) + + for task in args.tasks: + { + "flop": do_flop, + "activation": do_activation, + "parameter": do_parameter, + "structure": do_structure, + }[task](cfg) diff --git a/preprocess/mhp_extension/detectron2/tools/benchmark.py b/preprocess/mhp_extension/detectron2/tools/benchmark.py new file 
mode 100755 index 0000000000000000000000000000000000000000..9eec59f476882e4045ec3c682ffe515413a3be15 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/benchmark.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +A script to benchmark builtin models. + +Note: this script has an extra dependency of psutil. +""" + +import itertools +import logging +import psutil +import torch +import tqdm +from fvcore.common.timer import Timer +from torch.nn.parallel import DistributedDataParallel + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import ( + DatasetFromList, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.engine import SimpleTrainer, default_argument_parser, hooks, launch +from detectron2.modeling import build_model +from detectron2.solver import build_optimizer +from detectron2.utils import comm +from detectron2.utils.events import CommonMetricPrinter +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger("detectron2") + + +def setup(args): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway. + cfg.merge_from_list(args.opts) + cfg.freeze() + setup_logger(distributed_rank=comm.get_rank()) + return cfg + + +def benchmark_data(args): + cfg = setup(args) + + timer = Timer() + dataloader = build_detection_train_loader(cfg) + logger.info("Initialize loader using {} seconds.".format(timer.seconds())) + + timer.reset() + itr = iter(dataloader) + for i in range(10): # warmup + next(itr) + if i == 0: + startup_time = timer.seconds() + timer = Timer() + max_iter = 1000 + for _ in tqdm.trange(max_iter): + next(itr) + logger.info( + "{} iters ({} images) in {} seconds.".format( + max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds() + ) + ) + logger.info("Startup time: {} seconds".format(startup_time)) + vram = psutil.virtual_memory() + logger.info( + "RAM Usage: {:.2f}/{:.2f} GB".format( + (vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3 + ) + ) + + # test for a few more rounds + for _ in range(10): + timer = Timer() + max_iter = 1000 + for _ in tqdm.trange(max_iter): + next(itr) + logger.info( + "{} iters ({} images) in {} seconds.".format( + max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds() + ) + ) + + +def benchmark_train(args): + cfg = setup(args) + model = build_model(cfg) + logger.info("Model:\n{}".format(model)) + if comm.get_world_size() > 1: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + optimizer = build_optimizer(cfg, model) + checkpointer = DetectionCheckpointer(model, optimizer=optimizer) + checkpointer.load(cfg.MODEL.WEIGHTS) + + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 + data_loader = build_detection_train_loader(cfg) + dummy_data = list(itertools.islice(data_loader, 100)) + + def f(): + data = DatasetFromList(dummy_data, copy=False) + while True: + yield from data + + max_iter = 400 + trainer = SimpleTrainer(model, f(), optimizer) + trainer.register_hooks( + [hooks.IterationTimer(), hooks.PeriodicWriter([CommonMetricPrinter(max_iter)])] + ) + trainer.train(1, max_iter) + + +@torch.no_grad() +def benchmark_eval(args): + cfg = setup(args) + model = build_model(cfg) + model.eval() + logger.info("Model:\n{}".format(model)) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + + cfg.defrost() + 
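# NUM_WORKERS = 0 loads data in the main process (no DataLoader worker subprocesses) +    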
cfg.DATALOADER.NUM_WORKERS = 0 + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + dummy_data = list(itertools.islice(data_loader, 100)) + + def f(): + while True: + yield from DatasetFromList(dummy_data, copy=False) + + for _ in range(5): # warmup + model(dummy_data[0]) + + max_iter = 400 + timer = Timer() + with tqdm.tqdm(total=max_iter) as pbar: + for idx, d in enumerate(f()): + if idx == max_iter: + break + model(d) + pbar.update() + logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) + + +if __name__ == "__main__": + parser = default_argument_parser() + parser.add_argument("--task", choices=["train", "eval", "data"], required=True) + args = parser.parse_args() + assert not args.eval_only + + if args.task == "data": + f = benchmark_data + elif args.task == "train": + """ + Note: training speed may not be representative. + The training cost of a R-CNN model varies with the content of the data + and the quality of the model. + """ + f = benchmark_train + elif args.task == "eval": + f = benchmark_eval + # only benchmark single-GPU inference. + assert args.num_gpus == 1 and args.num_machines == 1 + launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,)) diff --git a/preprocess/mhp_extension/detectron2/tools/convert-torchvision-to-d2.py b/preprocess/mhp_extension/detectron2/tools/convert-torchvision-to-d2.py new file mode 100755 index 0000000000000000000000000000000000000000..18a24e4ef96d34a4a0d1f43debc2276260da1a2b --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/convert-torchvision-to-d2.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import pickle as pkl +import sys +import torch + +""" +Usage: + # download one of the ResNet{18,34,50,101,152} models from torchvision: + wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth + # run the conversion + ./convert-torchvision-to-d2.py r50.pth r50.pkl + + # Then, use r50.pkl with the following changes in config: + +MODEL: + WEIGHTS: "/path/to/r50.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + RESNETS: + DEPTH: 50 + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" + + These models typically produce slightly worse results than the + pre-trained ResNets we use in official configs, which are the + original ResNet models released by MSRA. +""" + +if __name__ == "__main__": + input = sys.argv[1] + + obj = torch.load(input, map_location="cpu") + + newmodel = {} + for k in list(obj.keys()): + old_k = k + if "layer" not in k: + k = "stem." + k + for t in [1, 2, 3, 4]: + k = k.replace("layer{}".format(t), "res{}".format(t + 1)) + for t in [1, 2, 3]: + k = k.replace("bn{}".format(t), "conv{}.norm".format(t)) + k = k.replace("downsample.0", "shortcut") + k = k.replace("downsample.1", "shortcut.norm") + print(old_k, "->", k) + newmodel[k] = obj.pop(old_k).detach().numpy() + + res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True} + + with open(sys.argv[2], "wb") as f: + pkl.dump(res, f) + if obj: + print("Unconverted keys:", obj.keys()) diff --git a/preprocess/mhp_extension/detectron2/tools/deploy/README.md b/preprocess/mhp_extension/detectron2/tools/deploy/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b9d5b15512c0bd160accbb1823236b8954a37b86 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/deploy/README.md @@ -0,0 +1,9 @@ + +This directory contains: + +1. 
A script that converts a detectron2 model to caffe2 format. + +2. An example that loads a Mask R-CNN model in caffe2 format and runs inference. + +See [tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html) +for their usage. diff --git a/preprocess/mhp_extension/detectron2/tools/deploy/caffe2_converter.py b/preprocess/mhp_extension/detectron2/tools/deploy/caffe2_converter.py new file mode 100755 index 0000000000000000000000000000000000000000..08feb69fba090a302d1624d52d146ac7a0787223 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/deploy/caffe2_converter.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +import argparse +import os +import onnx +import torch + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import build_detection_test_loader +from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format +from detectron2.export import Caffe2Tracer, add_export_config +from detectron2.modeling import build_model +from detectron2.utils.logger import setup_logger + + +def setup_cfg(args): + cfg = get_cfg() + # cuda context is initialized before creating dataloader, so we don't fork anymore + cfg.DATALOADER.NUM_WORKERS = 0 + cfg = add_export_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + if cfg.MODEL.DEVICE != "cpu": + TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) + assert TORCH_VERSION >= (1, 5), "PyTorch>=1.5 required for GPU conversion!" + return cfg + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Convert a model using caffe2 tracing.") + parser.add_argument( + "--format", + choices=["caffe2", "onnx", "torchscript"], + help="output format", + default="caffe2", + ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument("--run-eval", action="store_true") + parser.add_argument("--output", help="output directory for the converted model") + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + args = parser.parse_args() + logger = setup_logger() + logger.info("Command line arguments: " + str(args)) + os.makedirs(args.output, exist_ok=True) + + cfg = setup_cfg(args) + + # create a torch model + torch_model = build_model(cfg) + DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS) + + # get a sample data + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + first_batch = next(iter(data_loader)) + + # convert and save caffe2 model + tracer = Caffe2Tracer(cfg, torch_model, first_batch) + if args.format == "caffe2": + caffe2_model = tracer.export_caffe2() + caffe2_model.save_protobuf(args.output) + # draw the caffe2 graph + caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch) + elif args.format == "onnx": + onnx_model = tracer.export_onnx() + onnx.save(onnx_model, os.path.join(args.output, "model.onnx")) + elif args.format == "torchscript": + script_model = tracer.export_torchscript() + script_model.save(os.path.join(args.output, "model.ts")) + + # Recursively print IR of all modules + with open(os.path.join(args.output, "model_ts_IR.txt"), "w") as f: + try: + f.write(script_model._actual_script_module._c.dump_to_str(True, False, False)) + except AttributeError: + pass + # Print IR of the entire 
graph (all submodules inlined)
+        with open(os.path.join(args.output, "model_ts_IR_inlined.txt"), "w") as f:
+            f.write(str(script_model.inlined_graph))
+        # Print the model structure in pytorch style
+        with open(os.path.join(args.output, "model.txt"), "w") as f:
+            f.write(str(script_model))
+
+    # run evaluation with the converted model
+    if args.run_eval:
+        assert args.format == "caffe2", "Python inference in other format is not yet supported."
+        dataset = cfg.DATASETS.TEST[0]
+        data_loader = build_detection_test_loader(cfg, dataset)
+        # NOTE: hard-coded evaluator. change to the evaluator for your dataset
+        evaluator = COCOEvaluator(dataset, cfg, True, args.output)
+        metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
+        print_csv_format(metrics)
diff --git a/preprocess/mhp_extension/detectron2/tools/deploy/caffe2_mask_rcnn.cpp b/preprocess/mhp_extension/detectron2/tools/deploy/caffe2_mask_rcnn.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..44370b4c518408f1f46345c7e3ac07c7db63a485
--- /dev/null
+++ b/preprocess/mhp_extension/detectron2/tools/deploy/caffe2_mask_rcnn.cpp
@@ -0,0 +1,119 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+#include <c10/util/Flags.h>
+#include <caffe2/core/blob.h>
+#include <caffe2/core/common.h>
+#include <caffe2/core/init.h>
+#include <caffe2/core/net.h>
+#include <caffe2/core/workspace.h>
+#include <caffe2/utils/proto_utils.h>
+
+#include <opencv2/opencv.hpp>
+#include <cassert>
+#include <chrono>
+#include <iostream>
+#include <string>
+
+C10_DEFINE_string(predict_net, "", "path to model.pb");
+C10_DEFINE_string(init_net, "", "path to model_init.pb");
+C10_DEFINE_string(input, "", "path to input image");
+
+using namespace std;
+using namespace caffe2;
+
+int main(int argc, char** argv) {
+  caffe2::GlobalInit(&argc, &argv);
+  string predictNetPath = FLAGS_predict_net;
+  string initNetPath = FLAGS_init_net;
+  cv::Mat input = cv::imread(FLAGS_input, cv::IMREAD_COLOR);
+
+  const int height = input.rows;
+  const int width = input.cols;
+  // FPN models require divisibility of 32
+  assert(height % 32 == 0 && width % 32 == 0);
+  const int batch = 1;
+  const int channels = 3;
+
+  // initialize Net and Workspace
+  caffe2::NetDef initNet_, predictNet_;
+  CAFFE_ENFORCE(ReadProtoFromFile(initNetPath, &initNet_));
+  CAFFE_ENFORCE(ReadProtoFromFile(predictNetPath, &predictNet_));
+
+  Workspace workSpace;
+  for (auto& str : predictNet_.external_input()) {
+    workSpace.CreateBlob(str);
+  }
+  CAFFE_ENFORCE(workSpace.CreateNet(predictNet_));
+  CAFFE_ENFORCE(workSpace.RunNetOnce(initNet_));
+
+  // setup inputs
+  auto data = BlobGetMutableTensor(workSpace.GetBlob("data"), caffe2::CPU);
+  data->Resize(batch, channels, height, width);
+  float* ptr = data->mutable_data<float>();
+  // HWC to CHW
+  for (int c = 0; c < 3; ++c) {
+    for (int i = 0; i < height * width; ++i) {
+      ptr[c * height * width + i] = static_cast<float>(input.data[3 * i + c]);
+    }
+  }
+
+  auto im_info =
+      BlobGetMutableTensor(workSpace.GetBlob("im_info"), caffe2::CPU);
+  im_info->Resize(batch, 3);
+  float* im_info_ptr = im_info->mutable_data<float>();
+  im_info_ptr[0] = height;
+  im_info_ptr[1] = width;
+  im_info_ptr[2] = 1.0;
+
+  // run the network
+  CAFFE_ENFORCE(workSpace.RunNet(predictNet_.name()));
+
+  // run 3 more times to benchmark
+  int N_benchmark = 3;
+  auto start_time = chrono::high_resolution_clock::now();
+  for (int i = 0; i < N_benchmark; ++i) {
+    CAFFE_ENFORCE(workSpace.RunNet(predictNet_.name()));
+  }
+  auto end_time = chrono::high_resolution_clock::now();
+  auto ms = chrono::duration_cast<chrono::microseconds>(end_time - start_time)
+                .count();
+  cout << "Latency (should vary with different inputs): "
+       << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl;
+
+  // parse Mask R-CNN outputs
+  caffe2::Tensor bbox(
+      workSpace.GetBlob("bbox_nms")->Get<caffe2::Tensor>(), caffe2::CPU);
+  caffe2::Tensor scores(
+      workSpace.GetBlob("score_nms")->Get<caffe2::Tensor>(), caffe2::CPU);
+  caffe2::Tensor labels(
+      workSpace.GetBlob("class_nms")->Get<caffe2::Tensor>(), caffe2::CPU);
+  caffe2::Tensor mask_probs(
+      workSpace.GetBlob("mask_fcn_probs")->Get<caffe2::Tensor>(), caffe2::CPU);
+  cout << "bbox:" << bbox.DebugString() << endl;
+  cout << "scores:" << scores.DebugString() << endl;
+  cout << "labels:" << labels.DebugString() << endl;
+  cout << "mask_probs: " << mask_probs.DebugString() << endl;
+
+  int num_instances = bbox.sizes()[0];
+  for (int i = 0; i < num_instances; ++i) {
+    float score = scores.data<float>()[i];
+    if (score < 0.6)
+      continue; // skip them
+
+    const float* box = bbox.data<float>() + i * 4;
+    int label = labels.data<float>()[i];
+
+    cout << "Prediction " << i << ", xyxy=(";
+    cout << box[0] << ", " << box[1] << ", " << box[2] << ", " << box[3]
+         << "); score=" << score << "; label=" << label << endl;
+
+    const float* mask = mask_probs.data<float>() +
+        i * mask_probs.size_from_dim(1) + label * mask_probs.size_from_dim(2);
+
+    // save the 28x28 mask
+    cv::Mat cv_mask(28, 28, CV_32FC1);
+    memcpy(cv_mask.data, mask, 28 * 28 * sizeof(float));
+    cv::imwrite("mask" + std::to_string(i) + ".png", cv_mask * 255.);
+  }
+  return 0;
+}
diff --git a/preprocess/mhp_extension/detectron2/tools/deploy/torchscript_traced_mask_rcnn.cpp b/preprocess/mhp_extension/detectron2/tools/deploy/torchscript_traced_mask_rcnn.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..82fbdb052fa53543920bf8169a05982005e30cc5
--- /dev/null
+++ b/preprocess/mhp_extension/detectron2/tools/deploy/torchscript_traced_mask_rcnn.cpp
@@ -0,0 +1,71 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+
+#include <opencv2/opencv.hpp>
+#include <iostream>
+#include <string>
+
+#include <torch/csrc/autograd/grad_mode.h>
+#include <torch/script.h>
+
+using namespace std;
+
+// experimental. don't use
+int main(int argc, const char* argv[]) {
+  if (argc != 3) {
+    return 1;
+  }
+  std::string image_file = argv[2];
+
+  torch::autograd::AutoGradMode guard(false);
+  auto module = torch::jit::load(argv[1]);
+
+  assert(module.buffers().size() > 0);
+  // Assume that the entire model is on the same device.
+  // We just put input to this device.
+  auto device = (*begin(module.buffers())).device();
+
+  cv::Mat input_img = cv::imread(image_file, cv::IMREAD_COLOR);
+  const int height = input_img.rows;
+  const int width = input_img.cols;
+  // FPN models require divisibility of 32
+  assert(height % 32 == 0 && width % 32 == 0);
+  const int channels = 3;
+
+  auto input = torch::from_blob(
+      input_img.data, {1, height, width, channels}, torch::kUInt8);
+  // NHWC to NCHW
+  input = input.to(device, torch::kFloat).permute({0, 3, 1, 2}).contiguous();
+
+  std::array<float, 3> im_info_data{height * 1.0f, width * 1.0f, 1.0f};
+  auto im_info = torch::from_blob(im_info_data.data(), {1, 3}).to(device);
+
+  // run the network
+  auto output = module.forward({std::make_tuple(input, im_info)});
+
+  // run 3 more times to benchmark
+  int N_benchmark = 3;
+  auto start_time = chrono::high_resolution_clock::now();
+  for (int i = 0; i < N_benchmark; ++i) {
+    output = module.forward({std::make_tuple(input, im_info)});
+  }
+  auto end_time = chrono::high_resolution_clock::now();
+  auto ms = chrono::duration_cast<chrono::microseconds>(end_time - start_time)
+                .count();
+  cout << "Latency (should vary with different inputs): "
+       << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl;
+
+  auto outputs = output.toTuple()->elements();
+  // parse Mask R-CNN outputs
+  auto bbox = outputs[0].toTensor(), scores = outputs[1].toTensor(),
+       labels = outputs[2].toTensor(), mask_probs = outputs[3].toTensor();
+
+  cout << "bbox: " << bbox.toString() << " " << bbox.sizes() << endl;
+  cout << "scores: " << scores.toString() << " " << scores.sizes() << endl;
+  cout << "labels: " << labels.toString() << " " << labels.sizes() << endl;
+  cout << "mask_probs: " << mask_probs.toString() << " " << mask_probs.sizes()
+       << endl;
+
+  int num_instances = bbox.sizes()[0];
+  cout << bbox << endl;
+  return 0;
+}
diff --git a/preprocess/mhp_extension/detectron2/tools/finetune_net.py b/preprocess/mhp_extension/detectron2/tools/finetune_net.py
new file mode 100755
index 0000000000000000000000000000000000000000..3e521859f70b89da747b324375a5110d8663fdc7
--- /dev/null
+++ b/preprocess/mhp_extension/detectron2/tools/finetune_net.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Detection Training Script.
+
+This script reads a given config file and runs the training or evaluation.
+It is an entry point that is made to train standard models in detectron2.
+
+In order to let one script support training of many models,
+this script contains logic that is specific to these built-in models and therefore
+may not be suitable for your own project.
+For example, your research project perhaps only needs a single "evaluator".
+
+Therefore, we recommend you to use detectron2 as a library and take
+this file as an example of how to use the library.
+You may want to write your own script with your data and other customizations.
+""" + +import logging +import os +from collections import OrderedDict +import torch + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + LVISEvaluator, + PascalVOCDetectionEvaluator, + SemSegEvaluator, + verify_results, +) +from detectron2.modeling import GeneralizedRCNNWithTTA + +# Register Custom Dataset +from detectron2.data.datasets import register_coco_instances + +register_coco_instances("CIHP_train", {}, "../../data/msrcnn_finetune_annotations/CIHP_train.json", + "../../data/instance-level_human_parsing/Training/Images") +register_coco_instances("CIHP_val", {}, "../../data/msrcnn_finetune_annotations/CIHP_val.json", + "../../data/instance-level_human_parsing/Validation/Images") +register_coco_instances("demo_train", {}, "../../demo/annotations/demo_train.json", + "../../demo/img") +register_coco_instances("demo_val", {}, "../../demo/annotations/demo_val.json", + "../../demo/img") + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains pre-defined default logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. You can use + "tools/plain_train_net.py" as an example. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: + evaluator_list.append( + SemSegEvaluator( + dataset_name, + distributed=True, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + output_dir=output_folder, + ) + ) + if evaluator_type in ["coco", "coco_panoptic_seg"]: + evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder)) + if evaluator_type == "coco_panoptic_seg": + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes_instance": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." 
+ return CityscapesSemSegEvaluator(dataset_name) + elif evaluator_type == "pascal_voc": + return PascalVOCDetectionEvaluator(dataset_name) + elif evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + elif len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def test_with_TTA(cls, cfg, model): + logger = logging.getLogger("detectron2.trainer") + # In the end of training, run an evaluation with TTA + # Only support some R-CNN models. + logger.info("Running inference with test-time augmentation ...") + model = GeneralizedRCNNWithTTA(cfg, model) + evaluators = [ + cls.build_evaluator( + cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") + ) + for name in cfg.DATASETS.TEST + ] + res = cls.test(cfg, model, evaluators) + res = OrderedDict({k + "_TTA": v for k, v in res.items()}) + return res + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + """ + If you'd like to do anything fancier than the standard training logic, + consider writing your own training loop (see plain_train_net.py) or + subclassing the trainer. + """ + trainer = Trainer(cfg) + trainer.resume_or_load(resume=False) + if cfg.TEST.AUG.ENABLED: + trainer.register_hooks( + [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] + ) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/tools/inference.sh b/preprocess/mhp_extension/detectron2/tools/inference.sh new file mode 100644 index 0000000000000000000000000000000000000000..3b9d39ed92e9cb574ac4349f457a52a27c38aac3 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/inference.sh @@ -0,0 +1,4 @@ +python finetune_net.py \ + --num-gpus 1 \ + --config-file ../configs/Misc/parsing_inference.yaml \ + --eval-only MODEL.WEIGHTS ./model_final.pth TEST.AUG.ENABLED False diff --git a/preprocess/mhp_extension/detectron2/tools/plain_train_net.py b/preprocess/mhp_extension/detectron2/tools/plain_train_net.py new file mode 100755 index 0000000000000000000000000000000000000000..52a0a281f84bb64fa49c7cb2122564146ee27752 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/plain_train_net.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Detectron2 training script with a plain training loop. + +This script reads a given config file and runs the training or evaluation. +It is an entry point that is able to train standard models in detectron2. 
+ +In order to let one script support training of many models, +this script contains logic that are specific to these built-in models and therefore +may not be suitable for your own project. +For example, your research project perhaps only needs a single "evaluator". + +Therefore, we recommend you to use detectron2 as a library and take +this file as an example of how to use the library. +You may want to write your own script with your data and other customizations. + +Compared to "train_net.py", this script supports fewer default features. +It also includes fewer abstraction, therefore is easier to add custom logic. +""" + +import logging +import os +from collections import OrderedDict +import torch +from torch.nn.parallel import DistributedDataParallel + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer +from detectron2.config import get_cfg +from detectron2.data import ( + MetadataCatalog, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.engine import default_argument_parser, default_setup, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + LVISEvaluator, + PascalVOCDetectionEvaluator, + SemSegEvaluator, + inference_on_dataset, + print_csv_format, +) +from detectron2.modeling import build_model +from detectron2.solver import build_lr_scheduler, build_optimizer +from detectron2.utils.events import ( + CommonMetricPrinter, + EventStorage, + JSONWriter, + TensorboardXWriter, +) + +logger = logging.getLogger("detectron2") + + +def get_evaluator(cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: + evaluator_list.append( + SemSegEvaluator( + dataset_name, + distributed=True, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + output_dir=output_folder, + ) + ) + if evaluator_type in ["coco", "coco_panoptic_seg"]: + evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder)) + if evaluator_type == "coco_panoptic_seg": + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes_instance": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." 
+ return CityscapesSemSegEvaluator(dataset_name) + if evaluator_type == "pascal_voc": + return PascalVOCDetectionEvaluator(dataset_name) + if evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + +def do_test(cfg, model): + results = OrderedDict() + for dataset_name in cfg.DATASETS.TEST: + data_loader = build_detection_test_loader(cfg, dataset_name) + evaluator = get_evaluator( + cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name) + ) + results_i = inference_on_dataset(model, data_loader, evaluator) + results[dataset_name] = results_i + if comm.is_main_process(): + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + if len(results) == 1: + results = list(results.values())[0] + return results + + +def do_train(cfg, model, resume=False): + model.train() + optimizer = build_optimizer(cfg, model) + scheduler = build_lr_scheduler(cfg, optimizer) + + checkpointer = DetectionCheckpointer( + model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler + ) + start_iter = ( + checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1 + ) + max_iter = cfg.SOLVER.MAX_ITER + + periodic_checkpointer = PeriodicCheckpointer( + checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter + ) + + writers = ( + [ + CommonMetricPrinter(max_iter), + JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")), + TensorboardXWriter(cfg.OUTPUT_DIR), + ] + if comm.is_main_process() + else [] + ) + + # compared to "train_net.py", we do not support accurate timing and + # precise BN here, because they are not trivial to implement + data_loader = build_detection_train_loader(cfg) + logger.info("Starting training from iteration {}".format(start_iter)) + with EventStorage(start_iter) as storage: + for data, iteration in zip(data_loader, range(start_iter, max_iter)): + iteration = iteration + 1 + storage.step() + + loss_dict = model(data) + losses = sum(loss_dict.values()) + assert torch.isfinite(losses).all(), loss_dict + + loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()} + losses_reduced = sum(loss for loss in loss_dict_reduced.values()) + if comm.is_main_process(): + storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced) + + optimizer.zero_grad() + losses.backward() + optimizer.step() + storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False) + scheduler.step() + + if ( + cfg.TEST.EVAL_PERIOD > 0 + and iteration % cfg.TEST.EVAL_PERIOD == 0 + and iteration != max_iter + ): + do_test(cfg, model) + # Compared to "train_net.py", the test results are not dumped to EventStorage + comm.synchronize() + + if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter): + for writer in writers: + writer.write() + periodic_checkpointer.step(iteration) + + +def setup(args): + """ + Create configs and perform basic setups. 
+ """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup( + cfg, args + ) # if you don't like any of the default setup, write your own setup code + return cfg + + +def main(args): + cfg = setup(args) + + model = build_model(cfg) + logger.info("Model:\n{}".format(model)) + if args.eval_only: + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + return do_test(cfg, model) + + distributed = comm.get_world_size() > 1 + if distributed: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + + do_train(cfg, model, resume=args.resume) + return do_test(cfg, model) + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/tools/run.sh b/preprocess/mhp_extension/detectron2/tools/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..b89267337002df6eff52a323a07801fb1da6476c --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/run.sh @@ -0,0 +1,3 @@ +python finetune_net.py \ + --config-file ../configs/Misc/parsing_finetune_cihp+vip.yaml \ + --num-gpus 8 diff --git a/preprocess/mhp_extension/detectron2/tools/train_net.py b/preprocess/mhp_extension/detectron2/tools/train_net.py new file mode 100755 index 0000000000000000000000000000000000000000..b1c0ee443c81a0a0f217682cce6d9051ef07c20e --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/train_net.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Detection Training Script. + +This scripts reads a given config file and runs the training or evaluation. +It is an entry point that is made to train standard models in detectron2. + +In order to let one script support training of many models, +this script contains logic that are specific to these built-in models and therefore +may not be suitable for your own project. +For example, your research project perhaps only needs a single "evaluator". + +Therefore, we recommend you to use detectron2 as an library and take +this file as an example of how to use the library. +You may want to write your own script with your data and other customizations. +""" + +import logging +import os +from collections import OrderedDict +import torch + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch +from detectron2.evaluation import ( + CityscapesInstanceEvaluator, + CityscapesSemSegEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + LVISEvaluator, + PascalVOCDetectionEvaluator, + SemSegEvaluator, + verify_results, +) +from detectron2.modeling import GeneralizedRCNNWithTTA + + +class Trainer(DefaultTrainer): + """ + We use the "DefaultTrainer" which contains pre-defined default logic for + standard training workflow. They may not work for you, especially if you + are working on a new research project. In that case you can use the cleaner + "SimpleTrainer", or write your own training loop. 
You can use + "tools/plain_train_net.py" as an example. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: + evaluator_list.append( + SemSegEvaluator( + dataset_name, + distributed=True, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + output_dir=output_folder, + ) + ) + if evaluator_type in ["coco", "coco_panoptic_seg"]: + evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder)) + if evaluator_type == "coco_panoptic_seg": + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes_instance": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." + return CityscapesInstanceEvaluator(dataset_name) + if evaluator_type == "cityscapes_sem_seg": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." + return CityscapesSemSegEvaluator(dataset_name) + elif evaluator_type == "pascal_voc": + return PascalVOCDetectionEvaluator(dataset_name) + elif evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + elif len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def test_with_TTA(cls, cfg, model): + logger = logging.getLogger("detectron2.trainer") + # In the end of training, run an evaluation with TTA + # Only support some R-CNN models. + logger.info("Running inference with test-time augmentation ...") + model = GeneralizedRCNNWithTTA(cfg, model) + evaluators = [ + cls.build_evaluator( + cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") + ) + for name in cfg.DATASETS.TEST + ] + res = cls.test(cfg, model, evaluators) + res = OrderedDict({k + "_TTA": v for k, v in res.items()}) + return res + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + """ + If you'd like to do anything fancier than the standard training logic, + consider writing your own training loop (see plain_train_net.py) or + subclassing the trainer. 
+ """ + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + if cfg.TEST.AUG.ENABLED: + trainer.register_hooks( + [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] + ) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/preprocess/mhp_extension/detectron2/tools/visualize_data.py b/preprocess/mhp_extension/detectron2/tools/visualize_data.py new file mode 100755 index 0000000000000000000000000000000000000000..b143b2d250787c2880657d42c9e9cc0c80c6a348 --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/visualize_data.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import argparse +import os +from itertools import chain +import cv2 +import tqdm + +from detectron2.config import get_cfg +from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader +from detectron2.data import detection_utils as utils +from detectron2.data.build import filter_images_with_few_keypoints +from detectron2.utils.logger import setup_logger +from detectron2.utils.visualizer import Visualizer + + +def setup(args): + cfg = get_cfg() + if args.config_file: + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + return cfg + + +def parse_args(in_args=None): + parser = argparse.ArgumentParser(description="Visualize ground-truth data") + parser.add_argument( + "--source", + choices=["annotation", "dataloader"], + required=True, + help="visualize the annotations or the data loader (with pre-processing)", + ) + parser.add_argument("--config-file", metavar="FILE", help="path to config file") + parser.add_argument("--output-dir", default="./", help="path to output directory") + parser.add_argument("--show", action="store_true", help="show output in a window") + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser.parse_args(in_args) + + +if __name__ == "__main__": + args = parse_args() + logger = setup_logger() + logger.info("Arguments: " + str(args)) + cfg = setup(args) + + dirname = args.output_dir + os.makedirs(dirname, exist_ok=True) + metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) + + def output(vis, fname): + if args.show: + print(fname) + cv2.imshow("window", vis.get_image()[:, :, ::-1]) + cv2.waitKey() + else: + filepath = os.path.join(dirname, fname) + print("Saving to {} ...".format(filepath)) + vis.save(filepath) + + scale = 2.0 if args.show else 1.0 + if args.source == "dataloader": + train_data_loader = build_detection_train_loader(cfg) + for batch in train_data_loader: + for per_image in batch: + # Pytorch tensor is in (C, H, W) format + img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy() + img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT) + + visualizer = Visualizer(img, metadata=metadata, scale=scale) + target_fields = per_image["instances"].get_fields() + labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]] + vis = visualizer.overlay_instances( + labels=labels, + boxes=target_fields.get("gt_boxes", None), + masks=target_fields.get("gt_masks", None), + keypoints=target_fields.get("gt_keypoints", None), + ) + output(vis, str(per_image["image_id"]) + 
".jpg") + else: + dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN])) + if cfg.MODEL.KEYPOINT_ON: + dicts = filter_images_with_few_keypoints(dicts, 1) + for dic in tqdm.tqdm(dicts): + img = utils.read_image(dic["file_name"], "RGB") + visualizer = Visualizer(img, metadata=metadata, scale=scale) + vis = visualizer.draw_dataset_dict(dic) + output(vis, os.path.basename(dic["file_name"])) diff --git a/preprocess/mhp_extension/detectron2/tools/visualize_json_results.py b/preprocess/mhp_extension/detectron2/tools/visualize_json_results.py new file mode 100755 index 0000000000000000000000000000000000000000..d11ecb90382a630d90661bc65cefc4f8bf3486cf --- /dev/null +++ b/preprocess/mhp_extension/detectron2/tools/visualize_json_results.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import argparse +import json +import numpy as np +import os +from collections import defaultdict +import cv2 +import tqdm +from fvcore.common.file_io import PathManager + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.structures import Boxes, BoxMode, Instances +from detectron2.utils.logger import setup_logger +from detectron2.utils.visualizer import Visualizer + + +def create_instances(predictions, image_size): + ret = Instances(image_size) + + score = np.asarray([x["score"] for x in predictions]) + chosen = (score > args.conf_threshold).nonzero()[0] + score = score[chosen] + bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4) + bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + + labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen]) + + ret.scores = score + ret.pred_boxes = Boxes(bbox) + ret.pred_classes = labels + + try: + ret.pred_masks = [predictions[i]["segmentation"] for i in chosen] + except KeyError: + pass + return ret + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="A script that visualizes the json predictions from COCO or LVIS dataset." 
+ ) + parser.add_argument("--input", required=True, help="JSON file produced by the model") + parser.add_argument("--output", required=True, help="output directory") + parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") + parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") + args = parser.parse_args() + + logger = setup_logger() + + with PathManager.open(args.input, "r") as f: + predictions = json.load(f) + + pred_by_image = defaultdict(list) + for p in predictions: + pred_by_image[p["image_id"]].append(p) + + dicts = list(DatasetCatalog.get(args.dataset)) + metadata = MetadataCatalog.get(args.dataset) + if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): + + def dataset_id_map(ds_id): + return metadata.thing_dataset_id_to_contiguous_id[ds_id] + + elif "lvis" in args.dataset: + # LVIS results are in the same format as COCO results, but have a different + # mapping from dataset category id to contiguous category id in [0, #categories - 1] + def dataset_id_map(ds_id): + return ds_id - 1 + + else: + raise ValueError("Unsupported dataset: {}".format(args.dataset)) + + os.makedirs(args.output, exist_ok=True) + + for dic in tqdm.tqdm(dicts): + img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1] + basename = os.path.basename(dic["file_name"]) + + predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2]) + vis = Visualizer(img, metadata) + vis_pred = vis.draw_instance_predictions(predictions).get_image() + + vis = Visualizer(img, metadata) + vis_gt = vis.draw_dataset_dict(dic).get_image() + + concat = np.concatenate((vis_pred, vis_gt), axis=1) + cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1]) diff --git a/preprocess/mhp_extension/global_local_parsing/global_local_datasets.py b/preprocess/mhp_extension/global_local_parsing/global_local_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..8b00594ef3302af2a30440676f96a4904ffe9077 --- /dev/null +++ b/preprocess/mhp_extension/global_local_parsing/global_local_datasets.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : datasets.py +@Time : 8/4/19 3:35 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
+""" + +import os +import numpy as np +import random +import torch +import cv2 +from torch.utils import data +from utils.transforms import get_affine_transform + + +class CropDataSet(data.Dataset): + def __init__(self, root, split_name, crop_size=[473, 473], scale_factor=0.25, + rotation_factor=30, ignore_label=255, transform=None): + self.root = root + self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0] + self.crop_size = np.asarray(crop_size) + self.ignore_label = ignore_label + self.scale_factor = scale_factor + self.rotation_factor = rotation_factor + self.flip_prob = 0.5 + self.transform = transform + self.split_name = split_name + + list_path = os.path.join(self.root, self.split_name + '.txt') + train_list = [i_id.strip() for i_id in open(list_path)] + + self.train_list = train_list + self.number_samples = len(self.train_list) + + def __len__(self): + return self.number_samples + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w * 1.0, h * 1.0], dtype=np.float32) + return center, scale + + def __getitem__(self, index): + train_item = self.train_list[index] + + im_path = os.path.join(self.root, self.split_name + '_images', train_item + '.jpg') + parsing_anno_path = os.path.join(self.root, self.split_name + '_segmentations', train_item + '.png') + + im = cv2.imread(im_path, cv2.IMREAD_COLOR) + h, w, _ = im.shape + parsing_anno = np.zeros((h, w), dtype=np.long) + + # Get person center and scale + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + + if self.split_name != 'test': + # Get pose annotation + parsing_anno = cv2.imread(parsing_anno_path, cv2.IMREAD_GRAYSCALE) + sf = self.scale_factor + rf = self.rotation_factor + s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf) + r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0 + + if random.random() <= self.flip_prob: + im = im[:, ::-1, :] + parsing_anno = parsing_anno[:, ::-1] + person_center[0] = im.shape[1] - person_center[0] - 1 + right_idx = [15, 17, 19] + left_idx = [14, 16, 18] + for i in range(0, 3): + right_pos = np.where(parsing_anno == right_idx[i]) + left_pos = np.where(parsing_anno == left_idx[i]) + parsing_anno[right_pos[0], right_pos[1]] = left_idx[i] + parsing_anno[left_pos[0], left_pos[1]] = right_idx[i] + + trans = get_affine_transform(person_center, s, r, self.crop_size) + input = cv2.warpAffine( + im, + trans, + (int(self.crop_size[1]), int(self.crop_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + + if self.transform: + input = self.transform(input) + + meta = { + 'name': train_item, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + + if self.split_name == 'val' or self.split_name == 'test': + return input, meta + else: + label_parsing = cv2.warpAffine( + parsing_anno, + trans, + (int(self.crop_size[1]), int(self.crop_size[0])), + flags=cv2.INTER_NEAREST, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(255)) + + label_parsing = torch.from_numpy(label_parsing) + + return input, label_parsing, meta + + +class CropDataValSet(data.Dataset): + def __init__(self, root, split_name='crop_pic', crop_size=[473, 473], transform=None, flip=False): + self.root = root + 
self.crop_size = crop_size + self.transform = transform + self.flip = flip + self.split_name = split_name + self.root = root + self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0] + self.crop_size = np.asarray(crop_size) + + list_path = os.path.join(self.root, self.split_name + '.txt') + val_list = [i_id.strip() for i_id in open(list_path)] + + self.val_list = val_list + self.number_samples = len(self.val_list) + + def __len__(self): + return len(self.val_list) + + def _box2cs(self, box): + x, y, w, h = box[:4] + return self._xywh2cs(x, y, w, h) + + def _xywh2cs(self, x, y, w, h): + center = np.zeros((2), dtype=np.float32) + center[0] = x + w * 0.5 + center[1] = y + h * 0.5 + if w > self.aspect_ratio * h: + h = w * 1.0 / self.aspect_ratio + elif w < self.aspect_ratio * h: + w = h * self.aspect_ratio + scale = np.array([w * 1.0, h * 1.0], dtype=np.float32) + + return center, scale + + def __getitem__(self, index): + val_item = self.val_list[index] + # Load training image + im_path = os.path.join(self.root, self.split_name, val_item + '.jpg') + im = cv2.imread(im_path, cv2.IMREAD_COLOR) + h, w, _ = im.shape + # Get person center and scale + person_center, s = self._box2cs([0, 0, w - 1, h - 1]) + r = 0 + trans = get_affine_transform(person_center, s, r, self.crop_size) + input = cv2.warpAffine( + im, + trans, + (int(self.crop_size[1]), int(self.crop_size[0])), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0, 0, 0)) + input = self.transform(input) + flip_input = input.flip(dims=[-1]) + if self.flip: + batch_input_im = torch.stack([input, flip_input]) + else: + batch_input_im = input + + meta = { + 'name': val_item, + 'center': person_center, + 'height': h, + 'width': w, + 'scale': s, + 'rotation': r + } + + return batch_input_im, meta diff --git a/preprocess/mhp_extension/global_local_parsing/global_local_evaluate.py b/preprocess/mhp_extension/global_local_parsing/global_local_evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..288e3c8214f945d5a4f5fc6824b74b3d42e037b2 --- /dev/null +++ b/preprocess/mhp_extension/global_local_parsing/global_local_evaluate.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : evaluate.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import argparse +import numpy as np +import torch + +from torch.utils import data +from tqdm import tqdm +from PIL import Image as PILImage +import torchvision.transforms as transforms +import torch.backends.cudnn as cudnn + +import networks +from utils.miou import compute_mean_ioU +from utils.transforms import BGR2RGB_transform +from utils.transforms import transform_parsing, transform_logits +from mhp_extension.global_local_parsing.global_local_datasets import CropDataValSet + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + + Returns: + A list of parsed arguments. 
+ """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + # Network Structure + parser.add_argument("--arch", type=str, default='resnet101') + # Data Preference + parser.add_argument("--data-dir", type=str, default='./data/LIP') + parser.add_argument("--batch-size", type=int, default=1) + parser.add_argument("--split-name", type=str, default='crop_pic') + parser.add_argument("--input-size", type=str, default='473,473') + parser.add_argument("--num-classes", type=int, default=20) + parser.add_argument("--ignore-label", type=int, default=255) + parser.add_argument("--random-mirror", action="store_true") + parser.add_argument("--random-scale", action="store_true") + # Evaluation Preference + parser.add_argument("--log-dir", type=str, default='./log') + parser.add_argument("--model-restore", type=str, default='./log/checkpoint.pth.tar') + parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.") + parser.add_argument("--save-results", action="store_true", help="whether to save the results.") + parser.add_argument("--flip", action="store_true", help="random flip during the test.") + parser.add_argument("--multi-scales", type=str, default='1', help="multiple scales during the test") + return parser.parse_args() + + +def get_palette(num_cls): + """ Returns the color map for visualizing the segmentation mask. + Args: + num_cls: Number of classes + Returns: + The color map + """ + n = num_cls + palette = [0] * (n * 3) + for j in range(0, n): + lab = j + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) + palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) + palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + i += 1 + lab >>= 3 + return palette + + +def multi_scale_testing(model, batch_input_im, crop_size=[473, 473], flip=True, multi_scales=[1]): + flipped_idx = (15, 14, 17, 16, 19, 18) + if len(batch_input_im.shape) > 4: + batch_input_im = batch_input_im.squeeze() + if len(batch_input_im.shape) == 3: + batch_input_im = batch_input_im.unsqueeze(0) + + interp = torch.nn.Upsample(size=crop_size, mode='bilinear', align_corners=True) + ms_outputs = [] + for s in multi_scales: + interp_im = torch.nn.Upsample(scale_factor=s, mode='bilinear', align_corners=True) + scaled_im = interp_im(batch_input_im) + parsing_output = model(scaled_im) + parsing_output = parsing_output[0][-1] + output = parsing_output[0] + if flip: + flipped_output = parsing_output[1] + flipped_output[14:20, :, :] = flipped_output[flipped_idx, :, :] + output += flipped_output.flip(dims=[-1]) + output *= 0.5 + output = interp(output.unsqueeze(0)) + ms_outputs.append(output[0]) + ms_fused_parsing_output = torch.stack(ms_outputs) + ms_fused_parsing_output = ms_fused_parsing_output.mean(0) + ms_fused_parsing_output = ms_fused_parsing_output.permute(1, 2, 0) # HWC + parsing = torch.argmax(ms_fused_parsing_output, dim=2) + parsing = parsing.data.cpu().numpy() + ms_fused_parsing_output = ms_fused_parsing_output.data.cpu().numpy() + return parsing, ms_fused_parsing_output + + +def main(): + """Create the model and start the evaluation process.""" + args = get_arguments() + multi_scales = [float(i) for i in args.multi_scales.split(',')] + gpus = [int(i) for i in args.gpu.split(',')] + assert len(gpus) == 1 + if not args.gpu == 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + cudnn.benchmark = True + cudnn.enabled = True + + h, w = map(int, args.input_size.split(',')) + input_size = 
[h, w] + + model = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=None) + + IMAGE_MEAN = model.mean + IMAGE_STD = model.std + INPUT_SPACE = model.input_space + print('image mean: {}'.format(IMAGE_MEAN)) + print('image std: {}'.format(IMAGE_STD)) + print('input space:{}'.format(INPUT_SPACE)) + if INPUT_SPACE == 'BGR': + print('BGR Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + + ]) + if INPUT_SPACE == 'RGB': + print('RGB Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + BGR2RGB_transform(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + ]) + + # Data loader + lip_test_dataset = CropDataValSet(args.data_dir, args.split_name, crop_size=input_size, transform=transform, + flip=args.flip) + num_samples = len(lip_test_dataset) + print('Totoal testing sample numbers: {}'.format(num_samples)) + testloader = data.DataLoader(lip_test_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True) + + # Load model weight + state_dict = torch.load(args.model_restore) + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + sp_results_dir = os.path.join(args.log_dir, args.split_name + '_parsing') + if not os.path.exists(sp_results_dir): + os.makedirs(sp_results_dir) + + palette = get_palette(20) + parsing_preds = [] + scales = np.zeros((num_samples, 2), dtype=np.float32) + centers = np.zeros((num_samples, 2), dtype=np.int32) + with torch.no_grad(): + for idx, batch in enumerate(tqdm(testloader)): + image, meta = batch + if (len(image.shape) > 4): + image = image.squeeze() + im_name = meta['name'][0] + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + scales[idx, :] = s + centers[idx, :] = c + parsing, logits = multi_scale_testing(model, image.cuda(), crop_size=input_size, flip=args.flip, + multi_scales=multi_scales) + if args.save_results: + parsing_result = transform_parsing(parsing, c, s, w, h, input_size) + parsing_result_path = os.path.join(sp_results_dir, im_name + '.png') + output_im = PILImage.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + output_im.putpalette(palette) + output_im.save(parsing_result_path) + # save logits + logits_result = transform_logits(logits, c, s, w, h, input_size) + logits_result_path = os.path.join(sp_results_dir, im_name + '.npy') + np.save(logits_result_path, logits_result) + return + + +if __name__ == '__main__': + main() diff --git a/preprocess/mhp_extension/global_local_parsing/global_local_train.py b/preprocess/mhp_extension/global_local_parsing/global_local_train.py new file mode 100644 index 0000000000000000000000000000000000000000..810b1dbbbc0bbc489830903770cc4d627e16c218 --- /dev/null +++ b/preprocess/mhp_extension/global_local_parsing/global_local_train.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : train.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
+""" + +import os +import json +import timeit +import argparse + +import torch +import torch.optim as optim +import torchvision.transforms as transforms +import torch.backends.cudnn as cudnn +from torch.utils import data + +import networks +import utils.schp as schp +from datasets.datasets import LIPDataSet +from datasets.target_generation import generate_edge_tensor +from utils.transforms import BGR2RGB_transform +from utils.criterion import CriterionAll +from utils.encoding import DataParallelModel, DataParallelCriterion +from utils.warmup_scheduler import SGDRScheduler + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + Returns: + A list of parsed arguments. + """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + # Network Structure + parser.add_argument("--arch", type=str, default='resnet101') + # Data Preference + parser.add_argument("--data-dir", type=str, default='./data/LIP') + parser.add_argument("--batch-size", type=int, default=16) + parser.add_argument("--input-size", type=str, default='473,473') + parser.add_argument("--split-name", type=str, default='crop_pic') + parser.add_argument("--num-classes", type=int, default=20) + parser.add_argument("--ignore-label", type=int, default=255) + parser.add_argument("--random-mirror", action="store_true") + parser.add_argument("--random-scale", action="store_true") + # Training Strategy + parser.add_argument("--learning-rate", type=float, default=7e-3) + parser.add_argument("--momentum", type=float, default=0.9) + parser.add_argument("--weight-decay", type=float, default=5e-4) + parser.add_argument("--gpu", type=str, default='0,1,2') + parser.add_argument("--start-epoch", type=int, default=0) + parser.add_argument("--epochs", type=int, default=150) + parser.add_argument("--eval-epochs", type=int, default=10) + parser.add_argument("--imagenet-pretrain", type=str, default='./pretrain_model/resnet101-imagenet.pth') + parser.add_argument("--log-dir", type=str, default='./log') + parser.add_argument("--model-restore", type=str, default='./log/checkpoint.pth.tar') + parser.add_argument("--schp-start", type=int, default=100, help='schp start epoch') + parser.add_argument("--cycle-epochs", type=int, default=10, help='schp cyclical epoch') + parser.add_argument("--schp-restore", type=str, default='./log/schp_checkpoint.pth.tar') + parser.add_argument("--lambda-s", type=float, default=1, help='segmentation loss weight') + parser.add_argument("--lambda-e", type=float, default=1, help='edge loss weight') + parser.add_argument("--lambda-c", type=float, default=0.1, help='segmentation-edge consistency loss weight') + return parser.parse_args() + + +def main(): + args = get_arguments() + print(args) + + start_epoch = 0 + cycle_n = 0 + + if not os.path.exists(args.log_dir): + os.makedirs(args.log_dir) + with open(os.path.join(args.log_dir, 'args.json'), 'w') as opt_file: + json.dump(vars(args), opt_file) + + gpus = [int(i) for i in args.gpu.split(',')] + if not args.gpu == 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + input_size = list(map(int, args.input_size.split(','))) + + cudnn.enabled = True + cudnn.benchmark = True + + # Model Initialization + AugmentCE2P = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=args.imagenet_pretrain) + model = DataParallelModel(AugmentCE2P) + model.cuda() + + IMAGE_MEAN = AugmentCE2P.mean + IMAGE_STD = AugmentCE2P.std + INPUT_SPACE = AugmentCE2P.input_space + print('image mean: {}'.format(IMAGE_MEAN)) + print('image 
std: {}'.format(IMAGE_STD)) + print('input space:{}'.format(INPUT_SPACE)) + + restore_from = args.model_restore + if os.path.exists(restore_from): + print('Resume training from {}'.format(restore_from)) + checkpoint = torch.load(restore_from) + model.load_state_dict(checkpoint['state_dict']) + start_epoch = checkpoint['epoch'] + + SCHP_AugmentCE2P = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=args.imagenet_pretrain) + schp_model = DataParallelModel(SCHP_AugmentCE2P) + schp_model.cuda() + + if os.path.exists(args.schp_restore): + print('Resuming schp checkpoint from {}'.format(args.schp_restore)) + schp_checkpoint = torch.load(args.schp_restore) + schp_model_state_dict = schp_checkpoint['state_dict'] + cycle_n = schp_checkpoint['cycle_n'] + schp_model.load_state_dict(schp_model_state_dict) + + # Loss Function + criterion = CriterionAll(lambda_1=args.lambda_s, lambda_2=args.lambda_e, lambda_3=args.lambda_c, + num_classes=args.num_classes) + criterion = DataParallelCriterion(criterion) + criterion.cuda() + + # Data Loader + if INPUT_SPACE == 'BGR': + print('BGR Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + ]) + + elif INPUT_SPACE == 'RGB': + print('RGB Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + BGR2RGB_transform(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + ]) + + train_dataset = LIPDataSet(args.data_dir, args.split_name, crop_size=input_size, transform=transform) + train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size * len(gpus), + num_workers=16, shuffle=True, pin_memory=True, drop_last=True) + print('Total training samples: {}'.format(len(train_dataset))) + + # Optimizer Initialization + optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, + weight_decay=args.weight_decay) + + lr_scheduler = SGDRScheduler(optimizer, total_epoch=args.epochs, + eta_min=args.learning_rate / 100, warmup_epoch=10, + start_cyclical=args.schp_start, cyclical_base_lr=args.learning_rate / 2, + cyclical_epoch=args.cycle_epochs) + + total_iters = args.epochs * len(train_loader) + start = timeit.default_timer() + for epoch in range(start_epoch, args.epochs): + lr_scheduler.step(epoch=epoch) + lr = lr_scheduler.get_lr()[0] + + model.train() + for i_iter, batch in enumerate(train_loader): + i_iter += len(train_loader) * epoch + + images, labels, _ = batch + labels = labels.cuda(non_blocking=True) + + edges = generate_edge_tensor(labels) + labels = labels.type(torch.cuda.LongTensor) + edges = edges.type(torch.cuda.LongTensor) + + preds = model(images) + + # Online Self Correction Cycle with Label Refinement + if cycle_n >= 1: + with torch.no_grad(): + soft_preds = schp_model(images) + soft_parsing = [] + soft_edge = [] + for soft_pred in soft_preds: + soft_parsing.append(soft_pred[0][-1]) + soft_edge.append(soft_pred[1][-1]) + soft_preds = torch.cat(soft_parsing, dim=0) + soft_edges = torch.cat(soft_edge, dim=0) + else: + soft_preds = None + soft_edges = None + + loss = criterion(preds, [labels, edges, soft_preds, soft_edges], cycle_n) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + if i_iter % 100 == 0: + print('iter = {} of {} completed, lr = {}, loss = {}'.format(i_iter, total_iters, lr, + loss.data.cpu().numpy())) + if (epoch + 1) % (args.eval_epochs) == 0: + schp.save_checkpoint({ + 'epoch': epoch + 1, + 'state_dict': model.state_dict(), + }, False, args.log_dir, 
filename='checkpoint_{}.pth.tar'.format(epoch + 1)) + + # Self Correction Cycle with Model Aggregation + if (epoch + 1) >= args.schp_start and (epoch + 1 - args.schp_start) % args.cycle_epochs == 0: + print('Self-correction cycle number {}'.format(cycle_n)) + schp.moving_average(schp_model, model, 1.0 / (cycle_n + 1)) + cycle_n += 1 + schp.bn_re_estimate(train_loader, schp_model) + schp.save_schp_checkpoint({ + 'state_dict': schp_model.state_dict(), + 'cycle_n': cycle_n, + }, False, args.log_dir, filename='schp_{}_checkpoint.pth.tar'.format(cycle_n)) + + torch.cuda.empty_cache() + end = timeit.default_timer() + print('epoch = {} of {} completed using {} s'.format(epoch, args.epochs, + (end - start) / (epoch - start_epoch + 1))) + + end = timeit.default_timer() + print('Training Finished in {} seconds'.format(end - start)) + + +if __name__ == '__main__': + main() diff --git a/preprocess/mhp_extension/global_local_parsing/make_id_list.py b/preprocess/mhp_extension/global_local_parsing/make_id_list.py new file mode 100644 index 0000000000000000000000000000000000000000..311edf45e2d5a00ad85f3df96530e2f51bfd4686 --- /dev/null +++ b/preprocess/mhp_extension/global_local_parsing/make_id_list.py @@ -0,0 +1,13 @@ +import os + +DATASET = 'VIP' # DATASET: MHPv2 or CIHP or VIP +TYPE = 'crop_pic' # crop_pic or DemoDataset +IMG_DIR = '../demo/cropped_img/crop_pic' +SAVE_DIR = '../demo/cropped_img' + +if not os.path.exists(SAVE_DIR): + os.makedirs(SAVE_DIR) + +with open(os.path.join(SAVE_DIR, TYPE + '.txt'), "w") as f: + for img_name in os.listdir(IMG_DIR): + f.write(img_name[:-4] + '\n') diff --git a/preprocess/mhp_extension/logits_fusion.py b/preprocess/mhp_extension/logits_fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..07a8446282d24b7811b56de5b9591da29ffcdd60 --- /dev/null +++ b/preprocess/mhp_extension/logits_fusion.py @@ -0,0 +1,307 @@ +import argparse +import cv2 +import os +import json +import numpy as np +from PIL import Image as PILImage +import joblib + + +def mask_nms(masks, bbox_scores, instances_confidence_threshold=0.5, overlap_threshold=0.7): + """ + NMS-like procedure used in Panoptic Segmentation + Remove the overlap areas of different instances in Instance Segmentation + """ + panoptic_seg = np.zeros(masks.shape[:2], dtype=np.uint8) + sorted_inds = list(range(len(bbox_scores))) + current_segment_id = 0 + segments_score = [] + + for inst_id in sorted_inds: + score = bbox_scores[inst_id] + if score < instances_confidence_threshold: + break + mask = masks[:, :, inst_id] + mask_area = mask.sum() + + if mask_area == 0: + continue + + intersect = (mask > 0) & (panoptic_seg > 0) + intersect_area = intersect.sum() + + if intersect_area * 1.0 / mask_area > overlap_threshold: + continue + + if intersect_area > 0: + mask = mask & (panoptic_seg == 0) + + current_segment_id += 1 + # panoptic_seg[np.where(mask==1)] = current_segment_id + # panoptic_seg = panoptic_seg + current_segment_id*mask + panoptic_seg = np.where(mask == 0, panoptic_seg, current_segment_id) + segments_score.append(score) + # print(np.unique(panoptic_seg)) + return panoptic_seg, segments_score + + +def extend(si, sj, instance_label, global_label, panoptic_seg_mask, class_map): + """ + """ + directions = [[-1, 0], [0, 1], [1, 0], [0, -1], + [1, 1], [1, -1], [-1, 1], [-1, -1]] + + inst_class = instance_label[si, sj] + human_class = panoptic_seg_mask[si, sj] + global_class = class_map[inst_class] + queue = [[si, sj]] + + while len(queue) != 0: + cur = queue[0] + queue.pop(0) + + for direction in 
directions: + ni = cur[0] + direction[0] + nj = cur[1] + direction[1] + + if ni >= 0 and nj >= 0 and \ + ni < instance_label.shape[0] and \ + nj < instance_label.shape[1] and \ + instance_label[ni, nj] == 0 and \ + global_label[ni, nj] == global_class: + instance_label[ni, nj] = inst_class + # Using refined instance label to refine human label + panoptic_seg_mask[ni, nj] = human_class + queue.append([ni, nj]) + + +def refine(instance_label, panoptic_seg_mask, global_label, class_map): + """ + Inputs: + [ instance_label ] + np.array() with shape [h, w] + [ global_label ] with shape [h, w] + np.array() + """ + for i in range(instance_label.shape[0]): + for j in range(instance_label.shape[1]): + if instance_label[i, j] != 0: + extend(i, j, instance_label, global_label, panoptic_seg_mask, class_map) + + +def get_palette(num_cls): + """ Returns the color map for visualizing the segmentation mask. + Inputs: + =num_cls= + Number of classes. + Returns: + The color map. + """ + n = num_cls + palette = [0] * (n * 3) + for j in range(0, n): + lab = j + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) + palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) + palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + i += 1 + lab >>= 3 + return palette + + +def patch2img_output(patch_dir, img_name, img_height, img_width, bbox, bbox_type, num_class): + """transform bbox patch outputs to image output""" + assert bbox_type == 'gt' or 'msrcnn' + output = np.zeros((img_height, img_width, num_class), dtype='float') + output[:, :, 0] = np.inf + count_predictions = np.zeros((img_height, img_width, num_class), dtype='int32') + for i in range(len(bbox)): # person index starts from 1 + file_path = os.path.join(patch_dir, os.path.splitext(img_name)[0] + '_' + str(i + 1) + '_' + bbox_type + '.npy') + bbox_output = np.load(file_path) + output[bbox[i][1]:bbox[i][3] + 1, bbox[i][0]:bbox[i][2] + 1, 1:] += bbox_output[:, :, 1:] + count_predictions[bbox[i][1]:bbox[i][3] + 1, bbox[i][0]:bbox[i][2] + 1, 1:] += 1 + output[bbox[i][1]:bbox[i][3] + 1, bbox[i][0]:bbox[i][2] + 1, 0] \ + = np.minimum(output[bbox[i][1]:bbox[i][3] + 1, bbox[i][0]:bbox[i][2] + 1, 0], bbox_output[:, :, 0]) + + # Caution zero dividing. + count_predictions[count_predictions == 0] = 1 + return output / count_predictions + + +def get_instance(cat_gt, panoptic_seg_mask): + """ + """ + instance_gt = np.zeros_like(cat_gt, dtype=np.uint8) + num_humans = len(np.unique(panoptic_seg_mask)) - 1 + class_map = {} + + total_part_num = 0 + for id in range(1, num_humans + 1): + human_part_label = np.where(panoptic_seg_mask == id, cat_gt, 0).astype(np.uint8) + # human_part_label = (np.where(panoptic_seg_mask==id) * cat_gt).astype(np.uint8) + part_classes = np.unique(human_part_label) + + exceed = False + for part_id in part_classes: + if part_id == 0: # background + continue + total_part_num += 1 + + if total_part_num > 255: + print("total_part_num exceed, return current instance map: {}".format(total_part_num)) + exceed = True + break + class_map[total_part_num] = part_id + instance_gt[np.where(human_part_label == part_id)] = total_part_num + if exceed: + break + + # Make instance id continous. 
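+    # np.unique returns the sorted instance ids actually present; the loop below remaps
+    # them onto 1..N-1 so the id map has no gaps, then rebuilds the part-class lookup
+    # (final_class_map) to match the renumbered ids.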
+ ori_cur_labels = np.unique(instance_gt) + total_num_label = len(ori_cur_labels) + if instance_gt.max() + 1 != total_num_label: + for label in range(1, total_num_label): + instance_gt[instance_gt == ori_cur_labels[label]] = label + + final_class_map = {} + for label in range(1, total_num_label): + if label >= 1: + final_class_map[label] = class_map[ori_cur_labels[label]] + + return instance_gt, final_class_map + + +def compute_confidence(im_name, feature_map, class_map, + instance_label, output_dir, + panoptic_seg_mask, seg_score_list): + """ + """ + conf_file = open(os.path.join(output_dir, os.path.splitext(im_name)[0] + '.txt'), 'w') + + weighted_map = np.zeros_like(feature_map[:, :, 0]) + for index, score in enumerate(seg_score_list): + weighted_map += (panoptic_seg_mask == index + 1) * score + + for label in class_map.keys(): + cls = class_map[label] + confidence = feature_map[:, :, cls].reshape(-1)[np.where(instance_label.reshape(-1) == label)] + confidence = (weighted_map * feature_map[:, :, cls].copy()).reshape(-1)[ + np.where(instance_label.reshape(-1) == label)] + + confidence = confidence.sum() / len(confidence) + conf_file.write('{} {}\n'.format(cls, confidence)) + + conf_file.close() + + +def result_saving(fused_output, img_name, img_height, img_width, output_dir, mask_output_path, bbox_score, msrcnn_bbox): + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + global_root = os.path.join(output_dir, 'global_parsing') + instance_root = os.path.join(output_dir, 'instance_parsing') + tag_dir = os.path.join(output_dir, 'global_tag') + + if not os.path.exists(global_root): + os.makedirs(global_root) + if not os.path.exists(instance_root): + os.makedirs(instance_root) + if not os.path.exists(tag_dir): + os.makedirs(tag_dir) + + # For visualizing indexed png image. 
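+    # get_palette(256) yields a 256-entry indexed-color palette; index 0 (background) stays black.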
+ palette = get_palette(256) + + fused_output = cv2.resize(fused_output, dsize=(img_width, img_height), interpolation=cv2.INTER_LINEAR) + seg_pred = np.asarray(np.argmax(fused_output, axis=2), dtype=np.uint8) + masks = np.load(mask_output_path) + masks[np.where(seg_pred == 0)] = 0 + + panoptic_seg_mask = masks + seg_score_list = bbox_score + + instance_pred, class_map = get_instance(seg_pred, panoptic_seg_mask) + refine(instance_pred, panoptic_seg_mask, seg_pred, class_map) + + compute_confidence(img_name, fused_output, class_map, instance_pred, instance_root, + panoptic_seg_mask, seg_score_list) + + ins_seg_results = open(os.path.join(tag_dir, os.path.splitext(img_name)[0] + '.txt'), "a") + keep_human_id_list = list(np.unique(panoptic_seg_mask)) + if 0 in keep_human_id_list: + keep_human_id_list.remove(0) + for i in keep_human_id_list: + ins_seg_results.write('{:.6f} {} {} {} {}\n'.format(seg_score_list[i - 1], + int(msrcnn_bbox[i - 1][1]), int(msrcnn_bbox[i - 1][0]), + int(msrcnn_bbox[i - 1][3]), int(msrcnn_bbox[i - 1][2]))) + ins_seg_results.close() + + output_im_global = PILImage.fromarray(seg_pred) + output_im_instance = PILImage.fromarray(instance_pred) + output_im_tag = PILImage.fromarray(panoptic_seg_mask) + output_im_global.putpalette(palette) + output_im_instance.putpalette(palette) + output_im_tag.putpalette(palette) + + output_im_global.save(os.path.join(global_root, os.path.splitext(img_name)[0] + '.png')) + output_im_instance.save(os.path.join(instance_root, os.path.splitext(img_name)[0] + '.png')) + output_im_tag.save(os.path.join(tag_dir, os.path.splitext(img_name)[0] + '.png')) + + +def multi_process(a, args): + img_name = a['im_name'] + img_height = a['img_height'] + img_width = a['img_width'] + msrcnn_bbox = a['person_bbox'] + bbox_score = a['person_bbox_score'] + + ######### loading outputs from gloabl and local models ######### + global_output = np.load(os.path.join(args.global_output_dir, os.path.splitext(img_name)[0] + '.npy')) + + msrcnn_output = patch2img_output(args.msrcnn_output_dir, img_name, img_height, img_width, msrcnn_bbox, + bbox_type='msrcnn', num_class=20) + + gt_output = patch2img_output(args.gt_output_dir, img_name, img_height, img_width, msrcnn_bbox, bbox_type='msrcnn', + num_class=20) + + #### global and local branch logits fusion ##### +# fused_output = global_output + msrcnn_output + gt_output + fused_output = global_output + gt_output + + + mask_output_path = os.path.join(args.mask_output_dir, os.path.splitext(img_name)[0] + '_mask.npy') + result_saving(fused_output, img_name, img_height, img_width, args.save_dir, mask_output_path, bbox_score, msrcnn_bbox) + return + + +def main(args): + json_file = open(args.test_json_path) + anno = json.load(json_file)['root'] + + results = joblib.Parallel(n_jobs=24, verbose=10, pre_dispatch="all")( + [joblib.delayed(multi_process)(a, args) for i, a in enumerate(anno)] + ) + + +def get_arguments(): + parser = argparse.ArgumentParser(description="obtain final prediction by logits fusion") + parser.add_argument("--test_json_path", type=str, default='./data/CIHP/cascade_152_finetune/test.json') + parser.add_argument("--global_output_dir", type=str, + default='./data/CIHP/global/global_result-cihp-resnet101/global_output') +# parser.add_argument("--msrcnn_output_dir", type=str, +# default='./data/CIHP/cascade_152__finetune/msrcnn_result-cihp-resnet101/msrcnn_output') + parser.add_argument("--gt_output_dir", type=str, + default='./data/CIHP/cascade_152__finetune/gt_result-cihp-resnet101/gt_output') + 
parser.add_argument("--mask_output_dir", type=str, default='./data/CIHP/cascade_152_finetune/mask') + parser.add_argument("--save_dir", type=str, default='./data/CIHP/fusion_results/cihp-msrcnn_finetune') + return parser.parse_args() + + +if __name__ == '__main__': + args = get_arguments() + main(args) diff --git a/preprocess/mhp_extension/make_crop_and_mask_w_mask_nms.py b/preprocess/mhp_extension/make_crop_and_mask_w_mask_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..1efc5ae86f81db7dcdae1d22db771d2a8e8d3ccf --- /dev/null +++ b/preprocess/mhp_extension/make_crop_and_mask_w_mask_nms.py @@ -0,0 +1,134 @@ +import numpy as np +import cv2, torch +import os +import json +import argparse +import pycocotools.mask as mask_util +from tqdm import tqdm + + +def bbox_expand(img_height, img_width, bbox, exp_ratio): + x_min, y_min, x_max, y_max = bbox[:] + exp_x = (x_max - x_min) * ((exp_ratio - 1) / 2) + exp_y = (y_max - y_min) * ((exp_ratio - 1) / 2) + new_x_min = 0 if x_min - exp_x < 0 else np.round(x_min - exp_x) + new_y_min = 0 if y_min - exp_y < 0 else np.round(y_min - exp_y) + new_x_max = img_width - 1 if x_max + exp_x > img_width - 1 else np.round(x_max + exp_x) + new_y_max = img_height - 1 if y_max + exp_y > img_height - 1 else np.round(y_max + exp_y) + return int(new_x_min), int(new_y_min), int(new_x_max), int(new_y_max) + + +def make_crop_and_mask(img_info, pred, file_list, crop_save_dir, mask_save_dir, args): + img_name = img_info['file_name'] + img_id = img_info['id'] - 1 # img_info['id'] start form 1 + img_w = img_info['width'] + img_h = img_info['height'] + + img = cv2.imread(os.path.join(args.img_dir, img_name)) + + exp_bbox = [] + ori_bbox = [] + bbox_name_list = [] + bbox_score_list = [] + person_idx = 0 + + panoptic_seg = np.zeros((img_h, img_w), dtype=np.uint8) + assert len(pred[img_id]['instances']) > 0, 'image without instance prediction' + + for instance in pred[img_id]['instances']: + score = instance['score'] + if score < args.conf_thres: + break + + mask = mask_util.decode(instance['segmentation']) + mask_area = mask.sum() + + if mask_area == 0: # if mask_area < img_w*img_h/1000: + continue + + intersect = (mask > 0) & (panoptic_seg > 0) + intersect_area = intersect.sum() + + if intersect_area * 1.0 / mask_area > args.overlap_threshold: # todo add args + continue + + if intersect_area > 0: + mask = mask & (panoptic_seg == 0) + + person_idx += 1 + panoptic_seg = np.where(mask == 0, panoptic_seg, person_idx) + + bbox_score_list.append(score) + + ins_bbox = instance['bbox'] # [x,y,w,h] format + x_min, y_min, box_w, box_h = ins_bbox + x_max, y_max = x_min + box_w, y_min + box_h + exp_x_min, exp_y_min, exp_x_max, exp_y_max = bbox_expand(img_h, img_w, [x_min, y_min, x_max, y_max], + args.exp_ratio) + crop_img = img[exp_y_min:exp_y_max + 1, exp_x_min:exp_x_max + 1, :] + exp_bbox.append([exp_x_min, exp_y_min, exp_x_max, exp_y_max]) + ori_bbox.append([x_min, y_min, x_max, y_max]) + bbox_name = os.path.splitext(img_name)[0] + '_' + str(person_idx) + '_msrcnn.jpg' + bbox_name_list.append(bbox_name) + + cv2.imwrite(os.path.join(crop_save_dir, bbox_name), crop_img) + + assert person_idx > 0, 'image without instance' + mask_name = os.path.splitext(img_name)[0] + '_mask.npy' + np.save(os.path.join(mask_save_dir, mask_name), panoptic_seg) + + ############## json writing ################## + item = {} + item['dataset'] = 'CIHP' + item['im_name'] = img_name + item['img_height'] = img_h + item['img_width'] = img_w + item['center'] = [img_h / 2, img_w / 2] + 
item['person_num'] = person_idx + item['person_bbox'] = exp_bbox + item['real_person_bbox'] = ori_bbox + item['person_bbox_score'] = bbox_score_list + item['bbox_name'] = bbox_name_list + item['mask_name'] = mask_name + file_list.append(item) + json_file = {'root': file_list} + return json_file, file_list + + +def get_arguments(): + parser = argparse.ArgumentParser(description="crop person val/test demo for inference") + parser.add_argument("--exp_ratio", type=float, default=1.2) + parser.add_argument("--overlap_threshold", type=float, default=0.5) + parser.add_argument("--conf_thres", type=float, default=0.5) + parser.add_argument("--img_dir", type=str, + default='/data03/v_xuyunqiu/data/instance-level_human_parsing/Testing/Images') + parser.add_argument("--save_dir", type=str, + default='/data03/v_xuyunqiu/Projects/experiment_data/testing/resnest_200_TTA_mask_nms_all_data') + parser.add_argument("--img_list", type=str, + default='/data03/v_xuyunqiu/Projects/pycococreator/annotations/CIHP_test.json') + parser.add_argument("--det_res", type=str, + default='/data02/v_xuyunqiu/detectron2-ResNeSt/tools/output_cihp_inference_resnest/inference_TTA/instances_predictions.pth') + return parser.parse_args() + + +def main(args): + img_info_list = json.load(open(args.img_list, encoding='UTF-8')) + pred = torch.load(args.det_res) + + crop_save_dir = os.path.join(args.save_dir, 'crop_pic') + if not os.path.exists(crop_save_dir): + os.makedirs(crop_save_dir) + mask_save_dir = os.path.join(args.save_dir, 'crop_mask') + if not os.path.exists(mask_save_dir): + os.makedirs(mask_save_dir) + + file_list = [] + for img_info in tqdm(img_info_list['images']): + json_file, file_list = make_crop_and_mask(img_info, pred, file_list, crop_save_dir, mask_save_dir, args) + with open(os.path.join(args.save_dir, 'crop.json'), 'w') as f: + json.dump(json_file, f, indent=2) + + +if __name__ == '__main__': + args = get_arguments() + main(args) diff --git a/preprocess/mhp_extension/scripts/make_coco_style_annotation.sh b/preprocess/mhp_extension/scripts/make_coco_style_annotation.sh new file mode 100644 index 0000000000000000000000000000000000000000..37a1e7d4944c318bc275a58dceeaf987bb6517dc --- /dev/null +++ b/preprocess/mhp_extension/scripts/make_coco_style_annotation.sh @@ -0,0 +1,14 @@ +python ./coco_style_annotation_creator/human_to_coco.py \ + --dataset 'CIHP' \ + --json_save_dir './data/CIHP/annotations' \ + --train_img_dir './data/CIHP/Training/Images' \ + --train_anno_dir './data/CIHP/Training/Human_ids' \ + --val_img_dir './data/CIHP/Validation/Images' \ + --val_anno_dir './data/CIHP/Validation/Human_ids' + + +python ./coco_style_annotation_creator/test_human2coco_format.py \ + --dataset 'CIHP' \ + --json_save_dir './data/CIHP/annotations' \ + --test_img_dir './data/CIHP/Testing/Images' + diff --git a/preprocess/mhp_extension/scripts/make_crop.sh b/preprocess/mhp_extension/scripts/make_crop.sh new file mode 100644 index 0000000000000000000000000000000000000000..604a433c0494b1ddba9223cd95bf6de2b4b150b0 --- /dev/null +++ b/preprocess/mhp_extension/scripts/make_crop.sh @@ -0,0 +1,6 @@ +python make_crop_and_mask_w_mask_nms.py \ + --img_dir './data/CIHP/Testing/Images' \ + --save_dir './data/CIHP/' \ + --img_list './data/CIHP/annotations/CIHP_val.json' \ + --det_res './data/CIHP/detectron2_prediction/inference/instances_predictions.pth' + diff --git a/preprocess/mhp_extension/scripts/parsing_fusion.sh b/preprocess/mhp_extension/scripts/parsing_fusion.sh new file mode 100644 index 
0000000000000000000000000000000000000000..107bcf6b0532a7f807c76cd706e48aab767a5da3 --- /dev/null +++ b/preprocess/mhp_extension/scripts/parsing_fusion.sh @@ -0,0 +1,6 @@ +python logits_fusion.py \ +--test_json_path ./data/CIHP/crop.json \ +--global_output_dir ./data/CIHP/global_pic_parsing \ +--msrcnn_output_dir ./data/CIHP/crop_pic_parsing \ +--gt_output_dir ./data/CIHP/crop_pic_parsing \ +--save_dir ./data/CIHP/mhp_fusion_parsing diff --git a/preprocess/modules/__init__.py b/preprocess/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8a098dee5911f3613d320d23db37bc401cf57fa4 --- /dev/null +++ b/preprocess/modules/__init__.py @@ -0,0 +1,5 @@ +from .bn import ABN, InPlaceABN, InPlaceABNSync +from .functions import ACT_RELU, ACT_LEAKY_RELU, ACT_ELU, ACT_NONE +from .misc import GlobalAvgPool2d, SingleGPU +from .residual import IdentityResidualBlock +from .dense import DenseModule diff --git a/preprocess/modules/bn.py b/preprocess/modules/bn.py new file mode 100644 index 0000000000000000000000000000000000000000..a794698867e89140a030d550d832e6fa12561c8b --- /dev/null +++ b/preprocess/modules/bn.py @@ -0,0 +1,132 @@ +import torch +import torch.nn as nn +import torch.nn.functional as functional + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from .functions import * + + +class ABN(nn.Module): + """Activated Batch Normalization + + This gathers a `BatchNorm2d` and an activation function in a single module + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): + """Creates an Activated Batch Normalization module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics as. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + activation : str + Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. 
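+
+        Running statistics are kept in the `running_mean` / `running_var` buffers exactly as in
+        `nn.BatchNorm2d`; this module only fuses the activation on top of the standard batch norm.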
+ """ + super(ABN, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + self.activation = activation + self.slope = slope + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, + self.training, self.momentum, self.eps) + + if self.activation == ACT_RELU: + return functional.relu(x, inplace=True) + elif self.activation == ACT_LEAKY_RELU: + return functional.leaky_relu(x, negative_slope=self.slope, inplace=True) + elif self.activation == ACT_ELU: + return functional.elu(x, inplace=True) + else: + return x + + def __repr__(self): + rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ + ' affine={affine}, activation={activation}' + if self.activation == "leaky_relu": + rep += ', slope={slope})' + else: + rep += ')' + return rep.format(name=self.__class__.__name__, **self.__dict__) + + +class InPlaceABN(ABN): + """InPlace Activated Batch Normalization""" + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): + """Creates an InPlace Activated Batch Normalization module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics as. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + activation : str + Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + """ + super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) + + def forward(self, x): + x, _, _ = inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.activation, self.slope) + return x + + +class InPlaceABNSync(ABN): + """InPlace Activated Batch Normalization with cross-GPU synchronization + This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DistributedDataParallel`. 
+ """ + + def forward(self, x): + x, _, _ = inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.activation, self.slope) + return x + + def __repr__(self): + rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ + ' affine={affine}, activation={activation}' + if self.activation == "leaky_relu": + rep += ', slope={slope})' + else: + rep += ')' + return rep.format(name=self.__class__.__name__, **self.__dict__) + + diff --git a/preprocess/modules/deeplab.py b/preprocess/modules/deeplab.py new file mode 100644 index 0000000000000000000000000000000000000000..fd25b78369b27ef02c183a0b17b9bf8354c5f7c3 --- /dev/null +++ b/preprocess/modules/deeplab.py @@ -0,0 +1,84 @@ +import torch +import torch.nn as nn +import torch.nn.functional as functional + +from models._util import try_index +from .bn import ABN + + +class DeeplabV3(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels=256, + dilations=(12, 24, 36), + norm_act=ABN, + pooling_size=None): + super(DeeplabV3, self).__init__() + self.pooling_size = pooling_size + + self.map_convs = nn.ModuleList([ + nn.Conv2d(in_channels, hidden_channels, 1, bias=False), + nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[0], padding=dilations[0]), + nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[1], padding=dilations[1]), + nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[2], padding=dilations[2]) + ]) + self.map_bn = norm_act(hidden_channels * 4) + + self.global_pooling_conv = nn.Conv2d(in_channels, hidden_channels, 1, bias=False) + self.global_pooling_bn = norm_act(hidden_channels) + + self.red_conv = nn.Conv2d(hidden_channels * 4, out_channels, 1, bias=False) + self.pool_red_conv = nn.Conv2d(hidden_channels, out_channels, 1, bias=False) + self.red_bn = norm_act(out_channels) + + self.reset_parameters(self.map_bn.activation, self.map_bn.slope) + + def reset_parameters(self, activation, slope): + gain = nn.init.calculate_gain(activation, slope) + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, ABN): + if hasattr(m, "weight") and m.weight is not None: + nn.init.constant_(m.weight, 1) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + # Map convolutions + out = torch.cat([m(x) for m in self.map_convs], dim=1) + out = self.map_bn(out) + out = self.red_conv(out) + + # Global pooling + pool = self._global_pooling(x) + pool = self.global_pooling_conv(pool) + pool = self.global_pooling_bn(pool) + pool = self.pool_red_conv(pool) + if self.training or self.pooling_size is None: + pool = pool.repeat(1, 1, x.size(2), x.size(3)) + + out += pool + out = self.red_bn(out) + return out + + def _global_pooling(self, x): + if self.training or self.pooling_size is None: + pool = x.view(x.size(0), x.size(1), -1).mean(dim=-1) + pool = pool.view(x.size(0), x.size(1), 1, 1) + else: + pooling_size = (min(try_index(self.pooling_size, 0), x.shape[2]), + min(try_index(self.pooling_size, 1), x.shape[3])) + padding = ( + (pooling_size[1] - 1) // 2, + (pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1, + (pooling_size[0] - 1) // 2, + (pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1 + ) + + pool = 
functional.avg_pool2d(x, pooling_size, stride=1) + pool = functional.pad(pool, pad=padding, mode="replicate") + return pool diff --git a/preprocess/modules/dense.py b/preprocess/modules/dense.py new file mode 100644 index 0000000000000000000000000000000000000000..9638d6e86d2ae838550fefa9002a984af52e6cc8 --- /dev/null +++ b/preprocess/modules/dense.py @@ -0,0 +1,42 @@ +from collections import OrderedDict + +import torch +import torch.nn as nn + +from .bn import ABN + + +class DenseModule(nn.Module): + def __init__(self, in_channels, growth, layers, bottleneck_factor=4, norm_act=ABN, dilation=1): + super(DenseModule, self).__init__() + self.in_channels = in_channels + self.growth = growth + self.layers = layers + + self.convs1 = nn.ModuleList() + self.convs3 = nn.ModuleList() + for i in range(self.layers): + self.convs1.append(nn.Sequential(OrderedDict([ + ("bn", norm_act(in_channels)), + ("conv", nn.Conv2d(in_channels, self.growth * bottleneck_factor, 1, bias=False)) + ]))) + self.convs3.append(nn.Sequential(OrderedDict([ + ("bn", norm_act(self.growth * bottleneck_factor)), + ("conv", nn.Conv2d(self.growth * bottleneck_factor, self.growth, 3, padding=dilation, bias=False, + dilation=dilation)) + ]))) + in_channels += self.growth + + @property + def out_channels(self): + return self.in_channels + self.growth * self.layers + + def forward(self, x): + inputs = [x] + for i in range(self.layers): + x = torch.cat(inputs, dim=1) + x = self.convs1[i](x) + x = self.convs3[i](x) + inputs += [x] + + return torch.cat(inputs, dim=1) diff --git a/preprocess/modules/functions.py b/preprocess/modules/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..aea9729c0e6944c07bbd63368956e63ab4c76c86 --- /dev/null +++ b/preprocess/modules/functions.py @@ -0,0 +1,244 @@ +from os import path +import torch +import torch.distributed as dist +import torch.autograd as autograd +import torch.cuda.comm as comm +from torch.autograd.function import once_differentiable +from torch.utils.cpp_extension import load + +_src_path = path.join(path.dirname(path.abspath(__file__)), "src") +_backend = load(name="inplace_abn", + extra_cflags=["-O3"], + sources=[path.join(_src_path, f) for f in [ + "inplace_abn.cpp", + "inplace_abn_cpu.cpp", + "inplace_abn_cuda.cu", + "inplace_abn_cuda_half.cu" + ]], + extra_cuda_cflags=["--expt-extended-lambda"]) + +# Activation names +ACT_RELU = "relu" +ACT_LEAKY_RELU = "leaky_relu" +ACT_ELU = "elu" +ACT_NONE = "none" + + +def _check(fn, *args, **kwargs): + success = fn(*args, **kwargs) + if not success: + raise RuntimeError("CUDA Error encountered in {}".format(fn)) + + +def _broadcast_shape(x): + out_size = [] + for i, s in enumerate(x.size()): + if i != 1: + out_size.append(1) + else: + out_size.append(s) + return out_size + + +def _reduce(x): + if len(x.size()) == 2: + return x.sum(dim=0) + else: + n, c = x.size()[0:2] + return x.contiguous().view((n, c, -1)).sum(2).sum(0) + + +def _count_samples(x): + count = 1 + for i, s in enumerate(x.size()): + if i != 1: + count *= s + return count + + +def _act_forward(ctx, x): + if ctx.activation == ACT_LEAKY_RELU: + _backend.leaky_relu_forward(x, ctx.slope) + elif ctx.activation == ACT_ELU: + _backend.elu_forward(x) + elif ctx.activation == ACT_NONE: + pass + + +def _act_backward(ctx, x, dx): + if ctx.activation == ACT_LEAKY_RELU: + _backend.leaky_relu_backward(x, dx, ctx.slope) + elif ctx.activation == ACT_ELU: + _backend.elu_backward(x, dx) + elif ctx.activation == ACT_NONE: + pass + + +class InPlaceABN(autograd.Function): + 
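+    """Non-synchronized in-place ABN autograd function: forward() normalizes and activates x in
+    place; backward() inverts the activation and affine transform to recover the values it needs
+    rather than storing extra intermediate tensors."""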
@staticmethod + def forward(ctx, x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01): + # Save context + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + ctx.affine = weight is not None and bias is not None + + # Prepare inputs + count = _count_samples(x) + x = x.contiguous() + weight = weight.contiguous() if ctx.affine else x.new_empty(0) + bias = bias.contiguous() if ctx.affine else x.new_empty(0) + + if ctx.training: + mean, var = _backend.mean_var(x) + + # Update running stats + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * count / (count - 1)) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + mean, var = running_mean.contiguous(), running_var.contiguous() + ctx.mark_dirty(x) + + # BN forward + activation + _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps) + _act_forward(ctx, x) + + # Output + ctx.var = var + ctx.save_for_backward(x, var, weight, bias) + ctx.mark_non_differentiable(running_mean, running_var) + return x, running_mean, running_var + + @staticmethod + @once_differentiable + def backward(ctx, dz, _drunning_mean, _drunning_var): + z, var, weight, bias = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + if ctx.training: + edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps) + else: + # TODO: implement simplified CUDA backward for inference mode + edz = dz.new_zeros(dz.size(1)) + eydz = dz.new_zeros(dz.size(1)) + + dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps) + # dweight = eydz * weight.sign() if ctx.affine else None + dweight = eydz if ctx.affine else None + if dweight is not None: + dweight[weight < 0] *= -1 + dbias = edz if ctx.affine else None + + return dx, dweight, dbias, None, None, None, None, None, None, None + + +class InPlaceABNSync(autograd.Function): + @classmethod + def forward(cls, ctx, x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01, equal_batches=True): + # Save context + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + ctx.affine = weight is not None and bias is not None + + # Prepare inputs + ctx.world_size = dist.get_world_size() if dist.is_initialized() else 1 + + # count = _count_samples(x) + batch_size = x.new_tensor([x.shape[0]], dtype=torch.long) + + x = x.contiguous() + weight = weight.contiguous() if ctx.affine else x.new_empty(0) + bias = bias.contiguous() if ctx.affine else x.new_empty(0) + + if ctx.training: + mean, var = _backend.mean_var(x) + if ctx.world_size > 1: + # get global batch size + if equal_batches: + batch_size *= ctx.world_size + else: + dist.all_reduce(batch_size, dist.ReduceOp.SUM) + + ctx.factor = x.shape[0] / float(batch_size.item()) + + mean_all = mean.clone() * ctx.factor + dist.all_reduce(mean_all, dist.ReduceOp.SUM) + + var_all = (var + (mean - mean_all) ** 2) * ctx.factor + dist.all_reduce(var_all, dist.ReduceOp.SUM) + + mean = mean_all + var = var_all + + # Update running stats + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean) + count = batch_size.item() * x.view(x.shape[0], x.shape[1], -1).shape[-1] + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * (float(count) / (count 
- 1))) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + mean, var = running_mean.contiguous(), running_var.contiguous() + ctx.mark_dirty(x) + + # BN forward + activation + _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps) + _act_forward(ctx, x) + + # Output + ctx.var = var + ctx.save_for_backward(x, var, weight, bias) + ctx.mark_non_differentiable(running_mean, running_var) + return x, running_mean, running_var + + @staticmethod + @once_differentiable + def backward(ctx, dz, _drunning_mean, _drunning_var): + z, var, weight, bias = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + if ctx.training: + edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps) + edz_local = edz.clone() + eydz_local = eydz.clone() + + if ctx.world_size > 1: + edz *= ctx.factor + dist.all_reduce(edz, dist.ReduceOp.SUM) + + eydz *= ctx.factor + dist.all_reduce(eydz, dist.ReduceOp.SUM) + else: + edz_local = edz = dz.new_zeros(dz.size(1)) + eydz_local = eydz = dz.new_zeros(dz.size(1)) + + dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps) + # dweight = eydz_local * weight.sign() if ctx.affine else None + dweight = eydz_local if ctx.affine else None + if dweight is not None: + dweight[weight < 0] *= -1 + dbias = edz_local if ctx.affine else None + + return dx, dweight, dbias, None, None, None, None, None, None, None + + +inplace_abn = InPlaceABN.apply +inplace_abn_sync = InPlaceABNSync.apply + +__all__ = ["inplace_abn", "inplace_abn_sync", "ACT_RELU", "ACT_LEAKY_RELU", "ACT_ELU", "ACT_NONE"] diff --git a/preprocess/modules/misc.py b/preprocess/modules/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..3c50b69b38c950801baacba8b3684ffd23aef08b --- /dev/null +++ b/preprocess/modules/misc.py @@ -0,0 +1,21 @@ +import torch.nn as nn +import torch +import torch.distributed as dist + +class GlobalAvgPool2d(nn.Module): + def __init__(self): + """Global average pooling over the input's spatial dimensions""" + super(GlobalAvgPool2d, self).__init__() + + def forward(self, inputs): + in_size = inputs.size() + return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) + +class SingleGPU(nn.Module): + def __init__(self, module): + super(SingleGPU, self).__init__() + self.module=module + + def forward(self, input): + return self.module(input.cuda(non_blocking=True)) + diff --git a/preprocess/modules/residual.py b/preprocess/modules/residual.py new file mode 100644 index 0000000000000000000000000000000000000000..8a5c90e0606a451ff690f67a2feac28476241d86 --- /dev/null +++ b/preprocess/modules/residual.py @@ -0,0 +1,182 @@ +from collections import OrderedDict + +import torch.nn as nn + +from .bn import ABN, ACT_LEAKY_RELU, ACT_ELU, ACT_NONE +import torch.nn.functional as functional + + +class ResidualBlock(nn.Module): + """Configurable residual block + + Parameters + ---------- + in_channels : int + Number of input channels. + channels : list of int + Number of channels in the internal feature maps. Can either have two or three elements: if three construct + a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then + `3 x 3` then `1 x 1` convolutions. + stride : int + Stride of the first `3 x 3` convolution + dilation : int + Dilation to apply to the `3 x 3` convolutions. + groups : int + Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with + bottleneck blocks. 
+ norm_act : callable + Function to create normalization / activation Module. + dropout: callable + Function to create Dropout Module. + """ + + def __init__(self, + in_channels, + channels, + stride=1, + dilation=1, + groups=1, + norm_act=ABN, + dropout=None): + super(ResidualBlock, self).__init__() + + # Check parameters for inconsistencies + if len(channels) != 2 and len(channels) != 3: + raise ValueError("channels must contain either two or three values") + if len(channels) == 2 and groups != 1: + raise ValueError("groups > 1 are only valid if len(channels) == 3") + + is_bottleneck = len(channels) == 3 + need_proj_conv = stride != 1 or in_channels != channels[-1] + + if not is_bottleneck: + bn2 = norm_act(channels[1]) + bn2.activation = ACT_NONE + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, + dilation=dilation)), + ("bn1", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + dilation=dilation)), + ("bn2", bn2) + ] + if dropout is not None: + layers = layers[0:2] + [("dropout", dropout())] + layers[2:] + else: + bn3 = norm_act(channels[2]) + bn3.activation = ACT_NONE + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=1, padding=0, bias=False)), + ("bn1", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=stride, padding=dilation, bias=False, + groups=groups, dilation=dilation)), + ("bn2", norm_act(channels[1])), + ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)), + ("bn3", bn3) + ] + if dropout is not None: + layers = layers[0:4] + [("dropout", dropout())] + layers[4:] + self.convs = nn.Sequential(OrderedDict(layers)) + + if need_proj_conv: + self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False) + self.proj_bn = norm_act(channels[-1]) + self.proj_bn.activation = ACT_NONE + + def forward(self, x): + if hasattr(self, "proj_conv"): + residual = self.proj_conv(x) + residual = self.proj_bn(residual) + else: + residual = x + x = self.convs(x) + residual + + if self.convs.bn1.activation == ACT_LEAKY_RELU: + return functional.leaky_relu(x, negative_slope=self.convs.bn1.slope, inplace=True) + elif self.convs.bn1.activation == ACT_ELU: + return functional.elu(x, inplace=True) + else: + return x + + +class IdentityResidualBlock(nn.Module): + def __init__(self, + in_channels, + channels, + stride=1, + dilation=1, + groups=1, + norm_act=ABN, + dropout=None): + """Configurable identity-mapping residual block + + Parameters + ---------- + in_channels : int + Number of input channels. + channels : list of int + Number of channels in the internal feature maps. Can either have two or three elements: if three construct + a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then + `3 x 3` then `1 x 1` convolutions. + stride : int + Stride of the first `3 x 3` convolution + dilation : int + Dilation to apply to the `3 x 3` convolutions. + groups : int + Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with + bottleneck blocks. + norm_act : callable + Function to create normalization / activation Module. + dropout: callable + Function to create Dropout Module. 
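+
+        Note: this is a pre-activation block; `bn1` normalizes/activates the input before the
+        convolutions, and the residual is added in place with `out.add_(shortcut)`.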
+ """ + super(IdentityResidualBlock, self).__init__() + + # Check parameters for inconsistencies + if len(channels) != 2 and len(channels) != 3: + raise ValueError("channels must contain either two or three values") + if len(channels) == 2 and groups != 1: + raise ValueError("groups > 1 are only valid if len(channels) == 3") + + is_bottleneck = len(channels) == 3 + need_proj_conv = stride != 1 or in_channels != channels[-1] + + self.bn1 = norm_act(in_channels) + if not is_bottleneck: + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, + dilation=dilation)), + ("bn2", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + dilation=dilation)) + ] + if dropout is not None: + layers = layers[0:2] + [("dropout", dropout())] + layers[2:] + else: + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)), + ("bn2", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + groups=groups, dilation=dilation)), + ("bn3", norm_act(channels[1])), + ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)) + ] + if dropout is not None: + layers = layers[0:4] + [("dropout", dropout())] + layers[4:] + self.convs = nn.Sequential(OrderedDict(layers)) + + if need_proj_conv: + self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False) + + def forward(self, x): + if hasattr(self, "proj_conv"): + bn1 = self.bn1(x) + shortcut = self.proj_conv(bn1) + else: + shortcut = x.clone() + bn1 = self.bn1(x) + + out = self.convs(bn1) + out.add_(shortcut) + + return out diff --git a/preprocess/modules/src/checks.h b/preprocess/modules/src/checks.h new file mode 100644 index 0000000000000000000000000000000000000000..e761a6fe34d0789815b588eba7e3726026e0e868 --- /dev/null +++ b/preprocess/modules/src/checks.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +// Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT +#ifndef AT_CHECK +#define AT_CHECK AT_ASSERT +#endif + +#define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") + +#define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +#define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) \ No newline at end of file diff --git a/preprocess/modules/src/inplace_abn.cpp b/preprocess/modules/src/inplace_abn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0a6b1128cc20cbfc476134154e23e5869a92b856 --- /dev/null +++ b/preprocess/modules/src/inplace_abn.cpp @@ -0,0 +1,95 @@ +#include + +#include + +#include "inplace_abn.h" + +std::vector mean_var(at::Tensor x) { + if (x.is_cuda()) { + if (x.type().scalarType() == at::ScalarType::Half) { + return mean_var_cuda_h(x); + } else { + return mean_var_cuda(x); + } + } else { + return mean_var_cpu(x); + } +} + +at::Tensor forward(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + if (x.is_cuda()) { + if (x.type().scalarType() == at::ScalarType::Half) { + return forward_cuda_h(x, mean, var, weight, bias, affine, eps); + } else { + return forward_cuda(x, mean, var, weight, bias, affine, eps); + } + } else { + return forward_cpu(x, mean, var, weight, bias, 
affine, eps); + } +} + +std::vector edz_eydz(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return edz_eydz_cuda_h(z, dz, weight, bias, affine, eps); + } else { + return edz_eydz_cuda(z, dz, weight, bias, affine, eps); + } + } else { + return edz_eydz_cpu(z, dz, weight, bias, affine, eps); + } +} + +at::Tensor backward(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return backward_cuda_h(z, dz, var, weight, bias, edz, eydz, affine, eps); + } else { + return backward_cuda(z, dz, var, weight, bias, edz, eydz, affine, eps); + } + } else { + return backward_cpu(z, dz, var, weight, bias, edz, eydz, affine, eps); + } +} + +void leaky_relu_forward(at::Tensor z, float slope) { + at::leaky_relu_(z, slope); +} + +void leaky_relu_backward(at::Tensor z, at::Tensor dz, float slope) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return leaky_relu_backward_cuda_h(z, dz, slope); + } else { + return leaky_relu_backward_cuda(z, dz, slope); + } + } else { + return leaky_relu_backward_cpu(z, dz, slope); + } +} + +void elu_forward(at::Tensor z) { + at::elu_(z); +} + +void elu_backward(at::Tensor z, at::Tensor dz) { + if (z.is_cuda()) { + return elu_backward_cuda(z, dz); + } else { + return elu_backward_cpu(z, dz); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("mean_var", &mean_var, "Mean and variance computation"); + m.def("forward", &forward, "In-place forward computation"); + m.def("edz_eydz", &edz_eydz, "First part of backward computation"); + m.def("backward", &backward, "Second part of backward computation"); + m.def("leaky_relu_forward", &leaky_relu_forward, "Leaky relu forward computation"); + m.def("leaky_relu_backward", &leaky_relu_backward, "Leaky relu backward computation and inversion"); + m.def("elu_forward", &elu_forward, "Elu forward computation"); + m.def("elu_backward", &elu_backward, "Elu backward computation and inversion"); +} diff --git a/preprocess/modules/src/inplace_abn.h b/preprocess/modules/src/inplace_abn.h new file mode 100644 index 0000000000000000000000000000000000000000..17afd1196449ecb6376f28961e54b55e1537492f --- /dev/null +++ b/preprocess/modules/src/inplace_abn.h @@ -0,0 +1,88 @@ +#pragma once + +#include + +#include + +std::vector mean_var_cpu(at::Tensor x); +std::vector mean_var_cuda(at::Tensor x); +std::vector mean_var_cuda_h(at::Tensor x); + +at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); + +std::vector edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +std::vector edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +std::vector edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); + +at::Tensor backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, 
bool affine, float eps); +at::Tensor backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); +at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); + +void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope); +void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope); +void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope); + +void elu_backward_cpu(at::Tensor z, at::Tensor dz); +void elu_backward_cuda(at::Tensor z, at::Tensor dz); + +static void get_dims(at::Tensor x, int64_t& num, int64_t& chn, int64_t& sp) { + num = x.size(0); + chn = x.size(1); + sp = 1; + for (int64_t i = 2; i < x.ndimension(); ++i) + sp *= x.size(i); +} + +/* + * Specialized CUDA reduction functions for BN + */ +#ifdef __CUDACC__ + +#include "utils/cuda.cuh" + +template +__device__ T reduce(Op op, int plane, int N, int S) { + T sum = (T)0; + for (int batch = 0; batch < N; ++batch) { + for (int x = threadIdx.x; x < S; x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} +#endif diff --git a/preprocess/modules/src/inplace_abn_cpu.cpp b/preprocess/modules/src/inplace_abn_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ffc6d38c52ea31661b8dd438dc3fe1958f50b61e --- /dev/null +++ b/preprocess/modules/src/inplace_abn_cpu.cpp @@ -0,0 +1,119 @@ +#include + +#include + +#include "utils/checks.h" +#include "inplace_abn.h" + +at::Tensor reduce_sum(at::Tensor x) { + if (x.ndimension() == 2) { + return x.sum(0); + } else { + auto x_view = x.view({x.size(0), x.size(1), -1}); + return x_view.sum(-1).sum(0); + } +} + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +int64_t count(at::Tensor x) { + int64_t count = x.size(0); + for (int64_t i = 2; i < x.ndimension(); ++i) + count *= x.size(i); + + return count; +} + +at::Tensor invert_affine(at::Tensor z, at::Tensor weight, at::Tensor bias, bool affine, float eps) { + if (affine) { + return (z - broadcast_to(bias, z)) / broadcast_to(at::abs(weight) + eps, z); + } else { + return z; + } +} + +std::vector mean_var_cpu(at::Tensor x) { + auto num = count(x); + auto mean = reduce_sum(x) / num; + auto diff = x - broadcast_to(mean, x); + auto var = reduce_sum(diff.pow(2)) / num; + + return {mean, var}; +} + +at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + auto gamma = affine ? 
at::abs(weight) + eps : at::ones_like(var); + auto mul = at::rsqrt(var + eps) * gamma; + + x.sub_(broadcast_to(mean, x)); + x.mul_(broadcast_to(mul, x)); + if (affine) x.add_(broadcast_to(bias, x)); + + return x; +} + +std::vector edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + auto edz = reduce_sum(dz); + auto y = invert_affine(z, weight, bias, affine, eps); + auto eydz = reduce_sum(y * dz); + + return {edz, eydz}; +} + +at::Tensor backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + auto y = invert_affine(z, weight, bias, affine, eps); + auto mul = affine ? at::rsqrt(var + eps) * (at::abs(weight) + eps) : at::rsqrt(var + eps); + + auto num = count(z); + auto dx = (dz - broadcast_to(edz / num, dz) - y * broadcast_to(eydz / num, dz)) * broadcast_to(mul, dz); + return dx; +} + +void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CPU_INPUT(z); + CHECK_CPU_INPUT(dz); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cpu", ([&] { + int64_t count = z.numel(); + auto *_z = z.data(); + auto *_dz = dz.data(); + + for (int64_t i = 0; i < count; ++i) { + if (_z[i] < 0) { + _z[i] *= 1 / slope; + _dz[i] *= slope; + } + } + })); +} + +void elu_backward_cpu(at::Tensor z, at::Tensor dz) { + CHECK_CPU_INPUT(z); + CHECK_CPU_INPUT(dz); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "elu_backward_cpu", ([&] { + int64_t count = z.numel(); + auto *_z = z.data(); + auto *_dz = dz.data(); + + for (int64_t i = 0; i < count; ++i) { + if (_z[i] < 0) { + _z[i] = log1p(_z[i]); + _dz[i] *= (_z[i] + 1.f); + } + } + })); +} diff --git a/preprocess/modules/src/inplace_abn_cuda.cu b/preprocess/modules/src/inplace_abn_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..b157b06d47173d1645c6a40c89f564b737e84d43 --- /dev/null +++ b/preprocess/modules/src/inplace_abn_cuda.cu @@ -0,0 +1,333 @@ +#include + +#include +#include + +#include + +#include "utils/checks.h" +#include "utils/cuda.cuh" +#include "inplace_abn.h" + +#include + +// Operations for reduce +template +struct SumOp { + __device__ SumOp(const T *t, int c, int s) + : tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ T operator()(int batch, int plane, int n) { + return tensor[(batch * chn + plane) * sp + n]; + } + const T *tensor; + const int chn; + const int sp; +}; + +template +struct VarOp { + __device__ VarOp(T m, const T *t, int c, int s) + : mean(m), tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ T operator()(int batch, int plane, int n) { + T val = tensor[(batch * chn + plane) * sp + n]; + return (val - mean) * (val - mean); + } + const T mean; + const T *tensor; + const int chn; + const int sp; +}; + +template +struct GradOp { + __device__ GradOp(T _weight, T _bias, const T *_z, const T *_dz, int c, int s) + : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} + __device__ __forceinline__ Pair operator()(int batch, int plane, int n) { + T _y = (z[(batch * chn + plane) * sp + n] - bias) / weight; + T _dz = dz[(batch * chn + plane) * sp + n]; + return Pair(_dz, _y * _dz); + } + const T weight; + const T bias; + const T *z; + const T *dz; + const int chn; + const int sp; +}; + +/*********** + * mean_var + ***********/ + +template +__global__ void mean_var_kernel(const T *x, T *mean, T *var, int num, int chn, int sp) { + int plane = blockIdx.x; + T norm = T(1) / T(num * sp); + + T _mean = reduce>(SumOp(x, chn, 
sp), plane, num, sp) * norm; + __syncthreads(); + T _var = reduce>(VarOp(_mean, x, chn, sp), plane, num, sp) * norm; + + if (threadIdx.x == 0) { + mean[plane] = _mean; + var[plane] = _var; + } +} + +std::vector mean_var_cuda(at::Tensor x) { + CHECK_CUDA_INPUT(x); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Prepare output tensors + auto mean = at::empty({chn}, x.options()); + auto var = at::empty({chn}, x.options()); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(x.type(), "mean_var_cuda", ([&] { + mean_var_kernel<<>>( + x.data(), + mean.data(), + var.data(), + num, chn, sp); + })); + + return {mean, var}; +} + +/********** + * forward + **********/ + +template +__global__ void forward_kernel(T *x, const T *mean, const T *var, const T *weight, const T *bias, + bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _mean = mean[plane]; + T _var = var[plane]; + T _weight = affine ? abs(weight[plane]) + eps : T(1); + T _bias = affine ? bias[plane] : T(0); + + T mul = rsqrt(_var + eps) * _weight; + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + T _x = x[(batch * chn + plane) * sp + n]; + T _y = (_x - _mean) * mul + _bias; + + x[(batch * chn + plane) * sp + n] = _y; + } + } +} + +at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(x); + CHECK_CUDA_INPUT(mean); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(x.type(), "forward_cuda", ([&] { + forward_kernel<<>>( + x.data(), + mean.data(), + var.data(), + weight.data(), + bias.data(), + affine, eps, num, chn, sp); + })); + + return x; +} + +/*********** + * edz_eydz + ***********/ + +template +__global__ void edz_eydz_kernel(const T *z, const T *dz, const T *weight, const T *bias, + T *edz, T *eydz, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _weight = affine ? abs(weight[plane]) + eps : 1.f; + T _bias = affine ? 
bias[plane] : 0.f; + + Pair res = reduce, GradOp>(GradOp(_weight, _bias, z, dz, chn, sp), plane, num, sp); + __syncthreads(); + + if (threadIdx.x == 0) { + edz[plane] = res.v1; + eydz[plane] = res.v2; + } +} + +std::vector edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto edz = at::empty({chn}, z.options()); + auto eydz = at::empty({chn}, z.options()); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(z.type(), "edz_eydz_cuda", ([&] { + edz_eydz_kernel<<>>( + z.data(), + dz.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + affine, eps, num, chn, sp); + })); + + return {edz, eydz}; +} + +/*********** + * backward + ***********/ + +template +__global__ void backward_kernel(const T *z, const T *dz, const T *var, const T *weight, const T *bias, const T *edz, + const T *eydz, T *dx, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _weight = affine ? abs(weight[plane]) + eps : 1.f; + T _bias = affine ? bias[plane] : 0.f; + T _var = var[plane]; + T _edz = edz[plane]; + T _eydz = eydz[plane]; + + T _mul = _weight * rsqrt(_var + eps); + T count = T(num * sp); + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + T _dz = dz[(batch * chn + plane) * sp + n]; + T _y = (z[(batch * chn + plane) * sp + n] - _bias) / _weight; + + dx[(batch * chn + plane) * sp + n] = (_dz - _edz / count - _y * _eydz / count) * _mul; + } + } +} + +at::Tensor backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + CHECK_CUDA_INPUT(edz); + CHECK_CUDA_INPUT(eydz); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto dx = at::zeros_like(z); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(z.type(), "backward_cuda", ([&] { + backward_kernel<<>>( + z.data(), + dz.data(), + var.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + dx.data(), + affine, eps, num, chn, sp); + })); + + return dx; +} + +/************** + * activations + **************/ + +template +inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + auto stream = at::cuda::getCurrentCUDAStream(); + thrust::transform_if(thrust::cuda::par.on(stream), + th_dz, th_dz + count, th_z, th_dz, + [slope] __device__ (const T& dz) { return dz * slope; }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(thrust::cuda::par.on(stream), + th_z, th_z + count, th_z, + [slope] __device__ (const T& z) { return z / slope; }, + [] __device__ (const T& z) { return z < 0; }); +} + +void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), 
"leaky_relu_backward_cuda", ([&] { + leaky_relu_backward_impl(z.data(), dz.data(), slope, count); + })); +} + +template +inline void elu_backward_impl(T *z, T *dz, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + auto stream = at::cuda::getCurrentCUDAStream(); + thrust::transform_if(thrust::cuda::par.on(stream), + th_dz, th_dz + count, th_z, th_z, th_dz, + [] __device__ (const T& dz, const T& z) { return dz * (z + 1.); }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(thrust::cuda::par.on(stream), + th_z, th_z + count, th_z, + [] __device__ (const T& z) { return log1p(z); }, + [] __device__ (const T& z) { return z < 0; }); +} + +void elu_backward_cuda(at::Tensor z, at::Tensor dz) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { + elu_backward_impl(z.data(), dz.data(), count); + })); +} diff --git a/preprocess/modules/src/inplace_abn_cuda_half.cu b/preprocess/modules/src/inplace_abn_cuda_half.cu new file mode 100644 index 0000000000000000000000000000000000000000..bb63e73f9d90179e5bd5dae5579c4844da9c25e2 --- /dev/null +++ b/preprocess/modules/src/inplace_abn_cuda_half.cu @@ -0,0 +1,275 @@ +#include + +#include + +#include + +#include "utils/checks.h" +#include "utils/cuda.cuh" +#include "inplace_abn.h" + +#include + +// Operations for reduce +struct SumOpH { + __device__ SumOpH(const half *t, int c, int s) + : tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ float operator()(int batch, int plane, int n) { + return __half2float(tensor[(batch * chn + plane) * sp + n]); + } + const half *tensor; + const int chn; + const int sp; +}; + +struct VarOpH { + __device__ VarOpH(float m, const half *t, int c, int s) + : mean(m), tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ float operator()(int batch, int plane, int n) { + const auto t = __half2float(tensor[(batch * chn + plane) * sp + n]); + return (t - mean) * (t - mean); + } + const float mean; + const half *tensor; + const int chn; + const int sp; +}; + +struct GradOpH { + __device__ GradOpH(float _weight, float _bias, const half *_z, const half *_dz, int c, int s) + : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} + __device__ __forceinline__ Pair operator()(int batch, int plane, int n) { + float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - bias) / weight; + float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); + return Pair(_dz, _y * _dz); + } + const float weight; + const float bias; + const half *z; + const half *dz; + const int chn; + const int sp; +}; + +/*********** + * mean_var + ***********/ + +__global__ void mean_var_kernel_h(const half *x, float *mean, float *var, int num, int chn, int sp) { + int plane = blockIdx.x; + float norm = 1.f / static_cast(num * sp); + + float _mean = reduce(SumOpH(x, chn, sp), plane, num, sp) * norm; + __syncthreads(); + float _var = reduce(VarOpH(_mean, x, chn, sp), plane, num, sp) * norm; + + if (threadIdx.x == 0) { + mean[plane] = _mean; + var[plane] = _var; + } +} + +std::vector mean_var_cuda_h(at::Tensor x) { + CHECK_CUDA_INPUT(x); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Prepare output tensors + auto mean = at::empty({chn},x.options().dtype(at::kFloat)); + auto var = at::empty({chn},x.options().dtype(at::kFloat)); + + // Run kernel + dim3 blocks(chn); + dim3 
threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + mean_var_kernel_h<<>>( + reinterpret_cast(x.data()), + mean.data(), + var.data(), + num, chn, sp); + + return {mean, var}; +} + +/********** + * forward + **********/ + +__global__ void forward_kernel_h(half *x, const float *mean, const float *var, const float *weight, const float *bias, + bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + const float _mean = mean[plane]; + const float _var = var[plane]; + const float _weight = affine ? abs(weight[plane]) + eps : 1.f; + const float _bias = affine ? bias[plane] : 0.f; + + const float mul = rsqrt(_var + eps) * _weight; + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + half *x_ptr = x + (batch * chn + plane) * sp + n; + float _x = __half2float(*x_ptr); + float _y = (_x - _mean) * mul + _bias; + + *x_ptr = __float2half(_y); + } + } +} + +at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(x); + CHECK_CUDA_INPUT(mean); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + forward_kernel_h<<>>( + reinterpret_cast(x.data()), + mean.data(), + var.data(), + weight.data(), + bias.data(), + affine, eps, num, chn, sp); + + return x; +} + +__global__ void edz_eydz_kernel_h(const half *z, const half *dz, const float *weight, const float *bias, + float *edz, float *eydz, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + float _weight = affine ? abs(weight[plane]) + eps : 1.f; + float _bias = affine ? bias[plane] : 0.f; + + Pair res = reduce, GradOpH>(GradOpH(_weight, _bias, z, dz, chn, sp), plane, num, sp); + __syncthreads(); + + if (threadIdx.x == 0) { + edz[plane] = res.v1; + eydz[plane] = res.v2; + } +} + +std::vector edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto edz = at::empty({chn},z.options().dtype(at::kFloat)); + auto eydz = at::empty({chn},z.options().dtype(at::kFloat)); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + edz_eydz_kernel_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + affine, eps, num, chn, sp); + + return {edz, eydz}; +} + +__global__ void backward_kernel_h(const half *z, const half *dz, const float *var, const float *weight, const float *bias, const float *edz, + const float *eydz, half *dx, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + float _weight = affine ? abs(weight[plane]) + eps : 1.f; + float _bias = affine ? 
bias[plane] : 0.f; + float _var = var[plane]; + float _edz = edz[plane]; + float _eydz = eydz[plane]; + + float _mul = _weight * rsqrt(_var + eps); + float count = float(num * sp); + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); + float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - _bias) / _weight; + + dx[(batch * chn + plane) * sp + n] = __float2half((_dz - _edz / count - _y * _eydz / count) * _mul); + } + } +} + +at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + CHECK_CUDA_INPUT(edz); + CHECK_CUDA_INPUT(eydz); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto dx = at::zeros_like(z); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + backward_kernel_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + var.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + reinterpret_cast(dx.data()), + affine, eps, num, chn, sp); + + return dx; +} + +__global__ void leaky_relu_backward_impl_h(half *z, half *dz, float slope, int64_t count) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x){ + float _z = __half2float(z[i]); + if (_z < 0) { + dz[i] = __float2half(__half2float(dz[i]) * slope); + z[i] = __float2half(_z / slope); + } + } +} + +void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + dim3 threads(getNumThreads(count)); + dim3 blocks = (count + threads.x - 1) / threads.x; + auto stream = at::cuda::getCurrentCUDAStream(); + leaky_relu_backward_impl_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + slope, count); +} + diff --git a/preprocess/modules/src/utils/checks.h b/preprocess/modules/src/utils/checks.h new file mode 100644 index 0000000000000000000000000000000000000000..e761a6fe34d0789815b588eba7e3726026e0e868 --- /dev/null +++ b/preprocess/modules/src/utils/checks.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +// Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT +#ifndef AT_CHECK +#define AT_CHECK AT_ASSERT +#endif + +#define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") + +#define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +#define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) \ No newline at end of file diff --git a/preprocess/modules/src/utils/common.h b/preprocess/modules/src/utils/common.h new file mode 100644 index 0000000000000000000000000000000000000000..e8403eef8a233b75dd4bb353c16486fe1be2039a --- /dev/null +++ b/preprocess/modules/src/utils/common.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +/* + * Functions to share code between CPU and GPU + */ + +#ifdef __CUDACC__ +// CUDA versions + +#define HOST_DEVICE __host__ __device__ +#define INLINE_HOST_DEVICE __host__ __device__ inline +#define FLOOR(x) floor(x) + +#if __CUDA_ARCH__ >= 600 +// Recent compute 
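The leaky-ReLU and ELU backward kernels above rely on both activations being invertible: because the normalization + activation is computed in place, the kernel only has the activation output z, so it scales the incoming gradient and then reconstructs the pre-activation value in place. A minimal NumPy sketch of that idea (illustrative only, not the CUDA code; the function names are mine):

```python
import numpy as np

def leaky_relu_backward_inplace(z, dz, slope):
    """Sketch of the in-place leaky-ReLU backward above.

    z  : activation output (rewritten in place back to the pre-activation input)
    dz : incoming gradient (rewritten in place to the input gradient)
    """
    neg = z < 0                 # elements that went through the negative branch
    dz[neg] *= slope            # d(slope * x)/dx = slope
    z[neg] /= slope             # recover the input: x = z / slope
    return z, dz

def elu_backward_inplace(z, dz):
    """Sketch of the in-place ELU backward above: for x < 0 the output is
    z = exp(x) - 1, so dy/dx = exp(x) = z + 1 and the input is log1p(z)."""
    neg = z < 0
    dz[neg] *= z[neg] + 1.0
    z[neg] = np.log1p(z[neg])
    return z, dz
```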
capabilities have block-level atomicAdd for all data types, so we use that +#define ACCUM(x,y) atomicAdd_block(&(x),(y)) +#else +// Older architectures don't have block-level atomicAdd, nor atomicAdd for doubles, so we defer to atomicAdd for float +// and use the known atomicCAS-based implementation for double +template +__device__ inline data_t atomic_add(data_t *address, data_t val) { + return atomicAdd(address, val); +} + +template<> +__device__ inline double atomic_add(double *address, double val) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); +} + +#define ACCUM(x,y) atomic_add(&(x),(y)) +#endif // #if __CUDA_ARCH__ >= 600 + +#else +// CPU versions + +#define HOST_DEVICE +#define INLINE_HOST_DEVICE inline +#define FLOOR(x) std::floor(x) +#define ACCUM(x,y) (x) += (y) + +#endif // #ifdef __CUDACC__ \ No newline at end of file diff --git a/preprocess/modules/src/utils/cuda.cuh b/preprocess/modules/src/utils/cuda.cuh new file mode 100644 index 0000000000000000000000000000000000000000..60c0023835e02c5f7c539c28ac07b75b72df394b --- /dev/null +++ b/preprocess/modules/src/utils/cuda.cuh @@ -0,0 +1,71 @@ +#pragma once + +/* + * General settings and functions + */ +const int WARP_SIZE = 32; +const int MAX_BLOCK_SIZE = 1024; + +static int getNumThreads(int nElem) { + int threadSizes[6] = {32, 64, 128, 256, 512, MAX_BLOCK_SIZE}; + for (int i = 0; i < 6; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +/* + * Reduction utilities + */ +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, + unsigned int mask = 0xffffffff) { +#if CUDART_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +__device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } + +template +struct Pair { + T v1, v2; + __device__ Pair() {} + __device__ Pair(T _v1, T _v2) : v1(_v1), v2(_v2) {} + __device__ Pair(T v) : v1(v), v2(v) {} + __device__ Pair(int v) : v1(v), v2(v) {} + __device__ Pair &operator+=(const Pair &a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +static __device__ __forceinline__ Pair warpSum(Pair value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} \ No newline at end of file diff --git a/preprocess/networks/AugmentCE2P.py b/preprocess/networks/AugmentCE2P.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d2c7f88e51dbde32c551ba933647a137395147 --- /dev/null +++ b/preprocess/networks/AugmentCE2P.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : AugmentCE2P.py +@Time : 8/4/19 3:35 PM +@Desc : +@License : This 
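getNumThreads above picks the smallest block size from {32, 64, 128, 256, 512, 1024} that covers the per-plane spatial extent, so small feature maps do not launch oversized blocks. A Python mirror of that selection logic (illustrative only):

```python
MAX_BLOCK_SIZE = 1024

def get_num_threads(n_elem: int) -> int:
    """Python mirror of getNumThreads: smallest candidate block size >= n_elem."""
    for candidate in (32, 64, 128, 256, 512, MAX_BLOCK_SIZE):
        if n_elem <= candidate:
            return candidate
    return MAX_BLOCK_SIZE

assert get_num_threads(20) == 32
assert get_num_threads(300) == 512
assert get_num_threads(5000) == 1024
```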
source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import functools + +import torch +import torch.nn as nn +from torch.nn import functional as F +# Note here we adopt the InplaceABNSync implementation from https://github.com/mapillary/inplace_abn +# By default, the InplaceABNSync module contains a BatchNorm Layer and a LeakyReLu layer +from modules import InPlaceABNSync + +BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') + +affine_par = True + +pretrained_settings = { + 'resnet101': { + 'imagenet': { + 'input_space': 'BGR', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.406, 0.456, 0.485], + 'std': [0.225, 0.224, 0.229], + 'num_classes': 1000 + } + }, +} + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=dilation * multi_grid, dilation=dilation * multi_grid, bias=False) + self.bn2 = BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=False) + self.relu_inplace = nn.ReLU(inplace=True) + self.downsample = downsample + self.dilation = dilation + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out = out + residual + out = self.relu_inplace(out) + + return out + + +class PSPModule(nn.Module): + """ + Reference: + Zhao, Hengshuang, et al. *"Pyramid scene parsing network."* + """ + + def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)): + super(PSPModule, self).__init__() + + self.stages = [] + self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes]) + self.bottleneck = nn.Sequential( + nn.Conv2d(features + len(sizes) * out_features, out_features, kernel_size=3, padding=1, dilation=1, + bias=False), + InPlaceABNSync(out_features), + ) + + def _make_stage(self, features, out_features, size): + prior = nn.AdaptiveAvgPool2d(output_size=(size, size)) + conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False) + bn = InPlaceABNSync(out_features) + return nn.Sequential(prior, conv, bn) + + def forward(self, feats): + h, w = feats.size(2), feats.size(3) + priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in + self.stages] + [feats] + bottle = self.bottleneck(torch.cat(priors, 1)) + return bottle + + +class ASPPModule(nn.Module): + """ + Reference: + Chen, Liang-Chieh, et al. 
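As a shape sanity check for PSPModule above: each stage pools the input to 1x1 / 2x2 / 3x3 / 6x6, projects it to out_features with a 1x1 convolution and upsamples it back, so the bottleneck sees features + len(sizes) * out_features channels. A minimal sketch with ordinary BatchNorm2d + ReLU standing in for InPlaceABNSync (illustrative, not the repo module; run in eval mode so BatchNorm tolerates batch size 1):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyPSP(nn.Module):
    # Same wiring as PSPModule above, with plain BatchNorm2d + ReLU
    # standing in for InPlaceABNSync.
    def __init__(self, features=2048, out_features=512, sizes=(1, 2, 3, 6)):
        super().__init__()
        self.stages = nn.ModuleList(
            nn.Sequential(
                nn.AdaptiveAvgPool2d(size),
                nn.Conv2d(features, out_features, 1, bias=False),
                nn.BatchNorm2d(out_features),
                nn.ReLU(inplace=True),
            )
            for size in sizes
        )
        self.bottleneck = nn.Sequential(
            nn.Conv2d(features + len(sizes) * out_features, out_features, 3,
                      padding=1, bias=False),
            nn.BatchNorm2d(out_features),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        h, w = x.shape[2:]
        priors = [F.interpolate(stage(x), size=(h, w), mode="bilinear",
                                align_corners=True) for stage in self.stages]
        return self.bottleneck(torch.cat(priors + [x], dim=1))

psp = TinyPSP().eval()
with torch.no_grad():
    print(psp(torch.randn(1, 2048, 16, 16)).shape)  # torch.Size([1, 512, 16, 16])
```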
*"Rethinking Atrous Convolution for Semantic Image Segmentation."* + """ + + def __init__(self, features, inner_features=256, out_features=512, dilations=(12, 24, 36)): + super(ASPPModule, self).__init__() + + self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), + nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, + bias=False), + InPlaceABNSync(inner_features)) + self.conv2 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(inner_features)) + self.conv3 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False), + InPlaceABNSync(inner_features)) + self.conv4 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False), + InPlaceABNSync(inner_features)) + self.conv5 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False), + InPlaceABNSync(inner_features)) + + self.bottleneck = nn.Sequential( + nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(out_features), + nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + + feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) + + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1) + + bottle = self.bottleneck(out) + return bottle + + +class Edge_Module(nn.Module): + """ + Edge Learning Branch + """ + + def __init__(self, in_fea=[256, 512, 1024], mid_fea=256, out_fea=2): + super(Edge_Module, self).__init__() + + self.conv1 = nn.Sequential( + nn.Conv2d(in_fea[0], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(mid_fea) + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_fea[1], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(mid_fea) + ) + self.conv3 = nn.Sequential( + nn.Conv2d(in_fea[2], mid_fea, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(mid_fea) + ) + self.conv4 = nn.Conv2d(mid_fea, out_fea, kernel_size=3, padding=1, dilation=1, bias=True) + self.conv5 = nn.Conv2d(out_fea * 3, out_fea, kernel_size=1, padding=0, dilation=1, bias=True) + + def forward(self, x1, x2, x3): + _, _, h, w = x1.size() + + edge1_fea = self.conv1(x1) + edge1 = self.conv4(edge1_fea) + edge2_fea = self.conv2(x2) + edge2 = self.conv4(edge2_fea) + edge3_fea = self.conv3(x3) + edge3 = self.conv4(edge3_fea) + + edge2_fea = F.interpolate(edge2_fea, size=(h, w), mode='bilinear', align_corners=True) + edge3_fea = F.interpolate(edge3_fea, size=(h, w), mode='bilinear', align_corners=True) + edge2 = F.interpolate(edge2, size=(h, w), mode='bilinear', align_corners=True) + edge3 = F.interpolate(edge3, size=(h, w), mode='bilinear', align_corners=True) + + edge = torch.cat([edge1, edge2, edge3], dim=1) + edge_fea = torch.cat([edge1_fea, edge2_fea, edge3_fea], dim=1) + edge = self.conv5(edge) + + return edge, edge_fea + + +class Decoder_Module(nn.Module): + """ + Parsing Branch Decoder Module. 
+ """ + + def __init__(self, num_classes): + super(Decoder_Module, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(512, 256, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(256) + ) + self.conv2 = nn.Sequential( + nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(48) + ) + self.conv3 = nn.Sequential( + nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(256), + nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(256) + ) + + self.conv4 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True) + + def forward(self, xt, xl): + _, _, h, w = xl.size() + xt = F.interpolate(self.conv1(xt), size=(h, w), mode='bilinear', align_corners=True) + xl = self.conv2(xl) + x = torch.cat([xt, xl], dim=1) + x = self.conv3(x) + seg = self.conv4(x) + return seg, x + + +class ResNet(nn.Module): + def __init__(self, block, layers, num_classes): + self.inplanes = 128 + super(ResNet, self).__init__() + self.conv1 = conv3x3(3, 64, stride=2) + self.bn1 = BatchNorm2d(64) + self.relu1 = nn.ReLU(inplace=False) + self.conv2 = conv3x3(64, 64) + self.bn2 = BatchNorm2d(64) + self.relu2 = nn.ReLU(inplace=False) + self.conv3 = conv3x3(64, 128) + self.bn3 = BatchNorm2d(128) + self.relu3 = nn.ReLU(inplace=False) + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2, multi_grid=(1, 1, 1)) + + self.context_encoding = PSPModule(2048, 512) + + self.edge = Edge_Module() + self.decoder = Decoder_Module(num_classes) + + self.fushion = nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(256), + nn.Dropout2d(0.1), + nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True) + ) + + def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + BatchNorm2d(planes * block.expansion, affine=affine_par)) + + layers = [] + generate_multi_grid = lambda index, grids: grids[index % len(grids)] if isinstance(grids, tuple) else 1 + layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, + multi_grid=generate_multi_grid(0, multi_grid))) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid))) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.relu1(self.bn1(self.conv1(x))) + x = self.relu2(self.bn2(self.conv2(x))) + x = self.relu3(self.bn3(self.conv3(x))) + x = self.maxpool(x) + x2 = self.layer1(x) + x3 = self.layer2(x2) + x4 = self.layer3(x3) + x5 = self.layer4(x4) + x = self.context_encoding(x5) + parsing_result, parsing_fea = self.decoder(x, x2) + # Edge Branch + edge_result, edge_fea = self.edge(x2, x3, x4) + # Fusion Branch + x = torch.cat([parsing_fea, edge_fea], dim=1) + fusion_result = self.fushion(x) + return [[parsing_result, fusion_result], [edge_result]] + + +def initialize_pretrained_model(model, settings, 
pretrained='./models/resnet101-imagenet.pth'): + model.input_space = settings['input_space'] + model.input_size = settings['input_size'] + model.input_range = settings['input_range'] + model.mean = settings['mean'] + model.std = settings['std'] + + if pretrained is not None: + saved_state_dict = torch.load(pretrained) + new_params = model.state_dict().copy() + for i in saved_state_dict: + i_parts = i.split('.') + if not i_parts[0] == 'fc': + new_params['.'.join(i_parts[0:])] = saved_state_dict[i] + model.load_state_dict(new_params) + + +def resnet101(num_classes=20, pretrained='./models/resnet101-imagenet.pth'): + model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes) + settings = pretrained_settings['resnet101']['imagenet'] + initialize_pretrained_model(model, settings, pretrained) + return model diff --git a/preprocess/networks/__init__.py b/preprocess/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0fce5b997eb2567e2dfc894d4e75ea4a6e3f0e72 --- /dev/null +++ b/preprocess/networks/__init__.py @@ -0,0 +1,13 @@ +from __future__ import absolute_import + +from networks.AugmentCE2P import resnet101 + +__factory = { + 'resnet101': resnet101, +} + + +def init_model(name, *args, **kwargs): + if name not in __factory.keys(): + raise KeyError("Unknown model arch: {}".format(name)) + return __factory[name](*args, **kwargs) \ No newline at end of file diff --git a/preprocess/networks/backbone/mobilenetv2.py b/preprocess/networks/backbone/mobilenetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..6f2fe342877cfbc5796efea85af9abccfb80a27e --- /dev/null +++ b/preprocess/networks/backbone/mobilenetv2.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : mobilenetv2.py +@Time : 8/4/19 3:35 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
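The parsing network above returns a nested list, [[parsing_result, fusion_result], [edge_result]], and the extractor scripts later in this diff consume output[0][-1] (the fused parsing logits) before upsampling back to the crop size. A hedged usage sketch: it assumes the compiled InPlaceABN extension from preprocess/modules is importable (typically with a CUDA device) and skips pretrained weights, so it only demonstrates shapes and unpacking:

```python
import torch
import torch.nn.functional as F

import networks  # preprocess/networks, as used by the extractor scripts

# num_classes=18 matches the 'atr' dataset setting used later in this diff.
model = networks.init_model('resnet101', num_classes=18, pretrained=None).eval()

with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)              # one preprocessed person crop
    (parsing, fusion), (edge,) = model(x)        # [[parsing, fusion], [edge]]
    logits = F.interpolate(fusion, size=(512, 512), mode='bilinear',
                           align_corners=True)
    mask = logits.argmax(dim=1)                  # per-pixel class indices
print(mask.shape)  # torch.Size([1, 512, 512])
```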
+""" + +import torch.nn as nn +import math +import functools + +from modules import InPlaceABN, InPlaceABNSync + +BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') + +__all__ = ['mobilenetv2'] + + +def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +def conv_1x1_bn(inp, oup): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = round(inp * expand_ratio) + self.use_res_connect = self.stride == 1 and inp == oup + + if expand_ratio == 1: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + BatchNorm2d(oup), + ) + else: + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), + BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + BatchNorm2d(oup), + ) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, n_class=1000, input_size=224, width_mult=1.): + super(MobileNetV2, self).__init__() + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + interverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], # layer 2 + [6, 32, 3, 2], # layer 3 + [6, 64, 4, 2], + [6, 96, 3, 1], # layer 4 + [6, 160, 3, 2], + [6, 320, 1, 1], # layer 5 + ] + + # building first layer + assert input_size % 32 == 0 + input_channel = int(input_channel * width_mult) + self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel + self.features = [conv_bn(3, input_channel, 2)] + # building inverted residual blocks + for t, c, n, s in interverted_residual_setting: + output_channel = int(c * width_mult) + for i in range(n): + if i == 0: + self.features.append(block(input_channel, output_channel, s, expand_ratio=t)) + else: + self.features.append(block(input_channel, output_channel, 1, expand_ratio=t)) + input_channel = output_channel + # building last several layers + self.features.append(conv_1x1_bn(input_channel, self.last_channel)) + # make it nn.Sequential + self.features = nn.Sequential(*self.features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(0.2), + nn.Linear(self.last_channel, n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = x.mean(3).mean(2) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() + + +def mobilenetv2(pretrained=False, **kwargs): + """Constructs a MobileNet_V2 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = MobileNetV2(n_class=1000, **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['mobilenetv2']), strict=False) + return model diff --git a/preprocess/networks/backbone/resnet.py b/preprocess/networks/backbone/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..88d6f73bc4fc327e18123020e01ccf5c1b37f025 --- /dev/null +++ b/preprocess/networks/backbone/resnet.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : resnet.py +@Time : 8/4/19 3:35 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import functools +import torch.nn as nn +import math +from torch.utils.model_zoo import load_url + +from modules import InPlaceABNSync + +BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') + +__all__ = ['ResNet', 'resnet18', 'resnet50', 'resnet101'] # resnet101 is coming soon! + +model_urls = { + 'resnet18': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet18-imagenet.pth', + 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth', + 'resnet101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth' +} + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + 
return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000): + self.inplanes = 128 + super(ResNet, self).__init__() + self.conv1 = conv3x3(3, 64, stride=2) + self.bn1 = BatchNorm2d(64) + self.relu1 = nn.ReLU(inplace=True) + self.conv2 = conv3x3(64, 64) + self.bn2 = BatchNorm2d(64) + self.relu2 = nn.ReLU(inplace=True) + self.conv3 = conv3x3(64, 128) + self.bn3 = BatchNorm2d(128) + self.relu3 = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AvgPool2d(7, stride=1) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.relu1(self.bn1(self.conv1(x))) + x = self.relu2(self.bn2(self.conv2(x))) + x = self.relu3(self.bn3(self.conv3(x))) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['resnet18'])) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['resnet50']), strict=False) + return model + + +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['resnet101']), strict=False) + return model diff --git a/preprocess/networks/backbone/resnext.py b/preprocess/networks/backbone/resnext.py new file mode 100644 index 0000000000000000000000000000000000000000..96adb54146addc523be71591eb93afcc2c25307f --- /dev/null +++ b/preprocess/networks/backbone/resnext.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : resnext.py.py +@Time : 8/11/19 8:58 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
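Because these backbones keep the ImageNet classifier head (7x7 average pool + fc), they can be smoke-tested on their own before being wired into the parsing model. A minimal sketch, with the import path and the availability of the compiled InPlaceABN extension assumed:

```python
import torch
from networks.backbone.resnet import resnet50  # path assumed; adjust to your sys.path

model = resnet50(pretrained=False).eval()      # skip the CSAIL weight download
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000]); the 7x7 avgpool expects 224x224 input
```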
+""" +import functools +import torch.nn as nn +import math +from torch.utils.model_zoo import load_url + +from modules import InPlaceABNSync + +BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') + +__all__ = ['ResNeXt', 'resnext101'] # support resnext 101 + +model_urls = { + 'resnext50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext50-imagenet.pth', + 'resnext101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext101-imagenet.pth' +} + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class GroupBottleneck(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, groups=1, downsample=None): + super(GroupBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False) + self.bn3 = BatchNorm2d(planes * 2) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNeXt(nn.Module): + + def __init__(self, block, layers, groups=32, num_classes=1000): + self.inplanes = 128 + super(ResNeXt, self).__init__() + self.conv1 = conv3x3(3, 64, stride=2) + self.bn1 = BatchNorm2d(64) + self.relu1 = nn.ReLU(inplace=True) + self.conv2 = conv3x3(64, 64) + self.bn2 = BatchNorm2d(64) + self.relu2 = nn.ReLU(inplace=True) + self.conv3 = conv3x3(64, 128) + self.bn3 = BatchNorm2d(128) + self.relu3 = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, 128, layers[0], groups=groups) + self.layer2 = self._make_layer(block, 256, layers[1], stride=2, groups=groups) + self.layer3 = self._make_layer(block, 512, layers[2], stride=2, groups=groups) + self.layer4 = self._make_layer(block, 1024, layers[3], stride=2, groups=groups) + self.avgpool = nn.AvgPool2d(7, stride=1) + self.fc = nn.Linear(1024 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels // m.groups + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1, groups=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, groups, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=groups)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.relu1(self.bn1(self.conv1(x))) + x = self.relu2(self.bn2(self.conv2(x))) + x = self.relu3(self.bn3(self.conv3(x))) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnext101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on Places + """ + model = ResNeXt(GroupBottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['resnext101']), strict=False) + return model diff --git a/preprocess/networks/context_encoding/aspp.py b/preprocess/networks/context_encoding/aspp.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ba531a8920665c982b1f3412bc030465d56d2a --- /dev/null +++ b/preprocess/networks/context_encoding/aspp.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : aspp.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from modules import InPlaceABNSync + + +class ASPPModule(nn.Module): + """ + Reference: + Chen, Liang-Chieh, et al. 
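The only structural differences between GroupBottleneck above and the plain ResNet Bottleneck are the grouped 3x3 convolution and an expansion of 2 instead of 4; with groups=32 that 3x3 layer carries 1/32 of the weights of a dense convolution at the same width. A quick parameter count (illustrative only):

```python
import torch.nn as nn

dense   = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False)
grouped = nn.Conv2d(256, 256, kernel_size=3, padding=1, groups=32, bias=False)

print(sum(p.numel() for p in dense.parameters()))    # 589824 = 256 * 256 * 9
print(sum(p.numel() for p in grouped.parameters()))  # 18432  = 256 * (256 // 32) * 9
```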
*"Rethinking Atrous Convolution for Semantic Image Segmentation."* + """ + def __init__(self, features, out_features=512, inner_features=256, dilations=(12, 24, 36)): + super(ASPPModule, self).__init__() + + self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), + nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, + bias=False), + InPlaceABNSync(inner_features)) + self.conv2 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(inner_features)) + self.conv3 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False), + InPlaceABNSync(inner_features)) + self.conv4 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False), + InPlaceABNSync(inner_features)) + self.conv5 = nn.Sequential( + nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False), + InPlaceABNSync(inner_features)) + + self.bottleneck = nn.Sequential( + nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(out_features), + nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + + feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) + + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1) + + bottle = self.bottleneck(out) + return bottle \ No newline at end of file diff --git a/preprocess/networks/context_encoding/ocnet.py b/preprocess/networks/context_encoding/ocnet.py new file mode 100644 index 0000000000000000000000000000000000000000..ac43ebf489ee478c48acf3f93b01b32bdb08cdf3 --- /dev/null +++ b/preprocess/networks/context_encoding/ocnet.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : ocnet.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
+""" + +import functools + +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.nn import functional as F + +from modules import InPlaceABNSync +BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') + + +class _SelfAttentionBlock(nn.Module): + ''' + The basic implementation for self-attention block/non-local block + Input: + N X C X H X W + Parameters: + in_channels : the dimension of the input feature map + key_channels : the dimension after the key/query transform + value_channels : the dimension after the value transform + scale : choose the scale to downsample the input feature maps (save memory cost) + Return: + N X C X H X W + position-aware context features.(w/o concate or add with the input) + ''' + + def __init__(self, in_channels, key_channels, value_channels, out_channels=None, scale=1): + super(_SelfAttentionBlock, self).__init__() + self.scale = scale + self.in_channels = in_channels + self.out_channels = out_channels + self.key_channels = key_channels + self.value_channels = value_channels + if out_channels == None: + self.out_channels = in_channels + self.pool = nn.MaxPool2d(kernel_size=(scale, scale)) + self.f_key = nn.Sequential( + nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, + kernel_size=1, stride=1, padding=0), + InPlaceABNSync(self.key_channels), + ) + self.f_query = self.f_key + self.f_value = nn.Conv2d(in_channels=self.in_channels, out_channels=self.value_channels, + kernel_size=1, stride=1, padding=0) + self.W = nn.Conv2d(in_channels=self.value_channels, out_channels=self.out_channels, + kernel_size=1, stride=1, padding=0) + nn.init.constant(self.W.weight, 0) + nn.init.constant(self.W.bias, 0) + + def forward(self, x): + batch_size, h, w = x.size(0), x.size(2), x.size(3) + if self.scale > 1: + x = self.pool(x) + + value = self.f_value(x).view(batch_size, self.value_channels, -1) + value = value.permute(0, 2, 1) + query = self.f_query(x).view(batch_size, self.key_channels, -1) + query = query.permute(0, 2, 1) + key = self.f_key(x).view(batch_size, self.key_channels, -1) + + sim_map = torch.matmul(query, key) + sim_map = (self.key_channels ** -.5) * sim_map + sim_map = F.softmax(sim_map, dim=-1) + + context = torch.matmul(sim_map, value) + context = context.permute(0, 2, 1).contiguous() + context = context.view(batch_size, self.value_channels, *x.size()[2:]) + context = self.W(context) + if self.scale > 1: + context = F.upsample(input=context, size=(h, w), mode='bilinear', align_corners=True) + return context + + +class SelfAttentionBlock2D(_SelfAttentionBlock): + def __init__(self, in_channels, key_channels, value_channels, out_channels=None, scale=1): + super(SelfAttentionBlock2D, self).__init__(in_channels, + key_channels, + value_channels, + out_channels, + scale) + + +class BaseOC_Module(nn.Module): + """ + Implementation of the BaseOC module + Parameters: + in_features / out_features: the channels of the input / output feature maps. + dropout: we choose 0.05 as the default value. + size: you can apply multiple sizes. Here we only use one size. + Return: + features fused with Object context information. 
+ """ + + def __init__(self, in_channels, out_channels, key_channels, value_channels, dropout, sizes=([1])): + super(BaseOC_Module, self).__init__() + self.stages = [] + self.stages = nn.ModuleList( + [self._make_stage(in_channels, out_channels, key_channels, value_channels, size) for size in sizes]) + self.conv_bn_dropout = nn.Sequential( + nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0), + InPlaceABNSync(out_channels), + nn.Dropout2d(dropout) + ) + + def _make_stage(self, in_channels, output_channels, key_channels, value_channels, size): + return SelfAttentionBlock2D(in_channels, + key_channels, + value_channels, + output_channels, + size) + + def forward(self, feats): + priors = [stage(feats) for stage in self.stages] + context = priors[0] + for i in range(1, len(priors)): + context += priors[i] + output = self.conv_bn_dropout(torch.cat([context, feats], 1)) + return output + + +class BaseOC_Context_Module(nn.Module): + """ + Output only the context features. + Parameters: + in_features / out_features: the channels of the input / output feature maps. + dropout: specify the dropout ratio + fusion: We provide two different fusion method, "concat" or "add" + size: we find that directly learn the attention weights on even 1/8 feature maps is hard. + Return: + features after "concat" or "add" + """ + + def __init__(self, in_channels, out_channels, key_channels, value_channels, dropout, sizes=([1])): + super(BaseOC_Context_Module, self).__init__() + self.stages = [] + self.stages = nn.ModuleList( + [self._make_stage(in_channels, out_channels, key_channels, value_channels, size) for size in sizes]) + self.conv_bn_dropout = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0), + InPlaceABNSync(out_channels), + ) + + def _make_stage(self, in_channels, output_channels, key_channels, value_channels, size): + return SelfAttentionBlock2D(in_channels, + key_channels, + value_channels, + output_channels, + size) + + def forward(self, feats): + priors = [stage(feats) for stage in self.stages] + context = priors[0] + for i in range(1, len(priors)): + context += priors[i] + output = self.conv_bn_dropout(context) + return output + + +class ASP_OC_Module(nn.Module): + def __init__(self, features, out_features=256, dilations=(12, 24, 36)): + super(ASP_OC_Module, self).__init__() + self.context = nn.Sequential(nn.Conv2d(features, out_features, kernel_size=3, padding=1, dilation=1, bias=True), + InPlaceABNSync(out_features), + BaseOC_Context_Module(in_channels=out_features, out_channels=out_features, + key_channels=out_features // 2, value_channels=out_features, + dropout=0, sizes=([2]))) + self.conv2 = nn.Sequential(nn.Conv2d(features, out_features, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(out_features)) + self.conv3 = nn.Sequential( + nn.Conv2d(features, out_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False), + InPlaceABNSync(out_features)) + self.conv4 = nn.Sequential( + nn.Conv2d(features, out_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False), + InPlaceABNSync(out_features)) + self.conv5 = nn.Sequential( + nn.Conv2d(features, out_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False), + InPlaceABNSync(out_features)) + + self.conv_bn_dropout = nn.Sequential( + nn.Conv2d(out_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False), + InPlaceABNSync(out_features), + nn.Dropout2d(0.1) + ) + + def _cat_each(self, feat1, feat2, 
feat3, feat4, feat5): + assert (len(feat1) == len(feat2)) + z = [] + for i in range(len(feat1)): + z.append(torch.cat((feat1[i], feat2[i], feat3[i], feat4[i], feat5[i]), 1)) + return z + + def forward(self, x): + if isinstance(x, Variable): + _, _, h, w = x.size() + elif isinstance(x, tuple) or isinstance(x, list): + _, _, h, w = x[0].size() + else: + raise RuntimeError('unknown input type') + + feat1 = self.context(x) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + + if isinstance(x, Variable): + out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1) + elif isinstance(x, tuple) or isinstance(x, list): + out = self._cat_each(feat1, feat2, feat3, feat4, feat5) + else: + raise RuntimeError('unknown input type') + output = self.conv_bn_dropout(out) + return output diff --git a/preprocess/networks/context_encoding/psp.py b/preprocess/networks/context_encoding/psp.py new file mode 100644 index 0000000000000000000000000000000000000000..47181dc3f5fddb1c7fb80ad58a6694aae9ebd746 --- /dev/null +++ b/preprocess/networks/context_encoding/psp.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : psp.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from modules import InPlaceABNSync + + +class PSPModule(nn.Module): + """ + Reference: + Zhao, Hengshuang, et al. *"Pyramid scene parsing network."* + """ + def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)): + super(PSPModule, self).__init__() + + self.stages = [] + self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes]) + self.bottleneck = nn.Sequential( + nn.Conv2d(features + len(sizes) * out_features, out_features, kernel_size=3, padding=1, dilation=1, + bias=False), + InPlaceABNSync(out_features), + ) + + def _make_stage(self, features, out_features, size): + prior = nn.AdaptiveAvgPool2d(output_size=(size, size)) + conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False) + bn = InPlaceABNSync(out_features) + return nn.Sequential(prior, conv, bn) + + def forward(self, feats): + h, w = feats.size(2), feats.size(3) + priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in + self.stages] + [feats] + bottle = self.bottleneck(torch.cat(priors, 1)) + return bottle \ No newline at end of file diff --git a/preprocess/requirements.txt b/preprocess/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa113c8904cc69b76694a0a666de0fd895619770 --- /dev/null +++ b/preprocess/requirements.txt @@ -0,0 +1 @@ +opencv-python==4.4.0.46 diff --git a/preprocess/simple_extractor.py b/preprocess/simple_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..9c7325e95da1b89cf22576e41dfe4e8892d2104e --- /dev/null +++ b/preprocess/simple_extractor.py @@ -0,0 +1,350 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : simple_extractor.py +@Time : 8/30/19 8:59 PM +@Desc : Simple Extractor +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. 
+""" + +import os +import torch +import argparse +import numpy as np +from PIL import Image +from tqdm import tqdm + +from torch.utils.data import DataLoader +import torchvision.transforms as transforms + +import os +import sys + +_THIS_DIR = os.path.dirname(os.path.abspath(__file__)) # .../DEMO/preprocess +if _THIS_DIR not in sys.path: + sys.path.insert(0, _THIS_DIR) + + +import networks +from utils.transforms import transform_logits +from datasets.simple_extractor_dataset import SimpleFolderDataset + + + +dataset_settings = { + 'lip': { + 'input_size': [473, 473], + 'num_classes': 20, + 'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', + 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', + 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe'] + }, + 'atr': { + 'input_size': [512, 512], + 'num_classes': 18, + 'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', + 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'] + }, + 'pascal': { + 'input_size': [512, 512], + 'num_classes': 7, + 'label': ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'], + } +} + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + Returns: + A list of parsed arguments. + """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + parser.add_argument("--dataset", type=str, default='atr', choices=['lip', 'atr', 'pascal']) + parser.add_argument("--model-restore", type=str, default='', help="restore pretrained model parameters.") + parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.") + parser.add_argument("--category", type=str, default='Upper-clothes', help="category name (optional).") + parser.add_argument("--input-dir", type=str, default='', help="path of input image folder.") + parser.add_argument("--output-dir", type=str, default='', help="path of output image folder.") + parser.add_argument("--logits", action='store_true', default=False, help="whether to save the logits.") + + return parser.parse_args() + + +def get_palette(num_cls): + n = 18 + palette = [0] * (n * 3) + j = num_cls + lab = num_cls + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] = 255 + palette[j * 3 + 1] = 255 + palette[j * 3 + 2] = 255 + i += 1 + lab >>= 3 + return palette + + +# def run( +# *, +# category: str, +# input_dir: str, +# output_dir: str, +# dataset: str = "atr", +# model_restore: str = "", +# gpu: str = "0", +# logits: bool = False, +# ): +# """ +# โœ… ์™ธ๋ถ€(๋‹ค๋ฅธ ํŒŒ์ด์ฌ ์ฝ”๋“œ)์—์„œ import ํ•ด์„œ ํ˜ธ์ถœํ•˜๊ธฐ ์œ„ํ•œ ์—”ํŠธ๋ฆฌ ํ•จ์ˆ˜. 
+# - ๊ธฐ์กด main()์˜ ๋‚ด์šฉ์„ ๊ฑฐ์˜ ๊ทธ๋Œ€๋กœ ์˜ฎ๊น€ +# - CLI ์ธ์ž ๋Œ€์‹  ํŒŒ๋ผ๋ฏธํ„ฐ๋กœ ๋ฐ›์Œ +# """ +# # (์› ์ฝ”๋“œ ์œ ์ง€) single GPU๋งŒ ํ—ˆ์šฉ +# gpus = [int(i) for i in gpu.split(',')] +# assert len(gpus) == 1 +# if gpu != 'None': +# os.environ["CUDA_VISIBLE_DEVICES"] = gpu + +# num_classes = dataset_settings[dataset]['num_classes'] +# input_size = dataset_settings[dataset]['input_size'] +# label = dataset_settings[dataset]['label'] +# print("Evaluating total class number {} with {}".format(num_classes, label)) + +# model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None) + +# if not model_restore: +# print("[simple_extractor] model_restore not provided โ†’ skip extractor.") +# return False + + +# state_dict = torch.load(model_restore)['state_dict'] + +# # print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ args.model_restore: ", state_dict) +# from collections import OrderedDict +# new_state_dict = OrderedDict() +# for k, v in state_dict.items(): +# name = k[7:] # remove `module.` +# new_state_dict[name] = v +# model.load_state_dict(new_state_dict) +# model.cuda() +# model.eval() + +# transform = transforms.Compose([ +# transforms.ToTensor(), +# transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) +# ]) + +# # ----------------------------- +# # ์ž…๋ ฅ ํด๋” ์ด๋ฏธ์ง€ ๋กœ๋“œ +# # ----------------------------- +# if not input_dir: +# raise ValueError("--input-dir (input_dir) is required.") +# if not output_dir: +# raise ValueError("--output-dir (output_dir) is required.") + +# all_files = sorted([f for f in os.listdir(input_dir) +# if f.lower().endswith(('.png', '.jpg', '.jpeg'))]) +# selected_files = all_files[:] +# print(f"Total images found: {len(all_files)} โ†’ Using first {len(selected_files)} images") + +# dataset_obj = SimpleFolderDataset( +# root=input_dir, +# input_size=input_size, +# transform=transform, +# file_list=selected_files +# ) +# dataloader = DataLoader(dataset_obj) + +# os.makedirs(output_dir, exist_ok=True) + +# # NOTE: ๊ธฐ์กด ์ฝ”๋“œ๊ฐ€ palette = get_palette(4)๋กœ ๊ณ ์ •์ธ๋ฐ, +# # ์ง€๊ธˆ๋„ ๊ทธ๋Œ€๋กœ ์œ ์ง€ (ํ•„์š”ํ•˜๋ฉด category ๊ธฐ๋ฐ˜์œผ๋กœ ๋ฐ”๊พธ๋Š” ๊ฒƒ๋„ ๊ฐ€๋Šฅ) +# palette = get_palette(4) + +# with torch.no_grad(): +# for idx, batch in enumerate(tqdm(dataloader)): +# print("--: ", idx) +# image, meta = batch +# img_name = meta['name'][0] +# c = meta['center'].numpy()[0] +# s = meta['scale'].numpy()[0] +# w = meta['width'].numpy()[0] +# h = meta['height'].numpy()[0] + +# output = model(image.cuda()) +# upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) +# upsample_output = upsample(output[0][-1][0].unsqueeze(0)) +# upsample_output = upsample_output.squeeze() +# upsample_output = upsample_output.permute(1, 2, 0) # CHW -> HWC + +# logits_result = transform_logits( +# upsample_output.data.cpu().numpy(), +# c, s, w, h, +# input_size=input_size +# ) +# parsing_result = np.argmax(logits_result, axis=2) + +# parsing_result_path = os.path.join(output_dir, img_name[:-4] + '.png') +# output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) +# output_img.putpalette(palette) +# output_img.save(parsing_result_path) + +# if logits: +# logits_result_path = os.path.join(output_dir, img_name[:-4] + '.npy') +# np.save(logits_result_path, logits_result) + +# return + + +def run( + *, + category: str, + input_path: str = "", + input_dir: str = "", + dataset: str = "atr", + model_restore: str = "", + gpu: str = "0", + logits: bool = False, +): + """ + - input_path (๋‹จ์ผ ํŒŒ์ผ) 
๋˜๋Š” input_dir(ํด๋”) ์ค‘ ํ•˜๋‚˜๋ฅผ ๋ฐ›์•„ parsing ๊ฒฐ๊ณผ๋ฅผ ๋ฉ”๋ชจ๋ฆฌ๋กœ ๋ฐ˜ํ™˜. + - ํŒŒ์ผ ์ €์žฅ ์—†์Œ. + + Returns: + { + "images": List[PIL.Image], # parsing mask (palette ์ ์šฉ๋จ) + "logits": Optional[List[np.ndarray]], + "names": List[str], # ํŒŒ์ผ๋ช…๋“ค + } + """ + # single GPU๋งŒ ํ—ˆ์šฉ + gpus = [int(i) for i in gpu.split(',')] + assert len(gpus) == 1 + if gpu != 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = gpu + + if not model_restore: + print("[simple_extractor] model_restore not provided โ†’ skip extractor.") + return {"images": [], "logits": [] if logits else None, "names": []} + + # ์ž…๋ ฅ ๊ฒ€์ฆ: ๋‘˜ ์ค‘ ํ•˜๋‚˜๋Š” ์žˆ์–ด์•ผ ํ•จ + if bool(input_path) == bool(input_dir): + raise ValueError("Provide exactly one of input_path or input_dir.") + + # ํŒŒ์ผ์ด๋ฉด ์กด์žฌ ํ™•์ธ + if input_path: + if not os.path.isfile(input_path): + raise FileNotFoundError(f"input_path not found or not a file: {input_path}") + + # ํด๋”๋ฉด ์กด์žฌ ํ™•์ธ + if input_dir: + if not os.path.isdir(input_dir): + raise NotADirectoryError(f"input_dir not found or not a directory: {input_dir}") + + num_classes = dataset_settings[dataset]['num_classes'] + input_size = dataset_settings[dataset]['input_size'] + label = dataset_settings[dataset]['label'] + print(f"Evaluating total class number {num_classes} with {label}") + + model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None) + + state_dict = torch.load(model_restore)['state_dict'] + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) + ]) + + # ---- ํŒŒ์ผ ๋ฆฌ์ŠคํŠธ ๋งŒ๋“ค๊ธฐ (๋‹จ์ผ ํŒŒ์ผ/ํด๋” ๋ชจ๋‘ ๋Œ€์‘) ---- + if input_path: + # root๋Š” ํŒŒ์ผ์˜ ๋ถ€๋ชจ ๋””๋ ‰ํ„ฐ๋ฆฌ, file_list๋Š” ํŒŒ์ผ๋ช… 1๊ฐœ + root = os.path.dirname(input_path) + file_list = [os.path.basename(input_path)] + else: + root = input_dir + file_list = sorted([ + f for f in os.listdir(root) + if f.lower().endswith(('.png', '.jpg', '.jpeg')) + ]) + + dataset_obj = SimpleFolderDataset( + root=root, + input_size=input_size, + transform=transform, + file_list=file_list + ) + dataloader = DataLoader(dataset_obj) + + palette = get_palette(4) + + results_img = [] + results_logits = [] if logits else None + names = [] + + with torch.no_grad(): + for batch in tqdm(dataloader): + image, meta = batch + img_name = meta['name'][0] + names.append(img_name) + + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + + output = model(image.cuda()) + upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) + upsample_output = upsample(output[0][-1][0].unsqueeze(0)) + upsample_output = upsample_output.squeeze() + upsample_output = upsample_output.permute(1, 2, 0) + + logits_result = transform_logits( + upsample_output.data.cpu().numpy(), + c, s, w, h, + input_size=input_size + ) + parsing_result = np.argmax(logits_result, axis=2) + + out_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + out_img.putpalette(palette) + results_img.append(out_img) + + if logits: + results_logits.append(logits_result) + + return {"images": results_img, "logits": results_logits, "names": names} + + + + +def main(): + # โœ… CLI ํ˜ธํ™˜ ์œ ์ง€ + args = 
+
+def main():
+    # CLI compatibility wrapper. Assumes get_arguments() in this module defines the
+    # same flags as simple_extractor3.py (--dataset, --model-restore, --gpu,
+    # --category, --input-dir, --output-dir, --logits). run() keeps everything in
+    # memory, so the masks are written out here when --output-dir is given.
+    args = get_arguments()
+    result = run(
+        category=args.category,
+        input_dir=args.input_dir,
+        dataset=args.dataset,
+        model_restore=args.model_restore,
+        gpu=args.gpu,
+        logits=args.logits,
+    )
+    if args.output_dir:
+        os.makedirs(args.output_dir, exist_ok=True)
+        for name, img in zip(result["names"], result["images"]):
+            img.save(os.path.join(args.output_dir, os.path.splitext(name)[0] + '.png'))
+
+
+if __name__ == '__main__':
+    main()
+
diff --git a/preprocess/simple_extractor2.py b/preprocess/simple_extractor2.py
new file mode 100644
index 0000000000000000000000000000000000000000..47d6f77681e2770519b98aef984bf5d0348a5be8
--- /dev/null
+++ b/preprocess/simple_extractor2.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+"""
+@Author  :   Peike Li
+@Contact :   peike.li@yahoo.com
+@File    :   simple_extractor.py
+@Time    :   8/30/19 8:59 PM
+@Desc    :   Simple Extractor (modified for single image input)
+"""
+
+import os
+import torch
+import argparse
+import numpy as np
+from PIL import Image
+from tqdm import tqdm
+import cv2
+
+from torch.utils.data import Dataset, DataLoader
+import torchvision.transforms as transforms
+
+import networks
+from preprocess.utils.transforms import transform_logits, get_affine_transform
+
+
+class SimpleFileDataset(Dataset):
+    def __init__(self, image_path, input_size=[512, 512], transform=None):
+        self.image_path = image_path
+        self.input_size = np.asarray(input_size)
+        self.transform = transform
+        self.aspect_ratio = input_size[1] * 1.0 / input_size[0]
+        self.img_name = os.path.basename(image_path)
+
+    def __len__(self):
+        return 1
+
+    def _box2cs(self, box):
+        x, y, w, h = box[:4]
+        return self._xywh2cs(x, y, w, h)
+
+    def _xywh2cs(self, x, y, w, h):
+        center = np.zeros((2), dtype=np.float32)
+        center[0] = x + w * 0.5
+        center[1] = y + h * 0.5
+        if w > self.aspect_ratio * h:
+            h = w * 1.0 / self.aspect_ratio
+        elif w < self.aspect_ratio * h:
+            w = h * self.aspect_ratio
+        scale = np.array([w, h], dtype=np.float32)
+        return center, scale
+
+    def __getitem__(self, index):
+        img = cv2.imread(self.image_path, cv2.IMREAD_COLOR)
+        h, w, _ = img.shape
+        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
+        r = 0
+        trans = get_affine_transform(person_center, s, r, self.input_size)
+        input = cv2.warpAffine(
+            img,
+            trans,
+            (int(self.input_size[1]), int(self.input_size[0])),
+            flags=cv2.INTER_LINEAR,
+            borderMode=cv2.BORDER_CONSTANT,
+            borderValue=(0, 0, 0))
+        input = self.transform(input)
+        meta = {
+            'name': self.img_name,
+            'center': person_center,
+            'height': h,
+            'width': w,
+            'scale': s,
+            'rotation': r
+        }
+        return input, meta
+
+
+dataset_settings = {
+    'atr': {
+        'input_size': [512, 512],
+        'num_classes': 18,
+        'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt',
+                  'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf']
+    }
+}
+
+def get_palette(num_cls):
+    n = 18
+    palette = [0] * (n * 3)
+    j = num_cls
+    lab = num_cls
+    palette[j * 3 + 0] = 0
+    palette[j * 3 + 1] = 0
+    palette[j * 3 + 2] = 0
+    i = 0
+    while lab:
+        palette[j * 3 + 0] = 255
+        palette[j * 3 + 1] = 255
+        palette[j * 3 + 2] = 255
+        i += 1
+        lab >>= 3
+    return palette
+
+
+def masking(image_path, class_num=0):
+    num_classes = dataset_settings['atr']['num_classes']
+    input_size = dataset_settings['atr']['input_size']
+    label = dataset_settings['atr']['label']
+    print("Evaluating total class number {} with {}".format(num_classes, label))
+
+    model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None)
+    state_dict = torch.load('./ckpts/exp-schp-201908301523-atr.pth')['state_dict']
+
+    from collections import OrderedDict
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = 
k[7:] # remove `module.` + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + model.cuda() + model.eval() + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]) + ]) + dataset = SimpleFileDataset(image_path=image_path, input_size=input_size, transform=transform) + dataloader = DataLoader(dataset) + + if not os.path.exists('./outputs'): + os.makedirs('./outputs') + + palette = get_palette(class_num) + with torch.no_grad(): + for idx, batch in enumerate(tqdm(dataloader)): + image, meta = batch + img_name = meta['name'][0] + c = meta['center'].numpy()[0] + s = meta['scale'].numpy()[0] + w = meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + + output = model(image.cuda()) + upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) + upsample_output = upsample(output[0][-1][0].unsqueeze(0)) + upsample_output = upsample_output.squeeze() + upsample_output = upsample_output.permute(1, 2, 0) + + logits_result = transform_logits(upsample_output.data.cpu().numpy(), c, s, w, h, input_size=input_size) + parsing_result = np.argmax(logits_result, axis=2) + parsing_result_path = os.path.join('./outputs', img_name[:-4] + '.png') + output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + output_img.putpalette(palette) + output_img.save(parsing_result_path) + gray_img = output_img.convert('L') + + return gray_img diff --git a/preprocess/simple_extractor3.py b/preprocess/simple_extractor3.py new file mode 100644 index 0000000000000000000000000000000000000000..dcfc59cac29195544d69dd7a19c2b7e55c252b6c --- /dev/null +++ b/preprocess/simple_extractor3.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : simple_extractor.py +@Time : 8/30/19 8:59 PM +@Desc : Simple Extractor +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import torch +import argparse +import numpy as np +from PIL import Image +from tqdm import tqdm + +from torch.utils.data import DataLoader +import torchvision.transforms as transforms + +import networks +from utils.transforms import transform_logits +from datasets.simple_extractor_dataset import SimpleFolderDataset + +dataset_settings = { + 'lip': { + 'input_size': [473, 473], + 'num_classes': 20, + 'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', + 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', + 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe'] + }, + 'atr': { + 'input_size': [512, 512], + 'num_classes': 18, + 'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', + 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'] + }, + 'pascal': { + 'input_size': [512, 512], + 'num_classes': 7, + 'label': ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'], + } +} + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + Returns: + A list of parsed arguments. 
+    """
+    parser = argparse.ArgumentParser(description="Self Correction for Human Parsing")
+
+    parser.add_argument("--dataset", type=str, default='atr', choices=['lip', 'atr', 'pascal'])
+    parser.add_argument("--model-restore", type=str, default='', help="restore pretrained model parameters.")
+    parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.")
+    parser.add_argument("--category", type=str, default='Upper-clothes', help="clothing category to extract (e.g. Upper-clothes).")
+    parser.add_argument("--input-dir", type=str, default='', help="path of input image folder.")
+    parser.add_argument("--output-dir", type=str, default='', help="path of output image folder.")
+    parser.add_argument("--logits", action='store_true', default=False, help="whether to save the logits.")
+
+    return parser.parse_args()
+
+
+# def get_palette(num_cls):
+#     """ Returns the color map for visualizing the segmentation mask.
+#     Args:
+#         num_cls: Number of classes
+#     Returns:
+#         The color map
+#     """
+#     n = 18
+#     palette = [0] * (n * 3)
+#     for j in range(5, 7):
+#         lab = j
+#         palette[j * 3 + 0] = 0
+#         palette[j * 3 + 1] = 0
+#         palette[j * 3 + 2] = 0
+#         i = 0
+#         while lab:
+#             palette[j * 3 + 0] = 255
+#             palette[j * 3 + 1] = 255
+#             palette[j * 3 + 2] = 255
+#             i += 1
+#             lab >>= 3
+#     return palette
+
+def get_palette(num_cls):
+    n = 18
+    palette = [0] * (n * 3)
+    j = num_cls
+    lab = num_cls
+    palette[j * 3 + 0] = 0
+    palette[j * 3 + 1] = 0
+    palette[j * 3 + 2] = 0
+    i = 0
+    while lab:
+        palette[j * 3 + 0] = 255
+        palette[j * 3 + 1] = 255
+        palette[j * 3 + 2] = 255
+        i += 1
+        lab >>= 3
+    return palette
+
+
+def main():
+    args = get_arguments()
+    gpus = [int(i) for i in args.gpu.split(',')]
+    assert len(gpus) == 1
+    if not args.gpu == 'None':
+        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
+
+    num_classes = dataset_settings[args.dataset]['num_classes']
+    input_size = dataset_settings[args.dataset]['input_size']
+    label = dataset_settings[args.dataset]['label']
+    print("Evaluating total class number {} with {}".format(num_classes, label))
+
+    model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None)
+
+    state_dict = torch.load(args.model_restore)['state_dict']
+    print("Loaded model weights from:", args.model_restore)
+    from collections import OrderedDict
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = k[7:]  # remove `module.`
+        new_state_dict[name] = v
+    model.load_state_dict(new_state_dict)
+    model.cuda()
+    model.eval()
+
+    transform = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])
+    ])
+
+    # -----------------------------
+    # Load images from the input folder in filename order (all files are used)
+    # -----------------------------
+    all_files = sorted([f for f in os.listdir(args.input_dir)
+                        if f.lower().endswith(('.png', '.jpg', '.jpeg'))])
+    selected_files = all_files[:]
+    print(f"Total images found: {len(all_files)} -> Using {len(selected_files)} images")
+
+    dataset = SimpleFolderDataset(root=args.input_dir,
+                                  input_size=input_size,
+                                  transform=transform,
+                                  file_list=selected_files)  # pass the selection via file_list
+    dataloader = DataLoader(dataset)
+
+    if not os.path.exists(args.output_dir):
+        os.makedirs(args.output_dir)
+
+    palette = get_palette(4)
+    with torch.no_grad():
+        for idx, batch in enumerate(tqdm(dataloader)):
+            print("--: ", idx)
+            image, meta = batch
+            img_name = meta['name'][0]
+            c = meta['center'].numpy()[0]
+            s = meta['scale'].numpy()[0]
+            w = 
meta['width'].numpy()[0] + h = meta['height'].numpy()[0] + + output = model(image.cuda()) + upsample = torch.nn.Upsample(size=input_size, mode='bilinear', align_corners=True) + upsample_output = upsample(output[0][-1][0].unsqueeze(0)) + upsample_output = upsample_output.squeeze() + upsample_output = upsample_output.permute(1, 2, 0) # CHW -> HWC + + logits_result = transform_logits(upsample_output.data.cpu().numpy(), c, s, w, h, input_size=input_size) + parsing_result = np.argmax(logits_result, axis=2) + parsing_result_path = os.path.join(args.output_dir, img_name[:-4] + '.png') + output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8)) + output_img.putpalette(palette) + output_img.save(parsing_result_path) + if args.logits: + logits_result_path = os.path.join(args.output_dir, img_name[:-4] + '.npy') + np.save(logits_result_path, logits_result) + return + +if __name__ == '__main__': + main() diff --git a/preprocess/train.py b/preprocess/train.py new file mode 100644 index 0000000000000000000000000000000000000000..c13c5040dae096a6da9d2d468942a19a5b3a3641 --- /dev/null +++ b/preprocess/train.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : train.py +@Time : 8/4/19 3:36 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import json +import timeit +import argparse + +import torch +import torch.optim as optim +import torchvision.transforms as transforms +import torch.backends.cudnn as cudnn +from torch.utils import data + +import networks +import utils.schp as schp +from datasets.datasets import LIPDataSet +from datasets.target_generation import generate_edge_tensor +from utils.transforms import BGR2RGB_transform +from utils.criterion import CriterionAll +from utils.encoding import DataParallelModel, DataParallelCriterion +from utils.warmup_scheduler import SGDRScheduler + + +def get_arguments(): + """Parse all the arguments provided from the CLI. + Returns: + A list of parsed arguments. 
+ """ + parser = argparse.ArgumentParser(description="Self Correction for Human Parsing") + + # Network Structure + parser.add_argument("--arch", type=str, default='resnet101') + # Data Preference + parser.add_argument("--data-dir", type=str, default='./data/LIP') + parser.add_argument("--batch-size", type=int, default=16) + parser.add_argument("--input-size", type=str, default='473,473') + parser.add_argument("--num-classes", type=int, default=20) + parser.add_argument("--ignore-label", type=int, default=255) + parser.add_argument("--random-mirror", action="store_true") + parser.add_argument("--random-scale", action="store_true") + # Training Strategy + parser.add_argument("--learning-rate", type=float, default=7e-3) + parser.add_argument("--momentum", type=float, default=0.9) + parser.add_argument("--weight-decay", type=float, default=5e-4) + parser.add_argument("--gpu", type=str, default='0,1,2') + parser.add_argument("--start-epoch", type=int, default=0) + parser.add_argument("--epochs", type=int, default=150) + parser.add_argument("--eval-epochs", type=int, default=10) + parser.add_argument("--imagenet-pretrain", type=str, default='./pretrain_model/resnet101-imagenet.pth') + parser.add_argument("--log-dir", type=str, default='./log') + parser.add_argument("--model-restore", type=str, default='./log/checkpoint.pth.tar') + parser.add_argument("--schp-start", type=int, default=100, help='schp start epoch') + parser.add_argument("--cycle-epochs", type=int, default=10, help='schp cyclical epoch') + parser.add_argument("--schp-restore", type=str, default='./log/schp_checkpoint.pth.tar') + parser.add_argument("--lambda-s", type=float, default=1, help='segmentation loss weight') + parser.add_argument("--lambda-e", type=float, default=1, help='edge loss weight') + parser.add_argument("--lambda-c", type=float, default=0.1, help='segmentation-edge consistency loss weight') + return parser.parse_args() + + +def main(): + args = get_arguments() + print(args) + + start_epoch = 0 + cycle_n = 0 + + if not os.path.exists(args.log_dir): + os.makedirs(args.log_dir) + with open(os.path.join(args.log_dir, 'args.json'), 'w') as opt_file: + json.dump(vars(args), opt_file) + + gpus = [int(i) for i in args.gpu.split(',')] + if not args.gpu == 'None': + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + input_size = list(map(int, args.input_size.split(','))) + + cudnn.enabled = True + cudnn.benchmark = True + + # Model Initialization + AugmentCE2P = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=args.imagenet_pretrain) + model = DataParallelModel(AugmentCE2P) + model.cuda() + + IMAGE_MEAN = AugmentCE2P.mean + IMAGE_STD = AugmentCE2P.std + INPUT_SPACE = AugmentCE2P.input_space + print('image mean: {}'.format(IMAGE_MEAN)) + print('image std: {}'.format(IMAGE_STD)) + print('input space:{}'.format(INPUT_SPACE)) + + restore_from = args.model_restore + if os.path.exists(restore_from): + print('Resume training from {}'.format(restore_from)) + checkpoint = torch.load(restore_from) + model.load_state_dict(checkpoint['state_dict']) + start_epoch = checkpoint['epoch'] + + SCHP_AugmentCE2P = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=args.imagenet_pretrain) + schp_model = DataParallelModel(SCHP_AugmentCE2P) + schp_model.cuda() + + if os.path.exists(args.schp_restore): + print('Resuming schp checkpoint from {}'.format(args.schp_restore)) + schp_checkpoint = torch.load(args.schp_restore) + schp_model_state_dict = schp_checkpoint['state_dict'] + cycle_n = 
schp_checkpoint['cycle_n'] + schp_model.load_state_dict(schp_model_state_dict) + + # Loss Function + criterion = CriterionAll(lambda_1=args.lambda_s, lambda_2=args.lambda_e, lambda_3=args.lambda_c, + num_classes=args.num_classes) + criterion = DataParallelCriterion(criterion) + criterion.cuda() + + # Data Loader + if INPUT_SPACE == 'BGR': + print('BGR Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + ]) + + elif INPUT_SPACE == 'RGB': + print('RGB Transformation') + transform = transforms.Compose([ + transforms.ToTensor(), + BGR2RGB_transform(), + transforms.Normalize(mean=IMAGE_MEAN, + std=IMAGE_STD), + ]) + + train_dataset = LIPDataSet(args.data_dir, 'train', crop_size=input_size, transform=transform) + train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size * len(gpus), + num_workers=16, shuffle=True, pin_memory=True, drop_last=True) + print('Total training samples: {}'.format(len(train_dataset))) + + # Optimizer Initialization + optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, + weight_decay=args.weight_decay) + + lr_scheduler = SGDRScheduler(optimizer, total_epoch=args.epochs, + eta_min=args.learning_rate / 100, warmup_epoch=10, + start_cyclical=args.schp_start, cyclical_base_lr=args.learning_rate / 2, + cyclical_epoch=args.cycle_epochs) + + total_iters = args.epochs * len(train_loader) + start = timeit.default_timer() + for epoch in range(start_epoch, args.epochs): + lr_scheduler.step(epoch=epoch) + lr = lr_scheduler.get_lr()[0] + + model.train() + for i_iter, batch in enumerate(train_loader): + i_iter += len(train_loader) * epoch + + images, labels, _ = batch + labels = labels.cuda(non_blocking=True) + + edges = generate_edge_tensor(labels) + labels = labels.type(torch.cuda.LongTensor) + edges = edges.type(torch.cuda.LongTensor) + + preds = model(images) + + # Online Self Correction Cycle with Label Refinement + if cycle_n >= 1: + with torch.no_grad(): + soft_preds = schp_model(images) + soft_parsing = [] + soft_edge = [] + for soft_pred in soft_preds: + soft_parsing.append(soft_pred[0][-1]) + soft_edge.append(soft_pred[1][-1]) + soft_preds = torch.cat(soft_parsing, dim=0) + soft_edges = torch.cat(soft_edge, dim=0) + else: + soft_preds = None + soft_edges = None + + loss = criterion(preds, [labels, edges, soft_preds, soft_edges], cycle_n) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + if i_iter % 100 == 0: + print('iter = {} of {} completed, lr = {}, loss = {}'.format(i_iter, total_iters, lr, + loss.data.cpu().numpy())) + if (epoch + 1) % (args.eval_epochs) == 0: + schp.save_schp_checkpoint({ + 'epoch': epoch + 1, + 'state_dict': model.state_dict(), + }, False, args.log_dir, filename='checkpoint_{}.pth.tar'.format(epoch + 1)) + + # Self Correction Cycle with Model Aggregation + if (epoch + 1) >= args.schp_start and (epoch + 1 - args.schp_start) % args.cycle_epochs == 0: + print('Self-correction cycle number {}'.format(cycle_n)) + schp.moving_average(schp_model, model, 1.0 / (cycle_n + 1)) + cycle_n += 1 + schp.bn_re_estimate(train_loader, schp_model) + schp.save_schp_checkpoint({ + 'state_dict': schp_model.state_dict(), + 'cycle_n': cycle_n, + }, False, args.log_dir, filename='schp_{}_checkpoint.pth.tar'.format(cycle_n)) + + torch.cuda.empty_cache() + end = timeit.default_timer() + print('epoch = {} of {} completed using {} s'.format(epoch, args.epochs, + (end - start) / (epoch - start_epoch + 1))) + + end = 
timeit.default_timer() + print('Training Finished in {} seconds'.format(end - start)) + + +if __name__ == '__main__': + main() diff --git a/preprocess/utils/__init__.py b/preprocess/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/preprocess/utils/consistency_loss.py b/preprocess/utils/consistency_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..b872fdcc10ecef02762399278191e48e79ea9a1f --- /dev/null +++ b/preprocess/utils/consistency_loss.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : kl_loss.py +@Time : 7/23/19 4:02 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" +import torch +import torch.nn.functional as F +from torch import nn +from datasets.target_generation import generate_edge_tensor + + +class ConsistencyLoss(nn.Module): + def __init__(self, ignore_index=255): + super(ConsistencyLoss, self).__init__() + self.ignore_index=ignore_index + + def forward(self, parsing, edge, label): + parsing_pre = torch.argmax(parsing, dim=1) + parsing_pre[label==self.ignore_index]=self.ignore_index + generated_edge = generate_edge_tensor(parsing_pre) + edge_pre = torch.argmax(edge, dim=1) + v_generate_edge = generated_edge[label!=255] + v_edge_pre = edge_pre[label!=255] + v_edge_pre = v_edge_pre.type(torch.cuda.FloatTensor) + positive_union = (v_generate_edge==1)&(v_edge_pre==1) # only the positive values count + return F.smooth_l1_loss(v_generate_edge[positive_union].squeeze(0), v_edge_pre[positive_union].squeeze(0)) diff --git a/preprocess/utils/criterion.py b/preprocess/utils/criterion.py new file mode 100644 index 0000000000000000000000000000000000000000..968894319042331482692e42804f103074e4b710 --- /dev/null +++ b/preprocess/utils/criterion.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : criterion.py +@Time : 8/30/19 8:59 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import torch.nn as nn +import torch +import numpy as np +from torch.nn import functional as F +from .lovasz_softmax import LovaszSoftmax +from .kl_loss import KLDivergenceLoss +from .consistency_loss import ConsistencyLoss + +NUM_CLASSES = 20 + + +class CriterionAll(nn.Module): + def __init__(self, use_class_weight=False, ignore_index=255, lambda_1=1, lambda_2=1, lambda_3=1, + num_classes=20): + super(CriterionAll, self).__init__() + self.ignore_index = ignore_index + self.use_class_weight = use_class_weight + self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index) + self.lovasz = LovaszSoftmax(ignore_index=ignore_index) + self.kldiv = KLDivergenceLoss(ignore_index=ignore_index) + self.reg = ConsistencyLoss(ignore_index=ignore_index) + self.lamda_1 = lambda_1 + self.lamda_2 = lambda_2 + self.lamda_3 = lambda_3 + self.num_classes = num_classes + + def parsing_loss(self, preds, target, cycle_n=None): + """ + Loss function definition. + + Args: + preds: [[parsing result1, parsing result2],[edge result]] + target: [parsing label, egde label] + soft_preds: [[parsing result1, parsing result2],[edge result]] + Returns: + Calculated Loss. 
+ """ + h, w = target[0].size(1), target[0].size(2) + + pos_num = torch.sum(target[1] == 1, dtype=torch.float) + neg_num = torch.sum(target[1] == 0, dtype=torch.float) + + weight_pos = neg_num / (pos_num + neg_num) + weight_neg = pos_num / (pos_num + neg_num) + weights = torch.tensor([weight_neg, weight_pos]) # edge loss weight + + loss = 0 + + # loss for segmentation + preds_parsing = preds[0] + for pred_parsing in preds_parsing: + scale_pred = F.interpolate(input=pred_parsing, size=(h, w), + mode='bilinear', align_corners=True) + + loss += 0.5 * self.lamda_1 * self.lovasz(scale_pred, target[0]) + if target[2] is None: + loss += 0.5 * self.lamda_1 * self.criterion(scale_pred, target[0]) + else: + soft_scale_pred = F.interpolate(input=target[2], size=(h, w), + mode='bilinear', align_corners=True) + soft_scale_pred = moving_average(soft_scale_pred, to_one_hot(target[0], num_cls=self.num_classes), + 1.0 / (cycle_n + 1.0)) + loss += 0.5 * self.lamda_1 * self.kldiv(scale_pred, soft_scale_pred, target[0]) + + # loss for edge + preds_edge = preds[1] + for pred_edge in preds_edge: + scale_pred = F.interpolate(input=pred_edge, size=(h, w), + mode='bilinear', align_corners=True) + if target[3] is None: + loss += self.lamda_2 * F.cross_entropy(scale_pred, target[1], + weights.cuda(), ignore_index=self.ignore_index) + else: + soft_scale_edge = F.interpolate(input=target[3], size=(h, w), + mode='bilinear', align_corners=True) + soft_scale_edge = moving_average(soft_scale_edge, to_one_hot(target[1], num_cls=2), + 1.0 / (cycle_n + 1.0)) + loss += self.lamda_2 * self.kldiv(scale_pred, soft_scale_edge, target[0]) + + # consistency regularization + preds_parsing = preds[0] + preds_edge = preds[1] + for pred_parsing in preds_parsing: + scale_pred = F.interpolate(input=pred_parsing, size=(h, w), + mode='bilinear', align_corners=True) + scale_edge = F.interpolate(input=preds_edge[0], size=(h, w), + mode='bilinear', align_corners=True) + loss += self.lamda_3 * self.reg(scale_pred, scale_edge, target[0]) + + return loss + + def forward(self, preds, target, cycle_n=None): + loss = self.parsing_loss(preds, target, cycle_n) + return loss + + def _generate_weights(self, masks, num_classes): + """ + masks: torch.Tensor with shape [B, H, W] + """ + masks_label = masks.data.cpu().numpy().astype(np.int64) + pixel_nums = [] + tot_pixels = 0 + for i in range(num_classes): + pixel_num_of_cls_i = np.sum(masks_label == i).astype(np.float) + pixel_nums.append(pixel_num_of_cls_i) + tot_pixels += pixel_num_of_cls_i + weights = [] + for i in range(num_classes): + weights.append( + (tot_pixels - pixel_nums[i]) / tot_pixels / (num_classes - 1) + ) + weights = np.array(weights, dtype=np.float) + # weights = torch.from_numpy(weights).float().to(masks.device) + return weights + + +def moving_average(target1, target2, alpha=1.0): + target = 0 + target += (1.0 - alpha) * target1 + target += target2 * alpha + return target + + +def to_one_hot(tensor, num_cls, dim=1, ignore_index=255): + b, h, w = tensor.shape + tensor[tensor == ignore_index] = 0 + onehot_tensor = torch.zeros(b, num_cls, h, w).cuda() + onehot_tensor.scatter_(dim, tensor.unsqueeze(dim), 1) + return onehot_tensor diff --git a/preprocess/utils/encoding.py b/preprocess/utils/encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..e8654706c345e8a13219f2c8e4cfa7700f531612 --- /dev/null +++ b/preprocess/utils/encoding.py @@ -0,0 +1,188 @@ +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +## Created by: Hang Zhang +## ECE 
Department, Rutgers University +## Email: zhang.hang@rutgers.edu +## Copyright (c) 2017 +## +## This source code is licensed under the MIT-style license found in the +## LICENSE file in the root directory of this source tree +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +"""Encoding Data Parallel""" +import threading +import functools +import torch +from torch.autograd import Variable, Function +import torch.cuda.comm as comm +from torch.nn.parallel.data_parallel import DataParallel +from torch.nn.parallel.parallel_apply import get_a_var +from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast + +torch_ver = torch.__version__[:3] + +__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion', 'patch_replication_callback'] + +def allreduce(*inputs): + """Cross GPU all reduce autograd operation for calculate mean and + variance in SyncBN. + """ + return AllReduce.apply(*inputs) + +class AllReduce(Function): + @staticmethod + def forward(ctx, num_inputs, *inputs): + ctx.num_inputs = num_inputs + ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)] + inputs = [inputs[i:i + num_inputs] + for i in range(0, len(inputs), num_inputs)] + # sort before reduce sum + inputs = sorted(inputs, key=lambda i: i[0].get_device()) + results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0]) + outputs = comm.broadcast_coalesced(results, ctx.target_gpus) + return tuple([t for tensors in outputs for t in tensors]) + + @staticmethod + def backward(ctx, *inputs): + inputs = [i.data for i in inputs] + inputs = [inputs[i:i + ctx.num_inputs] + for i in range(0, len(inputs), ctx.num_inputs)] + results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0]) + outputs = comm.broadcast_coalesced(results, ctx.target_gpus) + return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors]) + +class Reduce(Function): + @staticmethod + def forward(ctx, *inputs): + ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))] + inputs = sorted(inputs, key=lambda i: i.get_device()) + return comm.reduce_add(inputs) + + @staticmethod + def backward(ctx, gradOutput): + return Broadcast.apply(ctx.target_gpus, gradOutput) + + +class DataParallelModel(DataParallel): + """Implements data parallelism at the module level. + + This container parallelizes the application of the given module by + splitting the input across the specified devices by chunking in the + batch dimension. + In the forward pass, the module is replicated on each device, + and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module. + Note that the outputs are not gathered, please use compatible + :class:`encoding.parallel.DataParallelCriterion`. + + The batch size should be larger than the number of GPUs used. It should + also be an integer multiple of the number of GPUs so that each chunk is + the same size (so that each GPU processes the same number of samples). + + Args: + module: module to be parallelized + device_ids: CUDA devices (default: all devices) + + Reference: + Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, + Amit Agrawal. โ€œContext Encoding for Semantic Segmentation. 
+ *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018* + + Example:: + + >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2]) + >>> y = net(x) + """ + def gather(self, outputs, output_device): + return outputs + + def replicate(self, module, device_ids): + modules = super(DataParallelModel, self).replicate(module, device_ids) + return modules + + +class DataParallelCriterion(DataParallel): + """ + Calculate loss in multiple-GPUs, which balance the memory usage for + Semantic Segmentation. + + The targets are splitted across the specified devices by chunking in + the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`. + + Reference: + Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, + Amit Agrawal. โ€œContext Encoding for Semantic Segmentation. + *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018* + + Example:: + + >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2]) + >>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2]) + >>> y = net(x) + >>> loss = criterion(y, target) + """ + def forward(self, inputs, *targets, **kwargs): + # input should be already scatterd + # scattering the targets instead + if not self.device_ids: + return self.module(inputs, *targets, **kwargs) + targets, kwargs = self.scatter(targets, kwargs, self.device_ids) + if len(self.device_ids) == 1: + return self.module(inputs, *targets[0], **kwargs[0]) + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs) + return Reduce.apply(*outputs) / len(outputs) + + +def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None): + assert len(modules) == len(inputs) + assert len(targets) == len(inputs) + if kwargs_tup: + assert len(modules) == len(kwargs_tup) + else: + kwargs_tup = ({},) * len(modules) + if devices is not None: + assert len(modules) == len(devices) + else: + devices = [None] * len(modules) + + lock = threading.Lock() + results = {} + if torch_ver != "0.3": + grad_enabled = torch.is_grad_enabled() + + def _worker(i, module, input, target, kwargs, device=None): + if torch_ver != "0.3": + torch.set_grad_enabled(grad_enabled) + if device is None: + device = get_a_var(input).get_device() + try: + if not isinstance(input, tuple): + input = (input,) + with torch.cuda.device(device): + output = module(*(input + target), **kwargs) + with lock: + results[i] = output + except Exception as e: + with lock: + results[i] = e + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, + args=(i, module, input, target, + kwargs, device),) + for i, (module, input, target, kwargs, device) in + enumerate(zip(modules, inputs, targets, kwargs_tup, devices))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, Exception): + raise output + outputs.append(output) + return outputs diff --git a/preprocess/utils/kl_loss.py b/preprocess/utils/kl_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..9a685d945fb852a81324513ae55498857f1a4552 --- /dev/null +++ b/preprocess/utils/kl_loss.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : 
kl_loss.py +@Time : 7/23/19 4:02 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" +import torch +import torch.nn.functional as F +from torch import nn + + +def flatten_probas(input, target, labels, ignore=255): + """ + Flattens predictions in the batch. + """ + B, C, H, W = input.size() + input = input.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + target = target.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + labels = labels.view(-1) + if ignore is None: + return input, target + valid = (labels != ignore) + vinput = input[valid.nonzero().squeeze()] + vtarget = target[valid.nonzero().squeeze()] + return vinput, vtarget + + +class KLDivergenceLoss(nn.Module): + def __init__(self, ignore_index=255, T=1): + super(KLDivergenceLoss, self).__init__() + self.ignore_index=ignore_index + self.T = T + + def forward(self, input, target, label): + log_input_prob = F.log_softmax(input / self.T, dim=1) + target_porb = F.softmax(target / self.T, dim=1) + loss = F.kl_div(*flatten_probas(log_input_prob, target_porb, label, ignore=self.ignore_index)) + return self.T*self.T*loss # balanced diff --git a/preprocess/utils/lovasz_softmax.py b/preprocess/utils/lovasz_softmax.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e444f684c0d9bda9d7c2d54a4e79fac0ddf081 --- /dev/null +++ b/preprocess/utils/lovasz_softmax.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : lovasz_softmax.py +@Time : 8/30/19 7:12 PM +@Desc : Lovasz-Softmax and Jaccard hinge loss in PyTorch + Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License) +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +from __future__ import print_function, division + +import torch +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np +from torch import nn + +try: + from itertools import ifilterfalse +except ImportError: # py3k + from itertools import filterfalse as ifilterfalse + + +def lovasz_grad(gt_sorted): + """ + Computes gradient of the Lovasz extension w.r.t sorted errors + See Alg. 1 in paper + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. 
- intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True): + """ + IoU for foreground class + binary: 1 foreground, 0 background + """ + if not per_image: + preds, labels = (preds,), (labels,) + ious = [] + for pred, label in zip(preds, labels): + intersection = ((label == 1) & (pred == 1)).sum() + union = ((label == 1) | ((pred == 1) & (label != ignore))).sum() + if not union: + iou = EMPTY + else: + iou = float(intersection) / float(union) + ious.append(iou) + iou = mean(ious) # mean accross images if per_image + return 100 * iou + + +def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False): + """ + Array of IoU for each (non ignored) class + """ + if not per_image: + preds, labels = (preds,), (labels,) + ious = [] + for pred, label in zip(preds, labels): + iou = [] + for i in range(C): + if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes) + intersection = ((label == i) & (pred == i)).sum() + union = ((label == i) | ((pred == i) & (label != ignore))).sum() + if not union: + iou.append(EMPTY) + else: + iou.append(float(intersection) / float(union)) + ious.append(iou) + ious = [mean(iou) for iou in zip(*ious)] # mean accross images if per_image + return 100 * np.array(ious) + + +# --------------------------- BINARY LOSSES --------------------------- + + +def lovasz_hinge(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) + for log, lab in zip(logits, labels)) + else: + loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. 
- logits * Variable(signs)) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), Variable(grad)) + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + scores = scores.view(-1) + labels = labels.view(-1) + if ignore is None: + return scores, labels + valid = (labels != ignore) + vscores = scores[valid] + vlabels = labels[valid] + return vscores, vlabels + + +class StableBCELoss(torch.nn.modules.Module): + def __init__(self): + super(StableBCELoss, self).__init__() + + def forward(self, input, target): + neg_abs = - input.abs() + loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() + return loss.mean() + + +def binary_xloss(logits, labels, ignore=None): + """ + Binary Cross entropy loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + ignore: void class id + """ + logits, labels = flatten_binary_scores(logits, labels, ignore) + loss = StableBCELoss()(logits, Variable(labels.float())) + return loss + + +# --------------------------- MULTICLASS LOSSES --------------------------- + + +def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=255, weighted=None): + """ + Multi-class Lovasz-Softmax loss + probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). + Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. + labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) + classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. + per_image: compute the loss per image instead of per batch + ignore: void class labels + """ + if per_image: + loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes, weighted=weighted) + for prob, lab in zip(probas, labels)) + else: + loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes, weighted=weighted ) + return loss + + +def lovasz_softmax_flat(probas, labels, classes='present', weighted=None): + """ + Multi-class Lovasz-Softmax loss + probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) + labels: [P] Tensor, ground truth labels (between 0 and C - 1) + classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. + """ + if probas.numel() == 0: + # only void pixels, the gradients should be 0 + return probas * 0. 
+ C = probas.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + fg = (labels == c).float() # foreground for class c + if (classes is 'present' and fg.sum() == 0): + continue + if C == 1: + if len(classes) > 1: + raise ValueError('Sigmoid output possible only with 1 class') + class_pred = probas[:, 0] + else: + class_pred = probas[:, c] + errors = (Variable(fg) - class_pred).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + if weighted is not None: + losses.append(weighted[c]*torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) + else: + losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) + return mean(losses) + + +def flatten_probas(probas, labels, ignore=None): + """ + Flattens predictions in the batch + """ + if probas.dim() == 3: + # assumes output of a sigmoid layer + B, H, W = probas.size() + probas = probas.view(B, 1, H, W) + B, C, H, W = probas.size() + probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + labels = labels.view(-1) + if ignore is None: + return probas, labels + valid = (labels != ignore) + vprobas = probas[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobas, vlabels + + +def xloss(logits, labels, ignore=None): + """ + Cross entropy loss + """ + return F.cross_entropy(logits, Variable(labels), ignore_index=255) + + +# --------------------------- HELPER FUNCTIONS --------------------------- +def isnan(x): + return x != x + + +def mean(l, ignore_nan=False, empty=0): + """ + nanmean compatible with generators. + """ + l = iter(l) + if ignore_nan: + l = ifilterfalse(isnan, l) + try: + n = 1 + acc = next(l) + except StopIteration: + if empty == 'raise': + raise ValueError('Empty mean') + return empty + for n, v in enumerate(l, 2): + acc += v + if n == 1: + return acc + return acc / n + +# --------------------------- Class --------------------------- +class LovaszSoftmax(nn.Module): + def __init__(self, per_image=False, ignore_index=255, weighted=None): + super(LovaszSoftmax, self).__init__() + self.lovasz_softmax = lovasz_softmax + self.per_image = per_image + self.ignore_index=ignore_index + self.weighted = weighted + + def forward(self, pred, label): + pred = F.softmax(pred, dim=1) + return self.lovasz_softmax(pred, label, per_image=self.per_image, ignore=self.ignore_index, weighted=self.weighted) \ No newline at end of file diff --git a/preprocess/utils/miou.py b/preprocess/utils/miou.py new file mode 100644 index 0000000000000000000000000000000000000000..51a2cc965a5c0cfd5497c9191906898da31485dd --- /dev/null +++ b/preprocess/utils/miou.py @@ -0,0 +1,155 @@ +import cv2 +import os +import numpy as np + +from collections import OrderedDict +from PIL import Image as PILImage +from utils.transforms import transform_parsing + +LABELS = ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', \ + 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', 'Left-leg', + 'Right-leg', 'Left-shoe', 'Right-shoe'] + + +# LABELS = ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'] + +def get_palette(num_cls): + """ Returns the color map for visualizing the segmentation mask. 
+ Args: + num_cls: Number of classes + Returns: + The color map + """ + + n = num_cls + palette = [0] * (n * 3) + for j in range(0, n): + lab = j + palette[j * 3 + 0] = 0 + palette[j * 3 + 1] = 0 + palette[j * 3 + 2] = 0 + i = 0 + while lab: + palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) + palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) + palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + i += 1 + lab >>= 3 + return palette + + +def get_confusion_matrix(gt_label, pred_label, num_classes): + """ + Calcute the confusion matrix by given label and pred + :param gt_label: the ground truth label + :param pred_label: the pred label + :param num_classes: the nunber of class + :return: the confusion matrix + """ + index = (gt_label * num_classes + pred_label).astype('int32') + label_count = np.bincount(index) + confusion_matrix = np.zeros((num_classes, num_classes)) + + for i_label in range(num_classes): + for i_pred_label in range(num_classes): + cur_index = i_label * num_classes + i_pred_label + if cur_index < len(label_count): + confusion_matrix[i_label, i_pred_label] = label_count[cur_index] + + return confusion_matrix + + +def compute_mean_ioU(preds, scales, centers, num_classes, datadir, input_size=[473, 473], dataset='val'): + val_file = os.path.join(datadir, dataset + '_id.txt') + val_id = [i_id.strip() for i_id in open(val_file)] + + confusion_matrix = np.zeros((num_classes, num_classes)) + + for i, pred_out in enumerate(preds): + im_name = val_id[i] + gt_path = os.path.join(datadir, dataset + '_segmentations', im_name + '.png') + gt = np.array(PILImage.open(gt_path)) + h, w = gt.shape + s = scales[i] + c = centers[i] + pred = transform_parsing(pred_out, c, s, w, h, input_size) + + gt = np.asarray(gt, dtype=np.int32) + pred = np.asarray(pred, dtype=np.int32) + + ignore_index = gt != 255 + + gt = gt[ignore_index] + pred = pred[ignore_index] + + confusion_matrix += get_confusion_matrix(gt, pred, num_classes) + + pos = confusion_matrix.sum(1) + res = confusion_matrix.sum(0) + tp = np.diag(confusion_matrix) + + pixel_accuracy = (tp.sum() / pos.sum()) * 100 + mean_accuracy = ((tp / np.maximum(1.0, pos)).mean()) * 100 + IoU_array = (tp / np.maximum(1.0, pos + res - tp)) + IoU_array = IoU_array * 100 + mean_IoU = IoU_array.mean() + print('Pixel accuracy: %f \n' % pixel_accuracy) + print('Mean accuracy: %f \n' % mean_accuracy) + print('Mean IU: %f \n' % mean_IoU) + name_value = [] + + for i, (label, iou) in enumerate(zip(LABELS, IoU_array)): + name_value.append((label, iou)) + + name_value.append(('Pixel accuracy', pixel_accuracy)) + name_value.append(('Mean accuracy', mean_accuracy)) + name_value.append(('Mean IU', mean_IoU)) + name_value = OrderedDict(name_value) + return name_value + + +def compute_mean_ioU_file(preds_dir, num_classes, datadir, dataset='val'): + list_path = os.path.join(datadir, dataset + '_id.txt') + val_id = [i_id.strip() for i_id in open(list_path)] + + confusion_matrix = np.zeros((num_classes, num_classes)) + + for i, im_name in enumerate(val_id): + gt_path = os.path.join(datadir, 'segmentations', im_name + '.png') + gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE) + + pred_path = os.path.join(preds_dir, im_name + '.png') + pred = np.asarray(PILImage.open(pred_path)) + + gt = np.asarray(gt, dtype=np.int32) + pred = np.asarray(pred, dtype=np.int32) + + ignore_index = gt != 255 + + gt = gt[ignore_index] + pred = pred[ignore_index] + + confusion_matrix += get_confusion_matrix(gt, pred, num_classes) + + pos = confusion_matrix.sum(1) + res = confusion_matrix.sum(0) + 
tp = np.diag(confusion_matrix) + + pixel_accuracy = (tp.sum() / pos.sum()) * 100 + mean_accuracy = ((tp / np.maximum(1.0, pos)).mean()) * 100 + IoU_array = (tp / np.maximum(1.0, pos + res - tp)) + IoU_array = IoU_array * 100 + mean_IoU = IoU_array.mean() + print('Pixel accuracy: %f \n' % pixel_accuracy) + print('Mean accuracy: %f \n' % mean_accuracy) + print('Mean IU: %f \n' % mean_IoU) + name_value = [] + + for i, (label, iou) in enumerate(zip(LABELS, IoU_array)): + name_value.append((label, iou)) + + name_value.append(('Pixel accuracy', pixel_accuracy)) + name_value.append(('Mean accuracy', mean_accuracy)) + name_value.append(('Mean IU', mean_IoU)) + name_value = OrderedDict(name_value) + return name_value diff --git a/preprocess/utils/schp.py b/preprocess/utils/schp.py new file mode 100644 index 0000000000000000000000000000000000000000..f57470452fac8183dc5c17156439416c15bd3265 --- /dev/null +++ b/preprocess/utils/schp.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : schp.py +@Time : 4/8/19 2:11 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import os +import torch +import modules + +def moving_average(net1, net2, alpha=1): + for param1, param2 in zip(net1.parameters(), net2.parameters()): + param1.data *= (1.0 - alpha) + param1.data += param2.data * alpha + + +def _check_bn(module, flag): + if issubclass(module.__class__, modules.bn.InPlaceABNSync): + flag[0] = True + + +def check_bn(model): + flag = [False] + model.apply(lambda module: _check_bn(module, flag)) + return flag[0] + + +def reset_bn(module): + if issubclass(module.__class__, modules.bn.InPlaceABNSync): + module.running_mean = torch.zeros_like(module.running_mean) + module.running_var = torch.ones_like(module.running_var) + + +def _get_momenta(module, momenta): + if issubclass(module.__class__, modules.bn.InPlaceABNSync): + momenta[module] = module.momentum + + +def _set_momenta(module, momenta): + if issubclass(module.__class__, modules.bn.InPlaceABNSync): + module.momentum = momenta[module] + + +def bn_re_estimate(loader, model): + if not check_bn(model): + print('No batch norm layer detected') + return + model.train() + momenta = {} + model.apply(reset_bn) + model.apply(lambda module: _get_momenta(module, momenta)) + n = 0 + for i_iter, batch in enumerate(loader): + images, labels, _ = batch + b = images.data.size(0) + momentum = b / (n + b) + for module in momenta.keys(): + module.momentum = momentum + model(images) + n += b + model.apply(lambda module: _set_momenta(module, momenta)) + + +def save_schp_checkpoint(states, is_best_parsing, output_dir, filename='schp_checkpoint.pth.tar'): + save_path = os.path.join(output_dir, filename) + if os.path.exists(save_path): + os.remove(save_path) + torch.save(states, save_path) + if is_best_parsing and 'state_dict' in states: + best_save_path = os.path.join(output_dir, 'model_parsing_best.pth.tar') + if os.path.exists(best_save_path): + os.remove(best_save_path) + torch.save(states, best_save_path) diff --git a/preprocess/utils/soft_dice_loss.py b/preprocess/utils/soft_dice_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..cb5895fd37467d36f213f941d1b01d6d6f7f194c --- /dev/null +++ b/preprocess/utils/soft_dice_loss.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : 
soft_dice_loss.py +@Time : 8/13/19 5:09 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +from __future__ import print_function, division + +import torch +import torch.nn.functional as F +from torch import nn + +try: + from itertools import ifilterfalse +except ImportError: # py3k + from itertools import filterfalse as ifilterfalse + + +def tversky_loss(probas, labels, alpha=0.5, beta=0.5, epsilon=1e-6): + ''' + Tversky loss function. + probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) + labels: [P] Tensor, ground truth labels (between 0 and C - 1) + + Same as soft dice loss when alpha=beta=0.5. + Same as Jaccord loss when alpha=beta=1.0. + See `Tversky loss function for image segmentation using 3D fully convolutional deep networks` + https://arxiv.org/pdf/1706.05721.pdf + ''' + C = probas.size(1) + losses = [] + for c in list(range(C)): + fg = (labels == c).float() + if fg.sum() == 0: + continue + class_pred = probas[:, c] + p0 = class_pred + p1 = 1 - class_pred + g0 = fg + g1 = 1 - fg + numerator = torch.sum(p0 * g0) + denominator = numerator + alpha * torch.sum(p0 * g1) + beta * torch.sum(p1 * g0) + losses.append(1 - ((numerator) / (denominator + epsilon))) + return mean(losses) + + +def flatten_probas(probas, labels, ignore=255): + """ + Flattens predictions in the batch + """ + B, C, H, W = probas.size() + probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + labels = labels.view(-1) + if ignore is None: + return probas, labels + valid = (labels != ignore) + vprobas = probas[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobas, vlabels + + +def isnan(x): + return x != x + + +def mean(l, ignore_nan=False, empty=0): + """ + nanmean compatible with generators. + """ + l = iter(l) + if ignore_nan: + l = ifilterfalse(isnan, l) + try: + n = 1 + acc = next(l) + except StopIteration: + if empty == 'raise': + raise ValueError('Empty mean') + return empty + for n, v in enumerate(l, 2): + acc += v + if n == 1: + return acc + return acc / n + + +class SoftDiceLoss(nn.Module): + def __init__(self, ignore_index=255): + super(SoftDiceLoss, self).__init__() + self.ignore_index = ignore_index + + def forward(self, pred, label): + pred = F.softmax(pred, dim=1) + return tversky_loss(*flatten_probas(pred, label, ignore=self.ignore_index), alpha=0.5, beta=0.5) + + +class SoftJaccordLoss(nn.Module): + def __init__(self, ignore_index=255): + super(SoftJaccordLoss, self).__init__() + self.ignore_index = ignore_index + + def forward(self, pred, label): + pred = F.softmax(pred, dim=1) + return tversky_loss(*flatten_probas(pred, label, ignore=self.ignore_index), alpha=1.0, beta=1.0) diff --git a/preprocess/utils/transforms.py b/preprocess/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..1442a728938ca19fcb4ac21ae6588266df45631c --- /dev/null +++ b/preprocess/utils/transforms.py @@ -0,0 +1,167 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. 
+# Written by Bin Xiao (Bin.Xiao@microsoft.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import cv2 +import torch + +class BRG2Tensor_transform(object): + def __call__(self, pic): + img = torch.from_numpy(pic.transpose((2, 0, 1))) + if isinstance(img, torch.ByteTensor): + return img.float() + else: + return img + +class BGR2RGB_transform(object): + def __call__(self, tensor): + return tensor[[2,1,0],:,:] + +def flip_back(output_flipped, matched_parts): + ''' + ouput_flipped: numpy.ndarray(batch_size, num_joints, height, width) + ''' + assert output_flipped.ndim == 4,\ + 'output_flipped should be [batch_size, num_joints, height, width]' + + output_flipped = output_flipped[:, :, :, ::-1] + + for pair in matched_parts: + tmp = output_flipped[:, pair[0], :, :].copy() + output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :] + output_flipped[:, pair[1], :, :] = tmp + + return output_flipped + + +def fliplr_joints(joints, joints_vis, width, matched_parts): + """ + flip coords + """ + # Flip horizontal + joints[:, 0] = width - joints[:, 0] - 1 + + # Change left-right parts + for pair in matched_parts: + joints[pair[0], :], joints[pair[1], :] = \ + joints[pair[1], :], joints[pair[0], :].copy() + joints_vis[pair[0], :], joints_vis[pair[1], :] = \ + joints_vis[pair[1], :], joints_vis[pair[0], :].copy() + + return joints*joints_vis, joints_vis + + +def transform_preds(coords, center, scale, input_size): + target_coords = np.zeros(coords.shape) + trans = get_affine_transform(center, scale, 0, input_size, inv=1) + for p in range(coords.shape[0]): + target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) + return target_coords + +def transform_parsing(pred, center, scale, width, height, input_size): + + trans = get_affine_transform(center, scale, 0, input_size, inv=1) + target_pred = cv2.warpAffine( + pred, + trans, + (int(width), int(height)), #(int(width), int(height)), + flags=cv2.INTER_NEAREST, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0)) + + return target_pred + +def transform_logits(logits, center, scale, width, height, input_size): + + trans = get_affine_transform(center, scale, 0, input_size, inv=1) + channel = logits.shape[2] + target_logits = [] + for i in range(channel): + target_logit = cv2.warpAffine( + logits[:,:,i], + trans, + (int(width), int(height)), #(int(width), int(height)), + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=(0)) + target_logits.append(target_logit) + target_logits = np.stack(target_logits,axis=2) + + return target_logits + + +def get_affine_transform(center, + scale, + rot, + output_size, + shift=np.array([0, 0], dtype=np.float32), + inv=0): + if not isinstance(scale, np.ndarray) and not isinstance(scale, list): + print(scale) + scale = np.array([scale, scale]) + + scale_tmp = scale + + src_w = scale_tmp[0] + dst_w = output_size[1] + dst_h = output_size[0] + + rot_rad = np.pi * rot / 180 + src_dir = get_dir([0, src_w * -0.5], rot_rad) + dst_dir = np.array([0, (dst_w-1) * -0.5], np.float32) + + src = np.zeros((3, 2), dtype=np.float32) + dst = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale_tmp * shift + src[1, :] = center + src_dir + scale_tmp * shift + dst[0, :] = [(dst_w-1) * 0.5, (dst_h-1) * 0.5] + dst[1, :] = np.array([(dst_w-1) * 0.5, (dst_h-1) * 0.5]) + dst_dir + + src[2:, :] = get_3rd_point(src[0, :], src[1, :]) + 
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return trans + + +def affine_transform(pt, t): + new_pt = np.array([pt[0], pt[1], 1.]).T + new_pt = np.dot(t, new_pt) + return new_pt[:2] + + +def get_3rd_point(a, b): + direct = a - b + return b + np.array([-direct[1], direct[0]], dtype=np.float32) + + +def get_dir(src_point, rot_rad): + sn, cs = np.sin(rot_rad), np.cos(rot_rad) + + src_result = [0, 0] + src_result[0] = src_point[0] * cs - src_point[1] * sn + src_result[1] = src_point[0] * sn + src_point[1] * cs + + return src_result + + +def crop(img, center, scale, output_size, rot=0): + trans = get_affine_transform(center, scale, rot, output_size) + + dst_img = cv2.warpAffine(img, + trans, + (int(output_size[1]), int(output_size[0])), + flags=cv2.INTER_LINEAR) + + return dst_img diff --git a/preprocess/utils/warmup_scheduler.py b/preprocess/utils/warmup_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..2528a9c598d5ee3477d60e2f8591ec37e8afb41d --- /dev/null +++ b/preprocess/utils/warmup_scheduler.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +""" +@Author : Peike Li +@Contact : peike.li@yahoo.com +@File : warmup_scheduler.py +@Time : 3/28/19 2:24 PM +@Desc : +@License : This source code is licensed under the license found in the + LICENSE file in the root directory of this source tree. +""" + +import math +from torch.optim.lr_scheduler import _LRScheduler + + +class GradualWarmupScheduler(_LRScheduler): + """ Gradually warm-up learning rate with cosine annealing in optimizer. + Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'. + """ + + def __init__(self, optimizer, total_epoch, eta_min=0, warmup_epoch=10, last_epoch=-1): + self.total_epoch = total_epoch + self.eta_min = eta_min + self.warmup_epoch = warmup_epoch + super(GradualWarmupScheduler, self).__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch <= self.warmup_epoch: + return [self.eta_min + self.last_epoch*(base_lr - self.eta_min)/self.warmup_epoch for base_lr in self.base_lrs] + else: + return [self.eta_min + (base_lr-self.eta_min)*(1+math.cos(math.pi*(self.last_epoch-self.warmup_epoch)/(self.total_epoch-self.warmup_epoch))) / 2 for base_lr in self.base_lrs] + + +class SGDRScheduler(_LRScheduler): + """ Consine annealing with warm up and restarts. + Proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts`. 
+ """ + def __init__(self, optimizer, total_epoch=150, start_cyclical=100, cyclical_base_lr=7e-4, cyclical_epoch=10, eta_min=0, warmup_epoch=10, last_epoch=-1): + self.total_epoch = total_epoch + self.start_cyclical = start_cyclical + self.cyclical_epoch = cyclical_epoch + self.cyclical_base_lr = cyclical_base_lr + self.eta_min = eta_min + self.warmup_epoch = warmup_epoch + super(SGDRScheduler, self).__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch < self.warmup_epoch: + return [self.eta_min + self.last_epoch*(base_lr - self.eta_min)/self.warmup_epoch for base_lr in self.base_lrs] + elif self.last_epoch < self.start_cyclical: + return [self.eta_min + (base_lr-self.eta_min)*(1+math.cos(math.pi*(self.last_epoch-self.warmup_epoch)/(self.start_cyclical-self.warmup_epoch))) / 2 for base_lr in self.base_lrs] + else: + return [self.eta_min + (self.cyclical_base_lr-self.eta_min)*(1+math.cos(math.pi* ((self.last_epoch-self.start_cyclical)% self.cyclical_epoch)/self.cyclical_epoch)) / 2 for base_lr in self.base_lrs] + + +if __name__ == '__main__': + import matplotlib.pyplot as plt + import torch + model = torch.nn.Linear(10, 2) + optimizer = torch.optim.SGD(params=model.parameters(), lr=7e-3, momentum=0.9, weight_decay=5e-4) + scheduler_warmup = SGDRScheduler(optimizer, total_epoch=150, eta_min=7e-5, warmup_epoch=10, start_cyclical=100, cyclical_base_lr=3.5e-3, cyclical_epoch=10) + lr = [] + for epoch in range(0,150): + scheduler_warmup.step(epoch) + lr.append(scheduler_warmup.get_lr()) + plt.style.use('ggplot') + plt.plot(list(range(0,150)), lr) + plt.show() +